Merge branch 'vitessio:master' into am_find_all_shards_in_keyspace

Signed-off-by: Andrew Mason <amason@slack-corp.com>
Andrew Mason 2020-12-17 16:15:32 -05:00
Parents 0d14b7edb2 55bfc0041a
Commit eae87a73e0
109 changed files: 9018 additions and 6149 deletions

View File

@ -1135,7 +1135,7 @@ The scoping rule works in our favor here because this type of name generation co
For the sake of simplicity, we'll not allow dependencies to use non-standard column names. For example, we'll fail a query like this:
`select * from (select a, count(*) from t1) t where t.`count(*)` > 0`
`select * from (select a, count(*) from t1) t where t.count(*) > 0`
This should instead be rewritten as:
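The rewritten form falls outside this hunk. A plausible illustration, assuming the fix is simply to give the aggregate a standard column name via an alias (the alias `cnt` is hypothetical, not from the commit), would be:
`select * from (select a, count(*) as cnt from t1) t where t.cnt > 0`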

View File

@ -34,7 +34,7 @@ FROM registry.access.redhat.com/ubi7/ubi:latest
# Install keys and dependencies
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install --setopt=alwaysprompt=no gnupg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 \
|| gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \
&& gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \
@ -47,15 +47,10 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rpmkeys --checksig /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \
&& rpm -Uvh /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \
&& rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm
RUN curl -L --retry-delay 10 --retry 3 -o /tmp/libev.rpm http://mirror.centos.org/centos/7/extras/x86_64/Packages/libev-4.15-7.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/gperf.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/gperftools-libs-2.6.1-1.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/numactl.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/numactl-libs-2.0.12-5.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/sysstat.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/sysstat-10.1.5-19.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/strace.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/strace-4.24-4.el7.x86_64.rpm \
&& rpm -i --nodeps /tmp/libev.rpm /tmp/gperf.rpm /tmp/numactl.rpm /tmp/sysstat.rpm /tmp/strace.rpm \
&& rm -f /tmp/libev.rpm /tmp/gperf.rpm /tmp/numactl.rpm /tmp/sysstat.rpm /tmp/strace.rpm
RUN yum update \
&& yum install --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace
RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \
# Can't use alwaysprompt=no here, since we need to pick up deps
# No way to separate key imports and accept deps separately in yum/dnf
@ -63,7 +58,7 @@ RUN yum update \
mysql-community-client mysql-community-server \
# Have to use hacks to ignore conflicts on /etc/my.cnf install
&& mkdir -p /tmp/1 \
&& yum install --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql57-community --disablerepo mysql80-community percona-xtrabackup-24 \
&& yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql57-community --disablerepo mysql80-community percona-xtrabackup-24 \
&& rpm -Uvh --replacefiles /tmp/1/*rpm \
&& rm -rf /tmp/1 \
&& yum clean all \

View File

@ -34,7 +34,7 @@ FROM registry.access.redhat.com/ubi7/ubi:latest
# Install keys and dependencies
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install --setopt=alwaysprompt=no gnupg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 \
|| gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \
&& gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \
@ -47,15 +47,10 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rpmkeys --checksig /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \
&& rpm -Uvh /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \
&& rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm
RUN curl -L --retry-delay 10 --retry 3 -o /tmp/libev.rpm http://mirror.centos.org/centos/7/extras/x86_64/Packages/libev-4.15-7.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/gperf.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/gperftools-libs-2.6.1-1.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/numactl.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/numactl-libs-2.0.12-5.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/sysstat.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/sysstat-10.1.5-19.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/strace.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/strace-4.24-4.el7.x86_64.rpm \
&& rpm -i --nodeps /tmp/libev.rpm /tmp/gperf.rpm /tmp/numactl.rpm /tmp/sysstat.rpm /tmp/strace.rpm \
&& rm -f /tmp/libev.rpm /tmp/gperf.rpm /tmp/numactl.rpm /tmp/sysstat.rpm /tmp/strace.rpm
RUN yum update \
&& yum install --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace
RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \
# Can't use alwaysprompt=no here, since we need to pick up deps
# No way to separate key imports and accept deps separately in yum/dnf
@ -63,7 +58,7 @@ RUN yum update \
mysql-community-client mysql-community-server \
# Have to use hacks to ignore conflicts on /etc/my.cnf install
&& mkdir -p /tmp/1 \
&& yum install --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community --disablerepo mysql57-community percona-xtrabackup-80 \
&& yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community --disablerepo mysql57-community percona-xtrabackup-80 \
&& rpm -Uvh --replacefiles /tmp/1/*rpm \
&& rm -rf /tmp/1 \
&& yum clean all \

View File

@ -34,7 +34,7 @@ FROM registry.access.redhat.com/ubi7/ubi:latest
# Install keys and dependencies
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install --setopt=alwaysprompt=no gnupg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 \
|| gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 ) \
&& gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \
@ -45,15 +45,10 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rpmkeys --checksig /tmp/perconarepo.rpm \
&& rpm -Uvh /tmp/perconarepo.rpm \
&& rm -f /tmp/perconarepo.rpm
RUN curl -L --retry-delay 10 --retry 3 -o /tmp/libev.rpm http://mirror.centos.org/centos/7/extras/x86_64/Packages/libev-4.15-7.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/gperf.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/gperftools-libs-2.6.1-1.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/numactl.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/numactl-libs-2.0.12-5.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/sysstat.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/sysstat-10.1.5-19.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/strace.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/strace-4.24-4.el7.x86_64.rpm \
&& rpm -i --nodeps /tmp/libev.rpm /tmp/gperf.rpm /tmp/numactl.rpm /tmp/sysstat.rpm /tmp/strace.rpm \
&& rm -f /tmp/libev.rpm /tmp/gperf.rpm /tmp/numactl.rpm /tmp/sysstat.rpm /tmp/strace.rpm
RUN yum update \
&& yum install --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace
RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \
# Can't use alwaysprompt=no here, since we need to pick up deps
# No way to separate key imports and accept deps separately in yum/dnf

View File

@ -34,7 +34,7 @@ FROM registry.access.redhat.com/ubi7/ubi:latest
# Install keys and dependencies
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install --setopt=alwaysprompt=no gnupg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 \
|| gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 ) \
&& gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \
@ -45,15 +45,10 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rpmkeys --checksig /tmp/perconarepo.rpm \
&& rpm -Uvh /tmp/perconarepo.rpm \
&& rm -f /tmp/perconarepo.rpm
RUN curl -L --retry-delay 10 --retry 3 -o /tmp/libev.rpm http://mirror.centos.org/centos/7/extras/x86_64/Packages/libev-4.15-7.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/gperf.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/gperftools-libs-2.6.1-1.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/numactl.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/numactl-libs-2.0.12-5.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/sysstat.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/sysstat-10.1.5-19.el7.x86_64.rpm \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/strace.rpm http://mirror.centos.org/centos/7/os/x86_64/Packages/strace-4.24-4.el7.x86_64.rpm \
&& rpm -i --nodeps /tmp/libev.rpm /tmp/gperf.rpm /tmp/numactl.rpm /tmp/sysstat.rpm /tmp/strace.rpm \
&& rm -f /tmp/libev.rpm /tmp/gperf.rpm /tmp/numactl.rpm /tmp/sysstat.rpm /tmp/strace.rpm
RUN yum update \
&& yum install --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace
RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \
&& percona-release setup ps80 \
# Without this pause, the subsequent yum install fails downloads

View File

@ -8,10 +8,10 @@ metadata:
name: example
spec:
images:
vtctld: vitess/lite:v6.0.20-20200429
vtgate: vitess/lite:v6.0.20-20200429
vttablet: vitess/lite:v6.0.20-20200429
vtbackup: vitess/lite:v6.0.20-20200429
vtctld: vitess/lite:v8.0.0
vtgate: vitess/lite:v8.0.0
vttablet: vitess/lite:v8.0.0
vtbackup: vitess/lite:v8.0.0
mysqld:
mysql56Compatible: vitess/lite:v6.0.20-20200429
mysqldExporter: prom/mysqld-exporter:v0.11.0

View File

@ -4,10 +4,10 @@ metadata:
name: example
spec:
images:
vtctld: vitess/lite:v6.0.20-20200429
vtgate: vitess/lite:v6.0.20-20200429
vttablet: vitess/lite:v6.0.20-20200429
vtbackup: vitess/lite:v6.0.20-20200429
vtctld: vitess/lite:v8.0.0
vtgate: vitess/lite:v8.0.0
vttablet: vitess/lite:v8.0.0
vtbackup: vitess/lite:v8.0.0
mysqld:
mysql56Compatible: vitess/lite:v6.0.20-20200429
mysqldExporter: prom/mysqld-exporter:v0.11.0

View File

@ -4,12 +4,12 @@ metadata:
name: example
spec:
images:
vtctld: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
vtgate: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
vttablet: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
vtbackup: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
vtctld: vitess/lite:v8.0.0
vtgate: vitess/lite:v8.0.0
vttablet: vitess/lite:v8.0.0
vtbackup: vitess/lite:v8.0.0
mysqld:
mysql56Compatible: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
mysql56Compatible: vitess/lite:v8.0.0
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1

View File

@ -4,10 +4,10 @@ metadata:
name: example
spec:
images:
vtctld: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
vtgate: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
vttablet: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
vtbackup: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
vtctld: vitess/lite:v8.0.0
vtgate: vitess/lite:v8.0.0
vttablet: vitess/lite:v8.0.0
vtbackup: vitess/lite:v8.0.0
mysqld:
mysql56Compatible: us.gcr.io/planetscale-vitess/lite:2020-04-24.228e6fe
mysqldExporter: prom/mysqld-exporter:v0.11.0

View File

@ -5773,7 +5773,7 @@ spec:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: vitess-operator
image: planetscale/vitess-operator:v2.0.0
image: planetscale/vitess-operator:v2.2.0
imagePullPolicy: IfNotPresent
name: vitess-operator
resources:

go.mod (13 lines changed)
View File

@ -3,13 +3,10 @@ module vitess.io/vitess
go 1.13
require (
cloud.google.com/go v0.46.3
cloud.google.com/go/storage v1.0.0
github.com/Azure/azure-pipeline-go v0.2.2
github.com/Azure/azure-sdk-for-go v46.4.0+incompatible
github.com/Azure/azure-storage-blob-go v0.10.0
github.com/Azure/go-autorest/autorest v0.10.0
github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect
github.com/Azure/go-autorest/autorest v0.10.0 // indirect
github.com/DataDog/datadog-go v2.2.0+incompatible
github.com/GeertJohan/go.rice v1.0.0
github.com/PuerkitoBio/goquery v1.5.1
@ -20,10 +17,8 @@ require (
github.com/cespare/xxhash/v2 v2.1.1
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
github.com/coreos/bbolt v1.3.2 // indirect
github.com/coreos/etcd v3.3.13+incompatible
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/corpix/uarand v0.1.1 // indirect
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/evanphx/json-patch v4.5.0+incompatible
@ -83,20 +78,17 @@ require (
github.com/prometheus/common v0.9.1
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e
github.com/satori/go.uuid v1.2.0
github.com/satori/go.uuid v1.2.0 // indirect
github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1
github.com/smartystreets/goconvey v1.6.4 // indirect
github.com/spf13/cobra v1.1.1
github.com/spyzhov/ajson v0.4.2
github.com/stretchr/testify v1.4.0
github.com/tchap/go-patricia v0.0.0-20160729071656-dd168db6051b
github.com/tebeka/selenium v0.9.9
github.com/tinylib/msgp v1.1.1 // indirect
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
github.com/uber-go/atomic v1.4.0 // indirect
github.com/uber/jaeger-client-go v2.16.0+incompatible
github.com/uber/jaeger-lib v2.0.0+incompatible // indirect
github.com/ugorji/go v1.1.7 // indirect
github.com/z-division/go-zookeeper v0.0.0-20190128072838-6d7457066b9b
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/lint v0.0.0-20190930215403-16217165b5de
@ -111,7 +103,6 @@ require (
gopkg.in/DataDog/dd-trace-go.v1 v1.17.0
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/gcfg.v1 v1.2.3
gopkg.in/ini.v1 v1.51.0 // indirect
gopkg.in/ldap.v2 v2.5.0
gopkg.in/warnings.v0 v0.1.2 // indirect
gotest.tools v2.2.0+incompatible

go.sum (38 lines changed)
View File

@ -8,38 +8,29 @@ cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-sdk-for-go v0.2.0-beta h1:wYBqYNMWr0WL2lcEZi+dlK9n+N0wJ0Pjs4BKeOnDjfQ=
github.com/Azure/azure-sdk-for-go v46.4.0+incompatible h1:fCN6Pi+tEiEwFa8RSmtVlFHRXEZ+DJm9gfx/MKqYWw4=
github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs=
github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE=
github.com/Azure/azure-storage-file-go v0.8.0 h1:OX8DGsleWLUE6Mw4R/OeWEZMvsTIpwN94J59zqKQnTI=
github.com/Azure/azure-storage-file-go v0.8.0/go.mod h1:3w3mufGcMjcOJ3w+4Gs+5wsSgkT7xDwWWqMMIrXtW4c=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v1.1.1 h1:4G9tVCqooRY3vDTB2bA1Z01PlSALtnUbji0AfzthUSs=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.10.0 h1:mvdtztBqcL8se7MdrUweNieTNi4kfNG6GOJuurQJpuY=
github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.10 h1:j5sGbX7uj1ieYYkQ3Mpvewd4DCsEQ+ZeJpqnSM9pjnM=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
@ -48,8 +39,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss=
github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
@ -404,6 +393,7 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@ -456,8 +446,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@ -676,11 +664,7 @@ github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQG
github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw=
github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@ -727,8 +711,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -737,6 +719,7 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@ -753,7 +736,6 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -782,12 +764,11 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -825,7 +806,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -878,8 +858,6 @@ golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191219041853-979b82bfef62 h1:vDaiisQl0rGVXqk3wT2yc43gSnwlj4haEG5J78IGZP4=
golang.org/x/tools v0.0.0-20191219041853-979b82bfef62/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20201202200335-bef1c476418a h1:TYqOq/v+Ri5aADpldxXOj6PmvcPMOJbLjdALzZDQT2M=
golang.org/x/tools v0.0.0-20201202200335-bef1c476418a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -912,8 +890,6 @@ google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dT
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20190926190326-7ee9db18f195 h1:dWzgMaXfaHsnkRKZ1l3iJLDmTEB40JMl/dqRbJX4D/o=
google.golang.org/genproto v0.0.0-20190926190326-7ee9db18f195/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=

View File

@ -19,5 +19,6 @@ package main
// Imports and register the gRPC vtgateservice server
import (
_ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn"
_ "vitess.io/vitess/go/vt/vtgate/grpcvtgateservice"
)

View File

@ -75,7 +75,8 @@ func commandGetKeyspace(cmd *cobra.Command, args []string) error {
return err
}
fmt.Printf("%+v\n", resp)
fmt.Printf("%+v\n", resp.Keyspace)
return nil
}
@ -86,6 +87,7 @@ func commandGetKeyspaces(cmd *cobra.Command, args []string) error {
}
fmt.Printf("%+v\n", resp.Keyspaces)
return nil
}

View File

@ -19,10 +19,13 @@ package main
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"time"
"vitess.io/vitess/go/vt/tlstest"
@ -62,6 +65,47 @@ func TestRunsVschemaMigrations(t *testing.T) {
assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table1", vindex: "my_vdx", vindexType: "hash", column: "id"})
}
func TestCanVtGateExecute(t *testing.T) {
cluster, err := startCluster()
assert.NoError(t, err)
defer cluster.TearDown()
args := os.Args
defer resetFlags(args)
client, err := vtctlclient.New(fmt.Sprintf("localhost:%v", cluster.GrpcPort()))
assert.NoError(t, err)
defer client.Close()
stream, err := client.ExecuteVtctlCommand(
context.Background(),
[]string{
"VtGateExecute",
"-server",
fmt.Sprintf("localhost:%v", cluster.GrpcPort()),
"select 'success';",
},
30*time.Second,
)
assert.NoError(t, err)
var b strings.Builder
b.Grow(1024)
Out:
for {
e, err := stream.Recv()
switch err {
case nil:
b.WriteString(e.Value)
case io.EOF:
break Out
default:
assert.FailNow(t, err.Error())
}
}
assert.Contains(t, b.String(), "success")
}
func TestMtlsAuth(t *testing.T) {
// Our test root.
root, err := ioutil.TempDir("", "tlstest")
@ -103,7 +147,7 @@ func TestMtlsAuth(t *testing.T) {
assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"})
}
func TestMtlsAuthUnaothorizedFails(t *testing.T) {
func TestMtlsAuthUnauthorizedFails(t *testing.T) {
// Our test root.
root, err := ioutil.TempDir("", "tlstest")
if err != nil {

View File

@ -49,33 +49,41 @@ var (
ddlStrategyUnchanged = "-"
createTable = `
CREATE TABLE %s (
id BIGINT(20) not NULL,
msg varchar(64),
PRIMARY KEY (id)
id bigint(20) NOT NULL,
msg varchar(64),
PRIMARY KEY (id)
) ENGINE=InnoDB;`
// To verify non online-DDL behavior
alterTableNormalStatement = `
ALTER TABLE %s
ADD COLUMN non_online INT UNSIGNED NOT NULL`
ADD COLUMN non_online int UNSIGNED NOT NULL`
// A trivial statement which must succeed and does not change the schema
alterTableTrivialStatement = `
ALTER TABLE %s
ENGINE=InnoDB`
ENGINE=InnoDB`
// The following statement is valid
alterTableSuccessfulStatement = `
ALTER TABLE %s
MODIFY id BIGINT UNSIGNED NOT NULL,
ADD COLUMN ghost_col INT NOT NULL,
ADD INDEX idx_msg(msg)`
MODIFY id bigint UNSIGNED NOT NULL,
ADD COLUMN ghost_col int NOT NULL,
ADD INDEX idx_msg(msg)`
// The following statement will fail because gh-ost requires some shared unique key
alterTableFailedStatement = `
ALTER TABLE %s
DROP PRIMARY KEY,
DROP COLUMN ghost_col`
DROP PRIMARY KEY,
DROP COLUMN ghost_col`
// We will run this query with "gh-ost --max-load=Threads_running=1"
alterTableThrottlingStatement = `
ALTER TABLE %s
DROP COLUMN ghost_col`
DROP COLUMN ghost_col`
onlineDDLCreateTableStatement = `
CREATE TABLE %s (
id bigint NOT NULL,
online_ddl_create_col INT NOT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB;`
onlineDDLDropTableStatement = `
DROP TABLE %s`
)
func fullWordUUIDRegexp(uuid, searchWord string) *regexp.Regexp {
@ -106,6 +114,7 @@ func TestMain(m *testing.M) {
clusterInstance.VtTabletExtraArgs = []string{
"-migration_check_interval", "5s",
"-gh-ost-path", os.Getenv("VITESS_ENDTOEND_GH_OST_PATH"), // leave env variable empty/unset to get the default behavior. Override in Mac.
}
clusterInstance.VtGateExtraArgs = []string{
"-ddl_strategy", "gh-ost",
@ -157,29 +166,29 @@ func TestSchemaChange(t *testing.T) {
assert.Equal(t, 2, len(clusterInstance.Keyspaces[0].Shards))
testWithInitialSchema(t)
{
_ = testAlterTable(t, alterTableNormalStatement, string(schema.DDLStrategyNormal), "vtctl", "non_online")
_ = testOnlineDDLStatement(t, alterTableNormalStatement, string(schema.DDLStrategyNormal), "vtctl", "non_online")
}
{
uuid := testAlterTable(t, alterTableSuccessfulStatement, ddlStrategyUnchanged, "vtgate", "ghost_col")
uuid := testOnlineDDLStatement(t, alterTableSuccessfulStatement, ddlStrategyUnchanged, "vtgate", "ghost_col")
checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete)
checkCancelMigration(t, uuid, false)
checkRetryMigration(t, uuid, false)
}
{
uuid := testAlterTable(t, alterTableTrivialStatement, "gh-ost", "vtctl", "ghost_col")
uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost", "vtctl", "ghost_col")
checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete)
checkCancelMigration(t, uuid, false)
checkRetryMigration(t, uuid, false)
}
{
uuid := testAlterTable(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col")
uuid := testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col")
checkRecentMigrations(t, uuid, schema.OnlineDDLStatusRunning)
checkCancelMigration(t, uuid, true)
time.Sleep(2 * time.Second)
checkRecentMigrations(t, uuid, schema.OnlineDDLStatusFailed)
}
{
uuid := testAlterTable(t, alterTableFailedStatement, "gh-ost", "vtgate", "ghost_col")
uuid := testOnlineDDLStatement(t, alterTableFailedStatement, "gh-ost", "vtgate", "ghost_col")
checkRecentMigrations(t, uuid, schema.OnlineDDLStatusFailed)
checkCancelMigration(t, uuid, false)
checkRetryMigration(t, uuid, true)
@ -198,12 +207,24 @@ func TestSchemaChange(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
_ = testAlterTable(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col")
_ = testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col")
}()
}
wg.Wait()
checkCancelAllMigrations(t, count)
}
{
uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "gh-ost", "vtctl", "")
checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete)
checkCancelMigration(t, uuid, false)
checkRetryMigration(t, uuid, false)
}
{
uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "gh-ost", "vtctl", "online_ddl_create_col")
checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete)
checkCancelMigration(t, uuid, false)
checkRetryMigration(t, uuid, false)
}
}
func testWithInitialSchema(t *testing.T) {
@ -219,8 +240,8 @@ func testWithInitialSchema(t *testing.T) {
checkTables(t, totalTableCount)
}
// testAlterTable runs an online DDL, ALTER statement
func testAlterTable(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectColumn string) (uuid string) {
// testOnlineDDLStatement runs an online DDL, ALTER statement
func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectColumn string) (uuid string) {
tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3)
sqlQuery := fmt.Sprintf(alterStatement, tableName)
if executeStrategy == "vtgate" {
@ -245,7 +266,9 @@ func testAlterTable(t *testing.T, alterStatement string, ddlStrategy string, exe
time.Sleep(time.Second * 20)
}
checkMigratedTable(t, tableName, expectColumn)
if expectColumn != "" {
checkMigratedTable(t, tableName, expectColumn)
}
return uuid
}

View File

@ -223,7 +223,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
switchWrites(t, ksWorkflow)
ksShards := []string{"product/0", "customer/-80", "customer/80-"}
printShardPositions(vc, ksShards)
insertQuery2 := "insert into customer(name) values('tempCustomer2')"
insertQuery2 := "insert into customer(name, cid) values('tempCustomer2', 100)"
matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1, :_cid0)"
require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2))
@ -236,7 +236,6 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
if testReverse {
//Reverse Replicate
switchReads(t, allCellNames, reverseKsWorkflow)
printShardPositions(vc, ksShards)
switchWrites(t, reverseKsWorkflow)
insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')"
@ -250,6 +249,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
//Go forward again
switchReads(t, allCellNames, ksWorkflow)
switchWrites(t, ksWorkflow)
dropSourcesDryRun(t, ksWorkflow, false, dryRunResultsDropSourcesDropCustomerShard)
dropSourcesDryRun(t, ksWorkflow, true, dryRunResultsDropSourcesRenameCustomerShard)
@ -371,7 +371,7 @@ func reshardMerchant3to1Merge(t *testing.T) {
func reshardCustomer3to2SplitMerge(t *testing.T) { //-40,40-80,80-c0 => merge/split, c0- stays the same ending up with 3
ksName := "customer"
counts := map[string]int{"zone1-1000": 7, "zone1-1100": 9, "zone1-1200": 5}
counts := map[string]int{"zone1-1000": 8, "zone1-1100": 8, "zone1-1200": 5}
reshard(t, ksName, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", 1000, counts, nil, nil, "")
}
@ -649,11 +649,48 @@ func switchWritesDryRun(t *testing.T, ksWorkflow string, dryRunResults []string)
validateDryRunResults(t, output, dryRunResults)
}
func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) {
// Temporary code: print lots of info for debugging occasional flaky failures in customer reshard in CI for multicell test
debug := true
if debug {
fmt.Printf("------------------- START Extra debug info %s SwitchWrites %s\n", msg, ksWorkflow)
ksShards := []string{"product/0", "customer/-80", "customer/80-"}
printShardPositions(vc, ksShards)
custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"]
customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet
customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet
productKs := vc.Cells[defaultCell.Name].Keyspaces["product"]
productTab := productKs.Shards["0"].Tablets["zone1-100"].Vttablet
tabs := []*cluster.VttabletProcess{productTab, customerTab1, customerTab2}
queries := []string{
"select id, workflow, pos, stop_pos, cell, tablet_types, time_updated, transaction_timestamp, state, message from _vt.vreplication",
"select * from _vt.copy_state",
"select * from _vt.resharding_journal",
}
for _, tab := range tabs {
for _, query := range queries {
qr, err := tab.QueryTablet(query, "", false)
require.NoError(t, err)
fmt.Printf("\nTablet:%s.%s.%s.%d\nQuery: %s\n%+v\n\n",
tab.Cell, tab.Keyspace, tab.Shard, tab.TabletUID, query, qr.Rows)
}
}
fmt.Printf("------------------- END Extra debug info %s SwitchWrites %s\n", msg, ksWorkflow)
}
}
func switchWrites(t *testing.T, ksWorkflow string) {
const SwitchWritesTimeout = "91s" // max: 3 tablet picker 30s waits + 1
output, err := vc.VtctlClient.ExecuteCommandWithOutput("SwitchWrites",
"-filtered_replication_wait_time="+SwitchWritesTimeout, ksWorkflow)
require.NoError(t, err, fmt.Sprintf("SwitchWrites Error: %s: %s", err, output))
if output != "" {
fmt.Printf("Output of SwitchWrites for %s:\n++++++\n%s\n--------\n", ksWorkflow, output)
}
//printSwitchWritesExtraDebug is useful when debugging failures in SwitchWrites due to corner cases/races
_ = printSwitchWritesExtraDebug
if err != nil {
require.FailNow(t, fmt.Sprintf("SwitchWrites Error: %s: %s", err, output))
}
}
func dropSourcesDryRun(t *testing.T, ksWorkflow string, renameTables bool, dryRunResults []string) {

View File

@ -479,6 +479,23 @@ func TestCreateIndex(t *testing.T) {
require.NoError(t, err)
}
func TestCreateView(t *testing.T) {
// The test won't work since we can't change the vschema without reloading the vtgate.
t.Skip()
defer cluster.PanicHandler(t)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
defer exec(t, conn, `delete from t1`)
// Test that create view works and the output is as expected
exec(t, conn, `create view v1 as select * from t1`)
exec(t, conn, `insert into t1(id1, id2) values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)`)
// This won't work, since ALTER VSCHEMA ADD TABLE is only supported for unsharded keyspaces
exec(t, conn, "alter vschema add table v1")
assertMatches(t, conn, "select * from v1", `[[INT64(1) INT64(1)] [INT64(2) INT64(2)] [INT64(3) INT64(3)] [INT64(4) INT64(4)] [INT64(5) INT64(5)]]`)
}
func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) {
t.Helper()
qr := exec(t, conn, query)

View File

@ -18,6 +18,7 @@ package vtgate
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
@ -125,3 +126,60 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) {
query := "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = 't7_fk' and rc.constraint_schema = database() and rc.table_name = 't7_fk'"
assertMatches(t, conn, query, `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") VARCHAR("CASCADE") VARCHAR("SET NULL")]]`)
}
func TestConnectWithSystemSchema(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()
for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} {
connParams := vtParams
connParams.DbName = dbname
conn, err := mysql.Connect(ctx, &connParams)
require.NoError(t, err)
conn.Close()
}
}
func TestUseSystemSchema(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()
for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} {
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
exec(t, conn, fmt.Sprintf("use %s", dbname))
conn.Close()
}
}
func TestSystemSchemaQueryWithoutQualifier(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
queryWithQualifier := fmt.Sprintf("select t.table_schema,t.table_name,c.column_name,c.column_type "+
"from information_schema.tables t "+
"join information_schema.columns c "+
"on c.table_schema = t.table_schema and c.table_name = t.table_name "+
"where t.table_schema = '%s' and c.table_schema = '%s'", KeyspaceName, KeyspaceName)
qr1 := exec(t, conn, queryWithQualifier)
queryWithoutQualifier := fmt.Sprintf("select t.table_schema,t.table_name,c.column_name,c.column_type "+
"from tables t "+
"join columns c "+
"on c.table_schema = t.table_schema and c.table_name = t.table_name "+
"where t.table_schema = '%s' and c.table_schema = '%s'", KeyspaceName, KeyspaceName)
exec(t, conn, "use information_schema")
qr2 := exec(t, conn, queryWithoutQualifier)
require.Equal(t, qr1, qr2)
connParams := vtParams
connParams.DbName = "information_schema"
conn2, err := mysql.Connect(ctx, &connParams)
require.NoError(t, err)
defer conn2.Close()
qr3 := exec(t, conn2, queryWithoutQualifier)
require.Equal(t, qr2, qr3)
}

View File

@ -193,6 +193,24 @@ func TestInsertAllDefaults(t *testing.T) {
assertMatches(t, conn, `select * from allDefaults`, "[[INT64(1) NULL]]")
}
func TestCreateViewUnsharded(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()
vtParams := mysql.ConnParams{
Host: "localhost",
Port: clusterInstance.VtgateMySQLPort,
}
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
defer exec(t, conn, `delete from t1`)
// Test that create view works and the output is as expected
_, err = conn.ExecuteFetch(`create view v1 as select * from t1`, 1000, true)
require.NoError(t, err)
exec(t, conn, `insert into t1(c1, c2, c3, c4) values (300,100,300,'abc'),(30,10,30,'ac'),(3,0,3,'a')`)
assertMatches(t, conn, "select * from v1", `[[INT64(3) INT64(0) INT64(3) VARCHAR("a")] [INT64(30) INT64(10) INT64(30) VARCHAR("ac")] [INT64(300) INT64(100) INT64(300) VARCHAR("abc")]]`)
}
func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
t.Helper()
qr, err := conn.ExecuteFetch(query, 1000, true)

View File

@ -397,6 +397,11 @@ func (hc *HealthCheckImpl) deleteTablet(tablet *topodata.Tablet) {
return
}
delete(ths, tabletAlias)
// delete from healthy list
healthy, ok := hc.healthy[key]
if ok && len(healthy) > 0 {
hc.recomputeHealthy(key)
}
}
func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, shr *query.StreamHealthResponse, currentTarget *query.Target, trivialNonMasterUpdate bool, isMasterUpdate bool, isMasterChange bool) {
@ -446,27 +451,11 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, shr *query.StreamHealt
// Tablets from other cells for non-master targets should not trigger a re-sort;
// they should also be excluded from healthy list.
if shr.Target.TabletType != topodata.TabletType_MASTER && hc.isIncluded(shr.Target.TabletType, shr.TabletAlias) {
all := hc.healthData[targetKey]
allArray := make([]*TabletHealth, 0, len(all))
for _, s := range all {
// Only tablets in same cell / cellAlias are included in healthy list.
if hc.isIncluded(s.Tablet.Type, s.Tablet.Alias) {
allArray = append(allArray, s)
}
}
hc.healthy[targetKey] = FilterStatsByReplicationLag(allArray)
hc.recomputeHealthy(targetKey)
}
if targetChanged && currentTarget.TabletType != topodata.TabletType_MASTER && hc.isIncluded(shr.Target.TabletType, shr.TabletAlias) { // also recompute old target's healthy list
oldTargetKey := hc.keyFromTarget(currentTarget)
all := hc.healthData[oldTargetKey]
allArray := make([]*TabletHealth, 0, len(all))
for _, s := range all {
// Only tablets in same cell / cellAlias are included in healthy list.
if hc.isIncluded(s.Tablet.Type, s.Tablet.Alias) {
allArray = append(allArray, s)
}
}
hc.healthy[oldTargetKey] = FilterStatsByReplicationLag(allArray)
hc.recomputeHealthy(oldTargetKey)
}
}
if isMasterChange {
@ -478,6 +467,18 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, shr *query.StreamHealt
}
func (hc *HealthCheckImpl) recomputeHealthy(key keyspaceShardTabletType) {
all := hc.healthData[key]
allArray := make([]*TabletHealth, 0, len(all))
for _, s := range all {
// Only tablets in same cell / cellAlias are included in healthy list.
if hc.isIncluded(s.Tablet.Type, s.Tablet.Alias) {
allArray = append(allArray, s)
}
}
hc.healthy[key] = FilterStatsByReplicationLag(allArray)
}
// Subscribe adds a listener. Used by vtgate buffer to learn about master changes.
func (hc *HealthCheckImpl) Subscribe() chan *TabletHealth {
hc.subMu.Lock()

View File

@ -514,6 +514,48 @@ func TestWaitForAllServingTablets(t *testing.T) {
KeyspacesToWatch = []string{}
}
// TestRemoveTablet tests the behavior when a tablet goes away.
func TestRemoveTablet(t *testing.T) {
ts := memorytopo.NewServer("cell")
hc := createTestHc(ts)
defer hc.Close()
tablet := createTestTablet(0, "cell", "a")
tablet.Type = topodatapb.TabletType_REPLICA
input := make(chan *querypb.StreamHealthResponse)
createFakeConn(tablet, input)
// create a channel and subscribe to healthcheck
resultChan := hc.Subscribe()
hc.AddTablet(tablet)
// there will be a first result, get and discard it
<-resultChan
shr := &querypb.StreamHealthResponse{
TabletAlias: tablet.Alias,
Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
Serving: true,
TabletExternallyReparentedTimestamp: 0,
RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2},
}
want := []*TabletHealth{{
Tablet: tablet,
Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
Serving: true,
Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2},
MasterTermStartTime: 0,
}}
input <- shr
<-resultChan
// check it's there
a := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
mustMatch(t, want, a, "unexpected result")
// delete the tablet
hc.RemoveTablet(tablet)
a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
assert.Empty(t, a, "wrong result, expected empty list")
}
// TestGetHealthyTablets tests the functionality of GetHealthyTabletStats.
func TestGetHealthyTablets(t *testing.T) {
ts := memorytopo.NewServer("cell")

View File

@ -221,6 +221,45 @@ func (m *GetKeyspaceRequest) GetKeyspace() string {
return ""
}
type GetKeyspaceResponse struct {
Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetKeyspaceResponse) Reset() { *m = GetKeyspaceResponse{} }
func (m *GetKeyspaceResponse) String() string { return proto.CompactTextString(m) }
func (*GetKeyspaceResponse) ProtoMessage() {}
func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_f41247b323a1ab2e, []int{5}
}
func (m *GetKeyspaceResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetKeyspaceResponse.Unmarshal(m, b)
}
func (m *GetKeyspaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetKeyspaceResponse.Marshal(b, m, deterministic)
}
func (m *GetKeyspaceResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetKeyspaceResponse.Merge(m, src)
}
func (m *GetKeyspaceResponse) XXX_Size() int {
return xxx_messageInfo_GetKeyspaceResponse.Size(m)
}
func (m *GetKeyspaceResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetKeyspaceResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetKeyspaceResponse proto.InternalMessageInfo
func (m *GetKeyspaceResponse) GetKeyspace() *Keyspace {
if m != nil {
return m.Keyspace
}
return nil
}
type Keyspace struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Keyspace *topodata.Keyspace `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
@ -233,7 +272,7 @@ func (m *Keyspace) Reset() { *m = Keyspace{} }
func (m *Keyspace) String() string { return proto.CompactTextString(m) }
func (*Keyspace) ProtoMessage() {}
func (*Keyspace) Descriptor() ([]byte, []int) {
return fileDescriptor_f41247b323a1ab2e, []int{5}
return fileDescriptor_f41247b323a1ab2e, []int{6}
}
func (m *Keyspace) XXX_Unmarshal(b []byte) error {
@ -279,7 +318,7 @@ func (m *FindAllShardsInKeyspaceRequest) Reset() { *m = FindAllShardsInK
func (m *FindAllShardsInKeyspaceRequest) String() string { return proto.CompactTextString(m) }
func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {}
func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_f41247b323a1ab2e, []int{6}
return fileDescriptor_f41247b323a1ab2e, []int{7}
}
func (m *FindAllShardsInKeyspaceRequest) XXX_Unmarshal(b []byte) error {
@ -318,7 +357,7 @@ func (m *FindAllShardsInKeyspaceResponse) Reset() { *m = FindAllShardsIn
func (m *FindAllShardsInKeyspaceResponse) String() string { return proto.CompactTextString(m) }
func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {}
func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_f41247b323a1ab2e, []int{7}
return fileDescriptor_f41247b323a1ab2e, []int{8}
}
func (m *FindAllShardsInKeyspaceResponse) XXX_Unmarshal(b []byte) error {
@ -359,7 +398,7 @@ func (m *Shard) Reset() { *m = Shard{} }
func (m *Shard) String() string { return proto.CompactTextString(m) }
func (*Shard) ProtoMessage() {}
func (*Shard) Descriptor() ([]byte, []int) {
return fileDescriptor_f41247b323a1ab2e, []int{8}
return fileDescriptor_f41247b323a1ab2e, []int{9}
}
func (m *Shard) XXX_Unmarshal(b []byte) error {
@ -419,7 +458,7 @@ func (m *TableMaterializeSettings) Reset() { *m = TableMaterializeSettin
func (m *TableMaterializeSettings) String() string { return proto.CompactTextString(m) }
func (*TableMaterializeSettings) ProtoMessage() {}
func (*TableMaterializeSettings) Descriptor() ([]byte, []int) {
return fileDescriptor_f41247b323a1ab2e, []int{9}
return fileDescriptor_f41247b323a1ab2e, []int{10}
}
func (m *TableMaterializeSettings) XXX_Unmarshal(b []byte) error {
@ -482,7 +521,7 @@ func (m *MaterializeSettings) Reset() { *m = MaterializeSettings{} }
func (m *MaterializeSettings) String() string { return proto.CompactTextString(m) }
func (*MaterializeSettings) ProtoMessage() {}
func (*MaterializeSettings) Descriptor() ([]byte, []int) {
return fileDescriptor_f41247b323a1ab2e, []int{10}
return fileDescriptor_f41247b323a1ab2e, []int{11}
}
func (m *MaterializeSettings) XXX_Unmarshal(b []byte) error {
@ -558,6 +597,7 @@ func init() {
proto.RegisterType((*GetKeyspacesRequest)(nil), "vtctldata.GetKeyspacesRequest")
proto.RegisterType((*GetKeyspacesResponse)(nil), "vtctldata.GetKeyspacesResponse")
proto.RegisterType((*GetKeyspaceRequest)(nil), "vtctldata.GetKeyspaceRequest")
proto.RegisterType((*GetKeyspaceResponse)(nil), "vtctldata.GetKeyspaceResponse")
proto.RegisterType((*Keyspace)(nil), "vtctldata.Keyspace")
proto.RegisterType((*FindAllShardsInKeyspaceRequest)(nil), "vtctldata.FindAllShardsInKeyspaceRequest")
proto.RegisterType((*FindAllShardsInKeyspaceResponse)(nil), "vtctldata.FindAllShardsInKeyspaceResponse")
@ -570,44 +610,45 @@ func init() {
func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_f41247b323a1ab2e) }
var fileDescriptor_f41247b323a1ab2e = []byte{
// 620 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdf, 0x6e, 0xd3, 0x4a,
0x10, 0xc6, 0xe5, 0xa4, 0xe9, 0x89, 0x27, 0x27, 0x49, 0xcf, 0xf6, 0x20, 0x59, 0x41, 0x40, 0x30,
0xb4, 0x8d, 0x84, 0xe4, 0x40, 0x91, 0x10, 0x42, 0xdc, 0x94, 0x12, 0x50, 0xa9, 0xe8, 0x85, 0x5b,
0x81, 0xc4, 0x05, 0xd6, 0xd6, 0x9e, 0x06, 0xab, 0x1b, 0xaf, 0xf1, 0x4e, 0xd2, 0x86, 0x37, 0xe0,
0x65, 0x78, 0x04, 0x9e, 0x0d, 0x79, 0xd7, 0x76, 0x5c, 0xa9, 0x05, 0x71, 0x37, 0xf9, 0xcd, 0xbf,
0x6f, 0x3e, 0xaf, 0x02, 0xfd, 0x05, 0x85, 0x24, 0x22, 0x4e, 0xdc, 0x4b, 0x33, 0x49, 0x92, 0xd9,
0x15, 0x18, 0x74, 0x85, 0x9c, 0xce, 0x29, 0x16, 0x26, 0x33, 0xe8, 0x91, 0x4c, 0xe5, 0xaa, 0xd2,
0xfd, 0x08, 0x83, 0xc9, 0x25, 0x86, 0x73, 0xc2, 0x0f, 0x79, 0xcb, 0xbe, 0x9c, 0xcd, 0x78, 0x12,
0xf9, 0xf8, 0x75, 0x8e, 0x8a, 0x18, 0x83, 0x35, 0x9e, 0x4d, 0x95, 0x63, 0x0d, 0x9b, 0x23, 0xdb,
0xd7, 0x31, 0xdb, 0x82, 0x1e, 0x0f, 0x29, 0x96, 0x49, 0x40, 0xf1, 0x0c, 0xe5, 0x9c, 0x9c, 0xc6,
0xd0, 0x1a, 0x35, 0xfd, 0xae, 0xa1, 0x27, 0x06, 0xba, 0xfb, 0x70, 0xfb, 0xda, 0xc1, 0x2a, 0x95,
0x89, 0x42, 0xf6, 0x10, 0x5a, 0xb8, 0xc0, 0x84, 0x1c, 0x6b, 0x68, 0x8d, 0x3a, 0xbb, 0x3d, 0xaf,
0x94, 0x39, 0xc9, 0xa9, 0x6f, 0x92, 0xee, 0x2d, 0xd8, 0x7c, 0x8b, 0x74, 0x88, 0x4b, 0x95, 0xf2,
0x10, 0x55, 0x21, 0xcb, 0x3d, 0x80, 0xff, 0xaf, 0xe2, 0x62, 0xe8, 0x13, 0xb0, 0xcf, 0x4b, 0xa8,
0x35, 0x77, 0x76, 0x37, 0xbd, 0x95, 0x37, 0x65, 0x83, 0xbf, 0xaa, 0x72, 0x1f, 0x03, 0xab, 0x8d,
0x2a, 0xef, 0x1e, 0x40, 0xbb, 0x2c, 0xd1, 0x02, 0x6d, 0xbf, 0xfa, 0xed, 0x1e, 0x41, 0xbb, 0x2c,
0xcf, 0xfd, 0x49, 0xf8, 0xac, 0xac, 0xd1, 0x31, 0xf3, 0x6a, 0xbd, 0x0d, 0x7d, 0x1c, 0xf3, 0x2a,
0xd3, 0xab, 0x45, 0xab, 0x79, 0x2f, 0xe1, 0xee, 0x9b, 0x38, 0x89, 0xf6, 0x84, 0x38, 0xfe, 0xc2,
0xb3, 0x48, 0x1d, 0x24, 0x7f, 0xa3, 0xe6, 0xa7, 0x05, 0xf7, 0x6e, 0x6c, 0x2f, 0x6c, 0x39, 0x82,
0x75, 0xa5, 0x73, 0x85, 0x27, 0xcf, 0x6a, 0x9e, 0xfc, 0xa1, 0xd7, 0x33, 0x89, 0x49, 0x42, 0xd9,
0xd2, 0x2f, 0xa6, 0x0c, 0x0e, 0xa1, 0x53, 0xc3, 0x6c, 0x03, 0x9a, 0xe7, 0xb8, 0x2c, 0x94, 0xe5,
0x21, 0xdb, 0x86, 0xd6, 0x82, 0x8b, 0x79, 0x79, 0xff, 0x46, 0x6d, 0x9f, 0x6e, 0xf4, 0x4d, 0xfa,
0x45, 0xe3, 0xb9, 0xe5, 0x7e, 0x86, 0x96, 0x66, 0xbf, 0xbb, 0xb2, 0xf2, 0xb9, 0x51, 0xf3, 0x79,
0x0b, 0x5a, 0x5a, 0x8f, 0xd3, 0xd4, 0x4b, 0xfa, 0x2b, 0x93, 0x8b, 0x1d, 0x3a, 0xeb, 0x7e, 0xb7,
0xc0, 0x39, 0xe1, 0xa7, 0x02, 0xdf, 0x73, 0xc2, 0x2c, 0xe6, 0x22, 0xfe, 0x86, 0xc7, 0x48, 0x14,
0x27, 0x53, 0xc5, 0xee, 0xc3, 0xbf, 0xc4, 0xb3, 0x29, 0x52, 0x40, 0x79, 0x49, 0xb1, 0xb7, 0x63,
0x98, 0xee, 0x62, 0x8f, 0xe0, 0x3f, 0x25, 0xe7, 0x59, 0x88, 0x01, 0x5e, 0xa6, 0x19, 0x2a, 0x15,
0xcb, 0xa4, 0xd0, 0xb1, 0x61, 0x12, 0x93, 0x8a, 0xb3, 0x3b, 0x00, 0x61, 0x86, 0x9c, 0x30, 0x88,
0x22, 0xa1, 0x85, 0xd9, 0xbe, 0x6d, 0xc8, 0xeb, 0x48, 0xb8, 0x3f, 0x1a, 0xb0, 0x79, 0x9d, 0x8c,
0x01, 0xb4, 0x2f, 0x64, 0x76, 0x7e, 0x26, 0xe4, 0x45, 0x79, 0x7a, 0xf9, 0x9b, 0xed, 0x40, 0xbf,
0xd8, 0x7f, 0xe5, 0x55, 0xd9, 0x7e, 0xcf, 0xe0, 0xea, 0x2d, 0xee, 0x40, 0xbf, 0xb8, 0xa5, 0x2a,
0x34, 0x02, 0x7a, 0x06, 0x57, 0x85, 0xdb, 0xd0, 0x57, 0x24, 0xd3, 0x80, 0x9f, 0x11, 0x66, 0x41,
0x28, 0xd3, 0xa5, 0xb3, 0x36, 0xb4, 0x46, 0x6d, 0xbf, 0x9b, 0xe3, 0xbd, 0x9c, 0xee, 0xcb, 0x74,
0xc9, 0xde, 0x41, 0x4f, 0xbb, 0x12, 0xa8, 0x42, 0xa7, 0xd3, 0xd2, 0xcf, 0xe7, 0x41, 0xed, 0x73,
0xde, 0xe4, 0xac, 0xdf, 0xd5, 0xad, 0xd5, 0x85, 0x0c, 0xd6, 0x42, 0x14, 0xc2, 0x59, 0x37, 0x1f,
0x30, 0x8f, 0x8d, 0xf9, 0xa7, 0x22, 0x37, 0x7f, 0x99, 0xa2, 0x72, 0xfe, 0x29, 0xcd, 0xcf, 0xd9,
0x49, 0x8e, 0x5e, 0x8d, 0x3e, 0x6d, 0x2f, 0x62, 0x42, 0xa5, 0xbc, 0x58, 0x8e, 0x4d, 0x34, 0x9e,
0xca, 0xf1, 0x82, 0xc6, 0xfa, 0xdf, 0x6b, 0x5c, 0x09, 0x39, 0x5d, 0xd7, 0xe0, 0xe9, 0xaf, 0x00,
0x00, 0x00, 0xff, 0xff, 0x25, 0x63, 0x6b, 0x01, 0x0b, 0x05, 0x00, 0x00,
// 629 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdf, 0x6e, 0xd3, 0x30,
0x14, 0xc6, 0x95, 0x76, 0x1d, 0xed, 0x29, 0x6d, 0x87, 0x07, 0x52, 0x54, 0x04, 0x94, 0xc0, 0xb6,
0x4a, 0x48, 0x29, 0x0c, 0x09, 0x21, 0xc4, 0xcd, 0x18, 0x1d, 0x1a, 0x13, 0xbb, 0xc8, 0x26, 0x90,
0xb8, 0x20, 0xf2, 0x92, 0xb3, 0x12, 0xcd, 0x8d, 0x43, 0x7c, 0xda, 0xad, 0xbc, 0x01, 0x2f, 0xc3,
0x23, 0xf0, 0x6c, 0x28, 0x76, 0x92, 0x66, 0x68, 0x03, 0x71, 0xe7, 0xfc, 0xce, 0xbf, 0xef, 0x7c,
0xb6, 0x02, 0xbd, 0x39, 0x05, 0x24, 0x42, 0x4e, 0xdc, 0x4d, 0x52, 0x49, 0x92, 0xb5, 0x4a, 0xd0,
0xef, 0x08, 0x39, 0x99, 0x51, 0x24, 0x4c, 0xa4, 0xdf, 0x25, 0x99, 0xc8, 0x65, 0xa6, 0xf3, 0x09,
0xfa, 0xe3, 0x0b, 0x0c, 0x66, 0x84, 0x1f, 0xb3, 0x92, 0x5d, 0x39, 0x9d, 0xf2, 0x38, 0xf4, 0xf0,
0xdb, 0x0c, 0x15, 0x31, 0x06, 0x2b, 0x3c, 0x9d, 0x28, 0xdb, 0x1a, 0xd4, 0x87, 0x2d, 0x4f, 0x9f,
0xd9, 0x06, 0x74, 0x79, 0x40, 0x91, 0x8c, 0x7d, 0x8a, 0xa6, 0x28, 0x67, 0x64, 0xd7, 0x06, 0xd6,
0xb0, 0xee, 0x75, 0x0c, 0x3d, 0x36, 0xd0, 0xd9, 0x85, 0xbb, 0x57, 0x36, 0x56, 0x89, 0x8c, 0x15,
0xb2, 0xc7, 0xd0, 0xc0, 0x39, 0xc6, 0x64, 0x5b, 0x03, 0x6b, 0xd8, 0xde, 0xee, 0xba, 0x85, 0xcc,
0x71, 0x46, 0x3d, 0x13, 0x74, 0xee, 0xc0, 0xfa, 0x3b, 0xa4, 0x03, 0x5c, 0xa8, 0x84, 0x07, 0xa8,
0x72, 0x59, 0xce, 0x3e, 0xdc, 0xbe, 0x8c, 0xf3, 0xa6, 0xcf, 0xa0, 0x75, 0x56, 0x40, 0xad, 0xb9,
0xbd, 0xbd, 0xee, 0x2e, 0xbd, 0x29, 0x0a, 0xbc, 0x65, 0x96, 0xf3, 0x14, 0x58, 0xa5, 0x55, 0xb1,
0x77, 0x1f, 0x9a, 0x45, 0x8a, 0x16, 0xd8, 0xf2, 0xca, 0x6f, 0x67, 0xef, 0x92, 0xa6, 0x72, 0xf6,
0xe8, 0x8f, 0x92, 0x6b, 0x46, 0x2f, 0xfb, 0x1c, 0x42, 0xb3, 0xa0, 0x99, 0xcf, 0x31, 0x9f, 0x16,
0xb3, 0xf4, 0x99, 0xb9, 0x95, 0x86, 0x35, 0xdd, 0x90, 0xb9, 0xe5, 0xe5, 0x5d, 0xd1, 0xef, 0x35,
0xdc, 0xdf, 0x8b, 0xe2, 0x70, 0x47, 0x88, 0xa3, 0xaf, 0x3c, 0x0d, 0xd5, 0x7e, 0xfc, 0x3f, 0x5b,
0xfd, 0xb2, 0xe0, 0xc1, 0xb5, 0xe5, 0xf9, 0x8a, 0x87, 0xb0, 0xaa, 0x74, 0x2c, 0xf7, 0xf6, 0x45,
0x65, 0xc1, 0x7f, 0xd4, 0xba, 0x26, 0x30, 0x8e, 0x29, 0x5d, 0x78, 0x79, 0x97, 0xfe, 0x01, 0xb4,
0x2b, 0x98, 0xad, 0x41, 0xfd, 0x0c, 0x17, 0xb9, 0xb2, 0xec, 0xc8, 0x36, 0xa1, 0x31, 0xe7, 0x62,
0x56, 0xec, 0xbf, 0x56, 0x99, 0xa7, 0x0b, 0x3d, 0x13, 0x7e, 0x55, 0x7b, 0x69, 0x39, 0x5f, 0xa0,
0xa1, 0xd9, 0xdf, 0xb6, 0x2c, 0x7d, 0xae, 0x55, 0x7c, 0xde, 0x80, 0x86, 0xd6, 0x63, 0xd7, 0xf5,
0x90, 0xde, 0xd2, 0xe4, 0x7c, 0x86, 0x8e, 0x3a, 0x3f, 0x2c, 0xb0, 0x8f, 0xf9, 0x89, 0xc0, 0x0f,
0x9c, 0x30, 0x8d, 0xb8, 0x88, 0xbe, 0xe3, 0x11, 0x12, 0x45, 0xf1, 0x44, 0xb1, 0x87, 0x70, 0x93,
0x78, 0x3a, 0x41, 0xf2, 0x29, 0x4b, 0xc9, 0xe7, 0xb6, 0x0d, 0xd3, 0x55, 0xec, 0x09, 0xdc, 0x52,
0x72, 0x96, 0x06, 0xe8, 0xe3, 0x45, 0x92, 0xa2, 0x52, 0x91, 0x8c, 0x73, 0x1d, 0x6b, 0x26, 0x30,
0x2e, 0x39, 0xbb, 0x07, 0x10, 0xa4, 0xc8, 0x09, 0xfd, 0x30, 0x14, 0x5a, 0x58, 0xcb, 0x6b, 0x19,
0xf2, 0x36, 0x14, 0xce, 0xcf, 0x1a, 0xac, 0x5f, 0x25, 0xa3, 0x0f, 0xcd, 0x73, 0x99, 0x9e, 0x9d,
0x0a, 0x79, 0x5e, 0xac, 0x5e, 0x7c, 0xb3, 0x2d, 0xe8, 0xe5, 0xf3, 0x2f, 0xbd, 0xaa, 0x96, 0xd7,
0x35, 0xb8, 0x7c, 0x8b, 0x5b, 0xd0, 0xcb, 0x77, 0x29, 0x13, 0x8d, 0x80, 0xae, 0xc1, 0x65, 0xe2,
0x26, 0xf4, 0x14, 0xc9, 0xc4, 0xe7, 0xa7, 0x84, 0xa9, 0x1f, 0xc8, 0x64, 0x61, 0xaf, 0x0c, 0xac,
0x61, 0xd3, 0xeb, 0x64, 0x78, 0x27, 0xa3, 0xbb, 0x32, 0x59, 0xb0, 0xf7, 0xd0, 0xd5, 0xae, 0xf8,
0x2a, 0xd7, 0x69, 0x37, 0xf4, 0xf3, 0x79, 0x54, 0xb9, 0xce, 0xeb, 0x9c, 0xf5, 0x3a, 0xba, 0xb4,
0xdc, 0x90, 0xc1, 0x4a, 0x80, 0x42, 0xd8, 0xab, 0xe6, 0x02, 0xb3, 0xb3, 0x31, 0xff, 0x44, 0x64,
0xe6, 0x2f, 0x12, 0x54, 0xf6, 0x8d, 0xc2, 0xfc, 0x8c, 0x1d, 0x67, 0xe8, 0xcd, 0xf0, 0xf3, 0xe6,
0x3c, 0x22, 0x54, 0xca, 0x8d, 0xe4, 0xc8, 0x9c, 0x46, 0x13, 0x39, 0x9a, 0xd3, 0x48, 0xff, 0x05,
0x47, 0xa5, 0x90, 0x93, 0x55, 0x0d, 0x9e, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x2e, 0xa9, 0x4e,
0xcf, 0x53, 0x05, 0x00, 0x00,
}

View file

@ -29,22 +29,22 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor_27055cdbb1148d2b) }
var fileDescriptor_27055cdbb1148d2b = []byte{
// 239 bytes of a gzipped FileDescriptorProto
// 235 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e,
0xc9, 0x29, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2,
0x41, 0x16, 0x93, 0xe2, 0x07, 0xf3, 0x52, 0x12, 0x4b, 0x12, 0x21, 0xd2, 0x46, 0x85, 0x5c, 0xac,
0x61, 0x20, 0x21, 0xa1, 0x0c, 0x2e, 0x61, 0xd7, 0x8a, 0xd4, 0xe4, 0xd2, 0x92, 0x54, 0x30, 0xdf,
0x39, 0x3f, 0x37, 0x37, 0x31, 0x2f, 0x45, 0x48, 0x55, 0x0f, 0xa1, 0x03, 0x8b, 0x7c, 0x50, 0x6a,
0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x1a, 0x21, 0x65, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x4a,
0x0c, 0x06, 0x8c, 0x46, 0x53, 0x98, 0xb8, 0xd8, 0xc0, 0x92, 0x29, 0x42, 0x45, 0x5c, 0xe2, 0x6e,
0x0c, 0x06, 0x8c, 0x46, 0xf3, 0x99, 0xb8, 0xd8, 0xc0, 0x92, 0x29, 0x42, 0x45, 0x5c, 0xe2, 0x6e,
0x99, 0x79, 0x29, 0x8e, 0x39, 0x39, 0xc1, 0x19, 0x89, 0x45, 0x29, 0xc5, 0x9e, 0x79, 0xde, 0xa9,
0x95, 0xc5, 0x05, 0x89, 0xc9, 0xa9, 0x42, 0x9a, 0x48, 0x26, 0xe2, 0x50, 0x03, 0xb3, 0x5c, 0x8b,
0x18, 0xa5, 0x30, 0x07, 0x08, 0x39, 0x73, 0x71, 0xbb, 0xa7, 0x96, 0xc0, 0xed, 0x91, 0x45, 0xd2,
0x8c, 0x24, 0x0e, 0x33, 0x5b, 0x18, 0x49, 0x1a, 0x26, 0xa7, 0xc4, 0x20, 0x14, 0xc8, 0xc5, 0x83,
0xa4, 0xb8, 0x58, 0x48, 0x0e, 0xbb, 0x29, 0xc5, 0x30, 0x63, 0xe4, 0x71, 0xca, 0xc3, 0xdc, 0xe5,
0xa4, 0x1d, 0xa5, 0x59, 0x96, 0x59, 0x92, 0x5a, 0x5c, 0xac, 0x97, 0x99, 0xaf, 0x0f, 0x61, 0xe9,
0xa7, 0xe7, 0xeb, 0x97, 0x95, 0xe8, 0x83, 0x63, 0x4a, 0x1f, 0x39, 0x1e, 0x93, 0xd8, 0xc0, 0x62,
0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x9d, 0x46, 0x8d, 0xf2, 0x01, 0x00, 0x00,
0x18, 0xa5, 0x30, 0x07, 0x08, 0xf9, 0x71, 0x71, 0xbb, 0xa7, 0x96, 0xc0, 0xed, 0x91, 0x45, 0xd2,
0x8c, 0x24, 0x0e, 0x33, 0x5b, 0x0e, 0x97, 0x34, 0xdc, 0xbc, 0x40, 0x2e, 0x1e, 0x24, 0x89, 0x62,
0x21, 0x1c, 0x3a, 0x8a, 0x61, 0x26, 0xca, 0xe3, 0x94, 0x87, 0x19, 0xe9, 0xa4, 0x1d, 0xa5, 0x59,
0x96, 0x59, 0x92, 0x5a, 0x5c, 0xac, 0x97, 0x99, 0xaf, 0x0f, 0x61, 0xe9, 0xa7, 0xe7, 0xeb, 0x97,
0x95, 0xe8, 0x83, 0x23, 0x4d, 0x1f, 0x39, 0x4a, 0x93, 0xd8, 0xc0, 0x62, 0xc6, 0x80, 0x00, 0x00,
0x00, 0xff, 0xff, 0xd5, 0x49, 0x16, 0xd1, 0xfd, 0x01, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -161,7 +161,7 @@ type VtctldClient interface {
// FindAllShardsInKeyspace returns a map of shard names to shard references for a given keyspace.
FindAllShardsInKeyspace(ctx context.Context, in *vtctldata.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.FindAllShardsInKeyspaceResponse, error)
// GetKeyspace reads the given keyspace from the topo and returns it.
GetKeyspace(ctx context.Context, in *vtctldata.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.Keyspace, error)
GetKeyspace(ctx context.Context, in *vtctldata.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspaceResponse, error)
// GetKeyspaces returns the keyspace struct of all keyspaces in the topo.
GetKeyspaces(ctx context.Context, in *vtctldata.GetKeyspacesRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspacesResponse, error)
}
@ -183,8 +183,8 @@ func (c *vtctldClient) FindAllShardsInKeyspace(ctx context.Context, in *vtctldat
return out, nil
}
func (c *vtctldClient) GetKeyspace(ctx context.Context, in *vtctldata.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.Keyspace, error) {
out := new(vtctldata.Keyspace)
func (c *vtctldClient) GetKeyspace(ctx context.Context, in *vtctldata.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspaceResponse, error) {
out := new(vtctldata.GetKeyspaceResponse)
err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetKeyspace", in, out, opts...)
if err != nil {
return nil, err
@ -206,7 +206,7 @@ type VtctldServer interface {
// FindAllShardsInKeyspace returns a map of shard names to shard references for a given keyspace.
FindAllShardsInKeyspace(context.Context, *vtctldata.FindAllShardsInKeyspaceRequest) (*vtctldata.FindAllShardsInKeyspaceResponse, error)
// GetKeyspace reads the given keyspace from the topo and returns it.
GetKeyspace(context.Context, *vtctldata.GetKeyspaceRequest) (*vtctldata.Keyspace, error)
GetKeyspace(context.Context, *vtctldata.GetKeyspaceRequest) (*vtctldata.GetKeyspaceResponse, error)
// GetKeyspaces returns the keyspace struct of all keyspaces in the topo.
GetKeyspaces(context.Context, *vtctldata.GetKeyspacesRequest) (*vtctldata.GetKeyspacesResponse, error)
}
@ -218,7 +218,7 @@ type UnimplementedVtctldServer struct {
func (*UnimplementedVtctldServer) FindAllShardsInKeyspace(ctx context.Context, req *vtctldata.FindAllShardsInKeyspaceRequest) (*vtctldata.FindAllShardsInKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FindAllShardsInKeyspace not implemented")
}
func (*UnimplementedVtctldServer) GetKeyspace(ctx context.Context, req *vtctldata.GetKeyspaceRequest) (*vtctldata.Keyspace, error) {
func (*UnimplementedVtctldServer) GetKeyspace(ctx context.Context, req *vtctldata.GetKeyspaceRequest) (*vtctldata.GetKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetKeyspace not implemented")
}
func (*UnimplementedVtctldServer) GetKeyspaces(ctx context.Context, req *vtctldata.GetKeyspacesRequest) (*vtctldata.GetKeyspacesResponse, error) {
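For callers, the signature change above means GetKeyspace now hands back a GetKeyspaceResponse wrapper rather than a bare Keyspace. A minimal client sketch follows; the proto import paths, the generated NewVtctldClient constructor, and the getters are assumptions based on the standard protoc-gen-go layout, not something shown in this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
	vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
)

func main() {
	// Hypothetical vtctld gRPC address.
	conn, err := grpc.Dial("localhost:15999", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := vtctlservicepb.NewVtctldClient(conn)

	// GetKeyspace now returns *GetKeyspaceResponse; the Keyspace is a field on it.
	resp, err := client.GetKeyspace(context.Background(), &vtctldatapb.GetKeyspaceRequest{Keyspace: "commerce"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.GetKeyspace().GetName())
}
```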

View file

@ -23,6 +23,7 @@ import (
"regexp"
"time"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/topo"
)
@ -138,6 +139,20 @@ func ReadTopo(ctx context.Context, conn topo.Conn, entryPath string) (*OnlineDDL
return onlineDDL, nil
}
// getOnlineDDLAction parses the given SQL into a statement and returns the action type of the DDL statement, or an error
// if the statement is not a DDL statement
func getOnlineDDLAction(sql string) (action sqlparser.DDLAction, ddlStmt sqlparser.DDLStatement, err error) {
stmt, err := sqlparser.Parse(sql)
if err != nil {
return action, ddlStmt, fmt.Errorf("Error parsing statement: SQL=%s, error=%+v", sql, err)
}
switch ddlStmt := stmt.(type) {
case sqlparser.DDLStatement:
return ddlStmt.GetAction(), ddlStmt, nil
}
return action, ddlStmt, fmt.Errorf("Unsupported query type: %s", sql)
}
// NewOnlineDDL creates a schema change request with a self-generated UUID and RequestTime
func NewOnlineDDL(keyspace string, table string, sql string, strategy DDLStrategy, options string, requestContext string) (*OnlineDDL, error) {
u, err := createUUID("_")
@ -172,6 +187,29 @@ func (onlineDDL *OnlineDDL) ToJSON() ([]byte, error) {
return json.Marshal(onlineDDL)
}
// GetAction extracts the DDL action type from the online DDL statement
func (onlineDDL *OnlineDDL) GetAction() (action sqlparser.DDLAction, err error) {
action, _, err = getOnlineDDLAction(onlineDDL.SQL)
return action, err
}
// GetActionStr returns a string representation of the DDL action
func (onlineDDL *OnlineDDL) GetActionStr() (actionStr string, err error) {
action, err := onlineDDL.GetAction()
if err != nil {
return actionStr, err
}
switch action {
case sqlparser.CreateDDLAction:
return sqlparser.CreateStr, nil
case sqlparser.AlterDDLAction:
return sqlparser.AlterStr, nil
case sqlparser.DropDDLAction:
return sqlparser.DropStr, nil
}
return "", fmt.Errorf("Unsupported online DDL action. SQL=%s", onlineDDL.SQL)
}
// ToString returns a simple string representation of this instance
func (onlineDDL *OnlineDDL) ToString() string {
return fmt.Sprintf("OnlineDDL: keyspace=%s, table=%s, sql=%s", onlineDDL.Keyspace, onlineDDL.Table, onlineDDL.SQL)

View file

@ -20,6 +20,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"vitess.io/vitess/go/vt/sqlparser"
)
func TestCreateUUID(t *testing.T) {
@ -81,6 +82,41 @@ func TestIsOnlineDDLUUID(t *testing.T) {
}
}
func TestGetActionStr(t *testing.T) {
tt := []struct {
statement string
actionStr string
isError bool
}{
{
statement: "create table t (id int primary key)",
actionStr: sqlparser.CreateStr,
},
{
statement: "alter table t drop column c",
actionStr: sqlparser.AlterStr,
},
{
statement: "drop table t",
actionStr: sqlparser.DropStr,
},
{
statement: "rename table t to t2",
isError: true,
},
}
for _, ts := range tt {
onlineDDL := &OnlineDDL{SQL: ts.statement}
actionStr, err := onlineDDL.GetActionStr()
if ts.isError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, actionStr, ts.actionStr)
}
}
}
func TestIsOnlineDDLTableName(t *testing.T) {
names := []string{
"_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_gho",

View file

@ -19,8 +19,16 @@ package schema
import (
"regexp"
"strings"
"vitess.io/vitess/go/vt/sqlparser"
)
// NormalizedDDLQuery contains a query normalized for online DDL
type NormalizedDDLQuery struct {
SQL string
TableName sqlparser.TableName
}
var (
// ALTER TABLE
alterTableBasicPattern = `(?s)(?i)\balter\s+table\s+`
@ -64,3 +72,25 @@ func ParseAlterTableOptions(alterStatement string) (explicitSchema, explicitTabl
}
return explicitSchema, explicitTable, alterOptions
}
// NormalizeOnlineDDL normalizes a given query for OnlineDDL, possibly exploding it into multiple distinct queries
func NormalizeOnlineDDL(sql string) (normalized []*NormalizedDDLQuery, err error) {
action, ddlStmt, err := getOnlineDDLAction(sql)
if err != nil {
return normalized, err
}
switch action {
case sqlparser.DropDDLAction:
tables := ddlStmt.GetFromTables()
for _, table := range tables {
ddlStmt.SetFromTables([]sqlparser.TableName{table})
normalized = append(normalized, &NormalizedDDLQuery{SQL: sqlparser.String(ddlStmt), TableName: table})
}
return normalized, nil
}
if ddlStmt.IsFullyParsed() {
sql = sqlparser.String(ddlStmt)
}
n := &NormalizedDDLQuery{SQL: sql, TableName: ddlStmt.GetTable()}
return []*NormalizedDDLQuery{n}, nil
}
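The table-driven test in the next file exercises the same cases; as a standalone sketch of the DROP explosion described above (go/vt/schema import path assumed):

```go
package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/schema"
)

func main() {
	// A multi-table DROP is exploded into one normalized query per table.
	normalized, err := schema.NormalizeOnlineDDL("drop table t1, t2, t3")
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range normalized {
		fmt.Printf("%s -> %s\n", n.TableName.Name.String(), n.SQL)
	}
	// t1 -> drop table t1
	// t2 -> drop table t2
	// t3 -> drop table t3
}
```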

View file

@ -17,7 +17,10 @@ limitations under the License.
package schema
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseAlterTableOptions(t *testing.T) {
@ -40,14 +43,49 @@ func TestParseAlterTableOptions(t *testing.T) {
}
for query, expect := range tests {
schema, table, options := ParseAlterTableOptions(query)
if schema != expect.schema {
t.Errorf("schema: %+v, want:%+v", schema, expect.schema)
}
if table != expect.table {
t.Errorf("table: %+v, want:%+v", table, expect.table)
}
if options != expect.options {
t.Errorf("options: %+v, want:%+v", options, expect.options)
}
assert.Equal(t, expect.schema, schema)
assert.Equal(t, expect.table, table)
assert.Equal(t, expect.options, options)
}
}
func TestNormalizeOnlineDDL(t *testing.T) {
type expect struct {
sqls []string
isError bool
}
tests := map[string]expect{
"alter table t add column i int, drop column d": {sqls: []string{"alter table t add column i int, drop column d"}},
"create table t (id int primary key)": {sqls: []string{"create table t (id int primary key)"}},
"drop table t": {sqls: []string{"drop table t"}},
"drop table if exists t": {sqls: []string{"drop table if exists t"}},
"drop table t1, t2, t3": {sqls: []string{"drop table t1", "drop table t2", "drop table t3"}},
"drop table if exists t1, t2, t3": {sqls: []string{"drop table if exists t1", "drop table if exists t2", "drop table if exists t3"}},
"create index i_idx on t(id)": {sqls: []string{"alter table t add index i_idx (id)"}},
"create index i_idx on t(name(12))": {sqls: []string{"alter table t add index i_idx (`name`(12))"}},
"create index i_idx on t(id, `ts`, name(12))": {sqls: []string{"alter table t add index i_idx (id, ts, `name`(12))"}},
"create unique index i_idx on t(id)": {sqls: []string{"alter table t add unique index i_idx (id)"}},
"create index i_idx using btree on t(id)": {sqls: []string{"alter table t add index i_idx (id) using btree"}},
"create index with syntax error i_idx on t(id)": {isError: true},
"select * from t": {isError: true},
"drop database t": {isError: true},
}
for query, expect := range tests {
t.Run(query, func(t *testing.T) {
normalized, err := NormalizeOnlineDDL(query)
if expect.isError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
sqls := []string{}
for _, n := range normalized {
sql := n.SQL
sql = strings.ReplaceAll(sql, "\n", "")
sql = strings.ReplaceAll(sql, "\t", "")
sqls = append(sqls, sql)
}
assert.Equal(t, expect.sqls, sqls)
}
})
}
}

View file

@ -157,9 +157,6 @@ func (exec *TabletExecutor) isOnlineSchemaDDL(ddl sqlparser.DDLStatement) (isOnl
if ddl == nil {
return false, strategy, options
}
if ddl.GetAction() != sqlparser.AlterDDLAction {
return false, strategy, options
}
strategy, options, _ = schema.ParseDDLStrategy(exec.ddlStrategy)
if strategy != schema.DDLStrategyNormal {
return true, strategy, options
@ -214,6 +211,31 @@ func (exec *TabletExecutor) preflightSchemaChanges(ctx context.Context, sqls []s
return err
}
// executeSQL executes a single SQL statement either as online DDL or synchronously on all tablets.
// In the online DDL case, the query may be exploded into multiple queries during normalization.
func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, execResult *ExecuteResult) error {
stat, err := sqlparser.Parse(sql)
if err != nil {
return err
}
switch ddl := stat.(type) {
case sqlparser.DDLStatement:
if isOnlineDDL, strategy, options := exec.isOnlineSchemaDDL(ddl); isOnlineDDL {
exec.wr.Logger().Infof("Received online DDL request. strategy=%+v", strategy)
normalizedQueries, err := schema.NormalizeOnlineDDL(sql)
if err != nil {
return err
}
for _, normalized := range normalizedQueries {
exec.executeOnlineDDL(ctx, execResult, normalized.SQL, normalized.TableName.Name.String(), strategy, options)
}
return nil
}
}
exec.executeOnAllTablets(ctx, execResult, sql)
return nil
}
// Execute applies schema changes
func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *ExecuteResult {
execResult := ExecuteResult{}
@ -250,25 +272,11 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute
for index, sql := range sqls {
execResult.CurSQLIndex = index
stat, err := sqlparser.Parse(sql)
if err != nil {
if err := exec.executeSQL(ctx, sql, &execResult); err != nil {
execResult.ExecutorErr = err.Error()
return &execResult
}
isOnlineDDL, strategy, options := exec.isOnlineSchemaDDL(nil)
tableName := ""
switch ddl := stat.(type) {
case sqlparser.DDLStatement:
tableName = ddl.GetTable().Name.String()
isOnlineDDL, strategy, options = exec.isOnlineSchemaDDL(ddl)
}
exec.wr.Logger().Infof("Received DDL request. strategy=%+v", strategy)
if isOnlineDDL {
exec.executeOnlineDDL(ctx, &execResult, sql, tableName, strategy, options)
} else {
exec.executeOnAllTablets(ctx, &execResult, sql)
}
if len(execResult.FailedShards) > 0 {
break
}
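To make the new control flow concrete, here is a hedged sketch of the decision executeSQL now makes, pulled out of the executor so it can run on its own. The decide helper is hypothetical (not part of this diff), and the import paths are assumed:

```go
package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/schema"
	"vitess.io/vitess/go/vt/sqlparser"
)

// decide mirrors the branch in executeSQL: DDL statements under a non-normal
// strategy get normalized (and possibly exploded); everything else runs as-is.
func decide(sql, ddlStrategy string) ([]string, error) {
	stmt, err := sqlparser.Parse(sql)
	if err != nil {
		return nil, err
	}
	if _, ok := stmt.(sqlparser.DDLStatement); ok {
		strategy, _, _ := schema.ParseDDLStrategy(ddlStrategy)
		if strategy != schema.DDLStrategyNormal {
			normalized, err := schema.NormalizeOnlineDDL(sql)
			if err != nil {
				return nil, err
			}
			var sqls []string
			for _, n := range normalized {
				sqls = append(sqls, n.SQL)
			}
			return sqls, nil
		}
	}
	return []string{sql}, nil
}

func main() {
	sqls, err := decide("drop table t1, t2", "gh-ost")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sqls) // [drop table t1 drop table t2]
}
```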

View file

@ -236,7 +236,8 @@ func TestIsOnlineSchemaDDL(t *testing.T) {
{
query: "CREATE TABLE t(id int)",
ddlStrategy: "gh-ost",
isOnlineDDL: false,
isOnlineDDL: true,
strategy: schema.DDLStrategyGhost,
},
{
query: "ALTER TABLE t ADD COLUMN i INT",

View file

@ -57,6 +57,8 @@ const (
StmtSRollback
StmtRelease
StmtVStream
StmtLockTables
StmtUnlockTables
)
//ASTToStatementType returns a StatementType from an AST stmt
@ -74,7 +76,7 @@ func ASTToStatementType(stmt Statement) StatementType {
return StmtSet
case *Show:
return StmtShow
case DDLStatement, DBDDLStatement:
case DDLStatement, DBDDLStatement, *AlterVschema:
return StmtDDL
case *Use:
return StmtUse
@ -94,6 +96,10 @@ func ASTToStatementType(stmt Statement) StatementType {
return StmtSRollback
case *Release:
return StmtRelease
case *LockTables:
return StmtLockTables
case *UnlockTables:
return StmtUnlockTables
default:
return StmtUnknown
}
@ -151,6 +157,10 @@ func Preview(sql string) StatementType {
return StmtDelete
case "savepoint":
return StmtSavepoint
case "lock":
return StmtLockTables
case "unlock":
return StmtUnlockTables
}
// For the following statements it is not sufficient to rely
// on loweredFirstWord. This is because they are not statements
@ -231,6 +241,10 @@ func (s StatementType) String() string {
return "SAVEPOINT_ROLLBACK"
case StmtRelease:
return "RELEASE"
case StmtLockTables:
return "LOCK_TABLES"
case StmtUnlockTables:
return "UNLOCK_TABLES"
default:
return "UNKNOWN"
}
@ -255,18 +269,6 @@ func IsDMLStatement(stmt Statement) bool {
return false
}
//IsVschemaDDL returns true if the query is an Vschema alter ddl.
func IsVschemaDDL(ddl DDLStatement) bool {
switch ddlStatement := ddl.(type) {
case *DDL:
switch ddlStatement.Action {
case CreateVindexDDLAction, DropVindexDDLAction, AddVschemaTableDDLAction, DropVschemaTableDDLAction, AddColVindexDDLAction, DropColVindexDDLAction, AddSequenceDDLAction, AddAutoIncDDLAction:
return true
}
}
return false
}
// SplitAndExpression breaks up the Expr into AND-separated conditions
// and appends them to filters. Outer parenthesis are removed. Precedence
// should be taken into account if expressions are recombined.
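A small sketch of the new statement classification, using only functions that appear in this diff (sqlparser import path as used above):

```go
package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// Preview classifies by the first keyword, so "lock"/"unlock" map to the new types.
	fmt.Println(sqlparser.Preview("lock tables t read")) // LOCK_TABLES
	fmt.Println(sqlparser.Preview("unlock tables"))      // UNLOCK_TABLES

	// ASTToStatementType classifies the parsed AST the same way.
	stmt, err := sqlparser.Parse("lock tables t read")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sqlparser.ASTToStatementType(stmt)) // LOCK_TABLES
}
```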

View file

@ -63,13 +63,11 @@ type (
GetAction() DDLAction
GetOptLike() *OptLike
GetTableSpec() *TableSpec
GetVindexSpec() *VindexSpec
GetFromTables() TableNames
GetToTables() TableNames
GetAutoIncSpec() *AutoIncSpec
GetVindexCols() []ColIdent
AffectedTables() TableNames
SetTable(qualifier string, name string)
SetFromTables(tables TableNames)
Statement
}
@ -275,6 +273,12 @@ type (
TableSpec *TableSpec
OptLike *OptLike
PartitionSpec *PartitionSpec
}
// AlterVschema represents an ALTER VSCHEMA statement.
AlterVschema struct {
Action DDLAction
Table TableName
// VindexSpec is set for CreateVindexDDLAction, DropVindexDDLAction, AddColVindexDDLAction, DropColVindexDDLAction.
VindexSpec *VindexSpec
@ -290,7 +294,6 @@ type (
CreateIndex struct {
Constraint string
Name ColIdent
IndexType string
Table TableName
Columns []*IndexColumn
Options []*IndexOption
@ -306,6 +309,18 @@ type (
FullyParsed bool
}
// CreateView represents a CREATE VIEW query
CreateView struct {
ViewName TableName
Algorithm string
Definer string
Security string
Columns Columns
Select SelectStatement
CheckOption string
IsReplace bool
}
// DDLAction is an enum for DDL.Action
DDLAction int8
@ -371,6 +386,26 @@ type (
// It should be used only as an indicator. It does not contain
// the full AST for the statement.
OtherAdmin struct{}
// LockType is an enum for Lock Types
LockType int8
// TableAndLockType contains table and lock association
TableAndLockType struct {
Table TableExpr
Lock LockType
}
// TableAndLockTypes is a slice of TableAndLockType
TableAndLockTypes []*TableAndLockType
// LockTables represents the lock statement
LockTables struct {
Tables TableAndLockTypes
}
// UnlockTables represents the unlock statement
UnlockTables struct{}
)
func (*Union) iStatement() {}
@ -403,9 +438,14 @@ func (*CreateIndex) iStatement() {}
func (*CreateDatabase) iStatement() {}
func (*AlterDatabase) iStatement() {}
func (*CreateTable) iStatement() {}
func (*CreateView) iStatement() {}
func (*LockTables) iStatement() {}
func (*UnlockTables) iStatement() {}
func (*AlterVschema) iStatement() {}
func (*DDL) iDDLStatement() {}
func (*CreateIndex) iDDLStatement() {}
func (*CreateView) iDDLStatement() {}
func (*CreateTable) iDDLStatement() {}
// IsFullyParsed implements the DDLStatement interface
@ -423,6 +463,11 @@ func (node *CreateTable) IsFullyParsed() bool {
return node.FullyParsed
}
// IsFullyParsed implements the DDLStatement interface
func (node *CreateView) IsFullyParsed() bool {
return true
}
// GetTable implements the DDLStatement interface
func (node *CreateIndex) GetTable() TableName {
return node.Table
@ -433,11 +478,21 @@ func (node *CreateTable) GetTable() TableName {
return node.Table
}
// GetTable implements the DDLStatement interface
func (node *CreateView) GetTable() TableName {
return node.ViewName
}
// GetTable implements the DDLStatement interface
func (node *DDL) GetTable() TableName {
return node.Table
}
// GetAction implements the DDLStatement interface
func (node *DDL) GetAction() DDLAction {
return node.Action
}
// GetAction implements the DDLStatement interface
func (node *CreateIndex) GetAction() DDLAction {
return AlterDDLAction
@ -448,6 +503,11 @@ func (node *CreateTable) GetAction() DDLAction {
return CreateDDLAction
}
// GetAction implements the DDLStatement interface
func (node *CreateView) GetAction() DDLAction {
return CreateDDLAction
}
// GetOptLike implements the DDLStatement interface
func (node *DDL) GetOptLike() *OptLike {
return node.OptLike
@ -463,6 +523,11 @@ func (node *CreateIndex) GetOptLike() *OptLike {
return nil
}
// GetOptLike implements the DDLStatement interface
func (node *CreateView) GetOptLike() *OptLike {
return nil
}
// GetTableSpec implements the DDLStatement interface
func (node *DDL) GetTableSpec() *TableSpec {
return node.TableSpec
@ -478,18 +543,8 @@ func (node *CreateIndex) GetTableSpec() *TableSpec {
return nil
}
// GetVindexSpec implements the DDLStatement interface
func (node *DDL) GetVindexSpec() *VindexSpec {
return node.VindexSpec
}
// GetVindexSpec implements the DDLStatement interface
func (node *CreateIndex) GetVindexSpec() *VindexSpec {
return nil
}
// GetVindexSpec implements the DDLStatement interface
func (node *CreateTable) GetVindexSpec() *VindexSpec {
// GetTableSpec implements the DDLStatement interface
func (node *CreateView) GetTableSpec() *TableSpec {
return nil
}
@ -508,6 +563,31 @@ func (node *CreateTable) GetFromTables() TableNames {
return nil
}
// GetFromTables implements the DDLStatement interface
func (node *CreateView) GetFromTables() TableNames {
return nil
}
// SetFromTables implements DDLStatement.
func (node *DDL) SetFromTables(tables TableNames) {
node.FromTables = tables
}
// SetFromTables implements DDLStatement.
func (node *CreateIndex) SetFromTables(tables TableNames) {
// irrelevant
}
// SetFromTables implements DDLStatement.
func (node *CreateTable) SetFromTables(tables TableNames) {
// irrelevant
}
// SetFromTables implements DDLStatement.
func (node *CreateView) SetFromTables(tables TableNames) {
// irrelevant
}
// GetToTables implements the DDLStatement interface
func (node *DDL) GetToTables() TableNames {
return node.ToTables
@ -518,46 +598,16 @@ func (node *CreateIndex) GetToTables() TableNames {
return nil
}
// GetToTables implements the DDLStatement interface
func (node *CreateView) GetToTables() TableNames {
return nil
}
// GetToTables implements the DDLStatement interface
func (node *CreateTable) GetToTables() TableNames {
return nil
}
// GetAutoIncSpec implements the DDLStatement interface
func (node *DDL) GetAutoIncSpec() *AutoIncSpec {
return node.AutoIncSpec
}
// GetAutoIncSpec implements the DDLStatement interface
func (node *CreateIndex) GetAutoIncSpec() *AutoIncSpec {
return nil
}
// GetAutoIncSpec implements the DDLStatement interface
func (node *CreateTable) GetAutoIncSpec() *AutoIncSpec {
return nil
}
// GetVindexCols implements the DDLStatement interface
func (node *DDL) GetVindexCols() []ColIdent {
return node.VindexCols
}
// GetVindexCols implements the DDLStatement interface
func (node *CreateIndex) GetVindexCols() []ColIdent {
return nil
}
// GetVindexCols implements the DDLStatement interface
func (node *CreateTable) GetVindexCols() []ColIdent {
return nil
}
// GetAction implements the DDLStatement interface
func (node *DDL) GetAction() DDLAction {
return node.Action
}
// AffectedTables returns the list table names affected by the DDLStatement.
func (node *DDL) AffectedTables() TableNames {
if node.Action == RenameDDLAction || node.Action == DropDDLAction {
@ -579,6 +629,11 @@ func (node *CreateTable) AffectedTables() TableNames {
return TableNames{node.Table}
}
// AffectedTables implements DDLStatement.
func (node *CreateView) AffectedTables() TableNames {
return TableNames{node.ViewName}
}
// SetTable implements DDLStatement.
func (node *CreateIndex) SetTable(qualifier string, name string) {
node.Table.Qualifier = NewTableIdent(qualifier)
@ -597,6 +652,12 @@ func (node *CreateTable) SetTable(qualifier string, name string) {
node.Table.Name = NewTableIdent(name)
}
// SetTable implements DDLStatement.
func (node *CreateView) SetTable(qualifier string, name string) {
node.ViewName.Qualifier = NewTableIdent(qualifier)
node.ViewName.Name = NewTableIdent(name)
}
func (*DropDatabase) iDBDDLStatement() {}
func (*CreateDatabase) iDBDDLStatement() {}
func (*AlterDatabase) iDBDDLStatement() {}
@ -636,40 +697,53 @@ func (node *AlterDatabase) GetDatabaseName() string {
// of SelectStatement.
func (*ParenSelect) iStatement() {}
//ShowInternal will represent all the show statement types.
type ShowInternal interface {
isShowInternal()
SQLNode
}
type (
//ShowLegacy is of ShowInternal type, holds the legacy show ast struct.
type ShowLegacy struct {
Extended string
Type string
OnTable TableName
Table TableName
ShowTablesOpt *ShowTablesOpt
Scope Scope
ShowCollationFilterOpt Expr
}
// ShowInternal will represent all the show statement types.
ShowInternal interface {
isShowInternal()
SQLNode
}
//ShowColumns is of ShowInternal type, holds the show columns statement.
type ShowColumns struct {
Full string
Table TableName
DbName string
Filter *ShowFilter
}
// ShowLegacy is of ShowInternal type, holds the legacy show ast struct.
ShowLegacy struct {
Extended string
Type string
OnTable TableName
Table TableName
ShowTablesOpt *ShowTablesOpt
Scope Scope
ShowCollationFilterOpt Expr
}
// ShowTableStatus is of ShowInternal type, holds SHOW TABLE STATUS queries.
type ShowTableStatus struct {
DatabaseName string
Filter *ShowFilter
}
// ShowColumns is of ShowInternal type, holds the show columns statement.
ShowColumns struct {
Full string
Table TableName
DbName string
Filter *ShowFilter
}
// ShowTableStatus is of ShowInternal type, holds SHOW TABLE STATUS queries.
ShowTableStatus struct {
DatabaseName string
Filter *ShowFilter
}
// ShowCommandType represents the show statement type.
ShowCommandType int8
// ShowBasic is of ShowInternal type, holds Simple SHOW queries with a filter.
ShowBasic struct {
Command ShowCommandType
Filter *ShowFilter
}
)
func (*ShowLegacy) isShowInternal() {}
func (*ShowColumns) isShowInternal() {}
func (*ShowTableStatus) isShowInternal() {}
func (*ShowBasic) isShowInternal() {}
// InsertRows represents the rows for an INSERT statement.
type InsertRows interface {
@ -1460,6 +1534,14 @@ func (node *DDL) Format(buf *TrackedBuffer) {
}
case FlushDDLAction:
buf.astPrintf(node, "%s", FlushStr)
default:
buf.astPrintf(node, "%s table %v", node.Action.ToString(), node.Table)
}
}
// Format formats the node.
func (node *AlterVschema) Format(buf *TrackedBuffer) {
switch node.Action {
case CreateVindexDDLAction:
buf.astPrintf(node, "alter vschema create vindex %v %v", node.Table, node.VindexSpec)
case DropVindexDDLAction:
@ -2434,6 +2516,11 @@ func (node *ShowTableStatus) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "%v", node.Filter)
}
// Format formats the node.
func (node *ShowBasic) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "show%s%v", node.Command.ToString(), node.Filter)
}
// Format formats the node.
func (node *SelectInto) Format(buf *TrackedBuffer) {
if node == nil {
@ -2448,15 +2535,13 @@ func (node *SelectInto) Format(buf *TrackedBuffer) {
// Format formats the node.
func (node *CreateIndex) Format(buf *TrackedBuffer) {
buf.WriteString("create")
buf.astPrintf(node, "alter table %v add", node.Table)
if node.Constraint != "" {
buf.WriteString(" " + node.Constraint)
}
buf.astPrintf(node, " index %v", node.Name)
if node.IndexType != "" {
buf.WriteString(" using " + node.IndexType)
}
buf.astPrintf(node, " on %v (", node.Table)
buf.WriteString(" (")
for i, col := range node.Columns {
if i != 0 {
buf.astPrintf(node, ", %v", col.Column)
@ -2472,6 +2557,9 @@ func (node *CreateIndex) Format(buf *TrackedBuffer) {
}
buf.astPrintf(node, ")")
for _, opt := range node.Options {
//if opt == nil {
// continue
//}
buf.WriteString(" " + strings.ToLower(opt.Name))
if opt.String != "" {
buf.WriteString(" " + opt.String)
@ -2529,3 +2617,38 @@ func (node *CreateTable) Format(buf *TrackedBuffer) {
buf.astPrintf(node, " %v", node.TableSpec)
}
}
// Format formats the node.
func (node *CreateView) Format(buf *TrackedBuffer) {
buf.WriteString("create")
if node.IsReplace {
buf.WriteString(" or replace")
}
if node.Algorithm != "" {
buf.astPrintf(node, " algorithm = %s", node.Algorithm)
}
if node.Definer != "" {
buf.astPrintf(node, " definer = %s", node.Definer)
}
if node.Security != "" {
buf.astPrintf(node, " sql security %s", node.Security)
}
buf.astPrintf(node, " view %v", node.ViewName)
buf.astPrintf(node, "%v as %v", node.Columns, node.Select)
if node.CheckOption != "" {
buf.astPrintf(node, " with %s check option", node.CheckOption)
}
}
// Format formats the LockTables node.
func (node *LockTables) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "lock tables %v %s", node.Tables[0].Table, node.Tables[0].Lock.ToString())
for i := 1; i < len(node.Tables); i++ {
buf.astPrintf(node, ", %v %s", node.Tables[i].Table, node.Tables[i].Lock.ToString())
}
}
// Format formats the UnlockTables node.
func (node *UnlockTables) Format(buf *TrackedBuffer) {
buf.WriteString("unlock tables")
}
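A sketch of the new AST nodes in use, round-tripping through Parse and String; the expected output strings are the ones asserted later in parse_test.go:

```go
package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// LOCK TABLES now has its own node and Format implementation.
	stmt, err := sqlparser.Parse("lock tables t read")
	if err != nil {
		log.Fatal(err)
	}
	lock := stmt.(*sqlparser.LockTables)
	fmt.Println(len(lock.Tables), lock.Tables[0].Lock.ToString()) // 1 read
	fmt.Println(sqlparser.String(stmt))                           // lock tables t read

	// CREATE VIEW parses into the new CreateView DDLStatement.
	viewStmt, err := sqlparser.Parse("create view v as select * from t")
	if err != nil {
		log.Fatal(err)
	}
	view := viewStmt.(*sqlparser.CreateView)
	fmt.Println(view.GetTable().Name.String()) // v
	fmt.Println(sqlparser.String(view))        // create view v as select * from t
}
```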

View file

@ -1182,6 +1182,50 @@ func (node CollateAndCharsetType) ToString() string {
}
}
// ToString returns the type as a string
func (ty LockType) ToString() string {
switch ty {
case Read:
return ReadStr
case ReadLocal:
return ReadLocalStr
case Write:
return WriteStr
case LowPriorityWrite:
return LowPriorityWriteStr
default:
return "Unknown LockType"
}
}
// ToString returns ShowCommandType as a string
func (ty ShowCommandType) ToString() string {
switch ty {
case Charset:
return CharsetStr
case Collation:
return CollationStr
case Database:
return DatabaseStr
case Function:
return FunctionStr
case Privilege:
return PrivilegeStr
case Procedure:
return ProcedureStr
case StatusGlobal:
return StatusGlobalStr
case StatusSession:
return StatusSessionStr
case VariableGlobal:
return VariableGlobalStr
case VariableSession:
return VariableSessionStr
default:
return "Unknown ShowCommandType"
}
}
// AtCount represents the '@' count in ColIdent
type AtCount int
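These string helpers feed directly into the Format methods added above; a tiny sketch, with the sqlparser import path as elsewhere in this change:

```go
package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// LockType strings are used verbatim by LockTables.Format.
	fmt.Println(sqlparser.LowPriorityWrite.ToString()) // low_priority write

	// ShowCommandType strings carry a leading space so ShowBasic.Format can
	// print "show" + command; e.g. "show schemas" normalizes to "show databases".
	stmt, err := sqlparser.Parse("show schemas")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sqlparser.String(stmt)) // show databases
}
```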

View file

@ -33,16 +33,16 @@ type RewriteASTResult struct {
}
// PrepareAST will normalize the query
func PrepareAST(in Statement, bindVars map[string]*querypb.BindVariable, prefix string, parameterize bool) (*RewriteASTResult, error) {
func PrepareAST(in Statement, bindVars map[string]*querypb.BindVariable, prefix string, parameterize bool, keyspace string) (*RewriteASTResult, error) {
if parameterize {
Normalize(in, bindVars, prefix)
}
return RewriteAST(in)
return RewriteAST(in, keyspace)
}
// RewriteAST rewrites the whole AST, replacing function calls and adding column aliases to queries
func RewriteAST(in Statement) (*RewriteASTResult, error) {
er := newExpressionRewriter()
func RewriteAST(in Statement, keyspace string) (*RewriteASTResult, error) {
er := newExpressionRewriter(keyspace)
er.shouldRewriteDatabaseFunc = shouldRewriteDatabaseFunc(in)
setRewriter := &setNormalizer{}
out, ok := Rewrite(in, er.rewrite, setRewriter.rewriteSetComingUp).(Statement)
@ -86,10 +86,12 @@ type expressionRewriter struct {
// we need to know this to make a decision if we can safely rewrite JOIN USING => JOIN ON
hasStarInSelect bool
keyspace string
}
func newExpressionRewriter() *expressionRewriter {
return &expressionRewriter{bindVars: &BindVarNeeds{}}
func newExpressionRewriter(keyspace string) *expressionRewriter {
return &expressionRewriter{bindVars: &BindVarNeeds{}, keyspace: keyspace}
}
const (
@ -110,7 +112,7 @@ const (
)
func (er *expressionRewriter) rewriteAliasedExpr(node *AliasedExpr) (*BindVarNeeds, error) {
inner := newExpressionRewriter()
inner := newExpressionRewriter(er.keyspace)
inner.shouldRewriteDatabaseFunc = er.shouldRewriteDatabaseFunc
tmp := Rewrite(node.Expr, inner.rewrite, nil)
newExpr, ok := tmp.(Expr)
@ -157,52 +159,71 @@ func (er *expressionRewriter) rewrite(cursor *Cursor) bool {
}
case *Subquery:
er.unnestSubQueries(cursor, node)
case JoinCondition:
if node.Using != nil && !er.hasStarInSelect {
joinTableExpr, ok := cursor.Parent().(*JoinTableExpr)
if !ok {
// this is not possible with the current AST
break
}
leftTable, leftOk := joinTableExpr.LeftExpr.(*AliasedTableExpr)
rightTable, rightOk := joinTableExpr.RightExpr.(*AliasedTableExpr)
if !(leftOk && rightOk) {
// we only deal with simple FROM A JOIN B USING queries at the moment
break
}
lft, err := leftTable.TableName()
if err != nil {
er.err = err
break
}
rgt, err := rightTable.TableName()
if err != nil {
er.err = err
break
}
newCondition := JoinCondition{}
for _, colIdent := range node.Using {
lftCol := NewColNameWithQualifier(colIdent.String(), lft)
rgtCol := NewColNameWithQualifier(colIdent.String(), rgt)
cmp := &ComparisonExpr{
Operator: EqualOp,
Left: lftCol,
Right: rgtCol,
}
if newCondition.On == nil {
newCondition.On = cmp
} else {
newCondition.On = &AndExpr{Left: newCondition.On, Right: cmp}
}
}
cursor.Replace(newCondition)
er.rewriteJoinCondition(cursor, node)
case *AliasedTableExpr:
if !SystemSchema(er.keyspace) {
break
}
aliasTableName, ok := node.Expr.(TableName)
if !ok {
return true
}
// Qualifier should not be added to dual table
if aliasTableName.Name.String() == "dual" {
break
}
if er.keyspace != "" && aliasTableName.Qualifier.IsEmpty() {
aliasTableName.Qualifier = NewTableIdent(er.keyspace)
node.Expr = aliasTableName
cursor.Replace(node)
}
}
return true
}
func (er *expressionRewriter) rewriteJoinCondition(cursor *Cursor, node JoinCondition) {
if node.Using != nil && !er.hasStarInSelect {
joinTableExpr, ok := cursor.Parent().(*JoinTableExpr)
if !ok {
// this is not possible with the current AST
return
}
leftTable, leftOk := joinTableExpr.LeftExpr.(*AliasedTableExpr)
rightTable, rightOk := joinTableExpr.RightExpr.(*AliasedTableExpr)
if !(leftOk && rightOk) {
// we only deal with simple FROM A JOIN B USING queries at the moment
return
}
lft, err := leftTable.TableName()
if err != nil {
er.err = err
return
}
rgt, err := rightTable.TableName()
if err != nil {
er.err = err
return
}
newCondition := JoinCondition{}
for _, colIdent := range node.Using {
lftCol := NewColNameWithQualifier(colIdent.String(), lft)
rgtCol := NewColNameWithQualifier(colIdent.String(), rgt)
cmp := &ComparisonExpr{
Operator: EqualOp,
Left: lftCol,
Right: rgtCol,
}
if newCondition.On == nil {
newCondition.On = cmp
} else {
newCondition.On = &AndExpr{Left: newCondition.On, Right: cmp}
}
}
cursor.Replace(newCondition)
}
}
func (er *expressionRewriter) sysVarRewrite(cursor *Cursor, node *ColName) {
lowered := node.Name.Lowered()
switch lowered {
@ -287,3 +308,11 @@ func (er *expressionRewriter) unnestSubQueries(cursor *Cursor, subquery *Subquer
func bindVarExpression(name string) Expr {
return NewArgument([]byte(":" + name))
}
// SystemSchema returns true if the schema passed is a system schema
func SystemSchema(schema string) bool {
return strings.EqualFold(schema, "information_schema") ||
strings.EqualFold(schema, "performance_schema") ||
strings.EqualFold(schema, "sys") ||
strings.EqualFold(schema, "mysql")
}
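A sketch of the new keyspace-aware entry point; parameterize is turned off here so only the rewrite runs, and the querypb import path is an assumption from the repo layout:

```go
package main

import (
	"fmt"
	"log"

	querypb "vitess.io/vitess/go/vt/proto/query"
	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	stmt, err := sqlparser.Parse("select * from processlist")
	if err != nil {
		log.Fatal(err)
	}

	// The keyspace argument only has an effect for system schemas; unqualified
	// tables are then qualified with it.
	bindVars := map[string]*querypb.BindVariable{}
	result, err := sqlparser.PrepareAST(stmt, bindVars, "vtg", false, "information_schema")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sqlparser.String(result.AST)) // select * from information_schema.processlist
}
```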

View file

@ -172,7 +172,7 @@ func TestRewrites(in *testing.T) {
stmt, err := Parse(tc.in)
require.NoError(err)
result, err := RewriteAST(stmt)
result, err := RewriteAST(stmt, "ks") // passing `ks` just to test that no rewriting happens as it is not a system schema
require.NoError(err)
expected, err := Parse(tc.expected)
@ -199,3 +199,53 @@ func TestRewrites(in *testing.T) {
})
}
}
func TestRewritesWithDefaultKeyspace(in *testing.T) {
tests := []myTestCase{{
in: "SELECT 1 from x.test",
expected: "SELECT 1 from x.test", // no change
}, {
in: "SELECT x.col as c from x.test",
expected: "SELECT x.col as c from x.test", // no change
}, {
in: "SELECT 1 from test",
expected: "SELECT 1 from sys.test",
}, {
in: "SELECT 1 from test as t",
expected: "SELECT 1 from sys.test as t",
}, {
in: "SELECT 1 from `test 24` as t",
expected: "SELECT 1 from sys.`test 24` as t",
}, {
in: "SELECT 1, (select 1 from test) from x.y",
expected: "SELECT 1, (select 1 from sys.test) from x.y",
}, {
in: "SELECT 1 from (select 2 from test) t",
expected: "SELECT 1 from (select 2 from sys.test) t",
}, {
in: "SELECT 1 from test where exists (select 2 from test)",
expected: "SELECT 1 from sys.test where exists (select 2 from sys.test)",
}, {
in: "SELECT 1 from dual",
expected: "SELECT 1 from dual",
}, {
in: "SELECT (select 2 from dual) from DUAL",
expected: "SELECT 2 as `(select 2 from dual)` from DUAL",
}}
for _, tc := range tests {
in.Run(tc.in, func(t *testing.T) {
require := require.New(t)
stmt, err := Parse(tc.in)
require.NoError(err)
result, err := RewriteAST(stmt, "sys")
require.NoError(err)
expected, err := Parse(tc.expected)
require.NoError(err, "test expectation does not parse [%s]", tc.expected)
assert.Equal(t, String(expected), String(result.AST))
})
}
}

View file

@ -147,7 +147,6 @@ const (
// ConvertType.Operator
CharacterSetStr = " character set"
NoOperatorStr = ""
CharsetStr = "charset"
// CollateAndCharset.Type
CollateStr = " collate"
@ -188,6 +187,24 @@ const (
VitessStr = "vitess"
TraditionalStr = "traditional"
AnalyzeStr = "analyze"
// Lock Types
ReadStr = "read"
ReadLocalStr = "read local"
WriteStr = "write"
LowPriorityWriteStr = "low_priority write"
// ShowCommand Types
CharsetStr = " charset"
CollationStr = " collation"
DatabaseStr = " databases"
FunctionStr = " function status"
PrivilegeStr = " privileges"
ProcedureStr = " procedure status"
StatusGlobalStr = " global status"
StatusSessionStr = " status"
VariableGlobalStr = " global variables"
VariableSessionStr = " variables"
)
// Constants for Enum type - AccessMode
@ -380,3 +397,27 @@ const (
CollateType CollateAndCharsetType = iota
CharacterSetType
)
// LockType constants
const (
UnknownLockType LockType = iota
Read
ReadLocal
Write
LowPriorityWrite
)
// ShowCommandType constants
const (
UnknownCommandType ShowCommandType = iota
Charset
Collation
Database
Function
Privilege
Procedure
StatusGlobal
StatusSession
VariableGlobal
VariableSession
)

View file

@ -190,6 +190,9 @@ var (
}, {
input: "select /* column alias as string without as */ a \"b\" from t",
output: "select /* column alias as string without as */ a as b from t",
}, {
input: "select /* column alias with non_reserved keyword */ a as auto_increment from t",
output: "select /* column alias with non_reserved keyword */ a as `auto_increment` from t",
}, {
input: "select /* a.* */ a.* from t",
}, {
@ -1156,27 +1159,46 @@ var (
input: "alter vschema on a drop vindex `add`",
output: "alter vschema on a drop vindex `add`",
}, {
input: "create index a on b (col1)",
input: "create index a on b (col1)",
output: "alter table b add index a (col1)",
}, {
input: "create unique index a on b (col1)",
input: "create unique index a on b (col1)",
output: "alter table b add unique index a (col1)",
}, {
input: "create unique index a using foo on b (col1 desc)",
input: "create unique index a using foo on b (col1 desc)",
output: "alter table b add unique index a (col1 desc) using foo",
}, {
input: "create fulltext index a using foo on b (col1)",
input: "create fulltext index a on b (col1) with parser a",
output: "alter table b add fulltext index a (col1) with parser a",
}, {
input: "create spatial index a using foo on b (col1)",
input: "create spatial index a on b (col1)",
output: "alter table b add spatial index a (col1)",
}, {
input: "create index a on b (col1) using btree key_block_size 12 with parser 'a' comment 'string' algorithm inplace lock none",
input: "create fulltext index a on b (col1) key_block_size=12 with parser a comment 'string' algorithm inplace lock none",
output: "alter table b add fulltext index a (col1) key_block_size 12 with parser a comment 'string' algorithm inplace lock none",
}, {
input: "create index a on b ((col1 + col2), (col1*col2))",
output: "create index a on b ()",
output: "alter table b add index a ()",
partialDDL: true,
}, {
input: "create view a",
output: "create table a",
input: "create algorithm = merge sql security definer view a as select * from e",
}, {
input: "create or replace view a",
output: "create table a",
input: "create view ks.a as select * from e",
}, {
input: "create algorithm = merge sql security definer view a (b,c,d) as select * from e",
output: "create algorithm = merge sql security definer view a(b, c, d) as select * from e",
}, {
input: "create algorithm = merge sql security definer view a (b,c,d) as select * from e with cascaded check option",
output: "create algorithm = merge sql security definer view a(b, c, d) as select * from e with cascaded check option",
}, {
input: "create algorithm = temptable definer = a@b.c.d view a(b,c,d) as select * from e with local check option",
output: "create algorithm = temptable definer = a@b.c.d view a(b, c, d) as select * from e with local check option",
}, {
input: "create or replace algorithm = temptable definer = a@b.c.d sql security definer view a(b,c,d) as select * from e with local check option",
output: "create or replace algorithm = temptable definer = a@b.c.d sql security definer view a(b, c, d) as select * from e with local check option",
}, {
input: "create definer = 'sa'@b.c.d view a(b,c,d) as select * from e",
output: "create definer = 'sa'@b.c.d view a(b, c, d) as select * from e",
}, {
input: "alter view a",
output: "alter table a",
@ -1250,8 +1272,7 @@ var (
input: "show create event e",
output: "show create event",
}, {
input: "show create function f",
output: "show create function",
input: "show create function f",
}, {
input: "show create procedure p",
output: "show create procedure",
@ -1275,31 +1296,26 @@ var (
output: "show databases like '%'",
}, {
input: "show schemas",
output: "show schemas",
output: "show databases",
}, {
input: "show schemas like '%'",
output: "show schemas like '%'",
output: "show databases like '%'",
}, {
input: "show engine INNODB",
output: "show engine",
}, {
input: "show engines",
output: "show engines",
input: "show engines",
}, {
input: "show storage engines",
output: "show storage",
}, {
input: "show errors",
output: "show errors",
input: "show errors",
}, {
input: "show events",
output: "show events",
input: "show events",
}, {
input: "show function code func",
output: "show function",
input: "show function code func",
}, {
input: "show function status",
output: "show function",
input: "show function status",
}, {
input: "show grants for 'root@localhost'",
output: "show grants",
@ -1322,11 +1338,9 @@ var (
input: "show privileges",
output: "show privileges",
}, {
input: "show procedure code p",
output: "show procedure",
input: "show procedure code p",
}, {
input: "show procedure status",
output: "show procedure",
input: "show procedure status",
}, {
input: "show processlist",
output: "show processlist",
@ -1356,7 +1370,7 @@ var (
output: "show global status",
}, {
input: "show session status",
output: "show session status",
output: "show status",
}, {
input: "show table status",
}, {
@ -1419,11 +1433,13 @@ var (
output: "show global variables",
}, {
input: "show session variables",
output: "show session variables",
output: "show variables",
}, {
input: "show vitess_keyspaces",
input: "show vitess_keyspaces",
output: "show databases",
}, {
input: "show vitess_keyspaces like '%'",
input: "show vitess_keyspaces like '%'",
output: "show databases like '%'",
}, {
input: "show vitess_shards",
}, {
@ -1516,11 +1532,20 @@ var (
input: "optimize foo",
output: "otheradmin",
}, {
input: "lock tables foo",
output: "otheradmin",
input: "lock tables foo read",
output: "lock tables foo read",
}, {
input: "unlock tables foo",
output: "otheradmin",
input: "lock tables foo write",
output: "lock tables foo write",
}, {
input: "lock tables foo read local",
output: "lock tables foo read local",
}, {
input: "lock tables foo low_priority write",
output: "lock tables foo low_priority write",
}, {
input: "unlock tables",
output: "unlock tables",
}, {
input: "select /* EQ true */ 1 from t where a = true",
}, {
@ -1874,7 +1899,8 @@ func TestCaseSensitivity(t *testing.T) {
input: "create table A (\n\t`B` int\n)",
output: "create table A (\n\tB int\n)",
}, {
input: "create index b on A (col1 desc)",
input: "create index b on A (col1 desc)",
output: "alter table A add index b (col1 desc)",
}, {
input: "alter table A foo",
output: "alter table A",
@ -1933,8 +1959,8 @@ func TestCaseSensitivity(t *testing.T) {
input: "CREATE TABLE A (\n\t`A` int\n)",
output: "create table A (\n\tA int\n)",
}, {
input: "create view A",
output: "create table a",
input: "create view A as select * from b",
output: "create view a as select * from b",
}, {
input: "alter view A",
output: "alter table a",
@ -2036,7 +2062,7 @@ func TestKeywords(t *testing.T) {
input: "select /* share and mode as cols */ share, mode from t where share = 'foo'",
output: "select /* share and mode as cols */ `share`, `mode` from t where `share` = 'foo'",
}, {
input: "select /* unused keywords as cols */ write, varying from t where trailing = 'foo'",
input: "select /* unused keywords as cols */ `write`, varying from t where trailing = 'foo'",
output: "select /* unused keywords as cols */ `write`, `varying` from t where `trailing` = 'foo'",
}, {
input: "select status from t",

View file

@ -40,6 +40,28 @@ func replaceAliasedTableExprPartitions(newNode, parent SQLNode) {
parent.(*AliasedTableExpr).Partitions = newNode.(Partitions)
}
func replaceAlterVschemaAutoIncSpec(newNode, parent SQLNode) {
parent.(*AlterVschema).AutoIncSpec = newNode.(*AutoIncSpec)
}
func replaceAlterVschemaTable(newNode, parent SQLNode) {
parent.(*AlterVschema).Table = newNode.(TableName)
}
type replaceAlterVschemaVindexCols int
func (r *replaceAlterVschemaVindexCols) replace(newNode, container SQLNode) {
container.(*AlterVschema).VindexCols[int(*r)] = newNode.(ColIdent)
}
func (r *replaceAlterVschemaVindexCols) inc() {
*r++
}
func replaceAlterVschemaVindexSpec(newNode, parent SQLNode) {
parent.(*AlterVschema).VindexSpec = newNode.(*VindexSpec)
}
func replaceAndExprLeft(newNode, parent SQLNode) {
parent.(*AndExpr).Left = newNode.(Expr)
}
@ -188,6 +210,18 @@ func replaceCreateTableTableSpec(newNode, parent SQLNode) {
parent.(*CreateTable).TableSpec = newNode.(*TableSpec)
}
func replaceCreateViewColumns(newNode, parent SQLNode) {
parent.(*CreateView).Columns = newNode.(Columns)
}
func replaceCreateViewSelect(newNode, parent SQLNode) {
parent.(*CreateView).Select = newNode.(SelectStatement)
}
func replaceCreateViewViewName(newNode, parent SQLNode) {
parent.(*CreateView).ViewName = newNode.(TableName)
}
func replaceCurTimeFuncExprFsp(newNode, parent SQLNode) {
parent.(*CurTimeFuncExpr).Fsp = newNode.(Expr)
}
@ -196,10 +230,6 @@ func replaceCurTimeFuncExprName(newNode, parent SQLNode) {
parent.(*CurTimeFuncExpr).Name = newNode.(ColIdent)
}
func replaceDDLAutoIncSpec(newNode, parent SQLNode) {
parent.(*DDL).AutoIncSpec = newNode.(*AutoIncSpec)
}
func replaceDDLFromTables(newNode, parent SQLNode) {
parent.(*DDL).FromTables = newNode.(TableNames)
}
@ -224,20 +254,6 @@ func replaceDDLToTables(newNode, parent SQLNode) {
parent.(*DDL).ToTables = newNode.(TableNames)
}
type replaceDDLVindexCols int
func (r *replaceDDLVindexCols) replace(newNode, container SQLNode) {
container.(*DDL).VindexCols[int(*r)] = newNode.(ColIdent)
}
func (r *replaceDDLVindexCols) inc() {
*r++
}
func replaceDDLVindexSpec(newNode, parent SQLNode) {
parent.(*DDL).VindexSpec = newNode.(*VindexSpec)
}
func replaceDeleteComments(newNode, parent SQLNode) {
parent.(*Delete).Comments = newNode.(Comments)
}
@ -629,6 +645,10 @@ func replaceShowInternal(newNode, parent SQLNode) {
parent.(*Show).Internal = newNode.(ShowInternal)
}
func replaceShowBasicFilter(newNode, parent SQLNode) {
parent.(*ShowBasic).Filter = newNode.(*ShowFilter)
}
func replaceShowColumnsFilter(newNode, parent SQLNode) {
parent.(*ShowColumns).Filter = newNode.(*ShowFilter)
}
@ -960,6 +980,17 @@ func (a *application) apply(parent, node SQLNode, replacer replacerFunc) {
case *AlterDatabase:
case *AlterVschema:
a.apply(node, n.AutoIncSpec, replaceAlterVschemaAutoIncSpec)
a.apply(node, n.Table, replaceAlterVschemaTable)
replacerVindexCols := replaceAlterVschemaVindexCols(0)
replacerVindexColsB := &replacerVindexCols
for _, item := range n.VindexCols {
a.apply(node, item, replacerVindexColsB.replace)
replacerVindexColsB.inc()
}
a.apply(node, n.VindexSpec, replaceAlterVschemaVindexSpec)
case *AndExpr:
a.apply(node, n.Left, replaceAndExprLeft)
a.apply(node, n.Right, replaceAndExprRight)
@ -1052,25 +1083,22 @@ func (a *application) apply(parent, node SQLNode, replacer replacerFunc) {
a.apply(node, n.Table, replaceCreateTableTable)
a.apply(node, n.TableSpec, replaceCreateTableTableSpec)
case *CreateView:
a.apply(node, n.Columns, replaceCreateViewColumns)
a.apply(node, n.Select, replaceCreateViewSelect)
a.apply(node, n.ViewName, replaceCreateViewViewName)
case *CurTimeFuncExpr:
a.apply(node, n.Fsp, replaceCurTimeFuncExprFsp)
a.apply(node, n.Name, replaceCurTimeFuncExprName)
case *DDL:
a.apply(node, n.AutoIncSpec, replaceDDLAutoIncSpec)
a.apply(node, n.FromTables, replaceDDLFromTables)
a.apply(node, n.OptLike, replaceDDLOptLike)
a.apply(node, n.PartitionSpec, replaceDDLPartitionSpec)
a.apply(node, n.Table, replaceDDLTable)
a.apply(node, n.TableSpec, replaceDDLTableSpec)
a.apply(node, n.ToTables, replaceDDLToTables)
replacerVindexCols := replaceDDLVindexCols(0)
replacerVindexColsB := &replacerVindexCols
for _, item := range n.VindexCols {
a.apply(node, item, replacerVindexColsB.replace)
replacerVindexColsB.inc()
}
a.apply(node, n.VindexSpec, replaceDDLVindexSpec)
case *Default:
@ -1176,6 +1204,8 @@ func (a *application) apply(parent, node SQLNode, replacer replacerFunc) {
case *Load:
case *LockTables:
case *MatchExpr:
a.apply(node, n.Columns, replaceMatchExprColumns)
a.apply(node, n.Expr, replaceMatchExprExpr)
@ -1312,6 +1342,9 @@ func (a *application) apply(parent, node SQLNode, replacer replacerFunc) {
case *Show:
a.apply(node, n.Internal, replaceShowInternal)
case *ShowBasic:
a.apply(node, n.Filter, replaceShowBasicFilter)
case *ShowColumns:
a.apply(node, n.Filter, replaceShowColumnsFilter)
a.apply(node, n.Table, replaceShowColumnsTable)
@ -1407,6 +1440,8 @@ func (a *application) apply(parent, node SQLNode, replacer replacerFunc) {
case *UnionSelect:
a.apply(node, n.Statement, replaceUnionSelectStatement)
case *UnlockTables:
case *Update:
a.apply(node, n.Comments, replaceUpdateComments)
a.apply(node, n.Exprs, replaceUpdateExprs)

The diff for this file is not shown because of its large size.


@ -137,6 +137,9 @@ func skipToEnd(yylex interface{}) {
collateAndCharset CollateAndCharset
collateAndCharsets []CollateAndCharset
createTable *CreateTable
tableAndLockTypes []*TableAndLockType
tableAndLockType *TableAndLockType
lockType LockType
}
%token LEX_ERROR
@ -190,8 +193,8 @@ func skipToEnd(yylex interface{}) {
%token <bytes> SHOW DESCRIBE EXPLAIN DATE ESCAPE REPAIR OPTIMIZE TRUNCATE
%token <bytes> MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER
%token <bytes> VINDEX VINDEXES DIRECTORY NAME UPGRADE
%token <bytes> STATUS VARIABLES WARNINGS
%token <bytes> SEQUENCE
%token <bytes> STATUS VARIABLES WARNINGS CASCADED DEFINER OPTION SQL UNDEFINED
%token <bytes> SEQUENCE MERGE TEMPTABLE INVOKER SECURITY
// Transaction Tokens
%token <bytes> BEGIN START TRANSACTION COMMIT ROLLBACK SAVEPOINT RELEASE WORK
@ -208,16 +211,16 @@ func skipToEnd(yylex interface{}) {
// Type Modifiers
%token <bytes> NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL
// Supported SHOW tokens
// SHOW tokens
%token <bytes> COLLATION DATABASES SCHEMAS TABLES VITESS_METADATA VSCHEMA FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS EXTENDED
%token <bytes> KEYSPACES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS
%token <bytes> KEYSPACES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS CODE PRIVILEGES FUNCTION
// SET tokens
%token <bytes> NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE
// Functions
%token <bytes> CURRENT_TIMESTAMP DATABASE CURRENT_DATE
%token <bytes> CURRENT_TIME LOCALTIME LOCALTIMESTAMP
%token <bytes> CURRENT_TIME LOCALTIME LOCALTIMESTAMP CURRENT_USER
%token <bytes> UTC_DATE UTC_TIME UTC_TIMESTAMP
%token <bytes> REPLACE
%token <bytes> CONVERT CAST
@ -240,6 +243,9 @@ func skipToEnd(yylex interface{}) {
// Explain tokens
%token <bytes> FORMAT TREE VITESS TRADITIONAL
// Lock type tokens
%token <bytes> LOCAL LOW_PRIORITY
%type <statement> command
%type <selStmt> simple_select select_statement base_select union_rhs
%type <statement> explain_statement explainable_statement
@ -255,19 +261,21 @@ func skipToEnd(yylex interface{}) {
%type <boolean> default_optional
%type <statement> analyze_statement show_statement use_statement other_statement
%type <statement> begin_statement commit_statement rollback_statement savepoint_statement release_statement load_statement
%type <statement> lock_statement unlock_statement
%type <bytes2> comment_opt comment_list
%type <str> wild_opt
%type <str> wild_opt check_option_opt cascade_or_local_opt
%type <explainType> explain_format_opt
%type <insertAction> insert_or_replace
%type <bytes> explain_synonyms
%type <str> cache_opt separator_opt
%type <matchExprOption> match_option
%type <boolean> distinct_opt union_op
%type <boolean> distinct_opt union_op replace_opt
%type <expr> like_escape_opt
%type <selectExprs> select_expression_list select_expression_list_opt
%type <selectExpr> select_expression
%type <strs> select_options
%type <str> select_option
%type <str> select_option algorithm_view security_view security_view_opt
%type <str> definer_opt user
%type <expr> expression
%type <tableExprs> from_opt table_references
%type <tableExpr> table_reference table_factor join_table
@ -304,9 +312,9 @@ func skipToEnd(yylex interface{}) {
%type <limit> limit_opt
%type <selectInto> into_option
%type <str> header_opt export_options manifest_opt overwrite_opt format_opt optionally_opt
%type <str> fields_opt lines_opt terminated_by_opt starting_by_opt enclosed_by_opt escaped_by_opt constraint_opt using_opt
%type <str> fields_opt lines_opt terminated_by_opt starting_by_opt enclosed_by_opt escaped_by_opt constraint_opt
%type <lock> lock_opt
%type <columns> ins_column_list column_list
%type <columns> ins_column_list column_list column_list_opt
%type <partitions> opt_partition_clause partition_list
%type <updateExprs> on_dup_opt
%type <updateExprs> update_list
@ -331,7 +339,7 @@ func skipToEnd(yylex interface{}) {
%type <empty> as_opt work_opt savepoint_opt
%type <empty> skip_to_end ddl_skip_to_end
%type <str> charset
%type <scope> set_session_or_global show_session_or_global
%type <scope> set_session_or_global
%type <convertType> convert_type
%type <columnType> column_type
%type <columnType> int_type decimal_type numeric_type time_type char_type spatial_type
@ -354,8 +362,8 @@ func skipToEnd(yylex interface{}) {
%type <indexInfo> index_info
%type <indexColumn> index_column
%type <indexColumns> index_column_list
%type <indexOption> index_option lock_index algorithm_index
%type <indexOptions> index_option_list index_option_list_opt algorithm_lock_opt
%type <indexOption> index_option lock_index algorithm_index using_index_type
%type <indexOptions> index_option_list index_option_list_opt algorithm_lock_opt using_opt
%type <constraintInfo> constraint_info check_constraint_info
%type <partDefs> partition_definitions
%type <partDef> partition_definition
@ -366,6 +374,11 @@ func skipToEnd(yylex interface{}) {
%type <bytes> alter_object_type database_or_schema
%type <ReferenceAction> fk_reference_action fk_on_delete fk_on_update
%type <str> vitess_topo
%type <tableAndLockTypes> lock_table_list
%type <tableAndLockType> lock_table
%type <lockType> lock_type
%type <empty> session_or_local_opt
%start any_command
@ -411,6 +424,8 @@ command:
| flush_statement
| do_statement
| load_statement
| lock_statement
| unlock_statement
| /*empty*/
{
setParseTree(yylex, nil)
@ -712,17 +727,14 @@ create_statement:
| create_index_prefix '(' index_column_list ')' index_option_list_opt algorithm_lock_opt
{
$1.Columns = $3
$1.Options = append($5,$6...)
$1.Options = append($1.Options,$5...)
$1.Options = append($1.Options,$6...)
$1.FullyParsed = true
$$ = $1
}
| CREATE VIEW table_name ddl_skip_to_end
| CREATE replace_opt algorithm_view definer_opt security_view_opt VIEW table_name column_list_opt AS select_statement check_option_opt
{
$$ = &DDL{Action: CreateDDLAction, Table: $3.ToViewName()}
}
| CREATE OR REPLACE VIEW table_name ddl_skip_to_end
{
$$ = &DDL{Action: CreateDDLAction, Table: $5.ToViewName()}
$$ = &CreateView{ViewName: $7.ToViewName(), IsReplace:$2, Algorithm:$3, Definer: $4 ,Security:$5, Columns:$8, Select: $10, CheckOption: $11 }
}
| create_database_prefix create_options_opt
{
@ -731,6 +743,15 @@ create_statement:
$$ = $1
}
replace_opt:
{
$$ = false
}
| OR REPLACE
{
$$ = true
}
vindex_type_opt:
{
$$ = NewColIdent("")
@ -783,7 +804,7 @@ create_table_prefix:
create_index_prefix:
CREATE constraint_opt INDEX id_or_var using_opt ON table_name
{
$$ = &CreateIndex{Constraint: $2, Name: $4, IndexType: $5, Table: $7}
$$ = &CreateIndex{Constraint: $2, Name: $4, Options: $5, Table: $7}
setDDL(yylex, $$)
}
@ -1321,9 +1342,9 @@ index_option_list:
}
index_option:
USING id_or_var
using_index_type
{
$$ = &IndexOption{Name: string($1), String: string($2.String())}
$$ = $1
}
| KEY_BLOCK_SIZE equal_opt INTEGRAL
{
@ -1334,9 +1355,9 @@ index_option:
{
$$ = &IndexOption{Name: string($1), Value: NewStrLiteral($2)}
}
| WITH PARSER STRING
| WITH PARSER id_or_var
{
$$ = &IndexOption{Name: string($1) + " " + string($2), Value: NewStrLiteral($3)}
$$ = &IndexOption{Name: string($1) + " " + string($2), String: $3.String()}
}
equal_opt:
@ -1620,7 +1641,7 @@ alter_statement:
}
| ALTER VSCHEMA CREATE VINDEX table_name vindex_type_opt vindex_params_opt
{
$$ = &DDL{
$$ = &AlterVschema{
Action: CreateVindexDDLAction,
Table: $5,
VindexSpec: &VindexSpec{
@ -1632,7 +1653,7 @@ alter_statement:
}
| ALTER VSCHEMA DROP VINDEX table_name
{
$$ = &DDL{
$$ = &AlterVschema{
Action: DropVindexDDLAction,
Table: $5,
VindexSpec: &VindexSpec{
@ -1642,15 +1663,15 @@ alter_statement:
}
| ALTER VSCHEMA ADD TABLE table_name
{
$$ = &DDL{Action: AddVschemaTableDDLAction, Table: $5}
$$ = &AlterVschema{Action: AddVschemaTableDDLAction, Table: $5}
}
| ALTER VSCHEMA DROP TABLE table_name
{
$$ = &DDL{Action: DropVschemaTableDDLAction, Table: $5}
$$ = &AlterVschema{Action: DropVschemaTableDDLAction, Table: $5}
}
| ALTER VSCHEMA ON table_name ADD VINDEX sql_id '(' column_list ')' vindex_type_opt vindex_params_opt
{
$$ = &DDL{
$$ = &AlterVschema{
Action: AddColVindexDDLAction,
Table: $4,
VindexSpec: &VindexSpec{
@ -1663,7 +1684,7 @@ alter_statement:
}
| ALTER VSCHEMA ON table_name DROP VINDEX sql_id
{
$$ = &DDL{
$$ = &AlterVschema{
Action: DropColVindexDDLAction,
Table: $4,
VindexSpec: &VindexSpec{
@ -1673,11 +1694,11 @@ alter_statement:
}
| ALTER VSCHEMA ADD SEQUENCE table_name
{
$$ = &DDL{Action: AddSequenceDDLAction, Table: $5}
$$ = &AlterVschema{Action: AddSequenceDDLAction, Table: $5}
}
| ALTER VSCHEMA ON table_name ADD AUTO_INCREMENT sql_id USING table_name
{
$$ = &DDL{
$$ = &AlterVschema{
Action: AddAutoIncDDLAction,
Table: $4,
AutoIncSpec: &AutoIncSpec{
@ -1782,25 +1803,82 @@ analyze_statement:
}
show_statement:
SHOW BINARY id_or_var ddl_skip_to_end /* SHOW BINARY LOGS */
SHOW CHARACTER SET like_or_where_opt
{
$$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3.String()), Scope: ImplicitScope}}
}
/* SHOW CHARACTER SET and SHOW CHARSET are equivalent */
| SHOW CHARACTER SET like_or_where_opt
{
showTablesOpt := &ShowTablesOpt{Filter: $4}
$$ = &Show{&ShowLegacy{Type: CharsetStr, ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}}
$$ = &Show{&ShowBasic{Command: Charset, Filter: $4}}
}
| SHOW CHARSET like_or_where_opt
{
showTablesOpt := &ShowTablesOpt{Filter: $3}
$$ = &Show{&ShowLegacy{Type: string($2), ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}}
$$ = &Show{&ShowBasic{Command: Charset, Filter: $3}}
}
| SHOW COLLATION like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: Collation, Filter: $3}}
}
| SHOW DATABASES like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: Database, Filter: $3}}
}
| SHOW SCHEMAS like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: Database, Filter: $3}}
}
| SHOW KEYSPACES like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: Database, Filter: $3}}
}
| SHOW VITESS_KEYSPACES like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: Database, Filter: $3}}
}
| SHOW FUNCTION STATUS like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: Function, Filter: $4}}
}
| SHOW PRIVILEGES
{
$$ = &Show{&ShowBasic{Command: Privilege}}
}
| SHOW PROCEDURE STATUS like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: Procedure, Filter: $4}}
}
| SHOW session_or_local_opt STATUS like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: StatusSession, Filter: $4}}
}
| SHOW GLOBAL STATUS like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: StatusGlobal, Filter: $4}}
}
| SHOW session_or_local_opt VARIABLES like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: VariableSession, Filter: $4}}
}
| SHOW GLOBAL VARIABLES like_or_where_opt
{
$$ = &Show{&ShowBasic{Command: VariableGlobal, Filter: $4}}
}
| SHOW TABLE STATUS from_database_opt like_or_where_opt
{
$$ = &Show{&ShowTableStatus{DatabaseName:$4, Filter:$5}}
}
| SHOW full_opt columns_or_fields from_or_in table_name from_database_opt like_or_where_opt
{
$$ = &Show{&ShowColumns{Full: $2, Table: $5, DbName: $6, Filter: $7}}
}
| SHOW BINARY id_or_var ddl_skip_to_end /* SHOW BINARY LOGS */
{
$$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3.String()), Scope: ImplicitScope}}
}
| SHOW CREATE DATABASE ddl_skip_to_end
{
$$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Scope: ImplicitScope}}
}
| SHOW CREATE FUNCTION table_name
{
$$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Table: $4, Scope: ImplicitScope}}
}
/* Rule to handle SHOW CREATE EVENT, SHOW CREATE FUNCTION, etc. */
| SHOW CREATE id_or_var ddl_skip_to_end
{
@ -1822,30 +1900,14 @@ show_statement:
{
$$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Scope: ImplicitScope}}
}
| SHOW DATABASES like_opt
{
showTablesOpt := &ShowTablesOpt{Filter: $3}
$$ = &Show{&ShowLegacy{Type: string($2), ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}}
}
| SHOW SCHEMAS like_opt
{
showTablesOpt := &ShowTablesOpt{Filter: $3}
$$ = &Show{&ShowLegacy{Type: string($2), ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}}
}
| SHOW KEYSPACES like_opt
{
showTablesOpt := &ShowTablesOpt{Filter: $3}
$$ = &Show{&ShowLegacy{Type: string($2), ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}}
}
| SHOW VITESS_KEYSPACES like_opt
{
showTablesOpt := &ShowTablesOpt{Filter: $3}
$$ = &Show{&ShowLegacy{Type: string($2), ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}}
}
| SHOW ENGINES
{
$$ = &Show{&ShowLegacy{Type: string($2), Scope: ImplicitScope}}
}
| SHOW FUNCTION CODE table_name
{
$$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Table: $4, Scope: ImplicitScope}}
}
| SHOW extended_opt index_symbols from_or_in table_name from_database_opt like_or_where_opt
{
showTablesOpt := &ShowTablesOpt{DbName:$6, Filter:$7}
@ -1855,21 +1917,9 @@ show_statement:
{
$$ = &Show{&ShowLegacy{Type: string($2), Scope: ImplicitScope}}
}
| SHOW PROCEDURE ddl_skip_to_end
| SHOW PROCEDURE CODE table_name
{
$$ = &Show{&ShowLegacy{Type: string($2), Scope: ImplicitScope}}
}
| SHOW show_session_or_global STATUS ddl_skip_to_end
{
$$ = &Show{&ShowLegacy{Scope: $2, Type: string($3)}}
}
| SHOW TABLE STATUS from_database_opt like_or_where_opt
{
$$ = &Show{&ShowTableStatus{DatabaseName:$4, Filter:$5}}
}
| SHOW full_opt columns_or_fields from_or_in table_name from_database_opt like_or_where_opt
{
$$ = &Show{&ShowColumns{Full: $2, Table: $5, DbName: $6, Filter: $7}}
$$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Table: $4, Scope: ImplicitScope}}
}
| SHOW full_opt tables_or_processlist from_database_opt like_or_where_opt
{
@ -1881,18 +1931,6 @@ show_statement:
$$ = &Show{&ShowLegacy{Type: $3, ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}}
}
}
| SHOW show_session_or_global VARIABLES ddl_skip_to_end
{
$$ = &Show{&ShowLegacy{Scope: $2, Type: string($3)}}
}
| SHOW COLLATION
{
$$ = &Show{&ShowLegacy{Type: string($2), Scope: ImplicitScope}}
}
| SHOW COLLATION WHERE expression
{
$$ = &Show{&ShowLegacy{Type: string($2), ShowCollationFilterOpt: $4, Scope: ImplicitScope}}
}
| SHOW VITESS_METADATA VARIABLES like_opt
{
showTablesOpt := &ShowTablesOpt{Filter: $4}
@ -2022,18 +2060,18 @@ like_opt:
$$ = &ShowFilter{Like:string($2)}
}
show_session_or_global:
session_or_local_opt:
/* empty */
{
$$ = ImplicitScope
$$ = struct{}{}
}
| SESSION
{
$$ = SessionScope
$$ = struct{}{}
}
| GLOBAL
| LOCAL
{
$$ = GlobalScope
$$ = struct{}{}
}
use_statement:
@ -2184,13 +2222,51 @@ other_statement:
{
$$ = &OtherAdmin{}
}
| LOCK TABLES skip_to_end
lock_statement:
LOCK TABLES lock_table_list
{
$$ = &OtherAdmin{}
$$ = &LockTables{Tables: $3}
}
| UNLOCK TABLES skip_to_end
lock_table_list:
lock_table
{
$$ = &OtherAdmin{}
$$ = TableAndLockTypes{$1}
}
| lock_table_list ',' lock_table
{
$$ = append($1, $3)
}
lock_table:
aliased_table_name lock_type
{
$$ = &TableAndLockType{Table:$1, Lock:$2}
}
lock_type:
READ
{
$$ = Read
}
| READ LOCAL
{
$$ = ReadLocal
}
| WRITE
{
$$ = Write
}
| LOW_PRIORITY WRITE
{
$$ = LowPriorityWrite
}
unlock_statement:
UNLOCK TABLES
{
$$ = &UnlockTables{}
}
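
To make the new LOCK TABLES / UNLOCK TABLES grammar concrete, here is a minimal parsing sketch. It relies only on sqlparser.Parse/String and the LockTables, UnlockTables and TableAndLockType types introduced in this change; the table names and lock modes are illustrative.

package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// LOCK TABLES now yields a fully parsed *LockTables node instead of OtherAdmin.
	stmt, err := sqlparser.Parse("lock tables t1 read local, t2 low_priority write")
	if err != nil {
		log.Fatal(err)
	}
	lock := stmt.(*sqlparser.LockTables)
	for _, tl := range lock.Tables {
		// Each entry pairs an aliased table with one of Read, ReadLocal, Write, LowPriorityWrite.
		fmt.Printf("%s -> %v\n", sqlparser.String(tl.Table), tl.Lock)
	}

	// UNLOCK TABLES becomes its own statement type as well.
	stmt, err = sqlparser.Parse("unlock tables")
	if err != nil {
		log.Fatal(err)
	}
	_ = stmt.(*sqlparser.UnlockTables)
}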
flush_statement:
@ -2414,6 +2490,15 @@ table_name as_opt_id index_hint_list
$$ = &AliasedTableExpr{Expr:$1, Partitions: $4, As: $6, Hints: $7}
}
column_list_opt:
{
$$ = nil
}
| '(' column_list ')'
{
$$ = $2
}
column_list:
sql_id
{
@ -3474,6 +3559,91 @@ algorithm_index:
$$ = &IndexOption{Name: string($1), String: string($3)}
}
algorithm_view:
{
$$ = ""
}
| ALGORITHM '=' UNDEFINED
{
$$ = string($3)
}
| ALGORITHM '=' MERGE
{
$$ = string($3)
}
| ALGORITHM '=' TEMPTABLE
{
$$ = string($3)
}
security_view_opt:
{
$$ = ""
}
| SQL SECURITY security_view
{
$$ = $3
}
security_view:
DEFINER
{
$$ = string($1)
}
| INVOKER
{
$$ = string($1)
}
check_option_opt:
{
$$ = ""
}
| WITH cascade_or_local_opt CHECK OPTION
{
$$ = $2
}
cascade_or_local_opt:
{
$$ = "cascaded"
}
| CASCADED
{
$$ = string($1)
}
| LOCAL
{
$$ = string($1)
}
definer_opt:
{
$$ = ""
}
| DEFINER '=' user
{
$$ = $3
}
user:
CURRENT_USER
{
$$ = string($1)
}
| CURRENT_USER '(' ')'
{
$$ = string($1)
}
| STRING AT_ID
{
$$ = "'" + string($1) + "'@" + string($2)
}
| ID
{
$$ = string($1)
}
lock_opt:
{
$$ = NoLock
@ -3849,9 +4019,15 @@ constraint_opt:
{ $$ = string($1) }
using_opt:
{ $$ = "" }
| USING sql_id
{ $$ = $2.val }
{ $$ = nil }
| using_index_type
{ $$ = []*IndexOption{$1} }
using_index_type:
USING sql_id
{
$$ = &IndexOption{Name: string($1), String: string($2.String())}
}
sql_id:
id_or_var
@ -3886,7 +4062,6 @@ reserved_table_id:
{
$$ = NewTableIdent(string($1))
}
/*
These are not all necessarily reserved in MySQL, but some are.
@ -3902,7 +4077,6 @@ reserved_keyword:
| AND
| AS
| ASC
| AUTO_INCREMENT
| BETWEEN
| BINARY
| BY
@ -3915,6 +4089,7 @@ reserved_keyword:
| CURRENT_DATE
| CURRENT_TIME
| CURRENT_TIMESTAMP
| CURRENT_USER
| SUBSTR
| SUBSTRING
| DATABASE
@ -3964,6 +4139,7 @@ reserved_keyword:
| LOCALTIME
| LOCALTIMESTAMP
| LOCK
| LOW_PRIORITY
| MEMBER
| MATCH
| MAXVALUE
@ -3984,6 +4160,7 @@ reserved_keyword:
| OVER
| PERCENT_RANK
| RANK
| READ
| RECURSIVE
| REGEXP
| RENAME
@ -4014,9 +4191,11 @@ reserved_keyword:
| UTC_TIME
| UTC_TIMESTAMP
| VALUES
| WITH
| WHEN
| WHERE
| WINDOW
| WRITE
| XOR
/*
@ -4032,6 +4211,7 @@ non_reserved_keyword:
| ACTIVE
| ADMIN
| ALGORITHM
| AUTO_INCREMENT
| BEGIN
| BIGINT
| BIT
@ -4040,11 +4220,13 @@ non_reserved_keyword:
| BOOLEAN
| BUCKETS
| CASCADE
| CASCADED
| CHAR
| CHARACTER
| CHARSET
| CHECK
| CLONE
| CODE
| COLLATION
| COLUMNS
| COMMENT_KEYWORD
@ -4057,6 +4239,7 @@ non_reserved_keyword:
| DATE
| DATETIME
| DECIMAL
| DEFINER
| DEFINITION
| DESCRIPTION
| DIRECTORY
@ -4079,6 +4262,7 @@ non_reserved_keyword:
| FOREIGN
| FORMAT
| FULLTEXT
| FUNCTION
| GEOMCOLLECTION
| GEOMETRY
| GEOMETRYCOLLECTION
@ -4092,6 +4276,7 @@ non_reserved_keyword:
| INT
| INTEGER
| INVISIBLE
| INVOKER
| INDEXES
| ISOLATION
| JSON
@ -4105,6 +4290,7 @@ non_reserved_keyword:
| LINES
| LINESTRING
| LOAD
| LOCAL
| LOCKED
| LONGBLOB
| LONGTEXT
@ -4116,6 +4302,7 @@ non_reserved_keyword:
| MEDIUMBLOB
| MEDIUMINT
| MEDIUMTEXT
| MERGE
| MODE
| MULTILINESTRING
| MULTIPOINT
@ -4133,6 +4320,7 @@ non_reserved_keyword:
| OFFSET
| OJ
| OLD
| OPTION
| OPTIONAL
| OPTIONALLY
| ORDINALITY
@ -4148,6 +4336,7 @@ non_reserved_keyword:
| PERSIST_ONLY
| PRECEDING
| PRIVILEGE_CHECKS_USER
| PRIVILEGES
| PROCESS
| PLUGINS
| POINT
@ -4157,7 +4346,6 @@ non_reserved_keyword:
| PROCESSLIST
| QUERY
| RANDOM
| READ
| REAL
| REFERENCE
| REFERENCES
@ -4178,6 +4366,7 @@ non_reserved_keyword:
| SECONDARY_ENGINE
| SECONDARY_LOAD
| SECONDARY_UNLOAD
| SECURITY
| SEQUENCE
| SESSION
| SERIALIZABLE
@ -4187,11 +4376,13 @@ non_reserved_keyword:
| SKIP
| SMALLINT
| SPATIAL
| SQL
| SRID
| START
| STARTING
| STATUS
| TABLES
| TEMPTABLE
| TERMINATED
| TEXT
| THAN
@ -4208,6 +4399,7 @@ non_reserved_keyword:
| TRUNCATE
| UNBOUNDED
| UNCOMMITTED
| UNDEFINED
| UNSIGNED
| UNUSED
| UPGRADE
@ -4226,8 +4418,6 @@ non_reserved_keyword:
| VITESS_TABLETS
| VSCHEMA
| WARNINGS
| WITH
| WRITE
| YEAR
| ZEROFILL
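
The view and SHOW changes above are easiest to see from the parser's output. A hedged sketch, using only sqlparser.Parse and the CreateView/ShowBasic fields added in this change; the exact statement text is illustrative.

package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// CREATE VIEW is no longer skipped to end of statement: OR REPLACE, ALGORITHM,
	// SQL SECURITY, the column list and the SELECT all land on *CreateView.
	stmt, err := sqlparser.Parse("create or replace algorithm = merge sql security definer view v(a, b) as select a, b from t")
	if err != nil {
		log.Fatal(err)
	}
	cv := stmt.(*sqlparser.CreateView)
	fmt.Println(cv.IsReplace, cv.Algorithm, cv.Security, sqlparser.String(cv.Select))

	// Several SHOW variants now parse into the new ShowBasic node instead of ShowLegacy.
	stmt, err = sqlparser.Parse("show charset like 'utf8%'")
	if err != nil {
		log.Fatal(err)
	}
	sb := stmt.(*sqlparser.Show).Internal.(*sqlparser.ShowBasic)
	fmt.Println(sb.Command, sb.Filter.Like)
}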


@ -114,6 +114,7 @@ var keywords = map[string]int{
"by": BY,
"call": UNUSED,
"cascade": CASCADE,
"cascaded": CASCADED,
"case": CASE,
"cast": CAST,
"change": UNUSED,
@ -121,6 +122,7 @@ var keywords = map[string]int{
"character": CHARACTER,
"charset": CHARSET,
"check": CHECK,
"code": CODE,
"collate": COLLATE,
"collation": COLLATION,
"column": COLUMN,
@ -141,7 +143,7 @@ var keywords = map[string]int{
"current_date": CURRENT_DATE,
"current_time": CURRENT_TIME,
"current_timestamp": CURRENT_TIMESTAMP,
"current_user": UNUSED,
"current_user": CURRENT_USER,
"cursor": UNUSED,
"data": DATA,
"database": DATABASE,
@ -156,6 +158,7 @@ var keywords = map[string]int{
"decimal": DECIMAL,
"declare": UNUSED,
"default": DEFAULT,
"definer": DEFINER,
"delayed": UNUSED,
"delete": DELETE,
"desc": DESC,
@ -199,6 +202,7 @@ var keywords = map[string]int{
"from": FROM,
"full": FULL,
"fulltext": FULLTEXT,
"function": FUNCTION,
"generated": UNUSED,
"geometry": GEOMETRY,
"geometrycollection": GEOMETRYCOLLECTION,
@ -237,6 +241,7 @@ var keywords = map[string]int{
"is": IS,
"isolation": ISOLATION,
"iterate": UNUSED,
"invoker": INVOKER,
"join": JOIN,
"json": JSON,
"key": KEY,
@ -257,6 +262,7 @@ var keywords = map[string]int{
"lines": LINES,
"linestring": LINESTRING,
"load": LOAD,
"local": LOCAL,
"localtime": LOCALTIME,
"localtimestamp": LOCALTIMESTAMP,
"lock": LOCK,
@ -264,7 +270,7 @@ var keywords = map[string]int{
"longblob": LONGBLOB,
"longtext": LONGTEXT,
"loop": UNUSED,
"low_priority": UNUSED,
"low_priority": LOW_PRIORITY,
"manifest": MANIFEST,
"master_bind": UNUSED,
"match": MATCH,
@ -272,6 +278,7 @@ var keywords = map[string]int{
"mediumblob": MEDIUMBLOB,
"mediumint": MEDIUMINT,
"mediumtext": MEDIUMTEXT,
"merge": MERGE,
"middleint": UNUSED,
"minute_microsecond": UNUSED,
"minute_second": UNUSED,
@ -298,7 +305,7 @@ var keywords = map[string]int{
"only": ONLY,
"optimize": OPTIMIZE,
"optimizer_costs": UNUSED,
"option": UNUSED,
"option": OPTION,
"optionally": OPTIONALLY,
"or": OR,
"order": ORDER,
@ -313,6 +320,7 @@ var keywords = map[string]int{
"polygon": POLYGON,
"precision": UNUSED,
"primary": PRIMARY,
"privileges": PRIVILEGES,
"processlist": PROCESSLIST,
"procedure": PROCEDURE,
"query": QUERY,
@ -343,6 +351,7 @@ var keywords = map[string]int{
"schema": SCHEMA,
"schemas": SCHEMAS,
"second_microsecond": UNUSED,
"security": SECURITY,
"select": SELECT,
"sensitive": UNUSED,
"separator": SEPARATOR,
@ -358,7 +367,7 @@ var keywords = map[string]int{
"smallint": SMALLINT,
"spatial": SPATIAL,
"specific": UNUSED,
"sql": UNUSED,
"sql": SQL,
"sqlexception": UNUSED,
"sqlstate": UNUSED,
"sqlwarning": UNUSED,
@ -377,6 +386,7 @@ var keywords = map[string]int{
"vstream": VSTREAM,
"table": TABLE,
"tables": TABLES,
"temptable": TEMPTABLE,
"terminated": TERMINATED,
"text": TEXT,
"than": THAN,
@ -397,6 +407,7 @@ var keywords = map[string]int{
"true": TRUE,
"truncate": TRUNCATE,
"uncommitted": UNCOMMITTED,
"undefined": UNDEFINED,
"undo": UNUSED,
"union": UNION,
"unique": UNIQUE,
@ -709,6 +720,9 @@ func (tkn *Tokenizer) scanIdentifier(firstByte byte, isVariable bool) (int, []by
isDigit(tkn.lastChar) ||
tkn.lastChar == '@' ||
(isVariable && isCarat(tkn.lastChar)) {
if tkn.lastChar == '@' {
isVariable = true
}
buffer.WriteByte(byte(tkn.lastChar))
tkn.next()
}


@ -29,7 +29,7 @@ import (
// ApplyVSchemaDDL applies the given DDL statement to the vschema
// keyspace definition and returns the modified keyspace object.
func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLStatement) (*vschemapb.Keyspace, error) {
func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, alterVschema *sqlparser.AlterVschema) (*vschemapb.Keyspace, error) {
if ks == nil {
ks = new(vschemapb.Keyspace)
}
@ -44,14 +44,14 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
var tableName string
var table *vschemapb.Table
if !ddl.GetTable().IsEmpty() {
tableName = ddl.GetTable().Name.String()
if !alterVschema.Table.IsEmpty() {
tableName = alterVschema.Table.Name.String()
table = ks.Tables[tableName]
}
switch ddl.GetAction() {
switch alterVschema.Action {
case sqlparser.CreateVindexDDLAction:
name := ddl.GetVindexSpec().Name.String()
name := alterVschema.VindexSpec.Name.String()
if _, ok := ks.Vindexes[name]; ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex %s already exists in keyspace %s", name, ksName)
}
@ -62,9 +62,9 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
ks.Sharded = true
}
owner, params := ddl.GetVindexSpec().ParseParams()
owner, params := alterVschema.VindexSpec.ParseParams()
ks.Vindexes[name] = &vschemapb.Vindex{
Type: ddl.GetVindexSpec().Type.String(),
Type: alterVschema.VindexSpec.Type.String(),
Params: params,
Owner: owner,
}
@ -72,7 +72,7 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
return ks, nil
case sqlparser.DropVindexDDLAction:
name := ddl.GetVindexSpec().Name.String()
name := alterVschema.VindexSpec.Name.String()
if _, ok := ks.Vindexes[name]; !ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex %s does not exist in keyspace %s", name, ksName)
}
@ -95,7 +95,7 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "add vschema table: unsupported on sharded keyspace %s", ksName)
}
name := ddl.GetTable().Name.String()
name := alterVschema.Table.Name.String()
if _, ok := ks.Tables[name]; ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema already contains table %s in keyspace %s", name, ksName)
}
@ -105,7 +105,7 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
return ks, nil
case sqlparser.DropVschemaTableDDLAction:
name := ddl.GetTable().Name.String()
name := alterVschema.Table.Name.String()
if _, ok := ks.Tables[name]; !ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema does not contain table %s in keyspace %s", name, ksName)
}
@ -123,7 +123,7 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
//
// 2. The vindex type is not specified. Make sure the vindex
// already exists.
spec := ddl.GetVindexSpec()
spec := alterVschema.VindexSpec
name := spec.Name.String()
if !spec.Type.IsEmpty() {
owner, params := spec.ParseParams()
@ -171,8 +171,8 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
}
}
columns := make([]string, len(ddl.GetVindexCols()))
for i, col := range ddl.GetVindexCols() {
columns := make([]string, len(alterVschema.VindexCols))
for i, col := range alterVschema.VindexCols {
columns[i] = col.String()
}
table.ColumnVindexes = append(table.ColumnVindexes, &vschemapb.ColumnVindex{
@ -184,7 +184,7 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
return ks, nil
case sqlparser.DropColVindexDDLAction:
spec := ddl.GetVindexSpec()
spec := alterVschema.VindexSpec
name := spec.Name.String()
if table == nil {
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table %s.%s not defined in vschema", ksName, tableName)
@ -206,7 +206,7 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "add sequence table: unsupported on sharded keyspace %s", ksName)
}
name := ddl.GetTable().Name.String()
name := alterVschema.Table.Name.String()
if _, ok := ks.Tables[name]; ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema already contains sequence %s in keyspace %s", name, ksName)
}
@ -216,7 +216,7 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
return ks, nil
case sqlparser.AddAutoIncDDLAction:
name := ddl.GetTable().Name.String()
name := alterVschema.Table.Name.String()
table := ks.Tables[name]
if table == nil {
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema does not contain table %s in keyspace %s", name, ksName)
@ -226,19 +226,19 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, ddl sqlparser.DDLSta
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema already contains auto inc %v on table %s in keyspace %s", table.AutoIncrement, name, ksName)
}
sequence := ddl.GetAutoIncSpec().Sequence
sequence := alterVschema.AutoIncSpec.Sequence
sequenceFqn := sequence.Name.String()
if sequence.Qualifier.String() != "" {
sequenceFqn = fmt.Sprintf("%s.%s", sequence.Qualifier.String(), sequenceFqn)
}
table.AutoIncrement = &vschemapb.AutoIncrement{
Column: ddl.GetAutoIncSpec().Column.String(),
Column: alterVschema.AutoIncSpec.Column.String(),
Sequence: sequenceFqn,
}
return ks, nil
}
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected vindex ddl operation %s", ddl.GetAction().ToString())
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected vindex ddl operation %s", alterVschema.Action.ToString())
}
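
With ApplyVSchemaDDL now taking *sqlparser.AlterVschema directly, a caller looks roughly like the sketch below. The topotools import path and the keyspace name are assumptions for illustration; the proto import matches the alias used elsewhere in the tree.

package main

import (
	"fmt"
	"log"

	vschemapb "vitess.io/vitess/go/vt/proto/vschema"
	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/topotools" // assumed location of ApplyVSchemaDDL
)

func main() {
	stmt, err := sqlparser.Parse("alter vschema add table customer")
	if err != nil {
		log.Fatal(err)
	}
	// ALTER VSCHEMA statements now arrive as a dedicated *AlterVschema node, not a generic DDL.
	alter, ok := stmt.(*sqlparser.AlterVschema)
	if !ok {
		log.Fatalf("expected *sqlparser.AlterVschema, got %T", stmt)
	}
	updated, err := topotools.ApplyVSchemaDDL("commerce", &vschemapb.Keyspace{}, alter)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(updated.Tables) // the keyspace now contains an entry for "customer"
}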


@ -93,7 +93,7 @@ func (client *gRPCVtctldClient) FindAllShardsInKeyspace(ctx context.Context, in
return client.c.FindAllShardsInKeyspace(ctx, in, opts...)
}
func (client *gRPCVtctldClient) GetKeyspace(ctx context.Context, in *vtctldatapb.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.Keyspace, error) {
func (client *gRPCVtctldClient) GetKeyspace(ctx context.Context, in *vtctldatapb.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.GetKeyspaceResponse, error) {
if client.c == nil {
return nil, status.Error(codes.Unavailable, connClosedMsg)
}


@ -30,6 +30,7 @@ import (
"vitess.io/vitess/go/vt/vtctl/vtctldclient"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vtctldata"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
)
@ -114,15 +115,17 @@ func TestGetKeyspace(t *testing.T) {
vtctld := grpcvtctldserver.NewVtctldServer(ts)
withTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) {
expected := &vtctldatapb.Keyspace{
Name: "testkeyspace",
Keyspace: &topodatapb.Keyspace{
ShardingColumnName: "col1",
expected := &vtctldatapb.GetKeyspaceResponse{
Keyspace: &vtctldata.Keyspace{
Name: "testkeyspace",
Keyspace: &topodatapb.Keyspace{
ShardingColumnName: "col1",
},
},
}
addKeyspace(ctx, t, ts, expected)
addKeyspace(ctx, t, ts, expected.Keyspace)
resp, err := client.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: expected.Name})
resp, err := client.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: expected.Keyspace.Name})
assert.NoError(t, err)
assert.Equal(t, expected, resp)


@ -58,15 +58,17 @@ func (s *VtctldServer) FindAllShardsInKeyspace(ctx context.Context, req *vtctlda
}
// GetKeyspace is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetKeyspace(ctx context.Context, req *vtctldatapb.GetKeyspaceRequest) (*vtctldatapb.Keyspace, error) {
func (s *VtctldServer) GetKeyspace(ctx context.Context, req *vtctldatapb.GetKeyspaceRequest) (*vtctldatapb.GetKeyspaceResponse, error) {
keyspace, err := s.ts.GetKeyspace(ctx, req.Keyspace)
if err != nil {
return nil, err
}
return &vtctldatapb.Keyspace{
Name: req.Keyspace,
Keyspace: keyspace.Keyspace,
return &vtctldatapb.GetKeyspaceResponse{
Keyspace: &vtctldatapb.Keyspace{
Name: req.Keyspace,
Keyspace: keyspace.Keyspace,
},
}, nil
}
@ -85,7 +87,7 @@ func (s *VtctldServer) GetKeyspaces(ctx context.Context, req *vtctldatapb.GetKey
return nil, err
}
keyspaces[i] = ks
keyspaces[i] = ks.Keyspace
}
return &vtctldatapb.GetKeyspacesResponse{Keyspaces: keyspaces}, nil
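
Callers of GetKeyspace need to adjust for the new response wrapper. A compile-time sketch of the field access only; obtaining the client and context is out of scope here, and the keyspace name is illustrative.

package example

import (
	"context"
	"fmt"

	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
	"vitess.io/vitess/go/vt/vtctl/vtctldclient"
)

// printShardingColumn shows the new nesting: the keyspace payload now sits
// under GetKeyspaceResponse.Keyspace rather than being the response itself.
func printShardingColumn(ctx context.Context, client vtctldclient.VtctldClient) error {
	resp, err := client.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: "testkeyspace"})
	if err != nil {
		return err
	}
	// Previously: resp.Name / resp.Keyspace; now: resp.Keyspace.Name / resp.Keyspace.Keyspace.
	fmt.Println(resp.Keyspace.Name, resp.Keyspace.Keyspace.ShardingColumnName)
	return nil
}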


@ -74,15 +74,17 @@ func TestGetKeyspace(t *testing.T) {
ts := memorytopo.NewServer("cell1")
vtctld := NewVtctldServer(ts)
expected := &vtctldatapb.Keyspace{
Name: "testkeyspace",
Keyspace: &topodatapb.Keyspace{
ShardingColumnName: "col1",
expected := &vtctldatapb.GetKeyspaceResponse{
Keyspace: &vtctldatapb.Keyspace{
Name: "testkeyspace",
Keyspace: &topodatapb.Keyspace{
ShardingColumnName: "col1",
},
},
}
addKeyspace(ctx, t, ts, expected)
addKeyspace(ctx, t, ts, expected.Keyspace)
ks, err := vtctld.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: expected.Name})
ks, err := vtctld.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: expected.Keyspace.Name})
assert.NoError(t, err)
assert.Equal(t, expected, ks)


@ -1773,7 +1773,7 @@ func commandGetKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl
return err
}
// Pass the embedded proto directly or jsonpb will panic.
return printJSON(wr.Logger(), keyspaceInfo.Keyspace)
return printJSON(wr.Logger(), keyspaceInfo.Keyspace.Keyspace)
}
func commandGetKeyspaces(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
@ -2529,7 +2529,7 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag
condition = fmt.Sprintf("migration_uuid='%s'", uuid)
}
query = fmt.Sprintf(`select
shard, mysql_schema, mysql_table, migration_uuid, strategy, started_timestamp, completed_timestamp, migration_status
shard, mysql_schema, mysql_table, ddl_action, migration_uuid, strategy, started_timestamp, completed_timestamp, migration_status
from _vt.schema_migrations where %s`, condition)
}
case "retry":
@ -2771,7 +2771,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *f
if err != nil {
return fmt.Errorf("error parsing vschema statement `%s`: %v", *sql, err)
}
ddl, ok := stmt.(sqlparser.DDLStatement)
ddl, ok := stmt.(*sqlparser.AlterVschema)
if !ok {
return fmt.Errorf("error parsing vschema statement `%s`: not a ddl statement", *sql)
}


@ -615,6 +615,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, re
req := struct {
Keyspace, SQL string
ReplicaTimeoutSeconds int
DDLStrategy string `json:"ddl_strategy,omitempty"`
}{}
if err := unmarshalRequest(r, &req); err != nil {
return fmt.Errorf("can't unmarshal request: %v", err)
@ -635,6 +636,10 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, re
requestContext := fmt.Sprintf("vtctld/api:%s", apiCallUUID)
executor := schemamanager.NewTabletExecutor(requestContext, wr, time.Duration(req.ReplicaTimeoutSeconds)*time.Second)
if err := executor.SetDDLStrategy(req.DDLStrategy); err != nil {
return fmt.Errorf("error setting DDL strategy: %v", err)
}
return schemamanager.Run(ctx,
schemamanager.NewUIController(req.SQL, req.Keyspace, w), executor)
})
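
The schema-apply request body now carries an optional ddl_strategy field. A minimal sketch of the JSON shape, mirroring the anonymous request struct above; the keyspace, SQL and strategy values are illustrative, and accepted strategy names are whatever SetDDLStrategy validates.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Keyspace and SQL keep their Go field names as JSON keys; only the new
	// strategy field is tagged as ddl_strategy.
	req := struct {
		Keyspace, SQL         string
		ReplicaTimeoutSeconds int
		DDLStrategy           string `json:"ddl_strategy,omitempty"`
	}{
		Keyspace:              "commerce",
		SQL:                   "alter table product add column category varchar(255)",
		ReplicaTimeoutSeconds: 10,
		DDLStrategy:           "gh-ost", // illustrative; validated by SetDDLStrategy
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}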


@ -78,6 +78,10 @@ func reviewMigrationRequest(ctx context.Context, ts *topo.Server, tmClient tmcli
if err != nil {
return err
}
actionStr, err := onlineDDL.GetActionStr()
if err != nil {
return err
}
log.Infof("Found schema migration request: %+v", onlineDDL)
onlineDDL.Status = schema.OnlineDDLStatusQueued
@ -94,11 +98,12 @@ func reviewMigrationRequest(ctx context.Context, ts *topo.Server, tmClient tmcli
migration_statement,
strategy,
options,
ddl_action,
requested_timestamp,
migration_context,
migration_status
) VALUES (
%a, %a, %a, %a, %a, %a, %a, %a, FROM_UNIXTIME(%a), %a, %a
%a, %a, %a, %a, %a, %a, %a, %a, %a, FROM_UNIXTIME(%a), %a, %a
)`
parsed := sqlparser.BuildParsedQuery(sqlInsertSchemaMigration, "_vt",
":migration_uuid",
@ -109,6 +114,7 @@ func reviewMigrationRequest(ctx context.Context, ts *topo.Server, tmClient tmcli
":migration_statement",
":strategy",
":options",
":ddl_action",
":requested_timestamp",
":migration_context",
":migration_status",
@ -122,6 +128,7 @@ func reviewMigrationRequest(ctx context.Context, ts *topo.Server, tmClient tmcli
"migration_statement": sqltypes.StringBindVariable(onlineDDL.SQL),
"strategy": sqltypes.StringBindVariable(string(onlineDDL.Strategy)),
"options": sqltypes.StringBindVariable(onlineDDL.Options),
"ddl_action": sqltypes.StringBindVariable(actionStr),
"requested_timestamp": sqltypes.Int64BindVariable(onlineDDL.RequestTimeSeconds()),
"migration_context": sqltypes.StringBindVariable(onlineDDL.RequestContext),
"migration_status": sqltypes.StringBindVariable(string(onlineDDL.Status)),


@ -371,7 +371,12 @@ func initTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) error {
showTableRows := make([][]sqltypes.Value, 0, 4)
for _, ddl := range ddls {
table := ddl.GetTable().Name.String()
showTableRows = append(showTableRows, mysql.BaseShowTablesRow(table, false, ""))
options := ""
spec := ddl.GetTableSpec()
if spec != nil && strings.Contains(spec.Options, "vitess_sequence") {
options = "vitess_sequence"
}
showTableRows = append(showTableRows, mysql.BaseShowTablesRow(table, false, options))
}
schemaQueries[mysql.BaseShowTables] = &sqltypes.Result{
Fields: mysql.BaseShowTablesFields,


@ -20,6 +20,8 @@ import (
"encoding/json"
"testing"
"vitess.io/vitess/go/vt/vttablet/tabletserver/schema"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
@ -43,6 +45,13 @@ create table t4 like t3;
create table t5 (like t2);
create table t1_seq(
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence';
create table test_partitioned (
id bigint,
date_create int,
@ -114,6 +123,11 @@ create table test_partitioned (
if t5.HasPrimary() || len(t5.PKColumns) != 0 {
t.Errorf("expected !HasPrimary && t5.PKColumns == [] got %v", t5.PKColumns)
}
seq := tables["t1_seq"]
if seq.Type != schema.Sequence {
t.Errorf("expected t1_seq to be a sequence table but is type %v", seq.Type)
}
}
func TestErrParseSchema(t *testing.T) {


@ -116,7 +116,7 @@ func (t noopVCursor) ShardSession() []*srvtopo.ResolvedShard {
panic("implement me")
}
func (t noopVCursor) ExecuteVSchema(keyspace string, vschemaDDL sqlparser.DDLStatement) error {
func (t noopVCursor) ExecuteVSchema(keyspace string, vschemaDDL *sqlparser.AlterVschema) error {
panic("implement me")
}
@ -281,7 +281,7 @@ func (f *loggingVCursor) ShardSession() []*srvtopo.ResolvedShard {
return nil
}
func (f *loggingVCursor) ExecuteVSchema(string, sqlparser.DDLStatement) error {
func (f *loggingVCursor) ExecuteVSchema(string, *sqlparser.AlterVschema) error {
panic("implement me")
}


@ -84,7 +84,7 @@ type (
// Will replace all of the Topo functions.
ResolveDestinations(keyspace string, ids []*querypb.Value, destinations []key.Destination) ([]*srvtopo.ResolvedShard, [][]*querypb.Value, error)
ExecuteVSchema(keyspace string, vschemaDDL sqlparser.DDLStatement) error
ExecuteVSchema(keyspace string, vschemaDDL *sqlparser.AlterVschema) error
SubmitOnlineDDL(onlineDDl *schema.OnlineDDL) error


@ -31,7 +31,7 @@ var _ Primitive = (*AlterVSchema)(nil)
type AlterVSchema struct {
Keyspace *vindexes.Keyspace
DDL sqlparser.DDLStatement
AlterVschemaDDL *sqlparser.AlterVschema
noTxNeeded
@ -43,7 +43,7 @@ func (v *AlterVSchema) description() PrimitiveDescription {
OperatorType: "AlterVSchema",
Keyspace: v.Keyspace,
Other: map[string]interface{}{
"query": sqlparser.String(v.DDL),
"query": sqlparser.String(v.AlterVschemaDDL),
},
}
}
@ -60,12 +60,12 @@ func (v *AlterVSchema) GetKeyspaceName() string {
//GetTableName implements the Primitive interface
func (v *AlterVSchema) GetTableName() string {
return v.DDL.GetTable().Name.String()
return v.AlterVschemaDDL.Table.Name.String()
}
//Execute implements the Primitive interface
func (v *AlterVSchema) Execute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantfields bool) (*sqltypes.Result, error) {
err := vcursor.ExecuteVSchema(v.Keyspace.Name, v.DDL)
err := vcursor.ExecuteVSchema(v.Keyspace.Name, v.AlterVschemaDDL)
if err != nil {
return nil, err
}


@ -327,3 +327,15 @@ func evaluateByType(val *querypb.BindVariable) (EvalResult, error) {
func (e *EvalResult) debugString() string {
return fmt.Sprintf("(%s) %d %d %f %s", querypb.Type_name[int32(e.typ)], e.ival, e.uval, e.fval, string(e.bytes))
}
// AreExprEqual checks if the provided Expr are the same or not
func AreExprEqual(expr1 Expr, expr2 Expr) bool {
// Check the types of the two expressions, if they don't match then the two are not equal
if fmt.Sprintf("%T", expr1) != fmt.Sprintf("%T", expr2) {
return false
}
if expr1.String() == expr2.String() {
return true
}
return false
}
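
AreExprEqual treats two expressions as equal when they have the same dynamic type and the same canonical string form. A self-contained illustration of that comparison technique follows; it deliberately uses throwaway local types rather than real evalengine expressions, whose constructors are not shown in this diff.

package main

import "fmt"

// sameKind mirrors the rule used by AreExprEqual above: equal dynamic type
// plus equal canonical string form.
func sameKind(a, b fmt.Stringer) bool {
	if fmt.Sprintf("%T", a) != fmt.Sprintf("%T", b) {
		return false
	}
	return a.String() == b.String()
}

type literal string
type column string

func (l literal) String() string { return string(l) }
func (c column) String() string  { return string(c) }

func main() {
	fmt.Println(sameKind(literal("1"), literal("1"))) // true: same type, same form
	fmt.Println(sameKind(literal("1"), column("1")))  // false: same form, different type
}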


@ -156,6 +156,7 @@ func (e *Executor) Execute(ctx context.Context, method string, safeSession *Safe
saveSessionStats(safeSession, stmtType, result, err)
if result != nil && len(result.Rows) > *warnMemoryRows {
warnings.Add("ResultsExceeded", 1)
log.Warningf("%q exceeds warning threshold of max memory rows: %v", sql, *warnMemoryRows)
}
logStats.Send()
@ -433,7 +434,7 @@ func (e *Executor) handleSet(ctx context.Context, sql string, logStats *LogStats
if err != nil {
return nil, err
}
rewrittenAST, err := sqlparser.PrepareAST(stmt, nil, "vtg", false)
rewrittenAST, err := sqlparser.PrepareAST(stmt, nil, "vtg", false, "")
if err != nil {
return nil, err
}
@ -599,28 +600,10 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql
execStart := time.Now()
defer func() { logStats.ExecuteTime = time.Since(execStart) }()
switch strings.ToLower(show.Type) {
case sqlparser.KeywordString(sqlparser.COLLATION), sqlparser.KeywordString(sqlparser.VARIABLES):
case sqlparser.KeywordString(sqlparser.VARIABLES):
if show.Scope == sqlparser.VitessMetadataScope {
return e.handleShowVitessMetadata(ctx, show.ShowTablesOpt)
}
if destKeyspace == "" {
keyspaces, err := e.resolver.resolver.GetAllKeyspaces(ctx)
if err != nil {
return nil, err
}
if len(keyspaces) == 0 {
return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no keyspaces available")
}
return e.handleOther(ctx, safeSession, sql, bindVars, dest, keyspaces[0], destTabletType, logStats, ignoreMaxMemoryRows)
}
// for STATUS, return empty result set
case sqlparser.KeywordString(sqlparser.STATUS):
return &sqltypes.Result{
Fields: buildVarCharFields("Variable_name", "Value"),
Rows: make([][]sqltypes.Value, 0, 2),
RowsAffected: 0,
}, nil
// for ENGINES, we want to return just InnoDB
case sqlparser.KeywordString(sqlparser.ENGINES):
rows := make([][]sqltypes.Value, 0, 6)
@ -652,25 +635,6 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql
Rows: rows,
RowsAffected: 1,
}, nil
// CHARSET & CHARACTER SET return utf8mb4 & utf8
case sqlparser.KeywordString(sqlparser.CHARSET):
fields := buildVarCharFields("Charset", "Description", "Default collation")
maxLenField := &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32}
fields = append(fields, maxLenField)
charsets := []string{utf8, utf8mb4}
filter := show.ShowTablesOpt.Filter
rows, err := generateCharsetRows(filter, charsets)
if err != nil {
return nil, err
}
rowsAffected := uint64(len(rows))
return &sqltypes.Result{
Fields: fields,
Rows: rows,
RowsAffected: rowsAffected,
}, err
case "create table":
if !show.Table.Qualifier.IsEmpty() {
// Explicit keyspace was passed. Use that for targeting but remove from the query itself.
@ -720,34 +684,6 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql
show.ShowTablesOpt.DbName = ""
}
sql = sqlparser.String(show)
case sqlparser.KeywordString(sqlparser.DATABASES), sqlparser.KeywordString(sqlparser.SCHEMAS), sqlparser.KeywordString(sqlparser.VITESS_KEYSPACES), sqlparser.KeywordString(sqlparser.KEYSPACES):
keyspaces, err := e.resolver.resolver.GetAllKeyspaces(ctx)
if err != nil {
return nil, err
}
var filter *regexp.Regexp
if show.ShowTablesOpt != nil && show.ShowTablesOpt.Filter != nil {
filter = sqlparser.LikeToRegexp(show.ShowTablesOpt.Filter.Like)
}
if filter == nil {
filter = regexp.MustCompile(".*")
}
rows := make([][]sqltypes.Value, 0, len(keyspaces))
for _, v := range keyspaces {
if filter.MatchString(v) {
rows = append(rows, buildVarCharRow(v))
}
}
return &sqltypes.Result{
Fields: buildVarCharFields("Database"),
Rows: rows,
RowsAffected: uint64(len(rows)),
}, nil
case sqlparser.KeywordString(sqlparser.VITESS_SHARDS):
showVitessShardsFilters := func(show *sqlparser.ShowLegacy) ([]func(string) bool, []func(string, *topodatapb.ShardReference) bool) {
keyspaceFilters := []func(string) bool{}
@ -1361,7 +1297,7 @@ func (e *Executor) getPlan(vcursor *vcursorImpl, sql string, comments sqlparser.
// Normalize if possible and retry.
if (e.normalize && sqlparser.CanNormalize(stmt)) || sqlparser.IsSetStatement(stmt) {
parameterize := e.normalize // the public flag is called normalize
result, err := sqlparser.PrepareAST(stmt, bindVars, "vtg", parameterize)
result, err := sqlparser.PrepareAST(stmt, bindVars, "vtg", parameterize, vcursor.keyspace)
if err != nil {
return nil, err
}
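
PrepareAST (and RewriteAST above) now take the currently targeted keyspace as a final argument. A minimal sketch of the new call shape, mirroring the call sites in this hunk; the query and keyspace name are illustrative, and the rewritten statement comes back on the result's AST field.

package main

import (
	"log"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	stmt, err := sqlparser.Parse("select * from user where id = 1")
	if err != nil {
		log.Fatal(err)
	}
	// The last argument is the keyspace the session currently targets;
	// it is empty when none is selected, as in the SET handling above.
	result, err := sqlparser.PrepareAST(stmt, nil, "vtg", true, "commerce")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("rewritten: %s", sqlparser.String(result.AST))
}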


@ -2417,7 +2417,7 @@ func TestSelectFromInformationSchema(t *testing.T) {
// check failure when trying to query two keyspaces
_, err := exec(executor, session, "SELECT B.TABLE_NAME FROM INFORMATION_SCHEMA.TABLES AS A, INFORMATION_SCHEMA.COLUMNS AS B WHERE A.TABLE_SCHEMA = 'TestExecutor' AND A.TABLE_SCHEMA = 'TestXBadSharding'")
require.Error(t, err)
require.Contains(t, err.Error(), "two predicates for table_schema not supported")
require.Contains(t, err.Error(), "two predicates for specifying the database are not supported")
// we pick a keyspace and query for table_schema = database(). should be routed to the picked keyspace
session.TargetString = "TestExecutor"


@ -439,20 +439,7 @@ func TestExecutorShow(t *testing.T) {
for _, query := range []string{"show databases", "show vitess_keyspaces", "show keyspaces", "show DATABASES", "show schemas", "show SCHEMAS"} {
qr, err := executor.Execute(ctx, "TestExecute", session, query, nil)
require.NoError(t, err)
wantqr := &sqltypes.Result{
Fields: buildVarCharFields("Database"),
Rows: [][]sqltypes.Value{
buildVarCharRow("TestExecutor"),
buildVarCharRow(KsTestSharded),
buildVarCharRow(KsTestUnsharded),
buildVarCharRow("TestXBadSharding"),
buildVarCharRow(KsTestBadVSchema),
},
RowsAffected: 5,
}
utils.MustMatch(t, wantqr, qr, fmt.Sprintf("unexpected results running query: %s", query))
require.EqualValues(t, 5, qr.RowsAffected, fmt.Sprintf("unexpected results running query: %s", query))
}
_, err := executor.Execute(ctx, "TestExecute", session, "show variables", nil)
require.NoError(t, err)
@ -1867,8 +1854,8 @@ func TestGenerateCharsetRows(t *testing.T) {
t.Run(tc.input, func(t *testing.T) {
stmt, err := sqlparser.Parse(tc.input)
require.NoError(t, err)
match := stmt.(*sqlparser.Show).Internal.(*sqlparser.ShowLegacy)
filter := match.ShowTablesOpt.Filter
match := stmt.(*sqlparser.Show).Internal.(*sqlparser.ShowBasic)
filter := match.Filter
actual, err := generateCharsetRows(filter, charsets)
require.NoError(t, err)
require.Equal(t, tc.expected, actual)


@ -47,20 +47,21 @@ type ContextVSchema interface {
FirstSortedKeyspace() (*vindexes.Keyspace, error)
SysVarSetEnabled() bool
KeyspaceExists(keyspace string) bool
AllKeyspace() ([]*vindexes.Keyspace, error)
}
type truncater interface {
SetTruncateColumnCount(int)
}
// Build builds a plan for a query based on the specified vschema.
// TestBuilder builds a plan for a query based on the specified vschema.
// This method is only used from tests
func Build(query string, vschema ContextVSchema) (*engine.Plan, error) {
func TestBuilder(query string, vschema ContextVSchema) (*engine.Plan, error) {
stmt, err := sqlparser.Parse(query)
if err != nil {
return nil, err
}
result, err := sqlparser.RewriteAST(stmt)
result, err := sqlparser.RewriteAST(stmt, "")
if err != nil {
return nil, err
}
@ -106,10 +107,9 @@ func createInstructionFor(query string, stmt sqlparser.Statement, vschema Contex
case *sqlparser.Union:
return buildRoutePlan(stmt, vschema, buildUnionPlan)
case sqlparser.DDLStatement:
if sqlparser.IsVschemaDDL(stmt) {
return buildVSchemaDDLPlan(stmt, vschema)
}
return buildGeneralDDLPlan(query, stmt, vschema)
case *sqlparser.AlterVschema:
return buildVSchemaDDLPlan(stmt, vschema)
case *sqlparser.Use:
return buildUsePlan(stmt, vschema)
case *sqlparser.Explain:
@ -136,6 +136,10 @@ func createInstructionFor(query string, stmt sqlparser.Statement, vschema Contex
return nil, nil
case *sqlparser.Show:
return buildShowPlan(stmt, vschema)
case *sqlparser.LockTables:
return buildRoutePlan(stmt, vschema, buildLockPlan)
case *sqlparser.UnlockTables:
return buildRoutePlan(stmt, vschema, buildUnlockPlan)
}
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unexpected statement type: %T", stmt)
@ -201,3 +205,14 @@ func buildLoadPlan(query string, vschema ContextVSchema) (engine.Primitive, erro
SingleShardOnly: true,
}, nil
}
func buildVSchemaDDLPlan(stmt *sqlparser.AlterVschema, vschema ContextVSchema) (engine.Primitive, error) {
_, keyspace, _, err := vschema.TargetDestination(stmt.Table.Qualifier.String())
if err != nil {
return nil, err
}
return &engine.AlterVSchema{
Keyspace: keyspace,
AlterVschemaDDL: stmt,
}, nil
}
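
To make the new dispatch concrete, a small sketch that mirrors the added branches of the type switch in createInstructionFor; it only uses the parser, not a real vschema, so the mapping comments are the point rather than actual plans.

package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	for _, sql := range []string{
		"alter vschema add table customer",
		"lock tables customer read",
		"unlock tables",
	} {
		stmt, err := sqlparser.Parse(sql)
		if err != nil {
			log.Fatal(err)
		}
		// Mirrors the new branches added above.
		switch stmt.(type) {
		case *sqlparser.AlterVschema:
			fmt.Println(sql, "-> buildVSchemaDDLPlan (engine.AlterVSchema)")
		case *sqlparser.LockTables:
			fmt.Println(sql, "-> buildLockPlan")
		case *sqlparser.UnlockTables:
			fmt.Println(sql, "-> buildUnlockPlan")
		}
	}
}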


@ -10,6 +10,12 @@ import (
"vitess.io/vitess/go/vt/vtgate/engine"
)
// Error messages for CreateView queries
const (
CreateViewDifferentKeyspace string = "Select query does not belong to the same keyspace as the view statement"
CreateViewComplex string = "Complex select queries are not supported in create view statements"
)
// buildGeneralDDLPlan builds a general DDL plan, which can be either normal DDL or online DDL.
// The two behave completely differently, and have two very different primitives.
// We want to be able to dynamically choose between normal/online plans according to Session settings.
@ -18,62 +24,90 @@ import (
// This is why we return a compound primitive (DDL) which contains fully populated primitives (Send & OnlineDDL),
// and which chooses which of the two to invoke at runtime.
func buildGeneralDDLPlan(sql string, ddlStatement sqlparser.DDLStatement, vschema ContextVSchema) (engine.Primitive, error) {
normalDDLPlan, err := buildDDLPlan(sql, ddlStatement, vschema)
normalDDLPlan, onlineDDLPlan, err := buildDDLPlans(sql, ddlStatement, vschema)
if err != nil {
return nil, err
}
onlineDDLPlan, err := buildOnlineDDLPlan(sql, ddlStatement, vschema)
if err != nil {
return nil, err
}
query := sql
// If the query is fully parsed, generate the query from the ast. Otherwise, use the original query
if ddlStatement.IsFullyParsed() {
query = sqlparser.String(ddlStatement)
}
return &engine.DDL{
Keyspace: normalDDLPlan.Keyspace,
SQL: query,
SQL: normalDDLPlan.Query,
DDL: ddlStatement,
NormalDDL: normalDDLPlan,
OnlineDDL: onlineDDLPlan,
}, nil
}
func buildDDLPlan(sql string, ddlStatement sqlparser.DDLStatement, vschema ContextVSchema) (*engine.Send, error) {
func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, vschema ContextVSchema) (*engine.Send, *engine.OnlineDDL, error) {
var table *vindexes.Table
var destination key.Destination
var keyspace *vindexes.Keyspace
var err error
switch ddlStatement.(type) {
switch ddl := ddlStatement.(type) {
case *sqlparser.CreateIndex:
// For Create index, the table must already exist
// We should find the target of the query from this table's location
table, _, _, _, destination, err = vschema.FindTableOrVindex(ddlStatement.GetTable())
keyspace = table.Keyspace
if err != nil {
return nil, err
return nil, nil, err
}
if table == nil {
return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "table does not exist: %s", ddlStatement.GetTable().Name.String())
return nil, nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "table does not exist: %s", ddlStatement.GetTable().Name.String())
}
ddlStatement.SetTable("", table.Name.String())
case *sqlparser.DDL:
// For DDL, it is only required that the keyspace exist
// We should remove the keyspace name from the table name, as the database name in MySQL might be different than the keyspace name
destination, keyspace, _, err = vschema.TargetDestination(ddlStatement.GetTable().Qualifier.String())
if err != nil {
return nil, err
return nil, nil, err
}
ddlStatement.SetTable("", ddlStatement.GetTable().Name.String())
case *sqlparser.CreateView:
// For Create View, we require that the keyspace exist and the select query can be satisfied within the keyspace itself
// We should remove the keyspace name from the table name, as the database name in MySQL might be different than the keyspace name
destination, keyspace, _, err = vschema.TargetDestination(ddl.ViewName.Qualifier.String())
if err != nil {
return nil, nil, err
}
ddl.ViewName.Qualifier = sqlparser.NewTableIdent("")
var selectPlan engine.Primitive
selectPlan, err = createInstructionFor(sqlparser.String(ddl.Select), ddl.Select, vschema)
if err != nil {
return nil, nil, err
}
routePlan, isRoute := selectPlan.(*engine.Route)
if !isRoute {
return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, CreateViewComplex)
}
if keyspace.Name != routePlan.GetKeyspaceName() {
return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, CreateViewDifferentKeyspace)
}
if routePlan.Opcode != engine.SelectUnsharded && routePlan.Opcode != engine.SelectEqualUnique && routePlan.Opcode != engine.SelectScatter {
return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, CreateViewComplex)
}
sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool {
switch tableName := cursor.Node().(type) {
case sqlparser.TableName:
cursor.Replace(sqlparser.TableName{
Name: tableName.Name,
})
}
return true
}, nil)
case *sqlparser.CreateTable:
destination, keyspace, _, err = vschema.TargetDestination(ddlStatement.GetTable().Qualifier.String())
// Remove the keyspace name as the database name might be different.
ddlStatement.SetTable("", ddlStatement.GetTable().Name.String())
if err != nil {
return nil, err
return nil, nil, err
}
default:
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "BUG: unexpected statement type: %T", ddlStatement)
return nil, nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "BUG: unexpected statement type: %T", ddlStatement)
}
if destination == nil {
@ -87,38 +121,14 @@ func buildDDLPlan(sql string, ddlStatement sqlparser.DDLStatement, vschema Conte
}
return &engine.Send{
Keyspace: keyspace,
TargetDestination: destination,
Query: query,
IsDML: false,
SingleShardOnly: false,
}, nil
}
func buildOnlineDDLPlan(query string, ddlStatement sqlparser.DDLStatement, vschema ContextVSchema) (*engine.OnlineDDL, error) {
_, keyspace, _, err := vschema.TargetDestination(ddlStatement.GetTable().Qualifier.String())
if err != nil {
return nil, err
}
// strategy and options will be computed in real time, on Execute()
return &engine.OnlineDDL{
Keyspace: keyspace,
DDL: ddlStatement,
SQL: query,
}, nil
}
func buildVSchemaDDLPlan(ddlStmt sqlparser.DDLStatement, vschema ContextVSchema) (engine.Primitive, error) {
stmt, ok := ddlStmt.(*sqlparser.DDL)
if !ok {
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "Incorrect type %T", ddlStmt)
}
_, keyspace, _, err := vschema.TargetDestination(stmt.Table.Qualifier.String())
if err != nil {
return nil, err
}
return &engine.AlterVSchema{
Keyspace: keyspace,
DDL: stmt,
}, nil
Keyspace: keyspace,
TargetDestination: destination,
Query: query,
IsDML: false,
SingleShardOnly: false,
}, &engine.OnlineDDL{
Keyspace: keyspace,
DDL: ddlStatement,
SQL: query,
}, nil
}
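A rough caller-side sketch (hypothetical wiring, not the actual executor code): buildDDLPlan now returns both the plain Send plan and the OnlineDDL plan, so the choice between them can be deferred to whatever policy is in effect, for example a session-level DDL strategy.
func chooseDDLPlan(sql string, ddlStatement sqlparser.DDLStatement, vschema ContextVSchema, useOnlineDDL bool) (engine.Primitive, error) {
	normalPlan, onlinePlan, err := buildDDLPlan(sql, ddlStatement, vschema)
	if err != nil {
		return nil, err
	}
	// useOnlineDDL is an assumed flag standing in for the real strategy selection.
	if useOnlineDDL {
		return onlinePlan, nil
	}
	return normalPlan, nil
}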


@ -103,7 +103,7 @@ func nameMatch(node sqlparser.Expr, col sqlparser.ColIdent) bool {
func buildDMLPlan(vschema ContextVSchema, dmlType string, stmt sqlparser.Statement, tableExprs sqlparser.TableExprs, where *sqlparser.Where, orderBy sqlparser.OrderBy, limit *sqlparser.Limit, comments sqlparser.Comments, nodes ...sqlparser.SQLNode) (*engine.DML, vindexes.SingleColumn, string, error) {
edml := &engine.DML{}
pb := newPrimitiveBuilder(vschema, newJointab(sqlparser.GetBindvars(stmt)))
rb, err := pb.processDMLTable(tableExprs)
rb, err := pb.processDMLTable(tableExprs, nil)
if err != nil {
return nil, nil, "", err
}


@ -32,8 +32,8 @@ import (
// This file has functions to analyze the FROM clause.
// processDMLTable analyzes the FROM clause for DMLs and returns a route.
func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs) (*route, error) {
if err := pb.processTableExprs(tableExprs); err != nil {
func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs, where sqlparser.Expr) (*route, error) {
if err := pb.processTableExprs(tableExprs, where); err != nil {
return nil, err
}
rb, ok := pb.plan.(*route)
@ -48,28 +48,28 @@ func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs) (*r
// processTableExprs analyzes the FROM clause. It produces a logicalPlan
// with all the routes identified.
func (pb *primitiveBuilder) processTableExprs(tableExprs sqlparser.TableExprs) error {
func (pb *primitiveBuilder) processTableExprs(tableExprs sqlparser.TableExprs, where sqlparser.Expr) error {
if len(tableExprs) == 1 {
return pb.processTableExpr(tableExprs[0])
return pb.processTableExpr(tableExprs[0], where)
}
if err := pb.processTableExpr(tableExprs[0]); err != nil {
if err := pb.processTableExpr(tableExprs[0], where); err != nil {
return err
}
rpb := newPrimitiveBuilder(pb.vschema, pb.jt)
if err := rpb.processTableExprs(tableExprs[1:]); err != nil {
if err := rpb.processTableExprs(tableExprs[1:], where); err != nil {
return err
}
return pb.join(rpb, nil)
return pb.join(rpb, nil, where)
}
// processTableExpr produces a logicalPlan subtree for the given TableExpr.
func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr) error {
func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, where sqlparser.Expr) error {
switch tableExpr := tableExpr.(type) {
case *sqlparser.AliasedTableExpr:
return pb.processAliasedTable(tableExpr)
case *sqlparser.ParenTableExpr:
err := pb.processTableExprs(tableExpr.Exprs)
err := pb.processTableExprs(tableExpr.Exprs, where)
// If it's a route, preserve the parenthesis so things
// don't associate differently when more things are pushed
// into it. FROM a, (b, c) should not become FROM a, b, c.
@ -83,7 +83,7 @@ func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr) erro
}
return err
case *sqlparser.JoinTableExpr:
return pb.processJoin(tableExpr)
return pb.processJoin(tableExpr, where)
}
return fmt.Errorf("BUG: unexpected table expression type: %T", tableExpr)
}
@ -179,7 +179,7 @@ func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTabl
}
sel := &sqlparser.Select{From: sqlparser.TableExprs([]sqlparser.TableExpr{tableExpr})}
if systemTable(tableName.Qualifier.String()) {
if sqlparser.SystemSchema(tableName.Qualifier.String()) {
ks, err := pb.vschema.AnyKeyspace()
if err != nil {
return err
@ -268,7 +268,7 @@ func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTabl
// processJoin produces a logicalPlan subtree for the given Join.
// If the left and right nodes can be part of the same route,
// then it's a route. Otherwise, it's a join.
func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr) error {
func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, where sqlparser.Expr) error {
switch ajoin.Join {
case sqlparser.NormalJoinType, sqlparser.StraightJoinType, sqlparser.LeftJoinType:
case sqlparser.RightJoinType:
@ -276,14 +276,14 @@ func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr) error {
default:
return fmt.Errorf("unsupported: %s", ajoin.Join.ToString())
}
if err := pb.processTableExpr(ajoin.LeftExpr); err != nil {
if err := pb.processTableExpr(ajoin.LeftExpr, where); err != nil {
return err
}
rpb := newPrimitiveBuilder(pb.vschema, pb.jt)
if err := rpb.processTableExpr(ajoin.RightExpr); err != nil {
if err := rpb.processTableExpr(ajoin.RightExpr, where); err != nil {
return err
}
return pb.join(rpb, ajoin)
return pb.join(rpb, ajoin, where)
}
// convertToLeftJoin converts a right join into a left join.
@ -300,7 +300,7 @@ func convertToLeftJoin(ajoin *sqlparser.JoinTableExpr) {
ajoin.Join = sqlparser.LeftJoinType
}
func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr) error {
func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, where sqlparser.Expr) error {
// Merge the symbol tables. In the case of a left join, we have to
// ideally create new symbols that originate from the join primitive.
// However, this is not worth it for now, because the Push functions
@ -317,7 +317,7 @@ func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTab
}
// Try merging the routes.
if !lRoute.JoinCanMerge(pb, rRoute, ajoin) {
if !lRoute.JoinCanMerge(pb, rRoute, ajoin, where) {
return newJoin(pb, rpb, ajoin)
}


@ -34,7 +34,7 @@ func buildInsertPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.P
ins := stmt.(*sqlparser.Insert)
pb := newPrimitiveBuilder(vschema, newJointab(sqlparser.GetBindvars(ins)))
exprs := sqlparser.TableExprs{&sqlparser.AliasedTableExpr{Expr: ins.Table}}
rb, err := pb.processDMLTable(exprs)
rb, err := pb.processDMLTable(exprs, nil)
if err != nil {
return nil, err
}


@ -0,0 +1,37 @@
/*
Copyright 2020 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package planbuilder
import (
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/log"
querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/engine"
)
// buildLockPlan plans the lock tables statement.
func buildLockPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) {
log.Warningf("Lock Tables statement is ignored: %v", stmt)
return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0)), nil
}
// buildUnlockPlan plans the unlock tables statement.
func buildUnlockPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) {
log.Warningf("Unlock Tables statement is ignored: %v", stmt)
return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0)), nil
}
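A minimal usage sketch (hypothetical helper; vschema is any ContextVSchema implementation), showing that these statements plan to an empty result rather than an error:
func planLockExample(vschema ContextVSchema) (engine.Primitive, error) {
	stmt, err := sqlparser.Parse("lock tables t low_priority write")
	if err != nil {
		return nil, err
	}
	// The returned primitive is an empty Rows: zero rows, zero fields, and nothing is
	// sent to any tablet.
	return buildLockPlan(stmt, vschema)
}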


@ -60,6 +60,18 @@ func newMemorySort(plan logicalPlan, orderBy sqlparser.OrderBy) (*memorySort, er
break
}
}
case *sqlparser.UnaryExpr:
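// ORDER BY may now use a unary expression such as "order by binary a desc"; the sort
// column is resolved from the ColName wrapped inside the UnaryExpr (see the new
// "unary expression" plan-test cases below).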
colName, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
return nil, fmt.Errorf("unsupported: memory sort: complex order by expression: %s", sqlparser.String(expr))
}
c := colName.Metadata.(*column)
for i, rc := range ms.ResultColumns() {
if rc.column == c {
colNumber = i
break
}
}
default:
return nil, fmt.Errorf("unsupported: memory sort: complex order by expression: %s", sqlparser.String(expr))
}


@ -91,6 +91,12 @@ func planOAOrdering(pb *primitiveBuilder, orderBy sqlparser.OrderBy, oa *ordered
orderByCol = oa.resultColumns[num].column
case *sqlparser.ColName:
orderByCol = expr.Metadata.(*column)
case *sqlparser.UnaryExpr:
col, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %s", sqlparser.String(expr))
}
orderByCol = col.Metadata.(*column)
default:
return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %v", sqlparser.String(expr))
}
@ -263,6 +269,18 @@ func planRouteOrdering(orderBy sqlparser.OrderBy, node *route) (logicalPlan, err
break
}
}
case *sqlparser.UnaryExpr:
col, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %s", sqlparser.String(expr))
}
c := col.Metadata.(*column)
for i, rc := range node.resultColumns {
if rc.column == c {
colNumber = i
break
}
}
default:
return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %s", sqlparser.String(expr))
}


@ -19,6 +19,7 @@ package planbuilder
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@ -233,6 +234,7 @@ func TestWithDefaultKeyspaceFromFile(t *testing.T) {
tabletType: topodatapb.TabletType_MASTER,
}
testFile(t, "alterVschema_cases.txt", testOutputTempDir, vschema)
testFile(t, "ddl_cases.txt", testOutputTempDir, vschema)
testFile(t, "show_cases.txt", testOutputTempDir, vschema)
}
@ -282,6 +284,13 @@ type vschemaWrapper struct {
sysVarEnabled bool
}
func (vw *vschemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) {
if vw.keyspace == nil {
return nil, errors.New("keyspace not available")
}
return []*vindexes.Keyspace{vw.keyspace}, nil
}
func (vw *vschemaWrapper) KeyspaceExists(keyspace string) bool {
if vw.keyspace != nil {
return vw.keyspace.Name == keyspace
@ -366,7 +375,7 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper) {
fail := false
for tcase := range iterateExecFile(filename) {
t.Run(tcase.comments, func(t *testing.T) {
plan, err := Build(tcase.input, vschema)
plan, err := TestBuilder(tcase.input, vschema)
out := getPlanOrErrorOutput(err, plan)


@ -17,8 +17,6 @@ limitations under the License.
package planbuilder
import (
"strings"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
@ -190,7 +188,7 @@ func (rb *route) Wireup(plan logicalPlan, jt *jointab) error {
return
}
case sqlparser.TableName:
if !systemTable(node.Qualifier.String()) {
if !sqlparser.SystemSchema(node.Qualifier.String()) {
node.Name.Format(buf)
return
}
@ -206,13 +204,6 @@ func (rb *route) Wireup(plan logicalPlan, jt *jointab) error {
return nil
}
func systemTable(qualifier string) bool {
return strings.EqualFold(qualifier, "information_schema") ||
strings.EqualFold(qualifier, "performance_schema") ||
strings.EqualFold(qualifier, "sys") ||
strings.EqualFold(qualifier, "mysql")
}
// procureValues procures and converts the input into
// the expected types for rb.Values.
func (rb *route) procureValues(plan logicalPlan, jt *jointab, val sqlparser.Expr) (sqltypes.PlanValue, error) {
@ -252,7 +243,7 @@ func (rb *route) generateFieldQuery(sel sqlparser.SelectStatement, jt *jointab)
return
}
case sqlparser.TableName:
if !systemTable(node.Qualifier.String()) {
if !sqlparser.SystemSchema(node.Qualifier.String()) {
node.Name.Format(buf)
return
}
@ -304,11 +295,17 @@ func (rb *route) SupplyWeightString(colNumber int) (weightcolNumber int, err err
return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected AST struct for query")
}
aliasExpr, ok := s.SelectExprs[colNumber].(*sqlparser.AliasedExpr)
if !ok {
return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected AST struct for query %T", s.SelectExprs[colNumber])
}
expr := &sqlparser.AliasedExpr{
Expr: &sqlparser.FuncExpr{
Name: sqlparser.NewColIdent("weight_string"),
Exprs: []sqlparser.SelectExpr{
s.SelectExprs[colNumber],
&sqlparser.AliasedExpr{
Expr: aliasExpr.Expr,
},
},
},
}
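// Example (covered by the updated memory-sort plan tests): for
// "select textcol1 as t, count(*) k from user ...", the column added here is
// weight_string(textcol1), built from the aliased expression's underlying Expr
// rather than from the full "textcol1 as t" select expression.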
@ -368,7 +365,7 @@ func (rb *route) isSingleShard() bool {
// JoinCanMerge, SubqueryCanMerge and unionCanMerge have subtly different behaviors.
// The difference in behavior is around SelectReference.
// It's not worth trying to reuse the code between them.
func (rb *route) JoinCanMerge(pb *primitiveBuilder, rrb *route, ajoin *sqlparser.JoinTableExpr) bool {
func (rb *route) JoinCanMerge(pb *primitiveBuilder, rrb *route, ajoin *sqlparser.JoinTableExpr, where sqlparser.Expr) bool {
if rb.eroute.Keyspace.Name != rrb.eroute.Keyspace.Name {
return false
}
@ -386,8 +383,24 @@ func (rb *route) JoinCanMerge(pb *primitiveBuilder, rrb *route, ajoin *sqlparser
}
case engine.SelectReference:
return true
case engine.SelectNext, engine.SelectDBA:
case engine.SelectNext:
return false
case engine.SelectDBA:
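// Two SelectDBA (information_schema) routes may only merge when the WHERE clause
// contains nothing that must be resolved at execution time. For example,
// "select a.id, b.id from information_schema.a as a, information_schema.b as b"
// now collapses into a single route, while queries filtering on table_schema or
// table_name keep separate routes so those predicates can be routed at runtime
// (see the updated info_schema plan tests).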
if rrb.eroute.Opcode != engine.SelectDBA {
return false
}
if where == nil {
return true
}
hasRuntimeRoutingPredicates := false
sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
col, ok := node.(*sqlparser.ColName)
if ok {
hasRuntimeRoutingPredicates = hasRuntimeRoutingPredicates || isTableNameCol(col) || isDbNameCol(col)
}
return !hasRuntimeRoutingPredicates, nil
}, where)
return !hasRuntimeRoutingPredicates
}
if ajoin == nil {
return false


@ -50,27 +50,30 @@ func TestJoinCanMerge(t *testing.T) {
{false, false, false, false, false, false, false, false, true, false},
{false, false, false, false, false, false, false, false, true, false},
{false, false, false, false, false, false, false, false, true, false},
{false, false, false, false, false, false, false, false, true, false},
{false, false, false, false, false, false, false, true, true, false},
{true, true, true, true, true, true, true, true, true, true},
{false, false, false, false, false, false, false, false, true, false},
}
ks := &vindexes.Keyspace{}
lRoute := &route{
// Setting condition will make SelectEqualUnique match itself.
condition: &sqlparser.ColName{},
}
pb := &primitiveBuilder{
plan: lRoute,
}
rRoute := &route{
condition: &sqlparser.ColName{},
}
for left, vals := range testcases {
lRoute.eroute = engine.NewSimpleRoute(engine.RouteOpcode(left), ks)
for right, val := range vals {
rRoute.eroute = engine.NewSimpleRoute(engine.RouteOpcode(right), ks)
assert.Equal(t, val, lRoute.JoinCanMerge(pb, rRoute, nil), fmt.Sprintf("%v:%v", lRoute.eroute.RouteType(), rRoute.eroute.RouteType()))
name := fmt.Sprintf("%d:%d", left, right)
t.Run(name, func(t *testing.T) {
lRoute := &route{
// Setting condition will make SelectEqualUnique match itself.
condition: &sqlparser.ColName{},
}
pb := &primitiveBuilder{
plan: lRoute,
}
rRoute := &route{
condition: &sqlparser.ColName{},
}
lRoute.eroute = engine.NewSimpleRoute(engine.RouteOpcode(left), ks)
rRoute.eroute = engine.NewSimpleRoute(engine.RouteOpcode(right), ks)
assert.Equal(t, val, lRoute.JoinCanMerge(pb, rRoute, nil, nil), fmt.Sprintf("%v:%v", lRoute.eroute.RouteType(), rRoute.eroute.RouteType()))
})
}
}
}


@ -118,7 +118,11 @@ func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, outer *symtab,
return mysql.NewSQLError(mysql.ERCantUseOptionHere, mysql.SSSyntaxErrorOrAccessViolation, "Incorrect usage/placement of 'INTO'")
}
if err := pb.processTableExprs(sel.From); err != nil {
var where sqlparser.Expr
if sel.Where != nil {
where = sel.Where.Expr
}
if err := pb.processTableExprs(sel.From, where); err != nil {
return err
}


@ -17,15 +17,30 @@ limitations under the License.
package planbuilder
import (
"regexp"
"strings"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/proto/vtrpc"
querypb "vitess.io/vitess/go/vt/proto/query"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
)
const (
utf8 = "utf8"
utf8mb4 = "utf8mb4"
both = "both"
charset = "charset"
)
func buildShowPlan(stmt *sqlparser.Show, vschema ContextVSchema) (engine.Primitive, error) {
switch show := stmt.Internal.(type) {
case *sqlparser.ShowBasic:
return buildShowBasicPlan(show, vschema)
case *sqlparser.ShowColumns:
return buildShowColumnsPlan(show, vschema)
case *sqlparser.ShowTableStatus:
@ -35,6 +50,71 @@ func buildShowPlan(stmt *sqlparser.Show, vschema ContextVSchema) (engine.Primiti
}
}
func buildShowBasicPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) {
switch show.Command {
case sqlparser.Charset:
return showCharset(show)
case sqlparser.Collation, sqlparser.Function, sqlparser.Privilege, sqlparser.Procedure,
sqlparser.VariableGlobal, sqlparser.VariableSession:
return showSendAnywhere(show, vschema)
case sqlparser.Database:
ks, err := vschema.AllKeyspace()
if err != nil {
return nil, err
}
var filter *regexp.Regexp
if show.Filter != nil {
filter = sqlparser.LikeToRegexp(show.Filter.Like)
}
if filter == nil {
filter = regexp.MustCompile(".*")
}
rows := make([][]sqltypes.Value, 0, len(ks))
for _, v := range ks {
if filter.MatchString(v.Name) {
rows = append(rows, buildVarCharRow(v.Name))
}
}
return engine.NewRowsPrimitive(rows, buildVarCharFields("Database")), nil
case sqlparser.StatusGlobal, sqlparser.StatusSession:
return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0, 2), buildVarCharFields("Variable_name", "Value")), nil
}
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unknown show query type %s", show.Command.ToString())
}
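A small sketch of the LIKE filtering used for SHOW DATABASES above (the keyspace names here are just example values):
filter := sqlparser.LikeToRegexp("us%") // SQL LIKE pattern converted to a regexp
var dbs []string
for _, name := range []string{"main", "user"} {
	if filter.MatchString(name) {
		dbs = append(dbs, name) // only "user" matches; each match becomes one Database row
	}
}
_ = dbs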
func showSendAnywhere(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) {
ks, err := vschema.FirstSortedKeyspace()
if err != nil {
return nil, err
}
return &engine.Send{
Keyspace: ks,
TargetDestination: key.DestinationAnyShard{},
Query: sqlparser.String(show),
IsDML: false,
SingleShardOnly: true,
}, nil
}
func showCharset(show *sqlparser.ShowBasic) (engine.Primitive, error) {
fields := buildVarCharFields("Charset", "Description", "Default collation")
maxLenField := &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32}
fields = append(fields, maxLenField)
charsets := []string{utf8, utf8mb4}
rows, err := generateCharsetRows(show.Filter, charsets)
if err != nil {
return nil, err
}
return engine.NewRowsPrimitive(rows, fields), nil
}
func buildShowColumnsPlan(show *sqlparser.ShowColumns, vschema ContextVSchema) (engine.Primitive, error) {
if show.DbName != "" {
show.Table.Qualifier = sqlparser.NewTableIdent(show.DbName)
@ -44,7 +124,7 @@ func buildShowColumnsPlan(show *sqlparser.ShowColumns, vschema ContextVSchema) (
return nil, err
}
if table == nil {
return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "table does not exists: %s", show.Table.Name.String())
return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "table does not exists: %s", show.Table.Name.String())
}
if destination == nil {
destination = key.DestinationAnyShard{}
@ -86,3 +166,116 @@ func buildShowTableStatusPlan(show *sqlparser.ShowTableStatus, vschema ContextVS
}, nil
}
func buildVarCharFields(names ...string) []*querypb.Field {
fields := make([]*querypb.Field, len(names))
for i, v := range names {
fields[i] = &querypb.Field{
Name: v,
Type: sqltypes.VarChar,
Charset: mysql.CharacterSetUtf8,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG),
}
}
return fields
}
func buildVarCharRow(values ...string) []sqltypes.Value {
row := make([]sqltypes.Value, len(values))
for i, v := range values {
row[i] = sqltypes.NewVarChar(v)
}
return row
}
func generateCharsetRows(showFilter *sqlparser.ShowFilter, colNames []string) ([][]sqltypes.Value, error) {
if showFilter == nil {
return buildCharsetRows(both), nil
}
var filteredColName string
var err error
if showFilter.Like != "" {
filteredColName, err = checkLikeOpt(showFilter.Like, colNames)
if err != nil {
return nil, err
}
} else {
cmpExp, ok := showFilter.Filter.(*sqlparser.ComparisonExpr)
if !ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expect a 'LIKE' or '=' expression")
}
left, ok := cmpExp.Left.(*sqlparser.ColName)
if !ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expect left side to be 'charset'")
}
leftOk := left.Name.EqualString(charset)
if leftOk {
literal, ok := cmpExp.Right.(*sqlparser.Literal)
if !ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "we expect the right side to be a string")
}
rightString := string(literal.Val)
switch cmpExp.Operator {
case sqlparser.EqualOp:
for _, colName := range colNames {
if rightString == colName {
filteredColName = colName
}
}
case sqlparser.LikeOp:
filteredColName, err = checkLikeOpt(rightString, colNames)
if err != nil {
return nil, err
}
}
}
}
return buildCharsetRows(filteredColName), nil
}
func buildCharsetRows(colName string) [][]sqltypes.Value {
row0 := buildVarCharRow(
"utf8",
"UTF-8 Unicode",
"utf8_general_ci")
row0 = append(row0, sqltypes.NewInt32(3))
row1 := buildVarCharRow(
"utf8mb4",
"UTF-8 Unicode",
"utf8mb4_general_ci")
row1 = append(row1, sqltypes.NewInt32(4))
switch colName {
case utf8:
return [][]sqltypes.Value{row0}
case utf8mb4:
return [][]sqltypes.Value{row1}
case both:
return [][]sqltypes.Value{row0, row1}
}
return [][]sqltypes.Value{}
}
func checkLikeOpt(likeOpt string, colNames []string) (string, error) {
likeRegexp := strings.ReplaceAll(likeOpt, "%", ".*")
for _, v := range colNames {
match, err := regexp.MatchString(likeRegexp, v)
if err != nil {
return "", err
}
if match {
return v, nil
}
}
return "", nil
}
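For example, with the two charset names defined above, a LIKE filter turns '%' into '.*' and the first matching name wins:
name, _ := checkLikeOpt("utf8m%", []string{utf8, utf8mb4}) // -> "utf8mb4"
rows := buildCharsetRows(name)                             // one row: utf8mb4 / UTF-8 Unicode / utf8mb4_general_ci / Maxlen 4
_ = rows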


@ -36,12 +36,12 @@ func (pb *primitiveBuilder) findSysInfoRoutingPredicates(expr sqlparser.Expr, ru
}
if isTableSchema {
if rut.eroute.SysTableTableSchema != nil {
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "two predicates for table_schema not supported")
if rut.eroute.SysTableTableSchema != nil && !evalengine.AreExprEqual(rut.eroute.SysTableTableSchema, out) {
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "two predicates for specifying the database are not supported")
}
rut.eroute.SysTableTableSchema = out
} else {
if rut.eroute.SysTableTableName != nil {
if rut.eroute.SysTableTableName != nil && !evalengine.AreExprEqual(rut.eroute.SysTableTableName, out) {
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "two predicates for table_name not supported")
}
rut.eroute.SysTableTableName = out
@ -70,7 +70,15 @@ func isTableSchemaOrName(e sqlparser.Expr) (isTableSchema bool, isTableName bool
if !ok {
return false, false
}
return col.Name.EqualString("table_schema"), col.Name.EqualString("table_name")
return isDbNameCol(col), isTableNameCol(col)
}
func isDbNameCol(col *sqlparser.ColName) bool {
return col.Name.EqualString("table_schema") || col.Name.EqualString("constraint_schema") || col.Name.EqualString("schema_name")
}
func isTableNameCol(col *sqlparser.ColName) bool {
return col.Name.EqualString("table_name")
}
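For illustration, constructing the columns directly:
schemaCol := &sqlparser.ColName{Name: sqlparser.NewColIdent("schema_name")}
tableCol := &sqlparser.ColName{Name: sqlparser.NewColIdent("table_name")}
_ = isDbNameCol(schemaCol)   // true: schema_name is treated like table_schema and constraint_schema
_ = isTableNameCol(tableCol) // true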
func extractInfoSchemaRoutingPredicate(in sqlparser.Expr) (bool, evalengine.Expr, error) {

134
go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt vendored Normal file

@ -0,0 +1,134 @@
# Create vindex
"alter vschema create vindex hash_vdx using hash"
{
"QueryType": "DDL",
"Original": "alter vschema create vindex hash_vdx using hash",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"query": "alter vschema create vindex hash_vdx using hash"
}
}
# Create vindex with qualifier
"alter vschema create vindex user.hash_vdx using hash"
{
"QueryType": "DDL",
"Original": "alter vschema create vindex user.hash_vdx using hash",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"query": "alter vschema create vindex user.hash_vdx using hash"
}
}
# Drop vindex
"alter vschema drop vindex hash_vdx"
{
"QueryType": "DDL",
"Original": "alter vschema drop vindex hash_vdx",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"query": "alter vschema drop vindex hash_vdx"
}
}
# Add table
"alter vschema add table a"
{
"QueryType": "DDL",
"Original": "alter vschema add table a",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"query": "alter vschema add table a"
}
}
# Add sequence
"alter vschema add sequence a_seq"
{
"QueryType": "DDL",
"Original": "alter vschema add sequence a_seq",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"query": "alter vschema add sequence a_seq"
}
}
# Add auto_increment with qualifier
"alter vschema on user.a add auto_increment id using a_seq"
{
"QueryType": "DDL",
"Original": "alter vschema on user.a add auto_increment id using a_seq",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"query": "alter vschema on user.a add auto_increment id using a_seq"
}
}
# Drop table
"alter vschema drop table a"
{
"QueryType": "DDL",
"Original": "alter vschema drop table a",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"query": "alter vschema drop table a"
}
}
# Add Vindex
"alter vschema on a add vindex hash (id)"
{
"QueryType": "DDL",
"Original": "alter vschema on a add vindex hash (id)",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"query": "alter vschema on a add vindex hash (id)"
}
}
# Drop Vindex
"alter vschema on a drop vindex hash"
{
"QueryType": "DDL",
"Original": "alter vschema on a drop vindex hash",
"Instructions": {
"OperatorType": "AlterVSchema",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"query": "alter vschema on a drop vindex hash"
}
}


@ -140,7 +140,7 @@
"Name": "user",
"Sharded": true
},
"Query": "create index a on user (id)"
"Query": "alter table user add index a (id)"
}
}
@ -155,7 +155,7 @@
"Name": "user",
"Sharded": true
},
"Query": "create index a on user (id)"
"Query": "alter table user add index a (id)"
}
}
@ -170,6 +170,520 @@
"Name": "main",
"Sharded": false
},
"Query": "create index a on unknown (id)"
"Query": "alter table unknown add index a (id)"
}
}
# Create View with qualifier
"create view user.a as select* from user"
{
"QueryType": "DDL",
"Original": "create view user.a as select* from user",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view a as select * from user"
}
}
# create view with qualifier in select as well
"create view user.a as select* from user.user"
{
"QueryType": "DDL",
"Original": "create view user.a as select* from user.user",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view a as select * from user"
}
}
# create view with No column referenced
"create view user.view_a as select 1 from user"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select 1 from user",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select 1 from user"
}
}
# create view with '*' expression for simple route
"create view user.view_a as select user.* from user"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user.* from user",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user.* from user"
}
}
# create view with unqualified '*' expression for simple route
"create view user.view_a as select * from user"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from user",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from user"
}
}
# create view with fully qualified '*' expression for simple route
"create view user.view_a as select user.user.* from user.user"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user.user.* from user.user",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user.* from user"
}
}
# create view with select * from authoritative table
"create view user.view_a as select * from authoritative"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from authoritative",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from authoritative"
}
}
# create view with select * from join of authoritative tables
"create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from authoritative as a join authoritative as b on a.user_id = b.user_id"
}
}
# create view with select * from qualified authoritative table
"create view user.view_a as select a.* from authoritative a"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select a.* from authoritative a",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select a.* from authoritative as a"
}
}
# create view with select * from intermixing of authoritative table with non-authoritative results in no expansion
"create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from authoritative join user on authoritative.user_id = user.id"
}
}
# create view with select authoritative.* with intermixing still expands
"create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user.id, a.*, user.col1 from authoritative as a join user on a.user_id = user.id"
}
}
# create view with auto-resolve anonymous columns for simple route
"create view user.view_a as select col from user join user_extra on user.id = user_extra.user_id"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select col from user join user_extra on user.id = user_extra.user_id",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select col from user join user_extra on user.id = user_extra.user_id"
}
}
# create view with join that can be solved in each shard separately
"create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user.id from user join user_extra on user.id = user_extra.user_id"
}
}
# create view with last_insert_id for unsharded route
"create view main.view_a as select last_insert_id() as x from main.unsharded"
{
"QueryType": "DDL",
"Original": "create view main.view_a as select last_insert_id() as x from main.unsharded",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"Query": "create view view_a as select :__lastInsertId as x from unsharded"
}
}
# create view with select from pinned table
"create view user.view_a as select * from pin_test"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from pin_test",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from pin_test"
}
}
# create view with Expression with single-route reference
"create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id"
}
}
# create view with Comments
"create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id"
}
}
# create view with for update
"create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update"
}
}
# create view with Case preservation
"create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id"
}
}
# create view with syntax error
"create view user.view_a as the quick brown fox"
"syntax error at position 31 near 'the'"
# create view with Hex number is not treated as a simple value
"create view user.view_a as select * from user where id = 0x04"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from user where id = 0x04",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from user where id = 0x04"
}
}
# create view with limit works if it can be dropped
"create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from user where `name` = 'abc' and id = 4 limit 5"
}
}
# create view with Multiple parenthesized expressions
"create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from user where id = 4 and `name` = 'abc' limit 5"
}
}
# create view with Multiple parenthesized expressions
"create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from user where id = 4 and `name` = 'abc' limit 5"
}
}
# create view with Column Aliasing with Table.Column
"create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user0_.col as col0_ from user as user0_ where id = 1 order by user0_.col asc"
}
}
# create view with Column Aliasing with Column
"create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select user0_.col as col0_ from user as user0_ where id = 1 order by col0_ desc"
}
}
# create view with Booleans and parenthesis
"create view user.view_a as select * from user where (id = 1) AND name = true"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from user where (id = 1) AND name = true",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from user where id = 1 and `name` = true"
}
}
# create view with union with the same target shard
"create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from music where user_id = 1 union select * from user where id = 1"
}
}
# create view with subquery in unsharded keyspace
"create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a"
{
"QueryType": "DDL",
"Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"Query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
}
}
# create view with subquery in unsharded keyspace with IN clause
"create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
{
"QueryType": "DDL",
"Original": "create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"Query": "create view view_a as select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
}
}
# create view with subquery in unsharded keyspace with UNION clause
"create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5"
{
"QueryType": "DDL",
"Original": "create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"Query": "create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id asc limit 5"
}
}
# create view with subquery in unsharded keyspace with multiple UNION clauses
"create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
{
"QueryType": "DDL",
"Original": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
}
}
# create view with subquery in unsharded keyspace with UNION clauses in subqueries
"create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)"
{
"QueryType": "DDL",
"Original": "create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"Query": "create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select `name` from unsharded)"
}
}
# create view with testing SingleRow Projection
"create view user.view_a as select 42 from user"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select 42 from user",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select 42 from user"
}
}
# create view with sql_calc_found_rows without limit
"create view user.view_a as select sql_calc_found_rows * from music where user_id = 1"
{
"QueryType": "DDL",
"Original": "create view user.view_a as select sql_calc_found_rows * from music where user_id = 1",
"Instructions": {
"OperatorType": "DDL",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"Query": "create view view_a as select * from music where user_id = 1"
}
}


@ -1683,7 +1683,7 @@
# query trying to query two different keyspaces at the same time
"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'"
"two predicates for table_schema not supported"
"two predicates for specifying the database are not supported"
# information_schema query using database() func
"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()"


@ -301,32 +301,14 @@
"QueryType": "SELECT",
"Original": "select a.id,b.id from information_schema.a as a, information_schema.b as b",
"Instructions": {
"OperatorType": "Join",
"Variant": "Join",
"JoinColumnIndexes": "-1,1",
"TableName": "_",
"Inputs": [
{
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select a.id from information_schema.a as a where 1 != 1",
"Query": "select a.id from information_schema.a as a"
},
{
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select b.id from information_schema.b as b where 1 != 1",
"Query": "select b.id from information_schema.b as b"
}
]
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select a.id, b.id from information_schema.a as a, information_schema.b as b where 1 != 1",
"Query": "select a.id, b.id from information_schema.a as a, information_schema.b as b"
}
}
@ -2188,37 +2170,19 @@
}
# information schema join
"select 42 from information_schema.a join information_schema.b"
"select * from information_schema.a join information_schema.b"
{
"QueryType": "SELECT",
"Original": "select 42 from information_schema.a join information_schema.b",
"Original": "select * from information_schema.a join information_schema.b",
"Instructions": {
"OperatorType": "Join",
"Variant": "Join",
"JoinColumnIndexes": "-1",
"TableName": "_",
"Inputs": [
{
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select 42 from information_schema.a where 1 != 1",
"Query": "select 42 from information_schema.a"
},
{
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select 1 from information_schema.b where 1 != 1",
"Query": "select 1 from information_schema.b"
}
]
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select * from information_schema.a join information_schema.b where 1 != 1",
"Query": "select * from information_schema.a join information_schema.b"
}
}
@ -2258,3 +2222,228 @@
]
}
}
#rails_query 2
"SELECT * FROM information_schema.schemata WHERE schema_name = 'user'"
{
"QueryType": "SELECT",
"Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select * from information_schema.schemata where 1 != 1",
"Query": "select * from information_schema.schemata where schema_name = :__vtschemaname",
"SysTableTableSchema": "VARBINARY(\"user\")"
}
}
#rails_query 3
"SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'"
{
"QueryType": "SELECT",
"Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
"Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :__vttablename",
"SysTableTableName": "VARBINARY(\"table_name\")",
"SysTableTableSchema": "VARBINARY(\"schema_name\")"
}
}
#rails_query 4
"SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'"
{
"QueryType": "SELECT",
"Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
"Instructions": {
"OperatorType": "Join",
"Variant": "Join",
"JoinColumnIndexes": "1,2,3,4,-1,-2",
"TableName": "_",
"Inputs": [
{
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select rc.update_rule as on_update, rc.delete_rule as on_delete, rc.constraint_schema, rc.constraint_name from information_schema.referential_constraints as rc where 1 != 1",
"Query": "select rc.update_rule as on_update, rc.delete_rule as on_delete, rc.constraint_schema, rc.constraint_name from information_schema.referential_constraints as rc where rc.constraint_schema = :__vtschemaname and rc.table_name = :__vttablename",
"SysTableTableName": "VARBINARY(\"table_name\")",
"SysTableTableSchema": "VARBINARY(\"table_schema\")"
},
{
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name` from information_schema.key_column_usage as fk where 1 != 1",
"Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name` from information_schema.key_column_usage as fk where fk.constraint_schema = :rc_constraint_schema and fk.constraint_name = :rc_constraint_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :__vttablename",
"SysTableTableName": "VARBINARY(\"table_name\")",
"SysTableTableSchema": "VARBINARY(\"table_schema\")"
}
]
}
}
#rails_query 5
"SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'"
{
"QueryType": "SELECT",
"Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
"Instructions": {
"OperatorType": "Join",
"Variant": "Join",
"JoinColumnIndexes": "-1,-2",
"TableName": "_",
"Inputs": [
{
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression, cc.constraint_schema from information_schema.check_constraints as cc where 1 != 1",
"Query": "select cc.constraint_name as `name`, cc.check_clause as expression, cc.constraint_schema from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname",
"SysTableTableSchema": "VARBINARY(\"constraint_schema\")"
},
{
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select 1 from information_schema.table_constraints as tc where 1 != 1",
"Query": "select 1 from information_schema.table_constraints as tc where tc.constraint_schema = :cc_constraint_schema and tc.constraint_name = :cc_constraint_name and tc.table_schema = :__vtschemaname and tc.table_name = :__vttablename",
"SysTableTableName": "VARBINARY(\"table_name\")",
"SysTableTableSchema": "VARBINARY(\"table_schema\")"
}
]
}
}
#rails_query 6
"SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index"
{
"QueryType": "SELECT",
"Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
"Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :__vttablename order by seq_in_index asc",
"SysTableTableName": "VARBINARY(\"table_name\")",
"SysTableTableSchema": "VARBINARY(\"table_schema\")"
}
}
#rails_query 7
"SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'"
{
"QueryType": "SELECT",
"Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
"Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :__vttablename and column_name = 'column_name'",
"SysTableTableName": "VARBINARY(\"table_name\")",
"SysTableTableSchema": "VARBINARY(\"table_schema\")"
}
}
#rails_query 8
"SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'"
{
"QueryType": "SELECT",
"Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
"Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'"
}
}
#rails_query 9
"SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery"
{
"QueryType": "SELECT",
"Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
"Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
"SysTableTableSchema": "VARBINARY(\"table_schema\")"
}
}
#rails_query 10
"SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'"
{
"QueryType": "SELECT",
"Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
"Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :__vttablename",
"SysTableTableName": "VARBINARY(\"table_name\")",
"SysTableTableSchema": "VARBINARY(\"table_schema\")"
}
}
# two predicates specifying the database for the same table work if the database is the same
"SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.table_schema = 'a'"
{
"QueryType": "SELECT",
"Original": "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.table_schema = 'a'",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectDBA",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"FieldQuery": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where 1 != 1",
"Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname and cc.table_schema = :__vtschemaname",
"SysTableTableSchema": "VARBINARY(\"a\")"
}
}


@ -29,3 +29,33 @@
"Query": "select is_free_lock('xyz') from dual"
}
}
# lock tables read
"lock tables t as x read local"
{
"QueryType": "LOCK_TABLES",
"Original": "lock tables t as x read local",
"Instructions": {
"OperatorType": "Rows"
}
}
# lock tables write
"lock tables t low_priority write"
{
"QueryType": "LOCK_TABLES",
"Original": "lock tables t low_priority write",
"Instructions": {
"OperatorType": "Rows"
}
}
# unlock tables
"unlock tables"
{
"QueryType": "UNLOCK_TABLES",
"Original": "unlock tables",
"Instructions": {
"OperatorType": "Rows"
}
}


@ -182,10 +182,10 @@
# scatter aggregate with memory sort and order by number, reuse weight_string
# we have to use a meaningless construct to test this.
"select textcol1, count(*) k from user group by textcol1 order by textcol1, k, textcol1"
"select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1"
{
"QueryType": "SELECT",
"Original": "select textcol1, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
"Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
"Instructions": {
"OperatorType": "Sort",
"Variant": "Memory",
@ -205,9 +205,9 @@
"Name": "user",
"Sharded": true
},
"FieldQuery": "select textcol1, count(*) as k, weight_string(textcol1) from user where 1 != 1 group by textcol1",
"FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from user where 1 != 1 group by textcol1",
"OrderBy": "2 ASC, 2 ASC",
"Query": "select textcol1, count(*) as k, weight_string(textcol1) from user group by textcol1 order by textcol1 asc, textcol1 asc",
"Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from user group by textcol1 order by textcol1 asc, textcol1 asc",
"Table": "user"
}
]
@ -491,3 +491,60 @@
]
}
}
# unary expression
"select a from user order by binary a desc"
{
"QueryType": "SELECT",
"Original": "select a from user order by binary a desc",
"Instructions": {
"OperatorType": "Route",
"Variant": "SelectScatter",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"FieldQuery": "select a from user where 1 != 1",
"OrderBy": "0 DESC",
"Query": "select a from user order by binary a desc",
"Table": "user"
}
}
# unary expression in join query
"select u.a from user u join music m on u.a = m.a order by binary a desc"
{
"QueryType": "SELECT",
"Original": "select u.a from user u join music m on u.a = m.a order by binary a desc",
"Instructions": {
"OperatorType": "Join",
"Variant": "Join",
"JoinColumnIndexes": "-1",
"TableName": "user_music",
"Inputs": [
{
"OperatorType": "Route",
"Variant": "SelectScatter",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"FieldQuery": "select u.a from user as u where 1 != 1",
"OrderBy": "0 DESC",
"Query": "select u.a from user as u order by binary a desc",
"Table": "user"
},
{
"OperatorType": "Route",
"Variant": "SelectScatter",
"Keyspace": {
"Name": "user",
"Sharded": true
},
"FieldQuery": "select 1 from music as m where 1 != 1",
"Query": "select 1 from music as m where m.a = :u_a",
"Table": "music"
}
]
}
}


@ -167,3 +167,97 @@
# show columns fails as table does not exists in user keyspace
"show full columns from user.unsharded"
"table unsharded not found"
# show charset
"show charset"
{
"QueryType": "SHOW",
"Original": "show charset",
"Instructions": {
"OperatorType": "Rows"
}
}
# show function
"show function status"
{
"QueryType": "SHOW",
"Original": "show function status",
"Instructions": {
"OperatorType": "Send",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"TargetDestination": "AnyShard()",
"IsDML": false,
"Query": "show function status",
"SingleShardOnly": true
}
}
# show privileges
"show privileges"
{
"QueryType": "SHOW",
"Original": "show privileges",
"Instructions": {
"OperatorType": "Send",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"TargetDestination": "AnyShard()",
"IsDML": false,
"Query": "show privileges",
"SingleShardOnly": true
}
}
# show procedure status
"show procedure status"
{
"QueryType": "SHOW",
"Original": "show procedure status",
"Instructions": {
"OperatorType": "Send",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"TargetDestination": "AnyShard()",
"IsDML": false,
"Query": "show procedure status",
"SingleShardOnly": true
}
}
# show variables
"show variables"
{
"QueryType": "SHOW",
"Original": "show variables",
"Instructions": {
"OperatorType": "Send",
"Keyspace": {
"Name": "main",
"Sharded": false
},
"TargetDestination": "AnyShard()",
"IsDML": false,
"Query": "show variables",
"SingleShardOnly": true
}
}
# show databases
"show databases"
{
"QueryType": "SHOW",
"Original": "show databases",
"Instructions": {
"OperatorType": "Rows"
}
}


@ -377,3 +377,35 @@
# Multi shard query using into outfile s3
"select * from user into outfile s3 'out_file_name'"
"unsupported: this construct is not supported on sharded keyspace"
# two predicates specifying the database for the same table are not supported if the databases differ
"SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'constraint_schema' AND cc.table_schema = 'a'"
"two predicates for specifying the database are not supported"
# create view with Cannot auto-resolve for cross-shard joins
"create view user.view_a as select col from user join user_extra"
"symbol col not found"
# create view with join that cannot be served in each shard separately
"create view user.view_a as select user_extra.id from user join user_extra"
"Complex select queries are not supported in create view statements"
# create view with sharded limit
"create view user.view_a as select id from user order by id limit 10"
"Complex select queries are not supported in create view statements"
# create view with top level subquery in select
"create view user.view_a as select a, (select col from user) from unsharded"
"Complex select queries are not supported in create view statements"
# create view with sql_calc_found_rows with limit
"create view user.view_a as select sql_calc_found_rows * from music limit 100"
"Complex select queries are not supported in create view statements"
# create view with sql_calc_found_rows with group by and having
"create view user.view_a as select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2"
"Complex select queries are not supported in create view statements"
# create view with incompatible keyspaces
"create view main.view_a as select * from user.user_extra"
"Select query does not belong to the same keyspace as the view statement"


@ -103,7 +103,7 @@ func (vc *vcursorImpl) GetKeyspace() string {
return vc.keyspace
}
func (vc *vcursorImpl) ExecuteVSchema(keyspace string, vschemaDDL sqlparser.DDLStatement) error {
func (vc *vcursorImpl) ExecuteVSchema(keyspace string, vschemaDDL *sqlparser.AlterVschema) error {
srvVschema := vc.vm.GetCurrentSrvVschema()
if srvVschema == nil {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema not loaded")
@ -117,8 +117,8 @@ func (vc *vcursorImpl) ExecuteVSchema(keyspace string, vschemaDDL sqlparser.DDLS
// Resolve the keyspace either from the table qualifier or the target keyspace
var ksName string
if !vschemaDDL.GetTable().IsEmpty() {
ksName = vschemaDDL.GetTable().Qualifier.String()
if !vschemaDDL.Table.IsEmpty() {
ksName = vschemaDDL.Table.Qualifier.String()
}
if ksName == "" {
ksName = keyspace
@ -275,7 +275,7 @@ func (vc *vcursorImpl) FindTableOrVindex(name sqlparser.TableName) (*vindexes.Ta
// if there is one. If the keyspace specified in the target cannot be
// identified, it returns an error.
func (vc *vcursorImpl) DefaultKeyspace() (*vindexes.Keyspace, error) {
if vc.keyspace == "" {
if ignoreKeyspace(vc.keyspace) {
return nil, errNoKeyspace
}
ks, ok := vc.vschema.Keyspaces[vc.keyspace]
@ -332,6 +332,17 @@ func (vc *vcursorImpl) KeyspaceExists(ks string) bool {
return vc.vschema.Keyspaces[ks] != nil
}
func (vc *vcursorImpl) AllKeyspace() ([]*vindexes.Keyspace, error) {
if len(vc.vschema.Keyspaces) == 0 {
return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "no keyspaces available")
}
var kss []*vindexes.Keyspace
for _, ks := range vc.vschema.Keyspaces {
kss = append(kss, ks.Keyspace)
}
return kss, nil
}
// TargetString returns the current TargetString of the session.
func (vc *vcursorImpl) TargetString() string {
return vc.safeSession.TargetString
@ -451,7 +462,7 @@ func (vc *vcursorImpl) SetTarget(target string) error {
if err != nil {
return err
}
if _, ok := vc.vschema.Keyspaces[keyspace]; keyspace != "" && !ok {
if _, ok := vc.vschema.Keyspaces[keyspace]; !ignoreKeyspace(keyspace) && !ok {
return mysql.NewSQLError(mysql.ERBadDb, mysql.SSSyntaxErrorOrAccessViolation, "Unknown database '%s'", keyspace)
}
@ -462,6 +473,10 @@ func (vc *vcursorImpl) SetTarget(target string) error {
return nil
}
func ignoreKeyspace(keyspace string) bool {
return keyspace == "" || sqlparser.SystemSchema(keyspace)
}
func (vc *vcursorImpl) SetUDV(key string, value interface{}) error {
bindValue, err := sqltypes.BuildBindVariable(value)
if err != nil {


@ -80,11 +80,12 @@ var vexecInsertTemplates = []string{
migration_statement,
strategy,
options,
ddl_action,
requested_timestamp,
migration_context,
migration_status
) VALUES (
'val', 'val', 'val', 'val', 'val', 'val', 'val', 'val', FROM_UNIXTIME(0), 'val', 'val'
'val', 'val', 'val', 'val', 'val', 'val', 'val', 'val', 'val', FROM_UNIXTIME(0), 'val', 'val'
)`,
}
@ -95,10 +96,12 @@ var emptyResult = &sqltypes.Result{
var ghostOverridePath = flag.String("gh-ost-path", "", "override default gh-ost binary full path")
var ptOSCOverridePath = flag.String("pt-osc-path", "", "override default pt-online-schema-change binary full path")
var migrationCheckInterval = flag.Duration("migration_check_interval", 1*time.Minute, "Interval between migration checks")
var migrationNextCheckInterval = 5 * time.Second
const (
maxPasswordLength = 32 // MySQL's *replication* password may not exceed 32 characters
staleMigrationMinutes = 10
maxPasswordLength = 32 // MySQL's *replication* password may not exceed 32 characters
staleMigrationMinutes = 10
progressPctFull float64 = 100.0
)
var (
@ -119,10 +122,11 @@ type Executor struct {
shard string
dbName string
initMutex sync.Mutex
migrationMutex sync.Mutex
migrationRunning int64
lastMigrationUUID string
initMutex sync.Mutex
migrationMutex sync.Mutex
migrationRunning int64
lastMigrationUUID string
tickReentranceFlag int64
ticks *timer.Timer
isOpen bool
@ -223,6 +227,7 @@ func (e *Executor) Open() error {
}
e.pool.Open(e.env.Config().DB.AppWithDB(), e.env.Config().DB.DbaWithDB(), e.env.Config().DB.AppDebugWithDB())
e.ticks.Start(e.onMigrationCheckTick)
e.triggerNextCheckInterval()
if _, err := sqlparser.QueryMatchesTemplates("select 1 from dual", vexecUpdateTemplates); err != nil {
// this validates vexecUpdateTemplates
@ -247,6 +252,11 @@ func (e *Executor) Close() {
e.isOpen = false
}
// triggerNextCheckInterval schedules the next check tick sooner than the normal interval
func (e *Executor) triggerNextCheckInterval() {
e.ticks.TriggerAfter(migrationNextCheckInterval)
}
func (e *Executor) ghostPanicFlagFileName(uuid string) string {
return path.Join(os.TempDir(), fmt.Sprintf("ghost.%s.panic.flag", uuid))
}
@ -351,6 +361,25 @@ func (e *Executor) tableExists(ctx context.Context, tableName string) (bool, err
return (row != nil), nil
}
// executeDirectly runs a DDL query directly on the backend MySQL server
func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.OnlineDDL) error {
e.migrationMutex.Lock()
defer e.migrationMutex.Unlock()
conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB())
if err != nil {
return err
}
defer conn.Close()
if _, err := conn.ExecuteFetch(onlineDDL.SQL, 0, false); err != nil {
return err
}
_ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull)
return nil
}
// ExecuteWithGhost validates and runs a gh-ost process.
// Validation includes testing the backend MySQL server and the gh-ost binary itself
// Execution runs a dry run first, then the actual migration
@ -958,6 +987,46 @@ func (e *Executor) scheduleNextMigration(ctx context.Context) error {
return err
}
func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.OnlineDDL) error {
failMigration := func(err error) error {
_ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed)
return err
}
ddlAction, err := onlineDDL.GetAction()
if err != nil {
return failMigration(err)
}
switch ddlAction {
case sqlparser.CreateDDLAction, sqlparser.DropDDLAction:
go func() {
if err := e.executeDirectly(ctx, onlineDDL); err != nil {
failMigration(err)
}
}()
case sqlparser.AlterDDLAction:
switch onlineDDL.Strategy {
case schema.DDLStrategyGhost:
go func() {
if err := e.ExecuteWithGhost(ctx, onlineDDL); err != nil {
failMigration(err)
}
}()
case schema.DDLStrategyPTOSC:
go func() {
if err := e.ExecuteWithPTOSC(ctx, onlineDDL); err != nil {
failMigration(err)
}
}()
default:
{
return failMigration(fmt.Errorf("Unsupported strategy: %+v", onlineDDL.Strategy))
}
}
}
return nil
}
func (e *Executor) runNextMigration(ctx context.Context) error {
e.migrationMutex.Lock()
defer e.migrationMutex.Unlock()
@ -983,25 +1052,7 @@ func (e *Executor) runNextMigration(ctx context.Context) error {
Options: row["options"].ToString(),
Status: schema.OnlineDDLStatus(row["migration_status"].ToString()),
}
switch onlineDDL.Strategy {
case schema.DDLStrategyGhost:
go func() {
if err := e.ExecuteWithGhost(ctx, onlineDDL); err != nil {
_ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed)
}
}()
case schema.DDLStrategyPTOSC:
go func() {
if err := e.ExecuteWithPTOSC(ctx, onlineDDL); err != nil {
_ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed)
}
}()
default:
{
_ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed)
return fmt.Errorf("Unsupported strategy: %+v", onlineDDL.Strategy)
}
}
e.executeMigration(ctx, onlineDDL)
// the query should only ever return a single row at the most
// but let's make it also explicit here that we only run a single migration
if i == 0 {
@ -1222,6 +1273,20 @@ func (e *Executor) gcArtifacts(ctx context.Context) error {
// onMigrationCheckTick drives the lifecycle of all migrations
func (e *Executor) onMigrationCheckTick() {
// This function can be called by multiple triggers. First, there's the normal ticker.
// Then, any time a migration completes, we set a timer to trigger this function.
// Also, any time a new INSERT arrives, we set a timer to trigger this function.
// Some of these may be correlated. To avoid spamming of this function we:
// - ensure the function is non-reentrant, using tickReentranceFlag
// - clean up tickReentranceFlag 1 second after function completes; this throttles calls to
// this function at no more than 1/sec rate.
if atomic.CompareAndSwapInt64(&e.tickReentranceFlag, 0, 1) {
defer time.AfterFunc(time.Second, func() { atomic.StoreInt64(&e.tickReentranceFlag, 0) })
} else {
// An instance of this function is already running
return
}
if e.tabletTypeFunc() != topodatapb.TabletType_MASTER {
return
}
@ -1229,6 +1294,7 @@ func (e *Executor) onMigrationCheckTick() {
log.Errorf("Executor.onMigrationCheckTick(): empty keyspace")
return
}
ctx := context.Background()
if err := e.initSchema(ctx); err != nil {
log.Error(err)
@ -1390,6 +1456,54 @@ func (e *Executor) retryMigration(ctx context.Context, whereExpr string) (result
return result, err
}
// onSchemaMigrationStatus is called when a status is set/changed for a running migration
func (e *Executor) onSchemaMigrationStatus(ctx context.Context, uuid string, status schema.OnlineDDLStatus, dryRun bool, progressPct float64) (err error) {
if dryRun && status != schema.OnlineDDLStatusFailed {
// We don't consider dry-run reports unless there's a failure
return nil
}
switch status {
case schema.OnlineDDLStatusReady:
{
err = e.updateMigrationTimestamp(ctx, "ready_timestamp", uuid)
}
case schema.OnlineDDLStatusRunning:
{
_ = e.updateMigrationStartedTimestamp(ctx, uuid)
err = e.updateMigrationTimestamp(ctx, "liveness_timestamp", uuid)
}
case schema.OnlineDDLStatusComplete:
{
progressPct = progressPctFull
_ = e.updateMigrationStartedTimestamp(ctx, uuid)
err = e.updateMigrationTimestamp(ctx, "completed_timestamp", uuid)
}
case schema.OnlineDDLStatusFailed:
{
_ = e.updateMigrationStartedTimestamp(ctx, uuid)
err = e.updateMigrationTimestamp(ctx, "completed_timestamp", uuid)
}
}
if err != nil {
return err
}
if err = e.updateMigrationStatus(ctx, uuid, status); err != nil {
return err
}
if err = e.updateMigrationProgress(ctx, uuid, progressPct); err != nil {
return err
}
if !dryRun {
switch status {
case schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed:
e.triggerNextCheckInterval()
}
}
return nil
}
// OnSchemaMigrationStatus is called by TabletServer's API, which is invoked by a running gh-ost migration's hooks.
func (e *Executor) OnSchemaMigrationStatus(ctx context.Context, uuidParam, statusParam, dryrunParam, progressParam string) (err error) {
status := schema.OnlineDDLStatus(statusParam)
@ -1399,42 +1513,7 @@ func (e *Executor) OnSchemaMigrationStatus(ctx context.Context, uuidParam, statu
progressPct = pct
}
if dryRun && status != schema.OnlineDDLStatusFailed {
// We don't consider dry-run reports unless there's a failure
return nil
}
switch status {
case schema.OnlineDDLStatusReady:
{
err = e.updateMigrationTimestamp(ctx, "ready_timestamp", uuidParam)
}
case schema.OnlineDDLStatusRunning:
{
_ = e.updateMigrationStartedTimestamp(ctx, uuidParam)
err = e.updateMigrationTimestamp(ctx, "liveness_timestamp", uuidParam)
}
case schema.OnlineDDLStatusComplete:
{
_ = e.updateMigrationStartedTimestamp(ctx, uuidParam)
err = e.updateMigrationTimestamp(ctx, "completed_timestamp", uuidParam)
}
case schema.OnlineDDLStatusFailed:
{
_ = e.updateMigrationStartedTimestamp(ctx, uuidParam)
err = e.updateMigrationTimestamp(ctx, "completed_timestamp", uuidParam)
}
}
if err != nil {
return err
}
if err = e.updateMigrationStatus(ctx, uuidParam, status); err != nil {
return err
}
if err = e.updateMigrationProgress(ctx, uuidParam, progressPct); err != nil {
return err
}
return nil
return e.onSchemaMigrationStatus(ctx, uuidParam, status, dryRun, progressPct)
}
// VExec is called by a VExec invocation
@ -1464,6 +1543,7 @@ func (e *Executor) VExec(ctx context.Context, vx *vexec.TabletVExec) (qr *queryp
vx.ReplaceInsertColumnVal("shard", vx.ToStringVal(e.shard))
vx.ReplaceInsertColumnVal("mysql_schema", vx.ToStringVal(e.dbName))
vx.AddOrReplaceInsertColumnVal("tablet", vx.ToStringVal(e.TabletAliasString()))
e.triggerNextCheckInterval()
return response(e.execQuery(ctx, vx.Query))
case *sqlparser.Update:
match, err := sqlparser.QueryMatchesTemplates(vx.Query, vexecUpdateTemplates)
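A side note on the reentrancy guard added to onMigrationCheckTick earlier in this file: the CompareAndSwap flag plus a delayed reset is a small, reusable throttling pattern. Below is a minimal standalone sketch, not part of this patch; guardedTick, tickFlag and work are hypothetical names used only for illustration.

package sketch

import (
	"sync/atomic"
	"time"
)

// tickFlag makes guardedTick non-reentrant; the time.AfterFunc reset in the defer
// additionally throttles invocations to at most one per second after completion,
// mirroring the tickReentranceFlag logic in onMigrationCheckTick above.
var tickFlag int64

func guardedTick(work func()) {
	if !atomic.CompareAndSwapInt64(&tickFlag, 0, 1) {
		return // another tick is running, or one finished less than a second ago
	}
	defer time.AfterFunc(time.Second, func() { atomic.StoreInt64(&tickFlag, 0) })
	work()
}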


@ -57,6 +57,7 @@ const (
alterSchemaMigrationsTableTabletFailureIndex = "ALTER TABLE %s.schema_migrations add KEY tablet_failure_idx (tablet_failure, migration_status, retries)"
alterSchemaMigrationsTableProgress = "ALTER TABLE %s.schema_migrations add column progress float NOT NULL DEFAULT 0"
alterSchemaMigrationsTableContext = "ALTER TABLE %s.schema_migrations add column migration_context varchar(1024) NOT NULL DEFAULT ''"
alterSchemaMigrationsTableDDLAction = "ALTER TABLE %s.schema_migrations add column ddl_action varchar(16) NOT NULL DEFAULT ''"
sqlScheduleSingleMigration = `UPDATE %s.schema_migrations
SET
@ -247,4 +248,5 @@ var applyDDL = []string{
fmt.Sprintf(alterSchemaMigrationsTableTabletFailureIndex, "_vt"),
fmt.Sprintf(alterSchemaMigrationsTableProgress, "_vt"),
fmt.Sprintf(alterSchemaMigrationsTableContext, "_vt"),
fmt.Sprintf(alterSchemaMigrationsTableDDLAction, "_vt"),
}


@ -207,7 +207,6 @@ func (ct *controller) runBlp(ctx context.Context) (err error) {
log.Infof("found a tablet eligible for vreplication. stream id: %v tablet: %s", ct.id, tablet.Alias.String())
ct.sourceTablet.Set(tablet.Alias.String())
}
switch {
case len(ct.source.Tables) > 0:
// Table names can have search patterns. Resolve them against the schema.


@ -92,8 +92,8 @@ func (vc *vdbClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result,
// Execute is ExecuteFetch without the maxrows.
func (vc *vdbClient) Execute(query string) (*sqltypes.Result, error) {
// Number of rows should never exceed relayLogMaxSize.
return vc.ExecuteFetch(query, relayLogMaxSize)
// Number of rows should never exceed relayLogMaxItems.
return vc.ExecuteFetch(query, *relayLogMaxItems)
}
func (vc *vdbClient) ExecuteWithRetry(ctx context.Context, query string) (*sqltypes.Result, error) {
@ -128,8 +128,8 @@ func (vc *vdbClient) Retry() (*sqltypes.Result, error) {
}
continue
}
// Number of rows should never exceed relayLogMaxSize.
result, err := vc.DBClient.ExecuteFetch(q, relayLogMaxSize)
// Number of rows should never exceed relayLogMaxItems.
result, err := vc.DBClient.ExecuteFetch(q, *relayLogMaxItems)
if err != nil {
return nil, err
}


@ -136,7 +136,7 @@ func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
relay := newRelayLog(ctx, relayLogMaxItems, relayLogMaxSize)
relay := newRelayLog(ctx, *relayLogMaxItems, *relayLogMaxSize)
streamErr := make(chan error, 1)
go func() {


@ -2115,11 +2115,11 @@ func TestPlayerRelayLogMaxSize(t *testing.T) {
case 0:
savedSize := relayLogMaxSize
defer func() { relayLogMaxSize = savedSize }()
relayLogMaxSize = 10
*relayLogMaxSize = 10
case 1:
savedLen := relayLogMaxItems
defer func() { relayLogMaxItems = savedLen }()
relayLogMaxItems = 2
*relayLogMaxItems = 2
}
execStatements(t, []string{


@ -17,6 +17,7 @@ limitations under the License.
package vreplication
import (
"flag"
"fmt"
"strings"
"time"
@ -42,8 +43,8 @@ var (
// between the two timeouts.
idleTimeout = 1100 * time.Millisecond
dbLockRetryDelay = 1 * time.Second
relayLogMaxSize = 30000
relayLogMaxItems = 1000
relayLogMaxSize = flag.Int("relay_log_max_size", 250000, "Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time.")
relayLogMaxItems = flag.Int("relay_log_max_items", 5000, "Maximum number of rows for VReplication target buffering.")
copyTimeout = 1 * time.Hour
replicaLagTolerance = 10 * time.Second
)
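For context on the two new flags above, here is an illustrative sketch rather than code from this patch: the relay log buffers rows until either bound is hit, whichever comes first. relayBufferFull is a hypothetical helper name.

package sketch

// relayBufferFull only illustrates how the two limits interact: buffering stops
// once either the row count or the byte-size bound is reached. Callers would pass
// the flag values, e.g. relayBufferFull(n, size, *relayLogMaxItems, *relayLogMaxSize).
func relayBufferFull(bufferedItems, bufferedBytes, maxItems, maxSize int) bool {
	return bufferedItems >= maxItems || bufferedBytes >= maxSize
}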


@ -67,6 +67,8 @@ const (
PlanShowTables
// PlanLoad is for Load data statements
PlanLoad
PlanLockTables
PlanUnlockTables
NumPlans
)
@ -93,6 +95,8 @@ var planName = []string{
"RollbackSavepoint",
"ShowTables",
"Load",
"LockTables",
"UnlockTables",
}
func (pt PlanType) String() string {


@ -652,9 +652,22 @@ options:PassthroughDMLs
"Role": 2
}
],
"FullQuery":"create index a on b (id)"
"FullQuery":"alter table b add index a (id)"
}
# create view
"create view a as select * from b"
{
"PlanID": "DDL",
"TableName": "",
"Permissions": [
{
"TableName": "a",
"Role": 2
}
],
"FullQuery":"create view a as select * from b"
}
# alter
"alter table a add column(a int)"


@ -361,6 +361,12 @@ func (qre *QueryExecutor) checkAccess(authorized *tableacl.ACLResult, tableName
qre.tsv.Stats().TableaclPseudoDenied.Add(statsKey, 1)
return nil
}
// Skip ACL check for queries against the dummy dual table
if tableName == "dual" {
return nil
}
if qre.tsv.qe.strictTableACL {
errStr := fmt.Sprintf("table acl error: %q %v cannot run %v on table %q", callerID.Username, callerID.Groups, qre.plan.PlanID, tableName)
qre.tsv.Stats().TableaclDenied.Add(statsKey, 1)


@ -222,13 +222,13 @@ func TestQueryExecutorPlans(t *testing.T) {
}, {
input: "create index a on user(id)",
dbResponses: []dbResponse{{
query: "create index a on user (id)",
query: "alter table user add index a (id)",
result: emptyResult,
}},
resultWant: emptyResult,
planWant: "DDL",
logWant: "create index a on user (id)",
inTxWant: "create index a on user (id)",
logWant: "alter table user add index a (id)",
inTxWant: "alter table user add index a (id)",
}, {
input: "create index a on user(id1 + id2)",
dbResponses: []dbResponse{{
@ -816,9 +816,8 @@ func TestQueryExecutorTableAclDualTableExempt(t *testing.T) {
db := setUpQueryExecutorTest(t)
defer db.Close()
username := "Sleve McDichael"
callerID := &querypb.VTGateCallerID{
Username: username,
Username: "basic_username",
}
ctx := callerid.NewContext(context.Background(), nil, callerID)
@ -854,6 +853,14 @@ func TestQueryExecutorTableAclDualTableExempt(t *testing.T) {
if err != nil {
t.Fatalf("qre.Execute: %v, want: nil", err)
}
query = "(select 0 as x from dual where 1 != 1) union (select 1 as y from dual where 1 != 1)"
ctx = callerid.NewContext(context.Background(), nil, callerID)
qre = newTestQueryExecutor(ctx, tsv, query, 0)
_, err = qre.Execute()
if err != nil {
t.Fatalf("qre.Execute: %v, want: nil", err)
}
}
func TestQueryExecutorTableAclExemptACL(t *testing.T) {
@ -1246,6 +1253,20 @@ func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[s
},
RowsAffected: 1,
},
"(select 0 as x from dual where 1 != 1) union (select 1 as y from dual where 1 != 1)": {
Fields: []*querypb.Field{{
Type: sqltypes.Uint64,
}},
Rows: [][]sqltypes.Value{},
RowsAffected: 0,
},
"(select 0 as x from dual where 1 != 1) union (select 1 as y from dual where 1 != 1) limit 10001": {
Fields: []*querypb.Field{{
Type: sqltypes.Uint64,
}},
Rows: [][]sqltypes.Value{},
RowsAffected: 0,
},
mysql.BaseShowTables: {
Fields: mysql.BaseShowTablesFields,
Rows: [][]sqltypes.Value{


@ -25,6 +25,8 @@ import (
"strings"
"time"
vtschema "vitess.io/vitess/go/vt/schema"
"github.com/golang/protobuf/proto"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
@ -40,7 +42,7 @@ import (
)
// PacketSize is the suggested packet size for VReplication streamer.
var PacketSize = flag.Int("vstream_packet_size", 30000, "Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount.")
var PacketSize = flag.Int("vstream_packet_size", 250000, "Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount.")
// HeartbeatTime is set to slightly below 1s, compared to idleTimeout
// set by VPlayer at slightly above 1s. This minimizes conflicts
@ -505,6 +507,10 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e
vs.plans[id] = nil
return nil, nil
}
if vtschema.IsInternalOperationTableName(tm.Name) { // ignore tables created by onlineddl/gh-ost/pt-osc
vs.plans[id] = nil
return nil, nil
}
if !ruleMatches(tm.Name, vs.filter) {
return nil, nil
}
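The filter above delegates to vtschema.IsInternalOperationTableName, whose exact rules are not shown in this diff. As a rough illustration only, the table names exercised by TestInternalTables below suggest checks along these lines; looksInternal is a hypothetical name, not the real implementation.

package sketch

import "strings"

// looksInternal is an illustrative approximation of the kind of name check
// performed by vtschema.IsInternalOperationTableName; the real rules may differ.
func looksInternal(name string) bool {
	return strings.HasPrefix(name, "_vt_") || // e.g. _vt_PURGE_... table-GC tables
		strings.HasSuffix(name, "_gho") || // gh-ost ghost tables
		strings.HasSuffix(name, "_old") // pt-osc style renamed originals
}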


@ -1443,6 +1443,62 @@ func TestBestEffortNameInFieldEvent(t *testing.T) {
runCases(t, filter, testcases, position, nil)
}
// TestInternalTables checks that vstreamer ignores tables created by OnlineDDL tooling (gh-ost, pt-osc, table GC)
func TestInternalTables(t *testing.T) {
if testing.Short() {
t.Skip()
}
filter := &binlogdatapb.Filter{
FieldEventMode: binlogdatapb.Filter_BEST_EFFORT,
Rules: []*binlogdatapb.Rule{{
Match: "/.*/",
}},
}
// Modeled after vttablet endtoend compatibility tests.
execStatements(t, []string{
"create table vitess_test(id int, val varbinary(128), primary key(id))",
"create table _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho(id int, val varbinary(128), primary key(id))",
"create table _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431(id int, val varbinary(128), primary key(id))",
"create table _product_old(id int, val varbinary(128), primary key(id))",
})
position := masterPosition(t)
execStatements(t, []string{
"insert into vitess_test values(1, 'abc')",
"insert into _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho values(1, 'abc')",
"insert into _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431 values(1, 'abc')",
"insert into _product_old values(1, 'abc')",
})
defer execStatements(t, []string{
"drop table vitess_test",
"drop table _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho",
"drop table _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431",
"drop table _product_old",
})
engine.se.Reload(context.Background())
testcases := []testcase{{
input: []string{
"insert into vitess_test values(2, 'abc')",
},
// The inserts into the three internal tables are filtered out by vstreamer, so below they
// surface only as empty begin/gtid/commit transactions; only vitess_test produces field and row events.
output: [][]string{{
`begin`,
`type:FIELD field_event:<table_name:"vitess_test" fields:<name:"id" type:INT32 table:"vitess_test" org_table:"vitess_test" database:"vttest" org_name:"id" column_length:11 charset:63 > fields:<name:"val" type:VARBINARY table:"vitess_test" org_table:"vitess_test" database:"vttest" org_name:"val" column_length:128 charset:63 > > `,
`type:ROW row_event:<table_name:"vitess_test" row_changes:<after:<lengths:1 lengths:3 values:"1abc" > > > `,
`gtid`,
`commit`,
}, {`begin`, `gtid`, `commit`}, {`begin`, `gtid`, `commit`}, {`begin`, `gtid`, `commit`}, // => inserts into the three internal tables
{
`begin`,
`type:ROW row_event:<table_name:"vitess_test" row_changes:<after:<lengths:1 lengths:3 values:"2abc" > > > `,
`gtid`,
`commit`,
}},
}}
runCases(t, filter, testcases, position, nil)
}
func TestTypes(t *testing.T) {
if testing.Short() {
t.Skip()


@ -184,7 +184,9 @@ func (env *LocalTestEnv) VtcomboArguments() []string {
return []string{
"-service_map", strings.Join(
[]string{"grpc-vtgateservice", "grpc-vtctl"}, ",",
)}
),
"-enable_queries",
}
}
// LogDirectory implements LogDirectory for LocalTestEnv.


@ -745,12 +745,17 @@ func (ts *trafficSwitcher) stopSourceWrites(ctx context.Context) error {
err = ts.changeShardsAccess(ctx, ts.sourceKeyspace, ts.sourceShards(), disallowWrites)
}
if err != nil {
log.Warningf("Error: %s", err)
return err
}
return ts.forAllSources(func(source *tsSource) error {
var err error
source.position, err = ts.wr.tmc.MasterPosition(ctx, source.master.Tablet)
ts.wr.Logger().Infof("Position for source %v:%v: %v", ts.sourceKeyspace, source.si.ShardName(), source.position)
ts.wr.Logger().Infof("Stopped Source Writes. Position for source %v:%v: %v",
ts.sourceKeyspace, source.si.ShardName(), source.position)
if err != nil {
log.Warningf("Error: %s", err)
}
return err
})
}
@ -769,39 +774,34 @@ func (ts *trafficSwitcher) changeTableSourceWrites(ctx context.Context, access a
func (ts *trafficSwitcher) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime)
defer cancel()
var mu sync.Mutex
return ts.forAllUids(func(target *tsTarget, uid uint32) error {
ts.wr.Logger().Infof("uid: %d, target master %s, target position %s, shard %s", uid,
target.master.AliasString(), target.position, target.si.String())
log.Infof("uid: %d, target master %s, target position %s, shard %s", uid,
// source writes have been stopped, wait for all streams on targets to catch up
if err := ts.forAllUids(func(target *tsTarget, uid uint32) error {
ts.wr.Logger().Infof("Before Catchup: uid: %d, target master %s, target position %s, shard %s", uid,
target.master.AliasString(), target.position, target.si.String())
bls := target.sources[uid]
source := ts.sources[bls.Shard]
ts.wr.Logger().Infof("waiting for keyspace:shard: %v:%v, source position %v, uid %d",
ts.wr.Logger().Infof("Before Catchup: waiting for keyspace:shard: %v:%v to reach source position %v, uid %d",
ts.targetKeyspace, target.si.ShardName(), source.position, uid)
if err := ts.wr.tmc.VReplicationWaitForPos(ctx, target.master.Tablet, int(uid), source.position); err != nil {
return err
}
log.Infof("waiting for keyspace:shard: %v:%v, source position %v, uid %d",
log.Infof("After catchup: target keyspace:shard: %v:%v, source position %v, uid %d",
ts.targetKeyspace, target.si.ShardName(), source.position, uid)
ts.wr.Logger().Infof("position for keyspace:shard: %v:%v reached, uid %d", ts.targetKeyspace, target.si.ShardName(), uid)
log.Infof("position for keyspace:shard: %v:%v reached, uid %d", ts.targetKeyspace, target.si.ShardName(), uid)
ts.wr.Logger().Infof("After catchup: position for keyspace:shard: %v:%v reached, uid %d",
ts.targetKeyspace, target.si.ShardName(), uid)
if _, err := ts.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.StopVReplication(uid, "stopped for cutover")); err != nil {
log.Infof("error marking stopped for cutover on %s, uid %d", target.master.AliasString(), uid)
return err
}
// Need lock because a target can have multiple uids.
mu.Lock()
defer mu.Unlock()
if target.position != "" {
return nil
}
return nil
}); err != nil {
return err
}
// all targets have caught up, record their positions for setting up reverse workflows
return ts.forAllTargets(func(target *tsTarget) error {
var err error
target.position, err = ts.wr.tmc.MasterPosition(ctx, target.master.Tablet)
ts.wr.Logger().Infof("Position for target master %s, uid %v: %v", target.master.AliasString(), uid, target.position)
log.Infof("Position for target master %s, uid %v: %v", target.master.AliasString(), uid, target.position)
ts.wr.Logger().Infof("After catchup, position for target master %s, %v", target.master.AliasString(), target.position)
return err
})
}
@ -894,7 +894,8 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error
Filter: filter,
})
}
log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s",
source.master.Alias, ts.reverseWorkflow, target.position)
_, err := ts.wr.VReplicationExec(ctx, source.master.Alias, binlogplayer.CreateVReplicationState(ts.reverseWorkflow, reverseBls, target.position, binlogplayer.BlpStopped, source.master.DbName()))
if err != nil {
return err
@ -903,6 +904,7 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error
// if user has defined the cell/tablet_types parameters in the forward workflow, update the reverse workflow as well
updateQuery := ts.getReverseVReplicationUpdateQuery(target.master.Alias.Cell, source.master.Alias.Cell, source.master.DbName())
if updateQuery != "" {
log.Infof("Updating vreplication stream entry on %s with: %s", source.master.Alias, updateQuery)
_, err = ts.wr.VReplicationExec(ctx, source.master.Alias, updateQuery)
return err
}


@ -135,11 +135,12 @@ func newSchemaMigrationsPlanner(vx *vexec) vexecPlanner {
migration_statement,
strategy,
options,
ddl_action,
requested_timestamp,
migration_context,
migration_status
) VALUES (
'val', 'val', 'val', 'val', 'val', 'val', 'val', 'val', FROM_UNIXTIME(0), 'val', 'val'
'val', 'val', 'val', 'val', 'val', 'val', 'val', 'val', 'val', FROM_UNIXTIME(0), 'val', 'val'
)`,
},
},


@ -82,7 +82,7 @@
<plugin>
<groupId>org.xolstice.maven.plugins</groupId>
<artifactId>protobuf-maven-plugin</artifactId>
<version>0.5.0</version>
<version>0.6.1</version>
<configuration>
<protocArtifact>com.google.protobuf:protoc:${protobuf.protoc.version}:exe:${os.detected.classifier}</protocArtifact>
<protoSourceRoot>../../proto</protoSourceRoot>


@ -22,6 +22,10 @@
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
<groupId>javax.annotation</groupId>
<artifactId>javax.annotation-api</artifactId>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
@ -43,6 +47,10 @@
<groupId>io.grpc</groupId>
<artifactId>grpc-context</artifactId>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-api</artifactId>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-handler</artifactId>
@ -51,7 +59,6 @@
<groupId>io.netty</groupId>
<artifactId>netty-tcnative-boringssl-static</artifactId>
</dependency>
<dependency>
<groupId>io.vitess</groupId>
<artifactId>vitess-client</artifactId>
@ -104,7 +111,7 @@
<plugin>
<groupId>org.xolstice.maven.plugins</groupId>
<artifactId>protobuf-maven-plugin</artifactId>
<version>0.5.0</version>
<version>0.6.1</version>
<configuration>
<protocArtifact>com.google.protobuf:protoc:${protobuf.protoc.version}:exe:${os.detected.classifier}</protocArtifact>
<pluginId>grpc-java</pluginId>
@ -129,6 +136,7 @@
<!-- If this is not imported, tests fail with "Jetty ALPN/NPN has not
been properly configured." -->
<usedDependency>io.netty:netty-tcnative-boringssl-static</usedDependency>
<usedDependency>javax.annotation:javax.annotation-api</usedDependency>
</usedDependencies>
</configuration>
</plugin>


@ -19,6 +19,8 @@ package io.vitess.client.grpc;
import io.grpc.CallCredentials;
import io.grpc.ClientInterceptor;
import io.grpc.LoadBalancer;
import io.grpc.LoadBalancerProvider;
import io.grpc.LoadBalancerRegistry;
import io.grpc.NameResolver;
import io.grpc.netty.GrpcSslContexts;
import io.grpc.netty.NegotiationType;
@ -55,7 +57,7 @@ public class GrpcClientFactory implements RpcClientFactory {
private RetryingInterceptorConfig config;
private final boolean useTracing;
private CallCredentials callCredentials;
private LoadBalancer.Factory loadBalancerFactory;
private String loadBalancerPolicy;
private NameResolver.Factory nameResolverFactory;
public GrpcClientFactory() {
@ -73,7 +75,11 @@ public class GrpcClientFactory implements RpcClientFactory {
}
public GrpcClientFactory setLoadBalancerFactory(LoadBalancer.Factory value) {
loadBalancerFactory = value;
VitessLoadBalancer provider = new VitessLoadBalancer(value);
LoadBalancerRegistry registry = LoadBalancerRegistry.getDefaultRegistry();
registry.deregister(provider);
registry.register(provider);
loadBalancerPolicy = "vitess_lb";
return this;
}
@ -96,8 +102,8 @@ public class GrpcClientFactory implements RpcClientFactory {
NettyChannelBuilder channel = channelBuilder(target)
.negotiationType(NegotiationType.PLAINTEXT)
.intercept(interceptors);
if (loadBalancerFactory != null) {
channel.loadBalancerFactory(loadBalancerFactory);
if (loadBalancerPolicy != null) {
channel.defaultLoadBalancingPolicy(loadBalancerPolicy);
}
if (nameResolverFactory != null) {
channel.nameResolverFactory(nameResolverFactory);
@ -417,4 +423,35 @@ public class GrpcClientFactory implements RpcClientFactory {
}
}
private class VitessLoadBalancer extends LoadBalancerProvider {
private LoadBalancer.Factory base;
public VitessLoadBalancer(LoadBalancer.Factory base) {
this.base = base;
}
@Override
public LoadBalancer newLoadBalancer(LoadBalancer.Helper helper) {
return base.newLoadBalancer(helper);
}
@Override
public boolean isAvailable() {
return true;
}
@Override
public int getPriority() {
return 10;
}
@Override
public String getPolicyName() {
return "vitess_lb";
}
}
}

Some files were not shown because too many files changed in this diff.