diff --git a/bootstrap.sh b/bootstrap.sh index 70108e7d37..971034fd7c 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -23,6 +23,7 @@ # 3. Detection of installed MySQL and setting MYSQL_FLAVOR. # 4. Installation of development related steps e.g. creating Git hooks. +BUILD_TESTS=${BUILD_TESTS:-1} # # 0. Initialization and helper methods. @@ -45,9 +46,14 @@ function fail() { [[ "$(dirname "$0")" = "." ]] || fail "bootstrap.sh must be run from its current directory" go version &>/dev/null || fail "Go is not installed or is not on \$PATH" +[[ "$(go version 2>&1)" =~ go1\.[1-9][1-9] ]] || fail "Go is not version 1.11+" # Set up the proper GOPATH for go get below. -source ./dev.env +if [ "$BUILD_TESTS" == 1 ] ; then + source ./dev.env +else + source ./build.env +fi # Create main directories. mkdir -p "$VTROOT/dist" @@ -55,15 +61,21 @@ mkdir -p "$VTROOT/bin" mkdir -p "$VTROOT/lib" mkdir -p "$VTROOT/vthook" -# Set up required soft links. -# TODO(mberlin): Which of these can be deleted? -ln -snf "$VTTOP/config" "$VTROOT/config" -ln -snf "$VTTOP/data" "$VTROOT/data" -ln -snf "$VTTOP/py" "$VTROOT/py-vtdb" -ln -snf "$VTTOP/go/vt/zkctl/zksrv.sh" "$VTROOT/bin/zksrv.sh" -ln -snf "$VTTOP/test/vthook-test.sh" "$VTROOT/vthook/test.sh" -ln -snf "$VTTOP/test/vthook-test_backup_error" "$VTROOT/vthook/test_backup_error" -ln -snf "$VTTOP/test/vthook-test_backup_transform" "$VTROOT/vthook/test_backup_transform" +if [ "$BUILD_TESTS" == 1 ] ; then + # Set up required soft links. + # TODO(mberlin): Which of these can be deleted? + ln -snf "$VTTOP/config" "$VTROOT/config" + ln -snf "$VTTOP/data" "$VTROOT/data" + ln -snf "$VTTOP/py" "$VTROOT/py-vtdb" + ln -snf "$VTTOP/go/vt/zkctl/zksrv.sh" "$VTROOT/bin/zksrv.sh" + ln -snf "$VTTOP/test/vthook-test.sh" "$VTROOT/vthook/test.sh" + ln -snf "$VTTOP/test/vthook-test_backup_error" "$VTROOT/vthook/test_backup_error" + ln -snf "$VTTOP/test/vthook-test_backup_transform" "$VTROOT/vthook/test_backup_transform" +else + ln -snf "$VTTOP/config" "$VTROOT/config" + ln -snf "$VTTOP/data" "$VTROOT/data" + ln -snf "$VTTOP/go/vt/zkctl/zksrv.sh" "$VTROOT/bin/zksrv.sh" +fi # install_dep is a helper function to generalize the download and installation of dependencies. # @@ -136,8 +148,10 @@ function install_grpc() { grpcio_ver=$version $PIP install --upgrade grpcio=="$grpcio_ver" grpcio-tools=="$grpcio_ver" } -install_dep "gRPC" "1.16.0" "$VTROOT/dist/grpc" install_grpc +if [ "$BUILD_TESTS" == 1 ] ; then + install_dep "gRPC" "1.16.0" "$VTROOT/dist/grpc" install_grpc +fi # Install protoc. function install_protoc() { @@ -225,8 +239,9 @@ function install_pymock() { popd >/dev/null } pymock_version=1.0.1 -install_dep "py-mock" "$pymock_version" "$VTROOT/dist/py-mock-$pymock_version" install_pymock - +if [ "$BUILD_TESTS" == 1 ] ; then + install_dep "py-mock" "$pymock_version" "$VTROOT/dist/py-mock-$pymock_version" install_pymock +fi # Download Selenium (necessary to run test/vtctld_web_test.py). function install_selenium() { @@ -239,7 +254,9 @@ function install_selenium() { # instead of go/dist/selenium/lib/python3.5/site-packages and then can't find module 'pip._vendor.requests' PYTHONPATH='' $PIP install selenium } -install_dep "Selenium" "latest" "$VTROOT/dist/selenium" install_selenium +if [ "$BUILD_TESTS" == 1 ] ; then + install_dep "Selenium" "latest" "$VTROOT/dist/selenium" install_selenium +fi # Download chromedriver (necessary to run test/vtctld_web_test.py). 
@@ -247,11 +264,13 @@ function install_chromedriver() { local version="$1" local dist="$2" - curl -sL "http://chromedriver.storage.googleapis.com/$version/chromedriver_linux64.zip" > chromedriver_linux64.zip + curl -sL "https://chromedriver.storage.googleapis.com/$version/chromedriver_linux64.zip" > chromedriver_linux64.zip unzip -o -q chromedriver_linux64.zip -d "$dist" rm chromedriver_linux64.zip } -install_dep "chromedriver" "2.44" "$VTROOT/dist/chromedriver" install_chromedriver +if [ "$BUILD_TESTS" == 1 ] ; then + install_dep "chromedriver" "2.44" "$VTROOT/dist/chromedriver" install_chromedriver +fi # @@ -300,47 +319,52 @@ govendor sync || fail "Failed to download/update dependencies with govendor. Ple # find mysql and prepare to use libmysqlclient -if [ -z "$MYSQL_FLAVOR" ]; then - export MYSQL_FLAVOR=MySQL56 - echo "MYSQL_FLAVOR environment variable not set. Using default: $MYSQL_FLAVOR" + +if [ "$BUILD_TESTS" == 1 ] ; then + if [ -z "$MYSQL_FLAVOR" ]; then + export MYSQL_FLAVOR=MySQL56 + echo "MYSQL_FLAVOR environment variable not set. Using default: $MYSQL_FLAVOR" + fi + case "$MYSQL_FLAVOR" in + "MySQL56") + myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)" + [[ "$myversion" =~ Distrib\ 5\.[67] || "$myversion" =~ Ver\ 8\. ]] || fail "Couldn't find MySQL 5.6+ in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location." + echo "Found MySQL 5.6+ installation in $VT_MYSQL_ROOT." + ;; + + "MariaDB") + myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)" + [[ "$myversion" =~ MariaDB ]] || fail "Couldn't find MariaDB in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location." + echo "Found MariaDB installation in $VT_MYSQL_ROOT." + ;; + + *) + fail "Unsupported MYSQL_FLAVOR $MYSQL_FLAVOR" + ;; + + esac + # save the flavor that was used in bootstrap, so it can be restored + # every time dev.env is sourced. + echo "$MYSQL_FLAVOR" > "$VTROOT/dist/MYSQL_FLAVOR" fi -case "$MYSQL_FLAVOR" in - "MySQL56") - myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)" - [[ "$myversion" =~ Distrib\ 5\.[67] || "$myversion" =~ Ver\ 8\. ]] || fail "Couldn't find MySQL 5.6+ in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location." - echo "Found MySQL 5.6+ installation in $VT_MYSQL_ROOT." - ;; - - "MariaDB") - myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)" - [[ "$myversion" =~ MariaDB ]] || fail "Couldn't find MariaDB in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location." - echo "Found MariaDB installation in $VT_MYSQL_ROOT." - ;; - - *) - fail "Unsupported MYSQL_FLAVOR $MYSQL_FLAVOR" - ;; - -esac - -# save the flavor that was used in bootstrap, so it can be restored -# every time dev.env is sourced. -echo "$MYSQL_FLAVOR" > "$VTROOT/dist/MYSQL_FLAVOR" - # # 4. Installation of development related steps e.g. creating Git hooks. # - -# Create the Git hooks. -echo "creating git hooks" -mkdir -p "$VTTOP/.git/hooks" -ln -sf "$VTTOP/misc/git/pre-commit" "$VTTOP/.git/hooks/pre-commit" -ln -sf "$VTTOP/misc/git/prepare-commit-msg.bugnumber" "$VTTOP/.git/hooks/prepare-commit-msg" -ln -sf "$VTTOP/misc/git/commit-msg" "$VTTOP/.git/hooks/commit-msg" -(cd "$VTTOP" && git config core.hooksPath "$VTTOP/.git/hooks") +if [ "$BUILD_TESTS" == 1 ] ; then + # Create the Git hooks. 
+ echo "creating git hooks" + mkdir -p "$VTTOP/.git/hooks" + ln -sf "$VTTOP/misc/git/pre-commit" "$VTTOP/.git/hooks/pre-commit" + ln -sf "$VTTOP/misc/git/prepare-commit-msg.bugnumber" "$VTTOP/.git/hooks/prepare-commit-msg" + ln -sf "$VTTOP/misc/git/commit-msg" "$VTTOP/.git/hooks/commit-msg" + (cd "$VTTOP" && git config core.hooksPath "$VTTOP/.git/hooks") + echo + echo "bootstrap finished - run 'source dev.env' in your shell before building." +else + echo + echo "bootstrap finished - run 'source build.env' in your shell before building." +fi -echo -echo "bootstrap finished - run 'source dev.env' in your shell before building." diff --git a/build.env b/build.env new file mode 100644 index 0000000000..a9d46a99b9 --- /dev/null +++ b/build.env @@ -0,0 +1,39 @@ +# No shebang line as this script is sourced from an external shell. + +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Plese ensure dev.env is written in a way which is POSIX (bourne) +# shell compatible. +# - Some build systems like rpm require the different scriptlets used +# to build a package to be run under a POSIX shell so non-POSIX +# syntax will break that as dev.env will not be sourced by bash.. + +# Import prepend_path function. +dir="$(dirname "${BASH_SOURCE[0]}")" +# shellcheck source=tools/shell_functions.inc +if ! source "${dir}/tools/shell_functions.inc"; then + echo "failed to load tools/shell_functions.inc" + return 1 +fi + +VTTOP=$(pwd) +export VTTOP +VTROOT="${VTROOT:-${VTTOP/\/src\/vitess.io\/vitess/}}" +export VTROOT +# VTTOP sanity check +if [[ "$VTTOP" == "${VTTOP/\/src\/vitess.io\/vitess/}" ]]; then + echo "WARNING: VTTOP($VTTOP) does not contain src/vitess.io/vitess" +fi + diff --git a/config/init_db.sql b/config/init_db.sql index 4a278549dd..6bf2ac5202 100644 --- a/config/init_db.sql +++ b/config/init_db.sql @@ -84,3 +84,5 @@ GRANT SELECT FLUSH PRIVILEGES; +RESET SLAVE ALL; +RESET MASTER; diff --git a/data/test/cgzip_eof.gz b/data/test/cgzip_eof.gz deleted file mode 100644 index 019e9f190b..0000000000 Binary files a/data/test/cgzip_eof.gz and /dev/null differ diff --git a/dev.env b/dev.env index dbb3fe8640..e31c16167e 100644 --- a/dev.env +++ b/dev.env @@ -20,22 +20,8 @@ # to build a package to be run under a POSIX shell so non-POSIX # syntax will break that as dev.env will not be sourced by bash.. -# Import prepend_path function. -dir="$(dirname "${BASH_SOURCE[0]}")" -# shellcheck source=tools/shell_functions.inc -if ! 
source "${dir}/tools/shell_functions.inc"; then - echo "failed to load tools/shell_functions.inc" - return 1 -fi +source ./build.env -VTTOP=$(pwd) -export VTTOP -VTROOT="${VTROOT:-${VTTOP/\/src\/vitess.io\/vitess/}}" -export VTROOT -# VTTOP sanity check -if [[ "$VTTOP" == "${VTTOP/\/src\/vitess.io\/vitess/}" ]]; then - echo "WARNING: VTTOP($VTTOP) does not contain src/vitess.io/vitess" -fi export GOTOP=$VTTOP/go export PYTOP=$VTTOP/py diff --git a/doc/FAQ.md b/doc/FAQ.md index b84d14740d..d34acbc4b0 100644 --- a/doc/FAQ.md +++ b/doc/FAQ.md @@ -20,7 +20,7 @@ If no tablet type was specified, then VTGate chooses its default, which can be o Vitess supports different modes. In OLTP mode, the result size is typically limited to a preset number (10,000 rows by default). This limit can be adjusted based on your needs. -However, OLAP mode has no limit to the number of rows returned. In order to change to this mode, you may issue the following command command before executing your query: +However, OLAP mode has no limit to the number of rows returned. In order to change to this mode, you may issue the following command before executing your query: ``` set workload='olap' @@ -32,7 +32,7 @@ The general convention is to send OLTP queries to `REPLICA` tablet types, and OL ## Is there a list of supported/unsupported queries? -The list of unsupported constructs is currently in the form of test cases contained in this [test file](https://github.com/vitessio/vitess/blob/master/data/test/vtgate/unsupported_cases.txt). However, contrary to the test cases, there is limited support for SET, DDL and DBA constructs. This will be documented soon. +The list of unsupported constructs is currently in the form of test cases contained in this [test file](https://github.com/vitessio/vitess/blob/master/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt). However, contrary to the test cases, there is limited support for SET, DDL and DBA constructs. This will be documented soon. ## If I have a log of all queries from my app. Is there a way I can try them against vitess to see how they’ll work? diff --git a/doc/GettingStarted.md b/doc/GettingStarted.md index f65f812a69..587156137a 100644 --- a/doc/GettingStarted.md +++ b/doc/GettingStarted.md @@ -168,7 +168,7 @@ In addition, Vitess requires the software and libraries listed below. (install steps are below). 3. If Xcode is installed (with Console tools, which should be bundled automatically since the 7.1 version), all - the dev dependencies should be satisfied in this step. If no Xcode is present, it is necessery to install pkg-config. + the dev dependencies should be satisfied in this step. If no Xcode is present, it is necessary to install pkg-config. ``` sh brew install pkg-config diff --git a/doc/HorizontalReshardingWorkflowGuide.md b/doc/HorizontalReshardingWorkflowGuide.md index f28be1a0a3..a287e19466 100644 --- a/doc/HorizontalReshardingWorkflowGuide.md +++ b/doc/HorizontalReshardingWorkflowGuide.md @@ -132,7 +132,7 @@ on the clicked button. The approval buttons are cleared after the phase has finished. The next phase will only starts if its previous phase has finished successfully. -If the workflow is restored from a checkpoint, you will still see the the +If the workflow is restored from a checkpoint, you will still see the approval button with approved message when there are running tasks under this approval. But you don't need to approve the same tasks again for a restarted workflow. 
diff --git a/doc/Monitoring.md b/doc/Monitoring.md index 8206089a58..3864ea8726 100644 --- a/doc/Monitoring.md +++ b/doc/Monitoring.md @@ -22,11 +22,11 @@ Scraping Vitess variables is a good way to integrate Vitess into an existing mon Vitess also includes support for push-based metrics systems via plug-ins. Each Vitess component would need to be run with the `--emit_stats` flag. -By default, the stats_emit_period is 60s, so each component will push stats to the the selected backend every minute. This is configurable via the `--stats_emit_period` flag. +By default, the stats_emit_period is 60s, so each component will push stats to the selected backend every minute. This is configurable via the `--stats_emit_period` flag. -Vitess has preliminary plug-ins to support InfluxDB and OpenTSDB as push-based metrics backends. However, there is very limited support at this time, as InfluxDB itself is going through various API breaking changes. +Vitess has preliminary plug-ins to support OpenTSDB as a push-based metrics backend. -It should be fairly straightforward to write your own plug-in, if you want to support a different backend. The plug-in package simply needs to implement the `PushBackend` interface of the `stats` package. For an example, you can see the [InfluxDB plugin](https://github.com/vitessio/vitess/blob/master/go/stats/influxdbbackend/influxdb_backend.go). +It should be fairly straightforward to write your own plug-in, if you want to support a different backend. The plug-in package simply needs to implement the `PushBackend` interface of the `stats` package. For an example, you can see the [OpenTSDB plugin](https://github.com/vitessio/vitess/blob/master/go/stats/opentsdb/opentsdb.go). Once you’ve written the backend plug-in, you also need to register the plug-in from within all the relevant Vitess binaries. An example of how to do this can be seen in [this pull request](https://github.com/vitessio/vitess/pull/469). @@ -36,7 +36,7 @@ Connecting Vitess to a push-based metrics system can be useful if you’re alrea ## Monitoring with Kubernetes -The existing methods for integrating metrics are not supported in a Kubernetes environment by the Vitess team yet, but are on the roadmap for the future. However, it should be possible to get the InfluxDB backend working with Kubernetes, similar to how [Heapster for Kubernetes works](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/cluster/addons/cluster-monitoring). +The existing methods for integrating metrics are not supported in a Kubernetes environment by the Vitess team yet, but are on the roadmap for the future. However, it should be possible to get the Prometheus backend working with Kubernetes, similar to how [Heapster for Kubernetes works](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/prometheus). In the meantime, if you run into issues or have questions, please post on our [forum](https://groups.google.com/forum/#!forum/vitess). diff --git a/doc/ScalingMySQL.md b/doc/ScalingMySQL.md index 16139991c2..45b2544388 100644 --- a/doc/ScalingMySQL.md +++ b/doc/ScalingMySQL.md @@ -54,7 +54,7 @@ Setting up these components directly -- for example, writing your own topology s * *Recommended*. Vitess has basic support for identifying or changing a master, but it doesn't aim to fully address this feature. As such, we recommend using another program, like [Orchestrator](https://github.com/github/orchestrator), to monitor the health of your servers and to change your master database when necessary. 
(In a sharded database, each shard has a master.) -* *Recommended*. You should have a way to monitor your database topology and set up alerts as needed. Vitess components facilitate this monitoring by exporting a lot of runtime variables, like QPS over the last few minutes, error rates, and query latency. The variables are exported in JSON format, and Vitess also supports an InfluxDB plug-in. +* *Recommended*. You should have a way to monitor your database topology and set up alerts as needed. Vitess components facilitate this monitoring by exporting a lot of runtime variables, like QPS over the last few minutes, error rates, and query latency. The variables are exported in JSON format, and Vitess also supports a Prometheus plug-in. * *Optional*. Using the Kubernetes scripts as a base, you could run Vitess components with other configuration management systems (like Puppet) or frameworks (like Mesos or AWS images). diff --git a/doc/SchemaManagement.md b/doc/SchemaManagement.md index c83e049153..1bdfcc6da6 100644 --- a/doc/SchemaManagement.md +++ b/doc/SchemaManagement.md @@ -64,7 +64,7 @@ ValidateSchemaShard user/0 The [ValidateSchemaKeyspace]({% link reference/vtctl.md %}#validateschemakeyspace) command confirms that all of the tablets in a given keyspace have -the the same schema as the master tablet on shard 0 +the same schema as the master tablet on shard 0 in that keyspace. Thus, whereas the ValidateSchemaShard command confirms the consistency of the schema on tablets within a shard for a given keyspace, ValidateSchemaKeyspace confirms the diff --git a/doc/ServerConfiguration.md b/doc/ServerConfiguration.md index 0702031017..32f1908720 100644 --- a/doc/ServerConfiguration.md +++ b/doc/ServerConfiguration.md @@ -71,7 +71,7 @@ This rule is not strictly enforced. You are allowed to add these things, but at Similar guidelines should be used when deciding to bypass Vitess to send statements directly to MySQL. -Vitess also requires you to turn on STRICT_TRANS_TABLES mode. Otherwise, it cannot accurately predict what will be written to the database. +Vitess also requires you to turn on STRICT_TRANS_TABLES or STRICT_ALL_TABLES mode. Otherwise, it cannot accurately predict what will be written to the database. It’s safe to apply backward compatible DDLs directly to MySQL. VTTablets can be configured to periodically check the schema for changes. diff --git a/doc/TabletRouting.md b/doc/TabletRouting.md index 6d32f5f8ae..167975e646 100644 --- a/doc/TabletRouting.md +++ b/doc/TabletRouting.md @@ -93,36 +93,7 @@ There are two implementations of the Gateway interface: discovery section, one per cell) as a source of tablets, a HealthCheck module to watch their health, and a TabletStatsCache to collect all the health information. Based on this data, it can find the best tablet to use. -* l2VTGateGateway: It keeps a map of l2vtgate processes to send queries to. See - next section for more details. -## l2vtgate - -As we started increasing the number of tablets in a cell, it became clear that a -bottleneck of the system was going to be how many tablets a single vtgate is -connecting to. Since vtgate maintains a streaming health check connection per -tablet, the number of these connections can grow to large numbers. It is common -for vtgate to watch tablets in other cells, to be able to find the master -tablet. - -So l2vtgate came to exist, based on very similar concepts and interfaces: - -* l2vtgate is an extra hop between a vtgate pool and tablets. 
-* A l2vtgate pool connects to a subset of tablets, therefore it can have a - reasonable number of streaming health connections. Externally, it exposes the - QueryService RPC interface (that has the Target for the query, keyspace / - shard / tablet type). Internally, it uses a discoveryGateway, as usual. -* vtgate connects to l2vtgate pools (using the l2VTGateGateway instead of the - discoveryGateway). It has a map of which keyspace / shard / tablet type needs - to go to wich l2vtgate pool. At this point, vtgate doesn't maintain any health - information about the tablets, it lets l2vtgate handle it. - -Note l2vtgate is not an ideal solution as it is now. For instance, if there are -two cells, and the master for a shard can be in either, l2vtgate still has to -watch the tablets in both cells, to know where the master is. Ideally, we'd want -l2vtgate to be collocated with the tablets in a given cell, and not go -cross-cell. - # Extensions, work in progress ## Regions, cross-cell targeting @@ -169,31 +140,6 @@ between vtgate and l2vtgate: This would also be a good time to merge the vtgate code that uses the VSchema with the code that doesn't for SrvKeyspace access. -## Hybrid Gateway - -It would be nice to re-organize the code a bit inside vtgate to allow for an -hybrid gateway, and get rid of l2vtgate alltogether: - -* vtgate would use the discoveryGateway to watch the tablets in the current cell - (and optionally to any other cell we still want to consider local). -* vtgate would use l2vtgateGateway to watch the tablets in a different cell. -* vtgate would expose the RPC APIs currently exposed by the l2vtgate process. - -So vtgate would watch the tablets in the local cell only, but also know what -healthy tablets are in the other cells, and be able to send query to them -through their vtgate. The extra hop to the other cell vtgate should be a small -latency price to pay, compared to going cross-cell already. - -So queries would go one of two routes: - -* client(cell1) -> vtgate(cell1) -> tablet(cell1) -* client(cell1) -> vtgate(cell1) -> vtgate(cell2) -> tablet(cell2) - -If the number of tablets in a given cell is still too high for the local vtgate -pool, two or more pools can still be created, each of them knowing about a -subset of the tablets. And they would just forward queries to each others when -addressing the other tablet set. - ## Config-based routing Another possible extension would be to group all routing options for vtgate in a diff --git a/doc/Upgrading.md b/doc/Upgrading.md index efc5f59361..625c61aaaf 100644 --- a/doc/Upgrading.md +++ b/doc/Upgrading.md @@ -2,7 +2,7 @@ This document highlights things to look after when upgrading a Vitess production installation to a newer Vitess release. -Generally speaking, upgrading Vitess is a safe and and easy process because it is explicitly designed for it. This is because in YouTube we follow the practice of releasing new versions often (usually from the tip of the Git master branch). +Generally speaking, upgrading Vitess is a safe and easy process because it is explicitly designed for it. This is because in YouTube we follow the practice of releasing new versions often (usually from the tip of the Git master branch). 
## Compatibility diff --git a/doc/V3HighLevelDesign.md b/doc/V3HighLevelDesign.md index ebbd647fff..b599f7e89b 100644 --- a/doc/V3HighLevelDesign.md +++ b/doc/V3HighLevelDesign.md @@ -1311,7 +1311,7 @@ When two nodes are grouped, the current join condition becomes the root of the n * If it’s a JOIN, the new property is the more restrictive of the two nodes. So, if one of them is a Route, then the new node is also a Route. * For a LEFT JOIN, the new property is the same as the LHS node. -If the grouping conditions are not met, then the node remains a join node. In this case, we have to see if the ON clause conditions can be pushed down into the left and/or right nodes. By the fact that the current join is split into two, the ON clause cannot be be pushed as is. Instead, we use associativity rules to our benefit and merge the ON clause conditions into the WHERE clauses of the underlying nodes. The rules are the same as the ones described for a normal WHERE clause. +If the grouping conditions are not met, then the node remains a join node. In this case, we have to see if the ON clause conditions can be pushed down into the left and/or right nodes. By the fact that the current join is split into two, the ON clause cannot be pushed as is. Instead, we use associativity rules to our benefit and merge the ON clause conditions into the WHERE clauses of the underlying nodes. The rules are the same as the ones described for a normal WHERE clause. But left joins are slightly different, because the join condition is applied *to the RHS only*. Also, the condition cannot be further pushed into other nested left joins, because they will change the meaning of the statement. For example: @@ -1491,7 +1491,7 @@ If a, b and c where in different groups, the output would be: a b where (b.id=a.id) and (cond1(a.col, b.col)) ``` -The cond2 expression gets pushed into the the where clause for table ‘c’ because it’s the right-most group that’s referenced by the condition. External references will be changed to appropriate bind variables by the rewiring phase. +The cond2 expression gets pushed into the where clause for table ‘c’ because it’s the right-most group that’s referenced by the condition. External references will be changed to appropriate bind variables by the rewiring phase. *Once VTGate acquires the ability to perform its own filters, should we stop pushing these conditions into the dependent queries and do it ourselves instead? The answer will usually be no. You almost always want to push down filters. This is because it will let the underlying database scan fewer rows, or choose better indexes. The more restrictive the query is, the better.* diff --git a/doc/V3VindexDesign.md b/doc/V3VindexDesign.md index f6d051b77d..a0f4e930b5 100644 --- a/doc/V3VindexDesign.md +++ b/doc/V3VindexDesign.md @@ -251,7 +251,7 @@ When you fire up the schema editor, it should take you to the load workflow. The The schema picks up the loaded JSON, parse it and display the various components of the schema in a page where the relationships are easily visualized. The vschema has four main components: keyspaces, tables, table classes and vindexes. -Keyspaces can be on a left navbar. Once you select the keyspaces, it will display the the rest of the three components in one column each. +Keyspaces can be on a left navbar. Once you select the keyspaces, it will display the rest of the three components in one column each. 
The schema editor will sanity check the JSON file for inconsistencies and flag them using various color codes: diff --git a/doc/VindexAsTable.md b/doc/VindexAsTable.md index 594b4ea4cd..55a9d529b2 100644 --- a/doc/VindexAsTable.md +++ b/doc/VindexAsTable.md @@ -37,7 +37,7 @@ For `select` statements, we can follow the V3 design principles, there will be a While analyzing the `WHERE` clause, if the primitive is a `vindexFunc`, we look for the three possible combinations listed above. Once they're matched, we can assign the corresponding opcode. -While analyizing the `SELECT` expression list, we verify that that the user has specified expressions as required by each opcode. +While analyzing the `SELECT` expression list, we verify that the user has specified expressions as required by each opcode. Joins and subqueries will not be allowed, at least for now. diff --git a/doc/VitessApi.md b/doc/VitessApi.md index 5333b61cb4..d4696429bd 100644 --- a/doc/VitessApi.md +++ b/doc/VitessApi.md @@ -488,7 +488,7 @@ Split a query into non-overlapping sub queries #### Request - SplitQueryRequest is the payload to SplitQuery. SplitQuery takes a "SELECT" query and generates a list of queries called "query-parts". Each query-part consists of the original query with an added WHERE clause that restricts the query-part to operate only on rows whose values in the the columns listed in the "split_column" field of the request (see below) are in a particular range. It is guaranteed that the set of rows obtained from executing each query-part on a database snapshot and merging (without deduping) the results is equal to the set of rows obtained from executing the original query on the same snapshot with the rows containing NULL values in any of the split_column's excluded. This is typically called by the MapReduce master when reading from Vitess. There it's desirable that the sets of rows returned by the query-parts have roughly the same size. + SplitQueryRequest is the payload to SplitQuery. SplitQuery takes a "SELECT" query and generates a list of queries called "query-parts". Each query-part consists of the original query with an added WHERE clause that restricts the query-part to operate only on rows whose values in the columns listed in the "split_column" field of the request (see below) are in a particular range. It is guaranteed that the set of rows obtained from executing each query-part on a database snapshot and merging (without deduping) the results is equal to the set of rows obtained from executing the original query on the same snapshot with the rows containing NULL values in any of the split_column's excluded. This is typically called by the MapReduce master when reading from Vitess. There it's desirable that the sets of rows returned by the query-parts have roughly the same size. ##### Parameters diff --git a/doc/meetups_notes/05-10-2018.md b/doc/meetups_notes/05-10-2018.md index a9ad8677d0..4f9b038105 100644 --- a/doc/meetups_notes/05-10-2018.md +++ b/doc/meetups_notes/05-10-2018.md @@ -33,7 +33,7 @@ * Should we require to add documentation for features as part of pull requests? - Not in the short term, it could discourage contributions from new comers. - - We should make easier for new comers to add documentation (add more structure and guidance in how to add documenation). + - We should make easier for new comers to add documentation (add more structure and guidance in how to add documentation). * We should be able to find a tech writer contractor that helps with editing / copy. 
* @zmagg knows tech writers that could help. They are remote. She will be making an intro to @jitten. * Some queries take a very long time without clear reason. diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common index 8db6dd15d5..86f5ad7e34 100644 --- a/docker/bootstrap/Dockerfile.common +++ b/docker/bootstrap/Dockerfile.common @@ -52,7 +52,7 @@ ENV PKG_CONFIG_PATH $VTROOT/lib ENV USER vitess # Copy files needed for bootstrap -COPY bootstrap.sh dev.env /vt/src/vitess.io/vitess/ +COPY bootstrap.sh dev.env build.env /vt/src/vitess.io/vitess/ COPY config /vt/src/vitess.io/vitess/config COPY third_party /vt/src/vitess.io/vitess/third_party COPY tools /vt/src/vitess.io/vitess/tools diff --git a/docker/bootstrap/Dockerfile.mariadb b/docker/bootstrap/Dockerfile.mariadb index 437ea49277..bc76702faa 100644 --- a/docker/bootstrap/Dockerfile.mariadb +++ b/docker/bootstrap/Dockerfile.mariadb @@ -9,5 +9,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins # Bootstrap Vitess WORKDIR /vt/src/vitess.io/vitess +ENV MYSQL_FLAVOR MariaDB USER vitess RUN ./bootstrap.sh diff --git a/docker/k8s/orchestrator/Dockerfile b/docker/k8s/orchestrator/Dockerfile index 02c4e1e385..b41d38d82b 100644 --- a/docker/k8s/orchestrator/Dockerfile +++ b/docker/k8s/orchestrator/Dockerfile @@ -4,19 +4,16 @@ FROM debian:stretch-slim RUN apt-get update && \ apt-get upgrade -qq && \ - apt-get install wget -qq --no-install-recommends && \ - wget https://github.com/github/orchestrator/releases/download/v3.0.13/orchestrator_3.0.13_amd64.deb && \ - dpkg -i orchestrator_3.0.13_amd64.deb && \ - rm orchestrator_3.0.13_amd64.deb && \ + apt-get install wget ca-certificates jq -qq --no-install-recommends && \ + wget https://github.com/github/orchestrator/releases/download/v3.0.14/orchestrator_3.0.14_amd64.deb && \ + dpkg -i orchestrator_3.0.14_amd64.deb && \ + rm orchestrator_3.0.14_amd64.deb && \ apt-get purge wget -qq && \ apt-get autoremove -qq && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -# Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt - -# Copy vtctlclient to be used to notify +# Copy vtctlclient to be used to notify COPY --from=k8s /vt/bin/vtctlclient /usr/bin/ WORKDIR /usr/local/orchestrator diff --git a/docker/k8s/vtctlclient/Dockerfile b/docker/k8s/vtctlclient/Dockerfile index bfe12e24f2..554fe3e816 100644 --- a/docker/k8s/vtctlclient/Dockerfile +++ b/docker/k8s/vtctlclient/Dockerfile @@ -13,8 +13,6 @@ COPY --from=k8s /vt/bin/vtctlclient /usr/bin/ # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ - useradd -r -g vitess --uid 1000 vitess && \ - chown -R vitess:vitess /vt && \ - chown -R vitess:vitess /vtdataroot + useradd -r -g vitess --uid 1000 vitess CMD ["/usr/bin/vtctlclient"] diff --git a/docker/k8s/vttablet/Dockerfile b/docker/k8s/vttablet/Dockerfile index 887a9f4589..9af272a84a 100644 --- a/docker/k8s/vttablet/Dockerfile +++ b/docker/k8s/vttablet/Dockerfile @@ -5,7 +5,7 @@ FROM debian:stretch-slim # TODO: remove when https://github.com/vitessio/vitess/issues/3553 is fixed RUN apt-get update && \ apt-get upgrade -qq && \ - apt-get install mysql-client jq -qq --no-install-recommends && \ + apt-get install wget mysql-client jq -qq --no-install-recommends && \ apt-get autoremove && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* diff --git a/docker/test/run.sh b/docker/test/run.sh index fbc9d8ed89..15b305d220 100755 --- a/docker/test/run.sh +++ 
b/docker/test/run.sh @@ -162,7 +162,6 @@ esac # Construct "cp" command to copy the source code. # -# TODO(mberlin): Copy vendor/vendor.json file such that we can run a diff against the file on the image. # Copy the full source tree except: # - vendor # That's because these directories are already part of the image. @@ -172,11 +171,19 @@ esac # we do not move or overwrite the existing files while copying the other # directories. Therefore, the existing files do not count as changed and will # not be part of the new Docker layer of the cache image. -copy_src_cmd="cp -R /tmp/src/!(vendor) ." +copy_src_cmd="cp -R /tmp/src/!(vendor|bootstrap.sh) ." # Copy the .git directory because travis/check_make_proto.sh needs a working # Git repository. copy_src_cmd=$(append_cmd "$copy_src_cmd" "cp -R /tmp/src/.git .") +# Copy vendor/vendor.json file if it changed +run_bootstrap_cmd="if [[ \$(diff -w vendor/vendor.json /tmp/src/vendor/vendor.json) ]]; then cp -f /tmp/src/vendor/vendor.json vendor/; sync_vendor=1; fi" +# Copy bootstrap.sh if it changed +run_bootstrap_cmd=$(append_cmd "$run_bootstrap_cmd" "if [[ \$(diff -w bootstrap.sh /tmp/src/bootstrap.sh) ]]; then cp -f /tmp/src/bootstrap.sh .; bootstrap=1; fi") +# run bootstrap.sh if necessary +run_bootstrap_cmd=$(append_cmd "$run_bootstrap_cmd" "if [[ -n \$bootstrap ]]; then ./bootstrap.sh; else if [[ -n \$sync_vendor ]]; then govendor sync; fi; fi") +copy_src_cmd=$(append_cmd "$copy_src_cmd" "$run_bootstrap_cmd") + # Construct the command we will actually run. # # Uncomment the next line if you need to debug "bashcmd". diff --git a/examples/helm/kmysql.sh b/examples/helm/kmysql.sh index 5c997dfee2..d2a07e4cb8 100755 --- a/examples/helm/kmysql.sh +++ b/examples/helm/kmysql.sh @@ -19,4 +19,18 @@ host=$(minikube service vtgate-zone1 --format "{{.IP}}" | tail -n 1) port=$(minikube service vtgate-zone1 --format "{{.Port}}" | tail -n 1) +if [ -z "$port" ]; then + # This checks whether K8s is running on a single node set up by kubeadm + if [ $(kubectl get nodes | grep -v NAM | wc -l) -eq 1 -o $(kubectl get nodes | grep -v NAM | grep master | wc -l ) -eq 1 ]; then + host="127.0.0.1" + port=`kubectl describe service vtgate-zone1 | grep NodePort | grep mysql | awk '{print $3}' | awk -F'/' '{print $1}'` + fi +fi + +if [ -z "$port" ]; then + echo "Error: failed to obtain [host:port] from minikube or kubectl." + exit 1; + +fi + mysql -h "$host" -P "$port" $* diff --git a/examples/local/README.md b/examples/local/README.md index 5ea31be0f6..245dbd4bb9 100644 --- a/examples/local/README.md +++ b/examples/local/README.md @@ -5,6 +5,6 @@ local machine, which may be useful for experimentation. These scripts can also serve as a starting point for configuring Vitess into your preferred deployment strategy or toolset. -See the [Run Vitess Locally](http://vitess.io/getting-started/local-instance/) -guide for instructions on using these scripts. +See the [Run Vitess Locally](https://vitess.io/docs/tutorials/local/) +tutorial ("Start a Vitess cluster" section) for instructions on using these scripts. diff --git a/examples/local/env.sh b/examples/local/env.sh index dab3ae00be..72398b2e1f 100644 --- a/examples/local/env.sh +++ b/examples/local/env.sh @@ -16,7 +16,7 @@ hostname=`hostname -f` vtctld_web_port=15000 # Set up environment. -export VTTOP=$VTROOT/src/vitess.io/vitess +export VTTOP=${VTTOP-$VTROOT/src/vitess.io/vitess} # Try to find mysqld_safe on PATH.
if [ -z "$VT_MYSQL_ROOT" ]; then diff --git a/go/cgzip/adler32.go b/go/cgzip/adler32.go deleted file mode 100644 index 87ca8e1758..0000000000 --- a/go/cgzip/adler32.go +++ /dev/null @@ -1,79 +0,0 @@ -// +build cgo - -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cgzip - -/* -#cgo CFLAGS: -Werror=implicit -#cgo pkg-config: zlib - -#include "zlib.h" -*/ -import "C" - -import ( - "hash" - "unsafe" -) - -type adler32Hash struct { - adler C.uLong -} - -// NewAdler32 creates an empty buffer which has an adler32 of '1'. The go -// hash/adler32 does the same. -func NewAdler32() hash.Hash32 { - a := &adler32Hash{} - a.Reset() - return a -} - -// io.Writer interface -func (a *adler32Hash) Write(p []byte) (n int, err error) { - if len(p) > 0 { - a.adler = C.adler32(a.adler, (*C.Bytef)(unsafe.Pointer(&p[0])), (C.uInt)(len(p))) - } - return len(p), nil -} - -// hash.Hash interface -func (a *adler32Hash) Sum(b []byte) []byte { - s := a.Sum32() - b = append(b, byte(s>>24)) - b = append(b, byte(s>>16)) - b = append(b, byte(s>>8)) - b = append(b, byte(s)) - return b -} - -func (a *adler32Hash) Reset() { - a.adler = C.adler32(0, (*C.Bytef)(unsafe.Pointer(nil)), 0) -} - -func (a *adler32Hash) Size() int { - return 4 -} - -func (a *adler32Hash) BlockSize() int { - return 1 -} - -// hash.Hash32 interface -func (a *adler32Hash) Sum32() uint32 { - return uint32(a.adler) -} diff --git a/go/cgzip/cgzip_test.go b/go/cgzip/cgzip_test.go deleted file mode 100644 index b3e4e62184..0000000000 --- a/go/cgzip/cgzip_test.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cgzip - -import ( - "bytes" - "compress/gzip" - "fmt" - "hash/adler32" - "hash/crc32" - "hash/crc64" - "io" - "math/rand" - "os/exec" - "sync" - "testing" - "time" -) - -type prettyTimer struct { - name string - before time.Time -} - -func newPrettyTimer(name string) *prettyTimer { - return &prettyTimer{name, time.Now()} -} - -func (pt *prettyTimer) stopAndPrintCompress(t *testing.T, size, processed int) { - duration := time.Since(pt.before) - t.Log(pt.name + ":") - t.Log(" size :", size) - t.Log(" time :", duration.String()) - if duration != 0 { - t.Logf(" speed: %.0f KB/s", float64(processed)/duration.Seconds()/1024.0) - } else { - t.Log(" processed:", processed, "B") - } -} - -func (pt *prettyTimer) stopAndPrintUncompress(t *testing.T, processed int) { - duration := time.Since(pt.before) - t.Log(" " + pt.name + ":") - t.Log(" time :", duration.String()) - if duration != 0 { - t.Logf(" speed: %.0f KB/s", float64(processed)/duration.Seconds()/1024.0) - } else { - t.Log(" processed:", processed, "B") - } -} - -func compareCompressedBuffer(t *testing.T, source []byte, compressed *bytes.Buffer) { - // compare using go's gunzip - toGunzip := bytes.NewBuffer(compressed.Bytes()) - gunzip, err := gzip.NewReader(toGunzip) - if err != nil { - t.Errorf("gzip.NewReader failed: %v", err) - } - uncompressed := &bytes.Buffer{} - pt := newPrettyTimer("go unzip") - _, err = io.Copy(uncompressed, gunzip) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - pt.stopAndPrintUncompress(t, uncompressed.Len()) - if !bytes.Equal(source, uncompressed.Bytes()) { - t.Errorf("Bytes are not equal") - } - - // compare using cgzip gunzip - toGunzip = bytes.NewBuffer(compressed.Bytes()) - cgunzip, err := NewReader(toGunzip) - if err != nil { - t.Errorf("cgzip.NewReader failed: %v", err) - } - uncompressed = &bytes.Buffer{} - pt = newPrettyTimer("cgzip unzip") - _, err = io.Copy(uncompressed, cgunzip) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - pt.stopAndPrintUncompress(t, uncompressed.Len()) - if !bytes.Equal(source, uncompressed.Bytes()) { - t.Errorf("Bytes are not equal") - } -} - -func testChecksums(t *testing.T, data []byte) { - t.Log("Checksums:") - - // crc64 with go library - goCrc64 := crc64.New(crc64.MakeTable(crc64.ECMA)) - toChecksum := bytes.NewBuffer(data) - pt := newPrettyTimer("go crc64") - _, err := io.Copy(goCrc64, toChecksum) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - pt.stopAndPrintUncompress(t, len(data)) - - // adler32 with go library - goAdler32 := adler32.New() - toChecksum = bytes.NewBuffer(data) - pt = newPrettyTimer("go adler32") - _, err = io.Copy(goAdler32, toChecksum) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - goResult := goAdler32.Sum32() - pt.stopAndPrintUncompress(t, len(data)) - t.Log(" sum :", goResult) - - // adler32 with cgzip library - cgzipAdler32 := NewAdler32() - toChecksum = bytes.NewBuffer(data) - pt = newPrettyTimer("cgzip adler32") - _, err = io.Copy(cgzipAdler32, toChecksum) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - cgzipResult := cgzipAdler32.Sum32() - pt.stopAndPrintUncompress(t, len(data)) - t.Log(" sum :", cgzipResult) - - // test both results are the same - if goResult != cgzipResult { - t.Errorf("go and cgzip adler32 mismatch") - } - - // crc32 with go library - goCrc32 := crc32.New(crc32.MakeTable(crc32.IEEE)) - toChecksum = bytes.NewBuffer(data) - pt = newPrettyTimer("go crc32") - _, err = io.Copy(goCrc32, toChecksum) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - goResult 
= goCrc32.Sum32() - pt.stopAndPrintUncompress(t, len(data)) - t.Log(" sum :", goResult) - - // crc32 with cgzip library - cgzipCrc32 := NewCrc32() - toChecksum = bytes.NewBuffer(data) - pt = newPrettyTimer("cgzip crc32") - _, err = io.Copy(cgzipCrc32, toChecksum) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - cgzipResult = cgzipCrc32.Sum32() - pt.stopAndPrintUncompress(t, len(data)) - t.Log(" sum :", cgzipResult) - - // test both results are the same - if goResult != cgzipResult { - t.Errorf("go and cgzip crc32 mismatch") - } -} - -func runCompare(t *testing.T, testSize int, level int) { - - // create a test chunk, put semi-random bytes in there - // (so compression actually will compress some) - toEncode := make([]byte, testSize) - where := 0 - for where < testSize { - toFill := rand.Intn(16) - filler := 0x61 + rand.Intn(24) - for i := 0; i < toFill && where < testSize; i++ { - toEncode[where] = byte(filler) - where++ - } - } - t.Log("Original size:", len(toEncode)) - - // now time a regular gzip writer to a Buffer - compressed := &bytes.Buffer{} - reader := bytes.NewBuffer(toEncode) - pt := newPrettyTimer("Go gzip") - gz, err := gzip.NewWriterLevel(compressed, level) - _, err = io.Copy(gz, reader) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - gz.Close() - pt.stopAndPrintCompress(t, compressed.Len(), len(toEncode)) - compareCompressedBuffer(t, toEncode, compressed) - - // now time a forked gzip - compressed2 := &bytes.Buffer{} - reader = bytes.NewBuffer(toEncode) - cmd := exec.Command("gzip", fmt.Sprintf("-%v", level), "-c") - stdout, err := cmd.StdoutPipe() - if err != nil { - t.Errorf("StdoutPipe failed: %v", err) - } - stdin, err := cmd.StdinPipe() - if err != nil { - t.Errorf("StdinPipe failed: %v", err) - } - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - io.Copy(compressed2, stdout) - wg.Done() - }() - if err = cmd.Start(); err != nil { - t.Errorf("Start failed: %v", err) - } - pt = newPrettyTimer("Forked gzip") - _, err = io.Copy(stdin, reader) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - stdin.Close() - wg.Wait() - if err := cmd.Wait(); err != nil { - t.Errorf("Wait failed: %v", err) - } - pt.stopAndPrintCompress(t, compressed2.Len(), len(toEncode)) - compareCompressedBuffer(t, toEncode, compressed2) - - // and time the cgo version - compressed3 := &bytes.Buffer{} - reader = bytes.NewBuffer(toEncode) - pt = newPrettyTimer("cgzip") - cgz, err := NewWriterLevel(compressed3, level) - if err != nil { - t.Errorf("NewWriterLevel failed: %v", err) - } - _, err = io.Copy(cgz, reader) - if err != nil { - t.Errorf("Copy failed: %v", err) - } - if err := cgz.Flush(); err != nil { - t.Errorf("Flush failed: %v", err) - } - if err := cgz.Close(); err != nil { - t.Errorf("Close failed: %v", err) - } - pt.stopAndPrintCompress(t, compressed3.Len(), len(toEncode)) - compareCompressedBuffer(t, toEncode, compressed3) - - testChecksums(t, toEncode) -} - -// use 'go test -v' and bigger sizes to show meaningful rates -func TestCompare(t *testing.T) { - testSize := 1 * 1024 * 1024 - if testing.Short() { - testSize /= 10 - } - runCompare(t, testSize, 1) -} - -func TestCompareBest(t *testing.T) { - testSize := 1 * 1024 * 1024 - if testing.Short() { - testSize /= 10 - } - runCompare(t, testSize, 9) -} diff --git a/go/cgzip/crc32.go b/go/cgzip/crc32.go deleted file mode 100644 index 182e2eec7b..0000000000 --- a/go/cgzip/crc32.go +++ /dev/null @@ -1,79 +0,0 @@ -// +build cgo - -/* -Copyright 2017 Google Inc. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cgzip - -/* -#cgo CFLAGS: -Werror=implicit -#cgo pkg-config: zlib - -#include "zlib.h" -*/ -import "C" - -import ( - "hash" - "unsafe" -) - -type crc32Hash struct { - crc C.uLong -} - -// NewCrc32 creates an empty buffer which has an crc32 of '1'. The go -// hash/crc32 does the same. -func NewCrc32() hash.Hash32 { - c := &crc32Hash{} - c.Reset() - return c -} - -// io.Writer interface -func (a *crc32Hash) Write(p []byte) (n int, err error) { - if len(p) > 0 { - a.crc = C.crc32(a.crc, (*C.Bytef)(unsafe.Pointer(&p[0])), (C.uInt)(len(p))) - } - return len(p), nil -} - -// hash.Hash interface -func (a *crc32Hash) Sum(b []byte) []byte { - s := a.Sum32() - b = append(b, byte(s>>24)) - b = append(b, byte(s>>16)) - b = append(b, byte(s>>8)) - b = append(b, byte(s)) - return b -} - -func (a *crc32Hash) Reset() { - a.crc = C.crc32(0, (*C.Bytef)(unsafe.Pointer(nil)), 0) -} - -func (a *crc32Hash) Size() int { - return 4 -} - -func (a *crc32Hash) BlockSize() int { - return 1 -} - -// hash.Hash32 interface -func (a *crc32Hash) Sum32() uint32 { - return uint32(a.crc) -} diff --git a/go/cgzip/doc.go b/go/cgzip/doc.go deleted file mode 100644 index 2f501d6c3a..0000000000 --- a/go/cgzip/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cgzip wraps the C library for gzip. -package cgzip diff --git a/go/cgzip/eof_read_test.go b/go/cgzip/eof_read_test.go deleted file mode 100644 index 3cdd9faaa1..0000000000 --- a/go/cgzip/eof_read_test.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cgzip - -import ( - "io" - "io/ioutil" - "testing" - - "vitess.io/vitess/go/testfiles" -) - -// specialReader is a test class that will return bytes it reads from a file, -// returning EOF and data in the last chunk. 
-type specialReader struct { - t *testing.T - contents []byte - sent int -} - -func newSpecialReader(t *testing.T, filename string) *specialReader { - filename = testfiles.Locate(filename) - b, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatalf("Cannot read file %v: %v", filename, err) - } - return &specialReader{t, b, 0} -} - -// Read is the implementation of Reader -func (sr *specialReader) Read(p []byte) (int, error) { - if len(p) > len(sr.contents)-sr.sent { - toCopy := len(sr.contents) - sr.sent - sr.t.Logf("Sending %v bytes and EOF", toCopy) - sr.sent += copy(p, sr.contents[sr.sent:]) - return toCopy, io.EOF - } - toCopy := len(p) - sr.sent += copy(p, sr.contents[sr.sent:sr.sent+toCopy]) - sr.t.Logf("Sending %v bytes", toCopy) - return toCopy, nil -} - -// TestEofAndData is the main test here: if we return data and EOF, -// it needs to be fully processed. -// The file is a 55k file, that uncompresses into a 10 MB file. -// So it will be read as 32k + 22k, and decompressed into 2MB + 2MB + 1M and -// then 2MB + 2MB + 1M again. So it's a great test for corner cases. -func TestEofAndData(t *testing.T) { - r := newSpecialReader(t, "cgzip_eof.gz") - gz, err := NewReader(r) - if err != nil { - t.Fatalf("NewReader failed: %v", err) - } - - n := 0 - dst := make([]byte, 2*1024*1024) - for { - nRead, err := gz.Read(dst) - t.Logf("Got: %v %v", nRead, err) - n += nRead - switch err { - case nil: - case io.EOF: - if n != 10485760 { - t.Fatalf("Read wrong number of bytes: got %v expected 10485760", n) - } - - // test we also get 0 / EOF if we read again - nRead, err = gz.Read(dst) - if nRead != 0 || err != io.EOF { - t.Fatalf("After-EOF read got %v %v", nRead, err) - } - return - default: - t.Fatalf("Unexpected error: %v", err) - } - } -} diff --git a/go/cgzip/pure.go b/go/cgzip/pure.go deleted file mode 100644 index 55221f786a..0000000000 --- a/go/cgzip/pure.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !cgo - -// A slower, pure go alternative to cgzip to allow for cross compilation. - -package cgzip - -import ( - "compress/gzip" - "hash/adler32" - "hash/crc32" -) - -// Writer is an io.WriteCloser. Writes to a Writer are compressed. -type Writer = gzip.Writer - -var ( - Z_BEST_SPEED = gzip.BestSpeed - NewWriterLevel = gzip.NewWriterLevel - NewReader = gzip.NewReader - NewCrc32 = crc32.NewIEEE - NewAdler32 = adler32.New -) diff --git a/go/cgzip/reader.go b/go/cgzip/reader.go deleted file mode 100644 index 4766976c5b..0000000000 --- a/go/cgzip/reader.go +++ /dev/null @@ -1,119 +0,0 @@ -// +build cgo - -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cgzip - -import "io" - -// err starts out as nil -// we will call inflateEnd when we set err to a value: -// - whatever error is returned by the underlying reader -// - io.EOF if Close was called -type reader struct { - r io.Reader - in []byte - strm zstream - err error - skipIn bool -} - -// NewReader returns a new cgzip.reader for reading gzip files with the C gzip -// library. 
-func NewReader(r io.Reader) (io.ReadCloser, error) { - return NewReaderBuffer(r, DEFAULT_COMPRESSED_BUFFER_SIZE) -} - -// NewReaderBuffer returns a new cgzip.reader with a given buffer size for -// reading gzip files with the C gzip library. -func NewReaderBuffer(r io.Reader, bufferSize int) (io.ReadCloser, error) { - z := &reader{r: r, in: make([]byte, bufferSize)} - if err := z.strm.inflateInit(); err != nil { - return nil, err - } - return z, nil -} - -// Read reads from the gz stream. -func (z *reader) Read(p []byte) (int, error) { - if z.err != nil { - return 0, z.err - } - - if len(p) == 0 { - return 0, nil - } - - // read and deflate until the output buffer is full - z.strm.setOutBuf(p, len(p)) - - for { - // if we have no data to inflate, read more - if !z.skipIn && z.strm.availIn() == 0 { - var n int - n, z.err = z.r.Read(z.in) - // If we got data and EOF, pretend we didn't get the - // EOF. That way we will return the right values - // upstream. Note this will trigger another read - // later on, that should return (0, EOF). - if n > 0 && z.err == io.EOF { - z.err = nil - } - - // FIXME(alainjobart) this code is not compliant with - // the Reader interface. We should process all the - // data we got from the reader, and then return the - // error, whatever it is. - if (z.err != nil && z.err != io.EOF) || (n == 0 && z.err == io.EOF) { - z.strm.inflateEnd() - return 0, z.err - } - - z.strm.setInBuf(z.in, n) - } else { - z.skipIn = false - } - - // inflate some - ret, err := z.strm.inflate(zNoFlush) - if err != nil { - z.err = err - z.strm.inflateEnd() - return 0, z.err - } - - // if we read something, we're good - have := len(p) - z.strm.availOut() - if have > 0 { - z.skipIn = ret == Z_OK && z.strm.availOut() == 0 - return have, z.err - } - } -} - -// Close closes the Reader. It does not close the underlying io.Reader. -func (z *reader) Close() error { - if z.err != nil { - if z.err != io.EOF { - return z.err - } - return nil - } - z.strm.inflateEnd() - z.err = io.EOF - return nil -} diff --git a/go/cgzip/writer.go b/go/cgzip/writer.go deleted file mode 100644 index 8a5b4f7893..0000000000 --- a/go/cgzip/writer.go +++ /dev/null @@ -1,158 +0,0 @@ -// +build cgo - -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cgzip - -import ( - "fmt" - "io" -) - -const ( - // Allowed flush values - Z_NO_FLUSH = 0 - Z_PARTIAL_FLUSH = 1 - Z_SYNC_FLUSH = 2 - Z_FULL_FLUSH = 3 - Z_FINISH = 4 - Z_BLOCK = 5 - Z_TREES = 6 - - // Return codes - Z_OK = 0 - Z_STREAM_END = 1 - Z_NEED_DICT = 2 - Z_ERRNO = -1 - Z_STREAM_ERROR = -2 - Z_DATA_ERROR = -3 - Z_MEM_ERROR = -4 - Z_BUF_ERROR = -5 - Z_VERSION_ERROR = -6 - - // compression levels - Z_NO_COMPRESSION = 0 - Z_BEST_SPEED = 1 - Z_BEST_COMPRESSION = 9 - Z_DEFAULT_COMPRESSION = -1 - - // our default buffer size - // most go io functions use 32KB as buffer size, so 32KB - // works well here for compressed data buffer - DEFAULT_COMPRESSED_BUFFER_SIZE = 32 * 1024 -) - -// err starts out as nil -// we will call deflateEnd when we set err to a value: -// - whatever error is returned by the underlying writer -// - io.EOF if Close was called -type Writer struct { - w io.Writer - out []byte - strm zstream - err error -} - -func NewWriter(w io.Writer) *Writer { - z, _ := NewWriterLevelBuffer(w, Z_DEFAULT_COMPRESSION, DEFAULT_COMPRESSED_BUFFER_SIZE) - return z -} - -func NewWriterLevel(w io.Writer, level int) (*Writer, error) { - return NewWriterLevelBuffer(w, level, DEFAULT_COMPRESSED_BUFFER_SIZE) -} - -func NewWriterLevelBuffer(w io.Writer, level, bufferSize int) (*Writer, error) { - z := &Writer{w: w, out: make([]byte, bufferSize)} - if err := z.strm.deflateInit(level); err != nil { - return nil, err - } - return z, nil -} - -// this is the main function: it advances the write with either -// new data or something else to do, like a flush -func (z *Writer) write(p []byte, flush int) int { - if len(p) == 0 { - z.strm.setInBuf(nil, 0) - } else { - z.strm.setInBuf(p, len(p)) - } - // we loop until we don't get a full output buffer - // each loop completely writes the output buffer to the underlying - // writer - for { - // deflate one buffer - z.strm.setOutBuf(z.out, len(z.out)) - z.strm.deflate(flush) - - // write everything - from := 0 - have := len(z.out) - int(z.strm.availOut()) - for have > 0 { - var n int - n, z.err = z.w.Write(z.out[from:have]) - if z.err != nil { - z.strm.deflateEnd() - return 0 - } - from += n - have -= n - } - - // we stop trying if we get a partial response - if z.strm.availOut() != 0 { - break - } - } - // the library guarantees this - if z.strm.availIn() != 0 { - panic(fmt.Errorf("cgzip: Unexpected error (2)")) - } - return len(p) -} - -func (z *Writer) Write(p []byte) (n int, err error) { - if z.err != nil { - return 0, z.err - } - n = z.write(p, Z_NO_FLUSH) - return n, z.err -} - -func (z *Writer) Flush() error { - if z.err != nil { - return z.err - } - z.write(nil, Z_SYNC_FLUSH) - return z.err -} - -// Calling Close does not close the wrapped io.Writer originally -// passed to NewWriterX. -func (z *Writer) Close() error { - if z.err != nil { - return z.err - } - z.write(nil, Z_FINISH) - if z.err != nil { - return z.err - } - z.strm.deflateEnd() - z.err = io.EOF - return nil -} diff --git a/go/cgzip/zstream.go b/go/cgzip/zstream.go deleted file mode 100644 index a336b54959..0000000000 --- a/go/cgzip/zstream.go +++ /dev/null @@ -1,177 +0,0 @@ -// +build cgo - -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cgzip - -// See http://www.zlib.net/zlib_how.html for more information on this - -/* -#cgo CFLAGS: -Werror=implicit -#cgo pkg-config: zlib - -#include "zlib.h" - -// inflateInit2 is a macro, so using a wrapper function -int zstream_inflate_init(char *strm) { - ((z_stream*)strm)->zalloc = Z_NULL; - ((z_stream*)strm)->zfree = Z_NULL; - ((z_stream*)strm)->opaque = Z_NULL; - ((z_stream*)strm)->avail_in = 0; - ((z_stream*)strm)->next_in = Z_NULL; - return inflateInit2((z_stream*)strm, - 16+15); // 16 makes it understand only gzip files -} - -// deflateInit2 is a macro, so using a wrapper function -// using deflateInit2 instead of deflateInit to be able to specify gzip format -int zstream_deflate_init(char *strm, int level) { - ((z_stream*)strm)->zalloc = Z_NULL; - ((z_stream*)strm)->zfree = Z_NULL; - ((z_stream*)strm)->opaque = Z_NULL; - return deflateInit2((z_stream*)strm, level, Z_DEFLATED, - 16+15, // 16 makes it a gzip file, 15 is default - 8, Z_DEFAULT_STRATEGY); // default values -} - -unsigned int zstream_avail_in(char *strm) { - return ((z_stream*)strm)->avail_in; -} - -unsigned int zstream_avail_out(char *strm) { - return ((z_stream*)strm)->avail_out; -} - -char* zstream_msg(char *strm) { - return ((z_stream*)strm)->msg; -} - -void zstream_set_in_buf(char *strm, void *buf, unsigned int len) { - ((z_stream*)strm)->next_in = (Bytef*)buf; - ((z_stream*)strm)->avail_in = len; -} - -void zstream_set_out_buf(char *strm, void *buf, unsigned int len) { - ((z_stream*)strm)->next_out = (Bytef*)buf; - ((z_stream*)strm)->avail_out = len; -} - -int zstream_inflate(char *strm, int flag) { - return inflate((z_stream*)strm, flag); -} - -int zstream_deflate(char *strm, int flag) { - return deflate((z_stream*)strm, flag); -} - -void zstream_inflate_end(char *strm) { - inflateEnd((z_stream*)strm); -} - -void zstream_deflate_end(char *strm) { - deflateEnd((z_stream*)strm); -} -*/ -import "C" - -import ( - "fmt" - "unsafe" -) - -const ( - zNoFlush = C.Z_NO_FLUSH -) - -// z_stream is a buffer that's big enough to fit a C.z_stream. -// This lets us allocate a C.z_stream within Go, while keeping the contents -// opaque to the Go GC. Otherwise, the GC would look inside and complain that -// the pointers are invalid, since they point to objects allocated by C code. 
-type zstream [unsafe.Sizeof(C.z_stream{})]C.char - -func (strm *zstream) inflateInit() error { - result := C.zstream_inflate_init(&strm[0]) - if result != Z_OK { - return fmt.Errorf("cgzip: failed to initialize inflate (%v): %v", result, strm.msg()) - } - return nil -} - -func (strm *zstream) deflateInit(level int) error { - result := C.zstream_deflate_init(&strm[0], C.int(level)) - if result != Z_OK { - return fmt.Errorf("cgzip: failed to initialize deflate (%v): %v", result, strm.msg()) - } - return nil -} - -func (strm *zstream) inflateEnd() { - C.zstream_inflate_end(&strm[0]) -} - -func (strm *zstream) deflateEnd() { - C.zstream_deflate_end(&strm[0]) -} - -func (strm *zstream) availIn() int { - return int(C.zstream_avail_in(&strm[0])) -} - -func (strm *zstream) availOut() int { - return int(C.zstream_avail_out(&strm[0])) -} - -func (strm *zstream) msg() string { - return C.GoString(C.zstream_msg(&strm[0])) -} - -func (strm *zstream) setInBuf(buf []byte, size int) { - if buf == nil { - C.zstream_set_in_buf(&strm[0], nil, C.uint(size)) - } else { - C.zstream_set_in_buf(&strm[0], unsafe.Pointer(&buf[0]), C.uint(size)) - } -} - -func (strm *zstream) setOutBuf(buf []byte, size int) { - if buf == nil { - C.zstream_set_out_buf(&strm[0], nil, C.uint(size)) - } else { - C.zstream_set_out_buf(&strm[0], unsafe.Pointer(&buf[0]), C.uint(size)) - } -} - -func (strm *zstream) inflate(flag int) (int, error) { - ret := C.zstream_inflate(&strm[0], C.int(flag)) - switch ret { - case Z_NEED_DICT: - ret = Z_DATA_ERROR - fallthrough - case Z_DATA_ERROR, Z_MEM_ERROR: - return int(ret), fmt.Errorf("cgzip: failed to inflate (%v): %v", ret, strm.msg()) - } - return int(ret), nil -} - -func (strm *zstream) deflate(flag int) { - ret := C.zstream_deflate(&strm[0], C.int(flag)) - if ret == Z_STREAM_ERROR { - // all the other error cases are normal, - // and this should never happen - panic(fmt.Errorf("cgzip: Unexpected error (1)")) - } -} diff --git a/go/cmd/mysqlctl/plugin_influxdbbackend.go b/go/cmd/mysqlctl/plugin_influxdbbackend.go deleted file mode 100644 index fae7ccbb9a..0000000000 --- a/go/cmd/mysqlctl/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. - -import ( - _ "vitess.io/vitess/go/stats/influxdbbackend" -) diff --git a/go/cmd/mysqlctld/plugin_influxdbbackend.go b/go/cmd/mysqlctld/plugin_influxdbbackend.go deleted file mode 100644 index fae7ccbb9a..0000000000 --- a/go/cmd/mysqlctld/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. - -import ( - _ "vitess.io/vitess/go/stats/influxdbbackend" -) diff --git a/go/cmd/vtctl/plugin_influxdbbackend.go b/go/cmd/vtctl/plugin_influxdbbackend.go deleted file mode 100644 index fae7ccbb9a..0000000000 --- a/go/cmd/vtctl/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. - -import ( - _ "vitess.io/vitess/go/stats/influxdbbackend" -) diff --git a/go/cmd/vtctld/plugin_influxdbbackend.go b/go/cmd/vtctld/plugin_influxdbbackend.go deleted file mode 100644 index fae7ccbb9a..0000000000 --- a/go/cmd/vtctld/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. - -import ( - _ "vitess.io/vitess/go/stats/influxdbbackend" -) diff --git a/go/cmd/vtgate/plugin_grpcqueryservice.go b/go/cmd/vtgate/plugin_grpcqueryservice.go deleted file mode 100644 index 16c163d095..0000000000 --- a/go/cmd/vtgate/plugin_grpcqueryservice.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -// Imports and register the gRPC queryservice server - -import ( - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgate" - "vitess.io/vitess/go/vt/vttablet/grpcqueryservice" - "vitess.io/vitess/go/vt/vttablet/queryservice" -) - -func init() { - vtgate.RegisterL2VTGates = append(vtgate.RegisterL2VTGates, func(qs queryservice.QueryService) { - if servenv.GRPCCheckServiceMap("queryservice") { - grpcqueryservice.Register(servenv.GRPCServer, qs) - } - }) -} diff --git a/go/cmd/vtgate/plugin_influxdbbackend.go b/go/cmd/vtgate/plugin_influxdbbackend.go deleted file mode 100644 index fae7ccbb9a..0000000000 --- a/go/cmd/vtgate/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. - -import ( - _ "vitess.io/vitess/go/stats/influxdbbackend" -) diff --git a/go/cmd/vttablet/plugin_influxdbbackend.go b/go/cmd/vttablet/plugin_influxdbbackend.go deleted file mode 100644 index fae7ccbb9a..0000000000 --- a/go/cmd/vttablet/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. - -import ( - _ "vitess.io/vitess/go/stats/influxdbbackend" -) diff --git a/go/cmd/vtworker/plugin_influxdbbackend.go b/go/cmd/vtworker/plugin_influxdbbackend.go deleted file mode 100644 index fae7ccbb9a..0000000000 --- a/go/cmd/vtworker/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. 
- -import ( - _ "vitess.io/vitess/go/stats/influxdbbackend" -) diff --git a/go/cmd/zkctld/plugin_influxdbbackend.go b/go/cmd/zkctld/plugin_influxdbbackend.go deleted file mode 100644 index fae7ccbb9a..0000000000 --- a/go/cmd/zkctld/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. - -import ( - _ "vitess.io/vitess/go/stats/influxdbbackend" -) diff --git a/go/mysql/auth_server_static.go b/go/mysql/auth_server_static.go index 34fcd7c884..290de7a2b8 100644 --- a/go/mysql/auth_server_static.go +++ b/go/mysql/auth_server_static.go @@ -72,6 +72,7 @@ type AuthServerStaticEntry struct { Password string UserData string SourceHost string + Groups []string } // InitAuthServerStatic Handles initializing the AuthServerStatic if necessary. @@ -147,8 +148,7 @@ func (a *AuthServerStatic) installSignalHandlers() { sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGHUP) go func() { - for { - <-sigChan + for range sigChan { a.loadConfigFromParams(*mysqlAuthServerStaticFile, "") } }() @@ -204,24 +204,24 @@ func (a *AuthServerStatic) ValidateHash(salt []byte, user string, authResponse [ a.mu.Unlock() if !ok { - return &StaticUserData{""}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range entries { if entry.MysqlNativePassword != "" { isPass := isPassScrambleMysqlNativePassword(authResponse, salt, entry.MysqlNativePassword) if matchSourceHost(remoteAddr, entry.SourceHost) && isPass { - return &StaticUserData{entry.UserData}, nil + return &StaticUserData{entry.UserData, entry.Groups}, nil } } else { computedAuthResponse := ScramblePassword(salt, []byte(entry.Password)) // Validate the password. if matchSourceHost(remoteAddr, entry.SourceHost) && bytes.Compare(authResponse, computedAuthResponse) == 0 { - return &StaticUserData{entry.UserData}, nil + return &StaticUserData{entry.UserData, entry.Groups}, nil } } } - return &StaticUserData{""}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) } // Negotiate is part of the AuthServer interface. @@ -239,15 +239,15 @@ func (a *AuthServerStatic) Negotiate(c *Conn, user string, remoteAddr net.Addr) a.mu.Unlock() if !ok { - return &StaticUserData{""}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range entries { // Validate the password. 
if matchSourceHost(remoteAddr, entry.SourceHost) && entry.Password == password { - return &StaticUserData{entry.UserData}, nil + return &StaticUserData{entry.UserData, entry.Groups}, nil } } - return &StaticUserData{""}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) } func matchSourceHost(remoteAddr net.Addr, targetSourceHost string) bool { @@ -264,12 +264,13 @@ func matchSourceHost(remoteAddr net.Addr, targetSourceHost string) bool { return false } -// StaticUserData holds the username +// StaticUserData holds the username and groups type StaticUserData struct { - value string + username string + groups []string } -// Get returns the wrapped username +// Get returns the wrapped username and groups func (sud *StaticUserData) Get() *querypb.VTGateCallerID { - return &querypb.VTGateCallerID{Username: sud.value} + return &querypb.VTGateCallerID{Username: sud.username, Groups: sud.groups} } diff --git a/go/mysql/auth_server_static_test.go b/go/mysql/auth_server_static_test.go index a1fd2809d1..ef4e9bd47b 100644 --- a/go/mysql/auth_server_static_test.go +++ b/go/mysql/auth_server_static_test.go @@ -42,17 +42,53 @@ func TestJsonConfigParser(t *testing.T) { t.Fatalf("mysql_user config size should be equal to 1") } // works with new format - jsonConfig = "{\"mysql_user\":[{\"Password\":\"123\", \"UserData\":\"dummy\", \"SourceHost\": \"localhost\"}, {\"Password\": \"123\", \"UserData\": \"mysql_user_all\"}]}" + jsonConfig = `{"mysql_user":[ + {"Password":"123", "UserData":"dummy", "SourceHost": "localhost"}, + {"Password": "123", "UserData": "mysql_user_all"}, + {"Password": "456", "UserData": "mysql_user_with_groups", "Groups": ["user_group"]} + ]}` err = parseConfig([]byte(jsonConfig), &config) if err != nil { t.Fatalf("should not get an error, but got: %v", err) } - if len(config["mysql_user"]) != 2 { - t.Fatalf("mysql_user config size should be equal to 2") + if len(config["mysql_user"]) != 3 { + t.Fatalf("mysql_user config size should be equal to 3") } if config["mysql_user"][0].SourceHost != "localhost" { - t.Fatalf("SourceHost should be equal localhost") + t.Fatalf("SourceHost should be equal to localhost") + } + + if len(config["mysql_user"][2].Groups) != 1 || config["mysql_user"][2].Groups[0] != "user_group" { + t.Fatalf("Groups should be equal to [\"user_group\"]") + } +} + +func TestValidateHashGetter(t *testing.T) { + jsonConfig := `{"mysql_user": [{"Password": "password", "UserData": "user.name", "Groups": ["user_group"]}]}` + + auth := NewAuthServerStatic() + auth.loadConfigFromParams("", jsonConfig) + ip := net.ParseIP("127.0.0.1") + addr := &net.IPAddr{IP: ip, Zone: ""} + + salt, err := NewSalt() + if err != nil { + t.Fatalf("error generating salt: %v", err) + } + + scrambled := ScramblePassword(salt, []byte("password")) + getter, err := auth.ValidateHash(salt, "mysql_user", scrambled, addr) + if err != nil { + t.Fatalf("error validating password: %v", err) + } + + callerId := getter.Get() + if callerId.Username != "user.name" { + t.Fatalf("getter username incorrect, expected \"user.name\", got %v", callerId.Username) + } + if len(callerId.Groups) != 1 || callerId.Groups[0] != "user_group" { + t.Fatalf("getter groups incorrect, expected [\"user_group\"], got %v", callerId.Groups) } } diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go index 3e3909f78f..dd0faa17c9 100644 --- a/go/mysql/binlog_event.go +++ 
b/go/mysql/binlog_event.go @@ -70,13 +70,13 @@ type BinlogEvent interface { // RBR events. - // IsTableMapEvent returns true if this is a TABLE_MAP_EVENT. + // IsTableMap returns true if this is a TABLE_MAP_EVENT. IsTableMap() bool - // IsWriteRowsEvent returns true if this is a WRITE_ROWS_EVENT. + // IsWriteRows returns true if this is a WRITE_ROWS_EVENT. IsWriteRows() bool - // IsUpdateRowsEvent returns true if this is a UPDATE_ROWS_EVENT. + // IsUpdateRows returns true if this is a UPDATE_ROWS_EVENT. IsUpdateRows() bool - // IsDeleteRowsEvent returns true if this is a DELETE_ROWS_EVENT. + // IsDeleteRows returns true if this is a DELETE_ROWS_EVENT. IsDeleteRows() bool // Timestamp returns the timestamp from the event header. diff --git a/go/mysql/binlog_event_make.go b/go/mysql/binlog_event_make.go index d5250fa05d..b898c0a4dc 100644 --- a/go/mysql/binlog_event_make.go +++ b/go/mysql/binlog_event_make.go @@ -127,7 +127,7 @@ func NewFormatDescriptionEvent(f BinlogFormat, s *FakeBinlogStream) BinlogEvent 1 // (undocumented) checksum algorithm data := make([]byte, length) binary.LittleEndian.PutUint16(data[0:2], f.FormatVersion) - copy(data[2:52], []byte(f.ServerVersion)) + copy(data[2:52], f.ServerVersion) binary.LittleEndian.PutUint32(data[52:56], s.Timestamp) data[56] = f.HeaderLength copy(data[57:], f.HeaderSizes) @@ -197,7 +197,7 @@ func NewQueryEvent(f BinlogFormat, s *FakeBinlogStream, q Query) BinlogEvent { data[pos+6] = byte(q.Charset.Server >> 8) pos += 7 } - pos += copy(data[pos:pos+len(q.Database)], []byte(q.Database)) + pos += copy(data[pos:pos+len(q.Database)], q.Database) data[pos] = 0 pos++ copy(data[pos:], q.SQL) @@ -310,11 +310,11 @@ func NewTableMapEvent(f BinlogFormat, s *FakeBinlogStream, tableID uint64, tm *T data[6] = byte(tm.Flags) data[7] = byte(tm.Flags >> 8) data[8] = byte(len(tm.Database)) - pos := 6 + 2 + 1 + copy(data[9:], []byte(tm.Database)) + pos := 6 + 2 + 1 + copy(data[9:], tm.Database) data[pos] = 0 pos++ data[pos] = byte(len(tm.Name)) - pos += 1 + copy(data[pos+1:], []byte(tm.Name)) + pos += 1 + copy(data[pos+1:], tm.Name) data[pos] = 0 pos++ diff --git a/go/mysql/binlog_event_rbr.go b/go/mysql/binlog_event_rbr.go index 7e61f83b20..a8ec52dd05 100644 --- a/go/mysql/binlog_event_rbr.go +++ b/go/mysql/binlog_event_rbr.go @@ -853,14 +853,26 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ max := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) // Length is encoded in 1 or 2 bytes. if max > 255 { + // This code path exists due to https://bugs.mysql.com/bug.php?id=37426. + // CHAR types need to allocate 3 bytes per char. So, the length for CHAR(255) + // cannot be represented in 1 byte. This also means that this rule does not + // apply to BINARY data. l := int(uint64(data[pos]) | uint64(data[pos+1])<<8) return sqltypes.MakeTrusted(querypb.Type_VARCHAR, data[pos+2:pos+2+l]), l + 2, nil } l := int(data[pos]) - return sqltypes.MakeTrusted(querypb.Type_VARCHAR, - data[pos+1:pos+1+l]), l + 1, nil + mdata := data[pos+1 : pos+1+l] + if sqltypes.IsBinary(styp) { + // Fixed length binaries have to be padded with zeroes + // up to the length of the field. Otherwise, equality checks + // fail against saved data. See https://github.com/vitessio/vitess/issues/3984. 
+ ret := make([]byte, max) + copy(ret, mdata) + return sqltypes.MakeTrusted(querypb.Type_BINARY, ret), l + 1, nil + } + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, mdata), l + 1, nil case TypeGeometry: l := 0 diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 8f24ae5355..e14ffa941d 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/sync2" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" ) const ( @@ -723,65 +724,28 @@ func (c *Conn) handleNextCommand(handler Handler) error { queryStart := time.Now() query := c.parseComQuery(data) c.recycleReadPacket() - fieldSent := false - // sendFinished is set if the response should just be an OK packet. - sendFinished := false - err := handler.ComQuery(c, query, func(qr *sqltypes.Result) error { - if sendFinished { - // Failsafe: Unreachable if server is well-behaved. - return io.EOF - } - - if !fieldSent { - fieldSent = true - - if len(qr.Fields) == 0 { - sendFinished = true - - // A successful callback with no fields means that this was a - // DML or other write-only operation. - // - // We should not send any more packets after this, but make sure - // to extract the affected rows and last insert id from the result - // struct here since clients expect it. - return c.writeOKPacket(qr.RowsAffected, qr.InsertID, c.StatusFlags, handler.WarningCount(c)) + var queries []string + if c.Capabilities&CapabilityClientMultiStatements != 0 { + queries, err = sqlparser.SplitStatementToPieces(query) + if err != nil { + log.Errorf("Conn %v: Error splitting query: %v", c, err) + if werr := c.writeErrorPacketFromError(err); werr != nil { + // If we can't even write the error, we're done. + log.Errorf("Conn %v: Error writing query error: %v", c, werr) + return werr } - if err := c.writeFields(qr); err != nil { - return err - } - } - - return c.writeRows(qr) - }) - - // If no field was sent, we expect an error. - if !fieldSent { - // This is just a failsafe. Should never happen. - if err == nil || err == io.EOF { - err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) - } - if werr := c.writeErrorPacketFromError(err); werr != nil { - // If we can't even write the error, we're done. - log.Errorf("Error writing query error to %s: %v", c, werr) - return werr } } else { - if err != nil { - // We can't send an error in the middle of a stream. - // All we can do is abort the send, which will cause a 2013. - log.Errorf("Error in the middle of a stream to %s: %v", c, err) - return err + queries = []string{query} + } + for index, sql := range queries { + more := false + if index != len(queries)-1 { + more = true } - - // Send the end packet only sendFinished is false (results were streamed). - // In this case the affectedRows and lastInsertID are always 0 since it - // was a read operation. 
- if !sendFinished { - if err := c.writeEndResult(false, 0, 0, handler.WarningCount(c)); err != nil { - log.Errorf("Error writing result to %s: %v", c, err) - return err - } + if err := c.execQuery(sql, handler, more); err != nil { + return err } } @@ -807,14 +771,16 @@ func (c *Conn) handleNextCommand(handler Handler) error { } } case ComSetOption: - if operation, ok := c.parseComSetOption(data); ok { + operation, ok := c.parseComSetOption(data) + c.recycleReadPacket() + if ok { switch operation { case 0: c.Capabilities |= CapabilityClientMultiStatements case 1: c.Capabilities &^= CapabilityClientMultiStatements default: - log.Errorf("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data) + log.Errorf("Got unhandled packet (ComSetOption default) from client %v, returning error: %v", c.ConnectionID, data) if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "error handling packet: %v", data); err != nil { log.Errorf("Error writing error packet to client: %v", err) return err @@ -825,14 +791,14 @@ func (c *Conn) handleNextCommand(handler Handler) error { return err } } else { - log.Errorf("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data) + log.Errorf("Got unhandled packet (ComSetOption else) from client %v, returning error: %v", c.ConnectionID, data) if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "error handling packet: %v", data); err != nil { log.Errorf("Error writing error packet to client: %v", err) return err } } default: - log.Errorf("Got unhandled packet from %s, returning error: %v", c, data) + log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data) c.recycleReadPacket() if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "command handling not implemented yet: %v", data[0]); err != nil { log.Errorf("Error writing error packet to %s: %s", c, err) @@ -843,6 +809,76 @@ func (c *Conn) handleNextCommand(handler Handler) error { return nil } +func (c *Conn) execQuery(query string, handler Handler, more bool) error { + fieldSent := false + // sendFinished is set if the response should just be an OK packet. + sendFinished := false + + err := handler.ComQuery(c, query, func(qr *sqltypes.Result) error { + flag := c.StatusFlags + if more { + flag |= ServerMoreResultsExists + } + if sendFinished { + // Failsafe: Unreachable if server is well-behaved. + return io.EOF + } + + if !fieldSent { + fieldSent = true + + if len(qr.Fields) == 0 { + sendFinished = true + + // A successful callback with no fields means that this was a + // DML or other write-only operation. + // + // We should not send any more packets after this, but make sure + // to extract the affected rows and last insert id from the result + // struct here since clients expect it. + return c.writeOKPacket(qr.RowsAffected, qr.InsertID, flag, handler.WarningCount(c)) + } + if err := c.writeFields(qr); err != nil { + return err + } + } + + return c.writeRows(qr) + }) + + // If no field was sent, we expect an error. + if !fieldSent { + // This is just a failsafe. Should never happen. + if err == nil || err == io.EOF { + err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) + } + if werr := c.writeErrorPacketFromError(err); werr != nil { + // If we can't even write the error, we're done. + log.Errorf("Error writing query error to %s: %v", c, werr) + return werr + } + } else { + if err != nil { + // We can't send an error in the middle of a stream. 
+ // All we can do is abort the send, which will cause a 2013. + log.Errorf("Error in the middle of a stream to %s: %v", c, err) + return err + } + + // Send the end packet only sendFinished is false (results were streamed). + // In this case the affectedRows and lastInsertID are always 0 since it + // was a read operation. + if !sendFinished { + if err := c.writeEndResult(more, 0, 0, handler.WarningCount(c)); err != nil { + log.Errorf("Error writing result to %s: %v", c, err) + return err + } + } + } + + return nil +} + // // Packet parsing methods, for generic packets. // diff --git a/go/mysql/endtoend/client_test.go b/go/mysql/endtoend/client_test.go index 0a19094495..569f4c9ae5 100644 --- a/go/mysql/endtoend/client_test.go +++ b/go/mysql/endtoend/client_test.go @@ -150,11 +150,6 @@ func doTestMultiResult(t *testing.T, disableClientDeprecateEOF bool) { expectNoError(t, err) defer conn.Close() - connParams.DisableClientDeprecateEOF = false - - expectFlag(t, "Negotiated ClientDeprecateEOF flag", (conn.Capabilities&mysql.CapabilityClientDeprecateEOF) != 0, !disableClientDeprecateEOF) - defer conn.Close() - qr, more, err := conn.ExecuteFetchMulti("select 1 from dual; set autocommit=1; select 1 from dual", 10, true) expectNoError(t, err) expectFlag(t, "ExecuteMultiFetch(multi result)", more, true) diff --git a/go/mysql/endtoend/query_test.go b/go/mysql/endtoend/query_test.go index 185a6de562..8621c1bb87 100644 --- a/go/mysql/endtoend/query_test.go +++ b/go/mysql/endtoend/query_test.go @@ -247,11 +247,6 @@ func doTestWarnings(t *testing.T, disableClientDeprecateEOF bool) { expectNoError(t, err) defer conn.Close() - connParams.DisableClientDeprecateEOF = false - - expectFlag(t, "Negotiated ClientDeprecateEOF flag", (conn.Capabilities&mysql.CapabilityClientDeprecateEOF) != 0, !disableClientDeprecateEOF) - defer conn.Close() - result, err := conn.ExecuteFetch("create table a(id int, val int not null, primary key(id))", 0, false) if err != nil { t.Fatalf("create table failed: %v", err) diff --git a/go/mysql/endtoend/schema_test.go b/go/mysql/endtoend/schema_test.go index 3467a4ffba..49ed267547 100644 --- a/go/mysql/endtoend/schema_test.go +++ b/go/mysql/endtoend/schema_test.go @@ -52,10 +52,9 @@ func testDescribeTable(t *testing.T) { t.Fatal(err) } - // MariaDB has '81' instead of '90' of Extra ColumnLength. - // Just try it and see if it's the only difference. - if conn.IsMariaDB() && result.Fields[5].ColumnLength == 81 { - result.Fields[5].ColumnLength = 90 + // Zero-out the column lengths, because they can't be compared. + for i := range result.Fields { + result.Fields[i].ColumnLength = 0 } if !sqltypes.FieldsEqual(result.Fields, mysql.DescribeTableFields) { diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index 69cf7434b9..f0a9a53a20 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -45,6 +45,10 @@ type flavor interface { // startSlave returns the command to start the slave. startSlaveCommand() string + // startSlaveUntilAfter will restart replication, but only allow it + // to run until `pos` is reached. After reaching pos, replication will be stopped again + startSlaveUntilAfter(pos Position) string + // stopSlave returns the command to stop the slave. stopSlaveCommand() string @@ -146,6 +150,11 @@ func (c *Conn) StartSlaveCommand() string { return c.flavor.startSlaveCommand() } +// StartSlaveUntilAfterCommand returns the command to start the slave. 
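When the client has negotiated CapabilityClientMultiStatements, handleNextCommand now splits the COM_QUERY payload with sqlparser.SplitStatementToPieces and runs each piece through the new execQuery helper, setting the ServerMoreResultsExists status flag on every result except the last. A minimal sketch of that flow, assuming the vitess sqlparser package referenced in the diff is importable; it is an illustration, not the server code itself:

```go
package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// A multi-statement payload as a client would send it in a single COM_QUERY.
	query := "select 1 from dual; set autocommit=1; select 2 from dual"

	pieces, err := sqlparser.SplitStatementToPieces(query)
	if err != nil {
		fmt.Println("split error:", err)
		return
	}

	for i, sql := range pieces {
		// Mirrors the loop in handleNextCommand: every statement except the
		// last one is answered with the "more results exist" flag set.
		more := i != len(pieces)-1
		fmt.Printf("statement %d: %q (more=%v)\n", i, sql, more)
	}
}
```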
+func (c *Conn) StartSlaveUntilAfterCommand(pos Position) string { + return c.flavor.startSlaveUntilAfter(pos) +} + // StopSlaveCommand returns the command to stop the slave. func (c *Conn) StopSlaveCommand() string { return c.flavor.stopSlaveCommand() diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go index 7374f52d47..329b6a04b5 100644 --- a/go/mysql/flavor_mariadb.go +++ b/go/mysql/flavor_mariadb.go @@ -41,6 +41,10 @@ func (mariadbFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { return parseMariadbGTIDSet(qr.Rows[0][0].ToString()) } +func (mariadbFlavor) startSlaveUntilAfter(pos Position) string { + return fmt.Sprintf("START SLAVE UNTIL master_gtid_pos = \"%s\"", pos) +} + func (mariadbFlavor) startSlaveCommand() string { return "START SLAVE" } diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index d8b8eab6c9..3a3fd99400 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -43,6 +43,10 @@ func (mysqlFlavor) startSlaveCommand() string { return "START SLAVE" } +func (mysqlFlavor) startSlaveUntilAfter(pos Position) string { + return fmt.Sprintf("START SLAVE UNTIL SQL_AFTER_GTIDS = '%s'", pos) +} + func (mysqlFlavor) stopSlaveCommand() string { return "STOP SLAVE" } diff --git a/go/mysql/schema.go b/go/mysql/schema.go index 3fc122c29b..6de7d42b6f 100644 --- a/go/mysql/schema.go +++ b/go/mysql/schema.go @@ -34,6 +34,8 @@ import ( // DescribeTableFields contains the fields returned by a // 'describe ' command. They are validated by the testDescribeTable // test. +// Column lengths returned seem to differ between versions. So, we +// don't compare them. var DescribeTableFields = []*querypb.Field{ { Name: "Field", @@ -42,7 +44,7 @@ var DescribeTableFields = []*querypb.Field{ OrgTable: "COLUMNS", Database: "information_schema", OrgName: "COLUMN_NAME", - ColumnLength: 192, + ColumnLength: 0, Charset: 33, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), }, @@ -53,7 +55,7 @@ var DescribeTableFields = []*querypb.Field{ OrgTable: "COLUMNS", Database: "information_schema", OrgName: "COLUMN_TYPE", - ColumnLength: 589815, + ColumnLength: 0, Charset: 33, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_BLOB_FLAG), }, @@ -64,7 +66,7 @@ var DescribeTableFields = []*querypb.Field{ OrgTable: "COLUMNS", Database: "information_schema", OrgName: "IS_NULLABLE", - ColumnLength: 9, + ColumnLength: 0, Charset: 33, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), }, @@ -75,7 +77,7 @@ var DescribeTableFields = []*querypb.Field{ OrgTable: "COLUMNS", Database: "information_schema", OrgName: "COLUMN_KEY", - ColumnLength: 9, + ColumnLength: 0, Charset: 33, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), }, @@ -86,7 +88,7 @@ var DescribeTableFields = []*querypb.Field{ OrgTable: "COLUMNS", Database: "information_schema", OrgName: "COLUMN_DEFAULT", - ColumnLength: 589815, + ColumnLength: 0, Charset: 33, Flags: uint32(querypb.MySqlFlag_BLOB_FLAG), }, @@ -97,7 +99,7 @@ var DescribeTableFields = []*querypb.Field{ OrgTable: "COLUMNS", Database: "information_schema", OrgName: "EXTRA", - ColumnLength: 90, + ColumnLength: 0, Charset: 33, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), }, diff --git a/go/stats/export.go b/go/stats/export.go index b5e516cf97..8bac7618c0 100644 --- a/go/stats/export.go +++ b/go/stats/export.go @@ -41,7 +41,7 @@ import ( var emitStats = flag.Bool("emit_stats", false, "true iff we should emit stats to push-based monitoring/stats backends") var statsEmitPeriod = flag.Duration("stats_emit_period", time.Duration(60*time.Second), 
"Interval between emitting stats to all registered backends") -var statsBackend = flag.String("stats_backend", "influxdb", "The name of the registered push-based monitoring/stats backend to use") +var statsBackend = flag.String("stats_backend", "", "The name of the registered push-based monitoring/stats backend to use") // NewVarHook is the type of a hook to export variables in a different way type NewVarHook func(name string, v expvar.Var) diff --git a/go/stats/influxdbbackend/influxdb_backend.go b/go/stats/influxdbbackend/influxdb_backend.go deleted file mode 100644 index 54a4e41a82..0000000000 --- a/go/stats/influxdbbackend/influxdb_backend.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package influxdbbackend is useful for publishing metrics to an InfluxDB backend (tested on v0.88). -// It requires a database to already have been created in InfluxDB, and then specified via the -// "--influxdb_database" flag. -// -// It's still a work in progress, as it publishes almost all stats as key-value string pairs, -// instead of better JSON representations. This limitation will hopefully be fixed after the -// release of InfluxDB v0.9, as it has better support for arbitrary metadata dicts in the -// form of tags. -package influxdbbackend - -import ( - "expvar" - "flag" - - influxClient "github.com/influxdb/influxdb/client" - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" -) - -var influxDBHost = flag.String("influxdb_host", "localhost:8086", "the influxdb host (with port)") -var influxDBDatabase = flag.String("influxdb_database", "vitess", "the name of the influxdb database") -var influxDBUsername = flag.String("influxdb_username", "root", "influxdb username") -var influxDBPassword = flag.String("influxdb_password", "root", "influxdb password") - -// InfluxDBBackend implements stats.PushBackend -type InfluxDBBackend struct { - client *influxClient.Client -} - -// init attempts to create a singleton InfluxDBBackend and register it as a PushBackend. -// If it fails to create one, this is a noop. -func init() { - // Needs to happen in servenv.OnRun() instead of init because it requires flag parsing and logging - servenv.OnRun(func() { - config := &influxClient.ClientConfig{ - Host: *influxDBHost, - Username: *influxDBUsername, - Password: *influxDBPassword, - Database: *influxDBDatabase, - } - client, err := influxClient.NewClient(config) - if err != nil { - log.Errorf("Unable to create an InfluxDB client: %v", err) - return - } - - stats.RegisterPushBackend("influxdb", &InfluxDBBackend{ - client: client, - }) - }) -} - -// PushAll pushes all expvar stats to InfluxDB -func (backend *InfluxDBBackend) PushAll() error { - series := []*influxClient.Series{} - expvar.Do(func(kv expvar.KeyValue) { - series = append(series, &influxClient.Series{ - Name: "stats", - // TODO(aaijazi): This would be much better suited to InfluxDB v0.90's tags. 
- // Ideally, we'd use some of the expvars as tags, and some as values. - // However, as of 03/11/2015, InfluxDB v0.90 hasn't proven quite stable enough to use. - Columns: []string{"key", "value"}, - Points: [][]interface{}{ - {kv.Key, statToValue(kv.Value)}, - }, - }) - }) - err := backend.client.WriteSeries(series) - return err -} - -// statToValue converts from a stats.Stat type to a JSON representable value. -// This is preferred to just calling the String() for things like numbers, so that -// InfluxDB can also represent the metrics as numbers. -// TODO(aaijazi): this needs to be extended to support better serialization of other types.. -// It's probably good to do this after InfluxDB 0.9 is released, as it has has better support -// for arbitrary dict values (as tags). -func statToValue(v expvar.Var) interface{} { - switch v := v.(type) { - case *stats.Counter: - return v.Get() - case stats.FloatFunc: - return v() - default: - return v.String() - } -} diff --git a/go/sync2/batcher.go b/go/sync2/batcher.go index 161531d927..8e9e14434d 100644 --- a/go/sync2/batcher.go +++ b/go/sync2/batcher.go @@ -32,6 +32,7 @@ type Batcher struct { queue chan int waiters AtomicInt32 nextID AtomicInt32 + after func(time.Duration) <-chan time.Time } // NewBatcher returns a new Batcher @@ -41,6 +42,19 @@ func NewBatcher(interval time.Duration) *Batcher { queue: make(chan int), waiters: NewAtomicInt32(0), nextID: NewAtomicInt32(0), + after: time.After, + } +} + +// newBatcherForTest returns a Batcher for testing where time.After can +// be replaced by a fake alternative. +func newBatcherForTest(interval time.Duration, after func(time.Duration) <-chan time.Time) *Batcher { + return &Batcher{ + interval: interval, + queue: make(chan int), + waiters: NewAtomicInt32(0), + nextID: NewAtomicInt32(0), + after: after, } } @@ -56,7 +70,7 @@ func (b *Batcher) Wait() int { // newBatch starts a new batch func (b *Batcher) newBatch() { go func() { - time.Sleep(b.interval) + <-b.after(b.interval) id := b.nextID.Add(1) diff --git a/go/sync2/batcher_flaky_test.go b/go/sync2/batcher_flaky_test.go deleted file mode 100644 index 5fe89989c5..0000000000 --- a/go/sync2/batcher_flaky_test.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sync2 - -import ( - "testing" - "time" -) - -func expectBatch(testcase string, b *Batcher, want int, t *testing.T) { - id := b.Wait() - if id != want { - t.Errorf("%s: got %d, want %d", testcase, id, want) - } -} - -func TestBatcher(t *testing.T) { - interval := time.Duration(50 * time.Millisecond) - b := NewBatcher(interval) - - // test single waiter - go expectBatch("single waiter", b, 1, t) - time.Sleep(interval * 2) - - // multiple waiters all at once - go expectBatch("concurrent waiter", b, 2, t) - go expectBatch("concurrent waiter", b, 2, t) - go expectBatch("concurrent waiter", b, 2, t) - time.Sleep(interval * 2) - - // stagger the waiters out in time but cross two intervals - go expectBatch("staggered waiter", b, 3, t) - time.Sleep(interval / 5) - go expectBatch("staggered waiter", b, 3, t) - time.Sleep(interval / 5) - go expectBatch("staggered waiter", b, 3, t) - time.Sleep(interval / 5) - go expectBatch("staggered waiter", b, 3, t) - time.Sleep(interval / 5) - go expectBatch("staggered waiter", b, 3, t) - time.Sleep(interval / 5) - - go expectBatch("staggered waiter 2", b, 4, t) - time.Sleep(interval / 5) - go expectBatch("staggered waiter 2", b, 4, t) - time.Sleep(interval / 5) - go expectBatch("staggered waiter 2", b, 4, t) - time.Sleep(interval / 5) - go expectBatch("staggered waiter 2", b, 4, t) - time.Sleep(interval / 5) - go expectBatch("staggered waiter 2", b, 4, t) - time.Sleep(interval / 5) - - time.Sleep(interval * 2) -} diff --git a/go/sync2/batcher_test.go b/go/sync2/batcher_test.go new file mode 100644 index 0000000000..6e90c33850 --- /dev/null +++ b/go/sync2/batcher_test.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sync2 + +import ( + "testing" + "time" +) + +// makeAfterFnWithLatch returns a fake alternative to time.After that blocks until +// the release function is called. The fake doesn't support having multiple concurrent +// calls to the After function, which is ok because Batcher should never do that. 
+func makeAfterFnWithLatch(t *testing.T) (func(time.Duration) <-chan time.Time, func()) { + latch := make(chan time.Time, 1) + afterFn := func(d time.Duration) <-chan time.Time { + return latch + } + + releaseFn := func() { + select { + case latch <- time.Now(): + default: + t.Errorf("Previous batch still hasn't been released") + } + } + return afterFn, releaseFn +} + +func TestBatcher(t *testing.T) { + interval := time.Duration(50 * time.Millisecond) + + afterFn, releaseBatch := makeAfterFnWithLatch(t) + b := newBatcherForTest(interval, afterFn) + + waitersFinished := NewAtomicInt32(0) + + startWaiter := func(testcase string, want int) { + go func() { + id := b.Wait() + if id != want { + t.Errorf("%s: got %d, want %d", testcase, id, want) + } + waitersFinished.Add(1) + }() + } + + awaitVal := func(name string, val *AtomicInt32, expected int32) { + for count := 0; val.Get() != expected; count++ { + time.Sleep(50 * time.Millisecond) + if count > 5 { + t.Errorf("Timed out waiting for %s to be %v", name, expected) + return + } + } + } + + awaitBatch := func(name string, n int32) { + // Wait for all the waiters to register + awaitVal("Batcher.waiters for "+name, &b.waiters, n) + // Release the batch and wait for the batcher to catch up. + if waitersFinished.Get() != 0 { + t.Errorf("Waiters finished before being released") + } + releaseBatch() + awaitVal("Batcher.waiters for "+name, &b.waiters, 0) + // Make sure the waiters actually run so they can verify their batch number. + awaitVal("waitersFinshed for "+name, &waitersFinished, n) + waitersFinished.Set(0) + } + + // test single waiter + startWaiter("single waiter", 1) + awaitBatch("single waiter", 1) + + // multiple waiters all at once + startWaiter("concurrent waiter", 2) + startWaiter("concurrent waiter", 2) + startWaiter("concurrent waiter", 2) + awaitBatch("concurrent waiter", 3) + + startWaiter("more waiters", 3) + startWaiter("more waiters", 3) + startWaiter("more waiters", 3) + startWaiter("more waiters", 3) + startWaiter("more waiters", 3) + awaitBatch("more waiters", 5) +} diff --git a/go/testfiles/locate.go b/go/testfiles/locate.go deleted file mode 100644 index 6ba6b32afa..0000000000 --- a/go/testfiles/locate.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package testfiles locates test files within the Vitess directory tree. -// It also handles test port allocation. -package testfiles - -import ( - "fmt" - "os" - "path" - "path/filepath" -) - -// Locate returns a file path that came from $VTROOT/data/test. -func Locate(filename string) string { - vtroot := os.Getenv("VTROOT") - if vtroot == "" { - panic(fmt.Errorf("VTROOT is not set")) - } - return path.Join(vtroot, "data", "test", filename) -} - -// Glob returns all files matching a pattern in $VTROOT/data/test. 
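The Batcher change above replaces the hard-coded time.Sleep with an injected after function (time.After in production), which is what lets the rewritten test release each batch explicitly instead of sleeping and hoping the timing works out. A self-contained sketch of the same clock-injection pattern, using a hypothetical type rather than the one in go/sync2:

```go
package main

import (
	"fmt"
	"time"
)

// batcher is a stand-in for any type that needs a timer it can fake in tests.
type batcher struct {
	after func(time.Duration) <-chan time.Time // time.After in production
}

// runOneBatch waits for the (real or fake) timer to fire, then calls done.
func (b *batcher) runOneBatch(interval time.Duration, done func()) {
	go func() {
		<-b.after(interval)
		done()
	}()
}

func main() {
	fired := make(chan time.Time, 1)
	fake := &batcher{after: func(time.Duration) <-chan time.Time { return fired }}

	doneCh := make(chan struct{})
	fake.runOneBatch(time.Hour, func() { close(doneCh) })

	fired <- time.Now() // release the "timer" explicitly; no sleeping needed
	<-doneCh
	fmt.Println("batch released deterministically")
}
```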
-func Glob(pattern string) []string { - vtroot := os.Getenv("VTROOT") - if vtroot == "" { - panic(fmt.Errorf("VTROOT is not set")) - } - dir := path.Join(vtroot, "data", "test") - if exists, err := exists(dir); !exists { - panic(err) - } - resolved := path.Join(dir, pattern) - out, err := filepath.Glob(resolved) - if err != nil { - panic(err) - } - return out -} - -func exists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, err - } - return false, err -} diff --git a/go/vt/automation/wait_for_filtered_replication_task.go b/go/vt/automation/wait_for_filtered_replication_task.go index 63d910f4d5..c820a75801 100644 --- a/go/vt/automation/wait_for_filtered_replication_task.go +++ b/go/vt/automation/wait_for_filtered_replication_task.go @@ -23,7 +23,7 @@ import ( ) // WaitForFilteredReplicationTask runs vtctl WaitForFilteredReplication to block until the destination master -// (i.e. the receiving side of the filtered replication) has caught up up to max_delay with the source shard. +// (i.e. the receiving side of the filtered replication) has caught up to max_delay with the source shard. type WaitForFilteredReplicationTask struct { } diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index b76fa52f42..f66256cc66 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -170,7 +170,7 @@ func NewBinlogPlayerTables(dbClient DBClient, tablet *topodatapb.Tablet, tables // If an error is encountered, it updates the vreplication state to "Error". // If a stop position was specifed, and reached, the state is updated to "Stopped". func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { - if err := setVReplicationState(blp.dbClient, blp.uid, BlpRunning, ""); err != nil { + if err := SetVReplicationState(blp.dbClient, blp.uid, BlpRunning, ""); err != nil { log.Errorf("Error writing Running state: %v", err) } @@ -180,7 +180,7 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { Time: time.Now(), Message: msg, }) - if err := setVReplicationState(blp.dbClient, blp.uid, BlpError, msg); err != nil { + if err := SetVReplicationState(blp.dbClient, blp.uid, BlpError, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } return err @@ -191,7 +191,7 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { // applyEvents returns a recordable status message on termination or an error otherwise. func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { // Read starting values for vreplication. 
- pos, stopPos, maxTPS, maxReplicationLag, err := readVRSettings(blp.dbClient, blp.uid) + pos, stopPos, maxTPS, maxReplicationLag, err := ReadVRSettings(blp.dbClient, blp.uid) if err != nil { log.Error(err) return err @@ -233,8 +233,8 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { } else { log.Infof("BinlogPlayer client %v for keyrange '%v-%v' starting @ '%v', server: %v", blp.uid, - hex.EncodeToString(blp.keyRange.Start), - hex.EncodeToString(blp.keyRange.End), + hex.EncodeToString(blp.keyRange.GetStart()), + hex.EncodeToString(blp.keyRange.GetEnd()), blp.position, blp.tablet, ) @@ -244,14 +244,14 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { case blp.position.Equal(blp.stopPosition): msg := fmt.Sprintf("not starting BinlogPlayer, we're already at the desired position %v", blp.stopPosition) log.Info(msg) - if err := setVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil { + if err := SetVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } return nil case blp.position.AtLeast(blp.stopPosition): msg := fmt.Sprintf("starting point %v greater than stopping point %v", blp.position, blp.stopPosition) log.Error(msg) - if err := setVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil { + if err := SetVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } // Don't return an error. Otherwise, it will keep retrying. @@ -351,7 +351,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { if blp.position.AtLeast(blp.stopPosition) { msg := "Reached stopping position, done playing logs" log.Info(msg) - if err := setVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil { + if err := SetVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } return nil @@ -447,7 +447,7 @@ func (blp *BinlogPlayer) writeRecoveryPosition(tx *binlogdatapb.BinlogTransactio } now := time.Now().Unix() - updateRecovery := updateVReplicationPos(blp.uid, position, now, tx.EventToken.Timestamp) + updateRecovery := GenerateUpdatePos(blp.uid, position, now, tx.EventToken.Timestamp) qr, err := blp.exec(updateRecovery) if err != nil { @@ -503,8 +503,8 @@ func CreateVReplicationTable() []string { ) ENGINE=InnoDB`} } -// setVReplicationState updates the state in the _vt.vreplication table. -func setVReplicationState(dbClient DBClient, uid uint32, state, message string) error { +// SetVReplicationState updates the state in the _vt.vreplication table. +func SetVReplicationState(dbClient DBClient, uid uint32, state, message string) error { query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state, encodeString(message), uid) if _, err := dbClient.ExecuteFetch(query, 1); err != nil { return fmt.Errorf("could not set state: %v: %v", query, err) @@ -512,9 +512,9 @@ func setVReplicationState(dbClient DBClient, uid uint32, state, message string) return nil } -// readVRSettings retrieves the throttler settings for +// ReadVRSettings retrieves the throttler settings for // vreplication from the checkpoint table. 
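The renames above (SetVReplicationState, ReadVRSettings, GenerateUpdatePos) export the vreplication helpers so that code outside the binlogplayer package can build the same _vt.vreplication queries; the new ReadVReplicationStatus helper added just below follows the same pattern. An illustrative sketch of how a caller might use the exported query builders, assuming the vitess module is on the import path (the expected strings match the tests in this diff):

```go
package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/binlog/binlogplayer"
)

func main() {
	// Prints: select pos from _vt.vreplication where id=42
	fmt.Println(binlogplayer.ReadVReplicationPos(42))

	// Prints: select pos, state, message from _vt.vreplication where id=42
	fmt.Println(binlogplayer.ReadVReplicationStatus(42))
}
```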
-func readVRSettings(dbClient DBClient, uid uint32) (pos, stopPos string, maxTPS, maxReplicationLag int64, err error) { +func ReadVRSettings(dbClient DBClient, uid uint32) (pos, stopPos string, maxTPS, maxReplicationLag int64, err error) { query := fmt.Sprintf("select pos, stop_pos, max_tps, max_replication_lag from _vt.vreplication where id=%v", uid) qr, err := dbClient.ExecuteFetch(query, 1) if err != nil { @@ -554,9 +554,9 @@ func CreateVReplicationStopped(workflow string, source *binlogdatapb.BinlogSourc encodeString(workflow), encodeString(source.String()), encodeString(position), throttler.MaxRateModuleDisabled, throttler.ReplicationLagModuleDisabled, time.Now().Unix(), BlpStopped) } -// updateVReplicationPos returns a statement to update a value in the +// GenerateUpdatePos returns a statement to update a value in the // _vt.vreplication table. -func updateVReplicationPos(uid uint32, pos mysql.Position, timeUpdated int64, txTimestamp int64) string { +func GenerateUpdatePos(uid uint32, pos mysql.Position, timeUpdated int64, txTimestamp int64) string { if txTimestamp != 0 { return fmt.Sprintf( "update _vt.vreplication set pos=%v, time_updated=%v, transaction_timestamp=%v where id=%v", @@ -601,11 +601,17 @@ func encodeString(in string) string { } // ReadVReplicationPos returns a statement to query the gtid for a -// given shard from the _vt.vreplication table. +// given stream from the _vt.vreplication table. func ReadVReplicationPos(index uint32) string { return fmt.Sprintf("select pos from _vt.vreplication where id=%v", index) } +// ReadVReplicationStatus returns a statement to query the status fields for a +// given stream from the _vt.vreplication table. +func ReadVReplicationStatus(index uint32) string { + return fmt.Sprintf("select pos, state, message from _vt.vreplication where id=%v", index) +} + // StatsHistoryRecord is used to store a Message with timestamp type StatsHistoryRecord struct { Time time.Time diff --git a/go/vt/binlog/binlogplayer/binlog_player_test.go b/go/vt/binlog/binlogplayer/binlog_player_test.go index 8f4376f821..4493dc33a3 100644 --- a/go/vt/binlog/binlogplayer/binlog_player_test.go +++ b/go/vt/binlog/binlogplayer/binlog_player_test.go @@ -355,7 +355,7 @@ func TestUpdateVReplicationPos(t *testing.T) { "set pos='MariaDB/0-1-8283', time_updated=88822 " + "where id=78522" - got := updateVReplicationPos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0) + got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0) if got != want { t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want) } @@ -367,7 +367,7 @@ func TestUpdateVReplicationTimestamp(t *testing.T) { "set pos='MariaDB/0-2-582', time_updated=88822, transaction_timestamp=481828 " + "where id=78522" - got := updateVReplicationPos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828) + got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828) if got != want { t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want) } @@ -377,6 +377,14 @@ func TestReadVReplicationPos(t *testing.T) { want := "select pos from _vt.vreplication where id=482821" got := ReadVReplicationPos(482821) if got != want { - t.Errorf("ReadVReplicationThrottlerSettings(482821) = %#v, want %#v", got, want) + t.Errorf("ReadVReplicationPos(482821) = %#v, want %#v", got, want) + } +} + +func TestReadVReplicationStatus(t *testing.T) { + want := "select pos, state, message from _vt.vreplication where id=482821" + got := ReadVReplicationStatus(482821) + if got != 
want { + t.Errorf("ReadVReplicationStatus(482821) = %#v, want %#v", got, want) } } diff --git a/go/vt/binlog/binlogplayertest/player.go b/go/vt/binlog/binlogplayertest/player.go index 5c6a76cd94..c8c0155a8d 100644 --- a/go/vt/binlog/binlogplayertest/player.go +++ b/go/vt/binlog/binlogplayertest/player.go @@ -96,7 +96,7 @@ var testBinlogTransaction = &binlogdatapb.BinlogTransaction{ }, } -// StreamKeyRange is part of the the UpdateStream interface +// StreamKeyRange is part of the UpdateStream interface func (fake *FakeBinlogStreamer) StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset, callback func(reply *binlogdatapb.BinlogTransaction) error) error { if fake.panics { panic(fmt.Errorf("test-triggered panic")) @@ -162,7 +162,7 @@ var testTablesRequest = &tablesRequest{ }, } -// StreamTables is part of the the UpdateStream interface +// StreamTables is part of the UpdateStream interface func (fake *FakeBinlogStreamer) StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset, callback func(reply *binlogdatapb.BinlogTransaction) error) error { if fake.panics { panic(fmt.Errorf("test-triggered panic")) @@ -214,7 +214,7 @@ func testStreamTablesPanics(t *testing.T, bpc binlogplayer.Client) { } } -// HandlePanic is part of the the UpdateStream interface +// HandlePanic is part of the UpdateStream interface func (fake *FakeBinlogStreamer) HandlePanic(err *error) { if x := recover(); x != nil { *err = fmt.Errorf("Caught panic: %v", x) diff --git a/go/vt/dbconfigs/credentials.go b/go/vt/dbconfigs/credentials.go index ab73e9d569..3062e7a215 100644 --- a/go/vt/dbconfigs/credentials.go +++ b/go/vt/dbconfigs/credentials.go @@ -26,7 +26,10 @@ import ( "errors" "flag" "io/ioutil" + "os" + "os/signal" "sync" + "syscall" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" @@ -37,7 +40,7 @@ var ( dbCredentialsServer = flag.String("db-credentials-server", "file", "db credentials server type (use 'file' for the file implementation)") // 'file' implementation flags - dbCredentialsFile = flag.String("db-credentials-file", "", "db credentials file") + dbCredentialsFile = flag.String("db-credentials-file", "", "db credentials file; send SIGHUP to reload this file") // ErrUnknownUser is returned by credential server when the // user doesn't exist @@ -126,4 +129,16 @@ func WithCredentials(cp *mysql.ConnParams) (*mysql.ConnParams, error) { func init() { AllCredentialsServers["file"] = &FileCredentialsServer{} + + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGHUP) + go func() { + for range sigChan { + if fcs, ok := AllCredentialsServers["file"].(*FileCredentialsServer); ok { + fcs.mu.Lock() + fcs.dbCredentials = nil + fcs.mu.Unlock() + } + } + }() } diff --git a/go/vt/dbconfigs/dbconfigs_test.go b/go/vt/dbconfigs/dbconfigs_test.go index 0f6e87fd4c..86fbd75758 100644 --- a/go/vt/dbconfigs/dbconfigs_test.go +++ b/go/vt/dbconfigs/dbconfigs_test.go @@ -17,8 +17,13 @@ limitations under the License. 
package dbconfigs import ( + "fmt" + "io/ioutil" + "os" "reflect" + "syscall" "testing" + "time" "vitess.io/vitess/go/mysql" ) @@ -217,3 +222,47 @@ func TestCopy(t *testing.T) { t.Errorf("DBConfig: %v, want %v", got, want) } } + +func TestCredentialsFileHUP(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "credentials.json") + if err != nil { + t.Fatalf("couldn't create temp file: %v", err) + } + defer os.Remove(tmpFile.Name()) + *dbCredentialsFile = tmpFile.Name() + *dbCredentialsServer = "file" + oldStr := "str1" + jsonConfig := fmt.Sprintf("{\"%s\": [\"%s\"]}", oldStr, oldStr) + if err := ioutil.WriteFile(tmpFile.Name(), []byte(jsonConfig), 0600); err != nil { + t.Fatalf("couldn't write temp file: %v", err) + } + cs := GetCredentialsServer() + _, pass, err := cs.GetUserAndPassword(oldStr) + if pass != oldStr { + t.Fatalf("%s's Password should still be '%s'", oldStr, oldStr) + } + hupTest(t, tmpFile, oldStr, "str2") + hupTest(t, tmpFile, "str2", "str3") // still handling the signal +} + +func hupTest(t *testing.T, tmpFile *os.File, oldStr, newStr string) { + cs := GetCredentialsServer() + jsonConfig := fmt.Sprintf("{\"%s\": [\"%s\"]}", newStr, newStr) + if err := ioutil.WriteFile(tmpFile.Name(), []byte(jsonConfig), 0600); err != nil { + t.Fatalf("couldn't overwrite temp file: %v", err) + } + _, pass, err := cs.GetUserAndPassword(oldStr) + if pass != oldStr { + t.Fatalf("%s's Password should still be '%s'", oldStr, oldStr) + } + syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + time.Sleep(100 * time.Millisecond) // wait for signal handler + _, pass, err = cs.GetUserAndPassword(oldStr) + if err != ErrUnknownUser { + t.Fatalf("Should not have old %s after config reload", oldStr) + } + _, pass, err = cs.GetUserAndPassword(newStr) + if pass != newStr { + t.Fatalf("%s's Password should be '%s'", newStr, newStr) + } +} diff --git a/go/vt/discovery/tablet_stats_cache.go b/go/vt/discovery/tablet_stats_cache.go index 13b60a9225..7fa1809e5c 100644 --- a/go/vt/discovery/tablet_stats_cache.go +++ b/go/vt/discovery/tablet_stats_cache.go @@ -68,7 +68,7 @@ type tabletStatsCacheEntry struct { all map[string]*TabletStats // healthy only has the healthy ones. healthy []*TabletStats - // aggregates has the per-cell aggregates. + // aggregates has the per-region aggregates. aggregates map[string]*querypb.AggregateStats } @@ -141,7 +141,6 @@ func newTabletStatsCache(hc HealthCheck, ts *topo.Server, cell string, setListen // upon type change. hc.SetListener(tc, true /*sendDownEvents*/) } - go tc.broadcastAggregateStats() return tc } @@ -266,18 +265,18 @@ func (tc *TabletStatsCache) StatsUpdate(ts *TabletStats) { tc.updateAggregateMap(ts.Target.Keyspace, ts.Target.Shard, ts.Target.TabletType, e, allArray) } -// MakeAggregateMap takes a list of TabletStats and builds a per-cell +// makeAggregateMap takes a list of TabletStats and builds a per-region // AggregateStats map. 
-func MakeAggregateMap(stats []*TabletStats) map[string]*querypb.AggregateStats { +func (tc *TabletStatsCache) makeAggregateMap(stats []*TabletStats) map[string]*querypb.AggregateStats { result := make(map[string]*querypb.AggregateStats) for _, ts := range stats { - cell := ts.Tablet.Alias.Cell - agg, ok := result[cell] + region := tc.getRegionByCell(ts.Tablet.Alias.Cell) + agg, ok := result[region] if !ok { agg = &querypb.AggregateStats{ SecondsBehindMasterMin: math.MaxUint32, } - result[cell] = agg + result[region] = agg } if ts.Serving && ts.LastError == nil { @@ -295,101 +294,12 @@ func MakeAggregateMap(stats []*TabletStats) map[string]*querypb.AggregateStats { return result } -// MakeAggregateMapDiff computes the entries that need to be broadcast -// when the map goes from oldMap to newMap. -func MakeAggregateMapDiff(keyspace, shard string, tabletType topodatapb.TabletType, ter int64, oldMap map[string]*querypb.AggregateStats, newMap map[string]*querypb.AggregateStats) []*srvtopo.TargetStatsEntry { - var result []*srvtopo.TargetStatsEntry - for cell, oldValue := range oldMap { - newValue, ok := newMap[cell] - if ok { - // We have both an old and a new value. If equal, - // skip it. - if oldValue.HealthyTabletCount == newValue.HealthyTabletCount && - oldValue.UnhealthyTabletCount == newValue.UnhealthyTabletCount && - oldValue.SecondsBehindMasterMin == newValue.SecondsBehindMasterMin && - oldValue.SecondsBehindMasterMax == newValue.SecondsBehindMasterMax { - continue - } - // The new value is different, send it. - result = append(result, &srvtopo.TargetStatsEntry{ - Target: &querypb.Target{ - Keyspace: keyspace, - Shard: shard, - TabletType: tabletType, - Cell: cell, - }, - Stats: newValue, - TabletExternallyReparentedTimestamp: ter, - }) - } else { - // We only have the old value, send an empty - // record to clear it. - result = append(result, &srvtopo.TargetStatsEntry{ - Target: &querypb.Target{ - Keyspace: keyspace, - Shard: shard, - TabletType: tabletType, - Cell: cell, - }, - }) - } - } - - for cell, newValue := range newMap { - if _, ok := oldMap[cell]; ok { - continue - } - // New value, no old value, just send it. - result = append(result, &srvtopo.TargetStatsEntry{ - Target: &querypb.Target{ - Keyspace: keyspace, - Shard: shard, - TabletType: tabletType, - Cell: cell, - }, - Stats: newValue, - TabletExternallyReparentedTimestamp: ter, - }) - } - return result -} - // updateAggregateMap will update the aggregate map for the // tabletStatsCacheEntry. It may broadcast the changes too if we have listeners. // e.mu needs to be locked. func (tc *TabletStatsCache) updateAggregateMap(keyspace, shard string, tabletType topodatapb.TabletType, e *tabletStatsCacheEntry, stats []*TabletStats) { // Save the new value - oldAgg := e.aggregates - newAgg := MakeAggregateMap(stats) - e.aggregates = newAgg - - // And broadcast the change in the background, if we need to. - tc.mu.RLock() - if !tc.tsm.HasSubscribers() { - // Shortcut: no subscriber, we can be done. - tc.mu.RUnlock() - return - } - tc.mu.RUnlock() - - var ter int64 - if len(stats) > 0 { - ter = stats[0].TabletExternallyReparentedTimestamp - } - diffs := MakeAggregateMapDiff(keyspace, shard, tabletType, ter, oldAgg, newAgg) - tc.aggregatesChan <- diffs -} - -// broadcastAggregateStats is called in the background to send aggregate stats -// in the right order to our subscribers. 
-func (tc *TabletStatsCache) broadcastAggregateStats() { - for diffs := range tc.aggregatesChan { - tc.mu.RLock() - for _, d := range diffs { - tc.tsm.Broadcast(d) - } - tc.mu.RUnlock() - } + e.aggregates = tc.makeAggregateMap(stats) } // GetTabletStats returns the full list of available targets. @@ -436,51 +346,6 @@ func (tc *TabletStatsCache) ResetForTesting() { tc.entries = make(map[string]map[string]map[topodatapb.TabletType]*tabletStatsCacheEntry) } -// Subscribe is part of the TargetStatsListener interface. -func (tc *TabletStatsCache) Subscribe() (int, []srvtopo.TargetStatsEntry, <-chan (*srvtopo.TargetStatsEntry), error) { - var allTS []srvtopo.TargetStatsEntry - - // Make sure the map cannot change. Also blocks any update from - // propagating. - tc.mu.Lock() - defer tc.mu.Unlock() - for keyspace, shardMap := range tc.entries { - for shard, typeMap := range shardMap { - for tabletType, e := range typeMap { - e.mu.RLock() - var ter int64 - if len(e.healthy) > 0 { - ter = e.healthy[0].TabletExternallyReparentedTimestamp - } - for cell, agg := range e.aggregates { - allTS = append(allTS, srvtopo.TargetStatsEntry{ - Target: &querypb.Target{ - Keyspace: keyspace, - Shard: shard, - TabletType: tabletType, - Cell: cell, - }, - Stats: agg, - TabletExternallyReparentedTimestamp: ter, - }) - } - e.mu.RUnlock() - } - } - } - - // Now create the listener, add it to our list. - id, c := tc.tsm.Subscribe() - return id, allTS, c, nil -} - -// Unsubscribe is part of the TargetStatsListener interface. -func (tc *TabletStatsCache) Unsubscribe(i int) error { - tc.mu.Lock() - defer tc.mu.Unlock() - return tc.tsm.Unsubscribe(i) -} - // GetAggregateStats is part of the TargetStatsListener interface. func (tc *TabletStatsCache) GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, error) { e := tc.getEntry(target.Keyspace, target.Shard, target.TabletType) @@ -498,7 +363,8 @@ func (tc *TabletStatsCache) GetAggregateStats(target *querypb.Target) (*querypb. return agg, nil } } - agg, ok := e.aggregates[target.Cell] + targetRegion := tc.getRegionByCell(target.Cell) + agg, ok := e.aggregates[targetRegion] if !ok { return nil, topo.NewError(topo.NoNode, topotools.TargetIdent(target)) } @@ -530,4 +396,3 @@ func (tc *TabletStatsCache) GetMasterCell(keyspace, shard string) (cell string, // Compile-time interface check. var _ HealthCheckStatsListener = (*TabletStatsCache)(nil) -var _ srvtopo.TargetStatsListener = (*TabletStatsCache)(nil) diff --git a/go/vt/key/destination.go b/go/vt/key/destination.go index 2f3e32c88c..ca7867fa50 100644 --- a/go/vt/key/destination.go +++ b/go/vt/key/destination.go @@ -19,6 +19,7 @@ package key import ( "bytes" "encoding/hex" + "math/rand" "strings" "vitess.io/vitess/go/vt/vterrors" @@ -27,6 +28,9 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +// AnyShardPicker makes a choice on what shard to use when any shard will do. Used for testing. +var AnyShardPicker DestinationAnyShardPicker = DestinationAnyShardPickerRandomShard{} + // Destination is an interface definition for a query destination, // within a given Keyspace / Tablet Type. It is meant to be an internal // data structure, with multiple possible implementations. @@ -369,12 +373,25 @@ func (d DestinationKeyspaceIDs) String() string { return buffer.String() } +// DestinationAnyShardPicker exposes an interface that will pick an index given a number of available shards. 
+type DestinationAnyShardPicker interface { + // PickShard picks a shard given a number of shards + PickShard(shardCount int) int +} + +// DestinationAnyShardPickerRandomShard picks a random shard. +type DestinationAnyShardPickerRandomShard struct{} + +// PickShard is DestinationAnyShardPickerRandomShard's implementation. +func (dp DestinationAnyShardPickerRandomShard) PickShard(shardCount int) int { + return rand.Intn(shardCount) +} + // // DestinationAnyShard // -// DestinationAnyShard is the destination for any one shard in the -// keyspace. This usually maps to the first one in the list. +// DestinationAnyShard is the destination for any one shard in the keyspace. // It implements the Destination interface. type DestinationAnyShard struct{} @@ -388,7 +405,7 @@ func (d DestinationAnyShard) Resolve(allShards []*topodatapb.ShardReference, add if len(allShards) == 0 { return vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "no shard in keyspace") } - return addShard(allShards[0].Name) + return addShard(allShards[AnyShardPicker.PickShard(len(allShards))].Name) } // String is part of the Destination interface. diff --git a/go/vt/log/log.go b/go/vt/log/log.go index 4f06ec38cc..d2d0adf1df 100644 --- a/go/vt/log/log.go +++ b/go/vt/log/log.go @@ -5,7 +5,10 @@ package log -import "github.com/golang/glog" +import ( + "flag" + "github.com/golang/glog" +) // Level is used with V() to test log verbosity. type Level = glog.Level @@ -52,3 +55,7 @@ var ( // FatalDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. FatalDepth = glog.FatalDepth ) + +func init() { + flag.Uint64Var(&glog.MaxSize, "log_rotate_max_size", glog.MaxSize, "size in bytes at which logs are rotated (glog.MaxSize)") +} diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index ccecaee943..f97a727245 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -164,7 +164,7 @@ func isDbDir(p string) bool { return true } - // Look for at least one .frm file + // Look for at least one database file fis, err := ioutil.ReadDir(p) if err != nil { return false } @@ -174,6 +174,12 @@ func isDbDir(p string) bool { return true } + // the MyRocks engine stores data in RocksDB .sst files + // https://github.com/facebook/rocksdb/wiki/Rocksdb-BlockBasedTable-Format + if strings.HasSuffix(fi.Name(), ".sst") { + return true + } + // .frm files were removed in MySQL 8, so we need to check for two other file types // https://dev.mysql.com/doc/refman/8.0/en/data-dictionary-file-removal.html if strings.HasSuffix(fi.Name(), ".ibd") { @@ -820,6 +826,12 @@ func Restore( if len(bhs) == 0 { // There are no backups (not even broken/incomplete ones). logger.Errorf("No backup to restore on BackupStorage for directory %v. Starting up empty.", dir) + // Since this was an empty database, make sure we start replication at the beginning + if err = mysqld.ResetReplication(ctx); err == nil { + logger.Errorf("Error resetting slave replication: %v. 
Continuing", err) + err = ErrNoBackup + } + if err = PopulateMetadataTables(mysqld, localMetadata); err == nil { err = ErrNoBackup } diff --git a/go/vt/mysqlctl/backup_test.go b/go/vt/mysqlctl/backup_test.go index ae08b5321c..6fa324ae11 100644 --- a/go/vt/mysqlctl/backup_test.go +++ b/go/vt/mysqlctl/backup_test.go @@ -39,7 +39,9 @@ func TestFindFilesToBackup(t *testing.T) { dataDbDir := path.Join(dataDir, "vt_db") extraDir := path.Join(dataDir, "extra_dir") outsideDbDir := path.Join(root, "outside_db") - for _, s := range []string{innodbDataDir, innodbLogDir, dataDbDir, extraDir, outsideDbDir} { + rocksdbDir := path.Join(dataDir, ".rocksdb") + sdiOnlyDir := path.Join(dataDir, "sdi_dir") + for _, s := range []string{innodbDataDir, innodbLogDir, dataDbDir, extraDir, outsideDbDir, rocksdbDir, sdiOnlyDir} { if err := os.MkdirAll(s, os.ModePerm); err != nil { t.Fatalf("failed to create directory %v: %v", s, err) } @@ -62,6 +64,12 @@ func TestFindFilesToBackup(t *testing.T) { if err := os.Symlink(outsideDbDir, path.Join(dataDir, "vt_symlink")); err != nil { t.Fatalf("failed to symlink vt_symlink: %v", err) } + if err := ioutil.WriteFile(path.Join(rocksdbDir, "000011.sst"), []byte("rocksdb file"), os.ModePerm); err != nil { + t.Fatalf("failed to write file 000011.sst: %v", err) + } + if err := ioutil.WriteFile(path.Join(sdiOnlyDir, "table1.sdi"), []byte("sdi file"), os.ModePerm); err != nil { + t.Fatalf("failed to write file table1.sdi: %v", err) + } cnf := &Mycnf{ InnodbDataHomeDir: innodbDataDir, @@ -76,6 +84,14 @@ func TestFindFilesToBackup(t *testing.T) { sort.Sort(forTest(result)) t.Logf("findFilesToBackup returned: %v", result) expected := []FileEntry{ + { + Base: "Data", + Name: ".rocksdb/000011.sst", + }, + { + Base: "Data", + Name: "sdi_dir/table1.sdi", + }, { Base: "Data", Name: "vt_db/db.opt", diff --git a/go/vt/mysqlctl/cephbackupstorage/ceph.go b/go/vt/mysqlctl/cephbackupstorage/ceph.go index 7d5f3c6467..d047f4af0e 100644 --- a/go/vt/mysqlctl/cephbackupstorage/ceph.go +++ b/go/vt/mysqlctl/cephbackupstorage/ceph.go @@ -88,7 +88,7 @@ func (bh *CephBackupHandle) AddFile(ctx context.Context, filename string, filesi // Give PutObject() the read end of the pipe. object := objName(bh.dir, bh.name, filename) - _, err := bh.client.PutObject(bucket, object, reader, "application/octet-stream") + _, err := bh.client.PutObjectWithContext(ctx, bucket, object, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { // Signal the writer that an error occurred, in case it's not done writing yet. reader.CloseWithError(err) @@ -126,7 +126,7 @@ func (bh *CephBackupHandle) ReadFile(ctx context.Context, filename string) (io.R // ceph bucket name bucket := alterBucketName(bh.dir) object := objName(bh.dir, bh.name, filename) - return bh.client.GetObject(bucket, object) + return bh.client.GetObjectWithContext(ctx, bucket, object, minio.GetObjectOptions{}) } // CephBackupStorage implements BackupStorage for Ceph Cloud Storage. 
@@ -154,7 +154,7 @@ func (bs *CephBackupStorage) ListBackups(ctx context.Context, dir string) ([]bac doneCh := make(chan struct{}) for object := range c.ListObjects(bucket, searchPrefix, false, doneCh) { if object.Err != nil { - err := c.BucketExists(bucket) + _, err := c.BucketExists(bucket) if err != nil { return nil, nil } @@ -190,8 +190,13 @@ func (bs *CephBackupStorage) StartBackup(ctx context.Context, dir, name string) // ceph bucket name bucket := alterBucketName(dir) - err = c.BucketExists(bucket) + found, err := c.BucketExists(bucket) + if err != nil { + log.Info("Error from BucketExists: %v, quitting", bucket) + return nil, errors.New("Error checking whether bucket exists: " + bucket) + } + if !found { log.Info("Bucket: %v doesn't exist, creating new bucket with the required name", bucket) err = c.MakeBucket(bucket, "") if err != nil { diff --git a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go index a7d945f06b..6b9bb4b43f 100644 --- a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go @@ -80,6 +80,9 @@ type FakeMysqlDaemon struct { // If it doesn't match, SetSlavePosition will return an error. SetSlavePositionPos mysql.Position + // StartSlaveUntilAfterPos is matched against the input + StartSlaveUntilAfterPos mysql.Position + // SetMasterInput is matched against the input of SetMaster // (as "%v:%v"). If it doesn't match, SetMaster will return an error. SetMasterInput string @@ -240,6 +243,17 @@ func (fmd *FakeMysqlDaemon) StartSlave(hookExtraEnv map[string]string) error { }) } +// StartSlaveUntilAfter is part of the MysqlDaemon interface. +func (fmd *FakeMysqlDaemon) StartSlaveUntilAfter(ctx context.Context, pos mysql.Position) error { + if !reflect.DeepEqual(fmd.StartSlaveUntilAfterPos, pos) { + return fmt.Errorf("wrong pos for StartSlaveUntilAfter: expected %v got %v", fmd.SetSlavePositionPos, pos) + } + + return fmd.ExecuteSuperQueryList(context.Background(), []string{ + "START SLAVE UNTIL AFTER", + }) +} + // StopSlave is part of the MysqlDaemon interface. func (fmd *FakeMysqlDaemon) StopSlave(hookExtraEnv map[string]string) error { return fmd.ExecuteSuperQueryList(context.Background(), []string{ @@ -383,6 +397,16 @@ func (fmd *FakeMysqlDaemon) GetSchema(dbName string, tables, excludeTables []str return tmutils.FilterTables(fmd.Schema, tables, excludeTables, includeViews) } +// GetColumns is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) GetColumns(dbName, table string) ([]string, error) { + return []string{}, nil +} + +// GetPrimaryKeyColumns is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) GetPrimaryKeyColumns(dbName, table string) ([]string, error) { + return []string{}, nil +} + // PreflightSchemaChange is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) PreflightSchemaChange(dbName string, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) { if fmd.PreflightSchemaChangeResult == nil { diff --git a/go/vt/mysqlctl/fileutil.go b/go/vt/mysqlctl/fileutil.go index b5aee7f100..331b637878 100644 --- a/go/vt/mysqlctl/fileutil.go +++ b/go/vt/mysqlctl/fileutil.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google Inc. +Copyright 2018 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,13 +17,10 @@ limitations under the License. 
package mysqlctl import ( - // "crypto/md5" "encoding/hex" "hash" - // "hash/crc64" + "hash/crc32" "os" - - "vitess.io/vitess/go/cgzip" ) // Use this to simulate failures in tests @@ -36,39 +33,13 @@ func init() { simulateFailures = statErr == nil } -// our hasher, implemented using md5 -// type hasher struct { -// hash.Hash -// } - -// func newHasher() *hasher { -// return &hasher{md5.New()} -// } - -// func (h *hasher) HashString() string { -// return hex.EncodeToString(h.Sum(nil)) -// } - -// our hasher, implemented using crc64 -//type hasher struct { -// hash.Hash64 -//} - -//func newHasher() *hasher { -// return &hasher{crc64.New(crc64.MakeTable(crc64.ECMA))} -//} - -//func (h *hasher) HashString() string { -// return hex.EncodeToString(h.Sum(nil)) -//} - -// our hasher, implemented using cgzip crc32 +// our hasher, implemented using crc32 type hasher struct { hash.Hash32 } func newHasher() *hasher { - return &hasher{cgzip.NewCrc32()} + return &hasher{crc32.NewIEEE()} } func (h *hasher) HashString() string { diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index c8426e8915..9d6e18295d 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -41,6 +41,7 @@ type MysqlDaemon interface { // replication related methods StartSlave(hookExtraEnv map[string]string) error + StartSlaveUntilAfter(ctx context.Context, pos mysql.Position) error StopSlave(hookExtraEnv map[string]string) error SlaveStatus() (mysql.SlaveStatus, error) SetSemiSyncEnabled(master, slave bool) error @@ -69,6 +70,8 @@ type MysqlDaemon interface { // Schema related methods GetSchema(dbName string, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) + GetColumns(dbName, table string) ([]string, error) + GetPrimaryKeyColumns(dbName, table string) ([]string, error) PreflightSchemaChange(dbName string, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) ApplySchemaChange(dbName string, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index 03a3b10eea..713e1679a7 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -210,7 +210,7 @@ func (mysqld *Mysqld) startNoWait(ctx context.Context, cnf *Mycnf, mysqldArgs .. name, err = binaryPath(dir, "mysqld_safe") if err != nil { // The movement to use systemd means that mysqld_safe is not always provided. - // This should not be considered an issue do do not generate a warning. + // This should not be considered an issue do not generate a warning. log.Infof("%v: trying to launch mysqld instead", err) name, err = binaryPath(dir, "mysqld") // If this also fails, return an error. diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 70e08f625e..12aef08d9a 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -83,6 +83,19 @@ func (mysqld *Mysqld) StartSlave(hookExtraEnv map[string]string) error { return h.ExecuteOptional() } +// StartSlaveUntilAfter starts a slave until replication has come to `targetPos`, then it stops replication +func (mysqld *Mysqld) StartSlaveUntilAfter(ctx context.Context, targetPos mysql.Position) error { + conn, err := getPoolReconnect(ctx, mysqld.dbaPool) + if err != nil { + return err + } + defer conn.Recycle() + + queries := []string{conn.StartSlaveUntilAfterCommand(targetPos)} + + return mysqld.executeSuperQueryListConn(ctx, conn, queries) +} + // StopSlave stops a slave. 
func (mysqld *Mysqld) StopSlave(hookExtraEnv map[string]string) error { h := hook.NewSimpleHook("preflight_stop_slave") diff --git a/go/vt/proto/automation/automation.pb.go b/go/vt/proto/automation/automation.pb.go index 9cfe471fe1..e810986dae 100644 --- a/go/vt/proto/automation/automation.pb.go +++ b/go/vt/proto/automation/automation.pb.go @@ -44,7 +44,7 @@ func (x ClusterOperationState) String() string { return proto.EnumName(ClusterOperationState_name, int32(x)) } func (ClusterOperationState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{0} + return fileDescriptor_automation_4d7d55680fa173cc, []int{0} } type TaskState int32 @@ -73,7 +73,7 @@ func (x TaskState) String() string { return proto.EnumName(TaskState_name, int32(x)) } func (TaskState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{1} + return fileDescriptor_automation_4d7d55680fa173cc, []int{1} } type ClusterOperation struct { @@ -93,7 +93,7 @@ func (m *ClusterOperation) Reset() { *m = ClusterOperation{} } func (m *ClusterOperation) String() string { return proto.CompactTextString(m) } func (*ClusterOperation) ProtoMessage() {} func (*ClusterOperation) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{0} + return fileDescriptor_automation_4d7d55680fa173cc, []int{0} } func (m *ClusterOperation) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClusterOperation.Unmarshal(m, b) @@ -155,7 +155,7 @@ func (m *TaskContainer) Reset() { *m = TaskContainer{} } func (m *TaskContainer) String() string { return proto.CompactTextString(m) } func (*TaskContainer) ProtoMessage() {} func (*TaskContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{1} + return fileDescriptor_automation_4d7d55680fa173cc, []int{1} } func (m *TaskContainer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TaskContainer.Unmarshal(m, b) @@ -210,7 +210,7 @@ func (m *Task) Reset() { *m = Task{} } func (m *Task) String() string { return proto.CompactTextString(m) } func (*Task) ProtoMessage() {} func (*Task) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{2} + return fileDescriptor_automation_4d7d55680fa173cc, []int{2} } func (m *Task) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Task.Unmarshal(m, b) @@ -284,7 +284,7 @@ func (m *EnqueueClusterOperationRequest) Reset() { *m = EnqueueClusterOp func (m *EnqueueClusterOperationRequest) String() string { return proto.CompactTextString(m) } func (*EnqueueClusterOperationRequest) ProtoMessage() {} func (*EnqueueClusterOperationRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{3} + return fileDescriptor_automation_4d7d55680fa173cc, []int{3} } func (m *EnqueueClusterOperationRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnqueueClusterOperationRequest.Unmarshal(m, b) @@ -329,7 +329,7 @@ func (m *EnqueueClusterOperationResponse) Reset() { *m = EnqueueClusterO func (m *EnqueueClusterOperationResponse) String() string { return proto.CompactTextString(m) } func (*EnqueueClusterOperationResponse) ProtoMessage() {} func (*EnqueueClusterOperationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{4} + return fileDescriptor_automation_4d7d55680fa173cc, []int{4} } func (m *EnqueueClusterOperationResponse) XXX_Unmarshal(b []byte) error { return 
xxx_messageInfo_EnqueueClusterOperationResponse.Unmarshal(m, b) @@ -367,7 +367,7 @@ func (m *GetClusterOperationStateRequest) Reset() { *m = GetClusterOpera func (m *GetClusterOperationStateRequest) String() string { return proto.CompactTextString(m) } func (*GetClusterOperationStateRequest) ProtoMessage() {} func (*GetClusterOperationStateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{5} + return fileDescriptor_automation_4d7d55680fa173cc, []int{5} } func (m *GetClusterOperationStateRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetClusterOperationStateRequest.Unmarshal(m, b) @@ -405,7 +405,7 @@ func (m *GetClusterOperationStateResponse) Reset() { *m = GetClusterOper func (m *GetClusterOperationStateResponse) String() string { return proto.CompactTextString(m) } func (*GetClusterOperationStateResponse) ProtoMessage() {} func (*GetClusterOperationStateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{6} + return fileDescriptor_automation_4d7d55680fa173cc, []int{6} } func (m *GetClusterOperationStateResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetClusterOperationStateResponse.Unmarshal(m, b) @@ -443,7 +443,7 @@ func (m *GetClusterOperationDetailsRequest) Reset() { *m = GetClusterOpe func (m *GetClusterOperationDetailsRequest) String() string { return proto.CompactTextString(m) } func (*GetClusterOperationDetailsRequest) ProtoMessage() {} func (*GetClusterOperationDetailsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{7} + return fileDescriptor_automation_4d7d55680fa173cc, []int{7} } func (m *GetClusterOperationDetailsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetClusterOperationDetailsRequest.Unmarshal(m, b) @@ -482,7 +482,7 @@ func (m *GetClusterOperationDetailsResponse) Reset() { *m = GetClusterOp func (m *GetClusterOperationDetailsResponse) String() string { return proto.CompactTextString(m) } func (*GetClusterOperationDetailsResponse) ProtoMessage() {} func (*GetClusterOperationDetailsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_automation_7092712054bc689e, []int{8} + return fileDescriptor_automation_4d7d55680fa173cc, []int{8} } func (m *GetClusterOperationDetailsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetClusterOperationDetailsResponse.Unmarshal(m, b) @@ -525,9 +525,9 @@ func init() { proto.RegisterEnum("automation.TaskState", TaskState_name, TaskState_value) } -func init() { proto.RegisterFile("automation.proto", fileDescriptor_automation_7092712054bc689e) } +func init() { proto.RegisterFile("automation.proto", fileDescriptor_automation_4d7d55680fa173cc) } -var fileDescriptor_automation_7092712054bc689e = []byte{ +var fileDescriptor_automation_4d7d55680fa173cc = []byte{ // 588 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xdd, 0x6e, 0xd3, 0x3e, 0x18, 0xc6, 0xff, 0x49, 0xdb, 0xfd, 0xe9, 0x1b, 0xb6, 0x45, 0x16, 0x9b, 0xb2, 0x89, 0xb1, 0x2c, diff --git a/go/vt/proto/automationservice/automationservice.pb.go b/go/vt/proto/automationservice/automationservice.pb.go index e3c00ceddd..61ac349af8 100644 --- a/go/vt/proto/automationservice/automationservice.pb.go +++ b/go/vt/proto/automationservice/automationservice.pb.go @@ -136,10 +136,10 @@ var _Automation_serviceDesc = grpc.ServiceDesc{ } func init() { - proto.RegisterFile("automationservice.proto", 
fileDescriptor_automationservice_42ff8d484b987c6f) + proto.RegisterFile("automationservice.proto", fileDescriptor_automationservice_5369cb995212ce22) } -var fileDescriptor_automationservice_42ff8d484b987c6f = []byte{ +var fileDescriptor_automationservice_5369cb995212ce22 = []byte{ // 178 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2c, 0x2d, 0xc9, 0xcf, 0x4d, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index c520a6a4f7..2683321fd1 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -8,6 +8,7 @@ import fmt "fmt" import math "math" import query "vitess.io/vitess/go/vt/proto/query" import topodata "vitess.io/vitess/go/vt/proto/topodata" +import vtrpc "vitess.io/vitess/go/vt/proto/vtrpc" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -20,6 +21,98 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// OnDDLAction lists the possible actions for DDLs. +type OnDDLAction int32 + +const ( + OnDDLAction_IGNORE OnDDLAction = 0 + OnDDLAction_STOP OnDDLAction = 1 + OnDDLAction_EXEC OnDDLAction = 2 + OnDDLAction_EXEC_IGNORE OnDDLAction = 3 +) + +var OnDDLAction_name = map[int32]string{ + 0: "IGNORE", + 1: "STOP", + 2: "EXEC", + 3: "EXEC_IGNORE", +} +var OnDDLAction_value = map[string]int32{ + "IGNORE": 0, + "STOP": 1, + "EXEC": 2, + "EXEC_IGNORE": 3, +} + +func (x OnDDLAction) String() string { + return proto.EnumName(OnDDLAction_name, int32(x)) +} +func (OnDDLAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{0} +} + +// VEventType enumerates the event types. +// This list is comprehensive. Many of these types +// will not be encountered in RBR mode. 
+type VEventType int32 + +const ( + VEventType_UNKNOWN VEventType = 0 + VEventType_GTID VEventType = 1 + VEventType_BEGIN VEventType = 2 + VEventType_COMMIT VEventType = 3 + VEventType_ROLLBACK VEventType = 4 + VEventType_DDL VEventType = 5 + VEventType_INSERT VEventType = 6 + VEventType_REPLACE VEventType = 7 + VEventType_UPDATE VEventType = 8 + VEventType_DELETE VEventType = 9 + VEventType_SET VEventType = 10 + VEventType_OTHER VEventType = 11 + VEventType_ROW VEventType = 12 + VEventType_FIELD VEventType = 13 +) + +var VEventType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "GTID", + 2: "BEGIN", + 3: "COMMIT", + 4: "ROLLBACK", + 5: "DDL", + 6: "INSERT", + 7: "REPLACE", + 8: "UPDATE", + 9: "DELETE", + 10: "SET", + 11: "OTHER", + 12: "ROW", + 13: "FIELD", +} +var VEventType_value = map[string]int32{ + "UNKNOWN": 0, + "GTID": 1, + "BEGIN": 2, + "COMMIT": 3, + "ROLLBACK": 4, + "DDL": 5, + "INSERT": 6, + "REPLACE": 7, + "UPDATE": 8, + "DELETE": 9, + "SET": 10, + "OTHER": 11, + "ROW": 12, + "FIELD": 13, +} + +func (x VEventType) String() string { + return proto.EnumName(VEventType_name, int32(x)) +} +func (VEventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{1} +} + type BinlogTransaction_Statement_Category int32 const ( @@ -65,7 +158,7 @@ func (x BinlogTransaction_Statement_Category) String() string { return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x)) } func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{1, 0, 0} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{1, 0, 0} } // Charset is the per-statement charset info from a QUERY_EVENT binlog entry. @@ -85,7 +178,7 @@ func (m *Charset) Reset() { *m = Charset{} } func (m *Charset) String() string { return proto.CompactTextString(m) } func (*Charset) ProtoMessage() {} func (*Charset) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{0} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{0} } func (m *Charset) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Charset.Unmarshal(m, b) @@ -142,7 +235,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} } func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction) ProtoMessage() {} func (*BinlogTransaction) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{1} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{1} } func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b) @@ -192,7 +285,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction_Statement) ProtoMessage() {} func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{1, 0} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{1, 0} } func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b) @@ -250,7 +343,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} } func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeRequest) ProtoMessage() {} func (*StreamKeyRangeRequest) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{2} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{2} } func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b) @@ -303,7 +396,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{} func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeResponse) ProtoMessage() {} func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{3} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{3} } func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b) @@ -347,7 +440,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} } func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) } func (*StreamTablesRequest) ProtoMessage() {} func (*StreamTablesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{4} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{4} } func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b) @@ -400,7 +493,7 @@ func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} } func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTablesResponse) ProtoMessage() {} func (*StreamTablesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{5} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{5} } func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b) @@ -427,6 +520,98 @@ func (m *StreamTablesResponse) GetBinlogTransaction() *BinlogTransaction { return nil } +// Rule represents one rule. +type Rule struct { + // match can be a table name or a regular expression + // delineated by '/' and '/'. + Match string `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"` + // filter can be an empty string or keyrange if the match + // is a regular expression. Otherwise, it must be a select + // query. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Rule) Reset() { *m = Rule{} } +func (m *Rule) String() string { return proto.CompactTextString(m) } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{6} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Rule.Unmarshal(m, b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Rule.Marshal(b, m, deterministic) +} +func (dst *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(dst, src) +} +func (m *Rule) XXX_Size() int { + return xxx_messageInfo_Rule.Size(m) +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *Rule) GetMatch() string { + if m != nil { + return m.Match + } + return "" +} + +func (m *Rule) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// Filter represents a list of ordered rules. 
First match +// wins. +type Filter struct { + Rules []*Rule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} +func (*Filter) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{7} +} +func (m *Filter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Filter.Unmarshal(m, b) +} +func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Filter.Marshal(b, m, deterministic) +} +func (dst *Filter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Filter.Merge(dst, src) +} +func (m *Filter) XXX_Size() int { + return xxx_messageInfo_Filter.Size(m) +} +func (m *Filter) XXX_DiscardUnknown() { + xxx_messageInfo_Filter.DiscardUnknown(m) +} + +var xxx_messageInfo_Filter proto.InternalMessageInfo + +func (m *Filter) GetRules() []*Rule { + if m != nil { + return m.Rules + } + return nil +} + // BinlogSource specifies the source and filter parameters for // Filtered Replication. It currently supports a keyrange // or a list of tables. @@ -440,17 +625,22 @@ type BinlogSource struct { // key_range is set if the request is for a keyrange KeyRange *topodata.KeyRange `protobuf:"bytes,4,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` // tables is set if the request is for a list of tables - Tables []string `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Tables []string `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"` + // filter is set if we're using the generalized representation + // for the filter. + Filter *Filter `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` + // on_ddl specifies the action to be taken when a DDL is encountered. 
+ OnDdl OnDDLAction `protobuf:"varint,7,opt,name=on_ddl,json=onDdl,proto3,enum=binlogdata.OnDDLAction" json:"on_ddl,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *BinlogSource) Reset() { *m = BinlogSource{} } func (m *BinlogSource) String() string { return proto.CompactTextString(m) } func (*BinlogSource) ProtoMessage() {} func (*BinlogSource) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{6} + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{8} } func (m *BinlogSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogSource.Unmarshal(m, b) @@ -505,6 +695,349 @@ func (m *BinlogSource) GetTables() []string { return nil } +func (m *BinlogSource) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *BinlogSource) GetOnDdl() OnDDLAction { + if m != nil { + return m.OnDdl + } + return OnDDLAction_IGNORE +} + +// RowChange represents one row change +type RowChange struct { + Before *query.Row `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *query.Row `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RowChange) Reset() { *m = RowChange{} } +func (m *RowChange) String() string { return proto.CompactTextString(m) } +func (*RowChange) ProtoMessage() {} +func (*RowChange) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{9} +} +func (m *RowChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RowChange.Unmarshal(m, b) +} +func (m *RowChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RowChange.Marshal(b, m, deterministic) +} +func (dst *RowChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_RowChange.Merge(dst, src) +} +func (m *RowChange) XXX_Size() int { + return xxx_messageInfo_RowChange.Size(m) +} +func (m *RowChange) XXX_DiscardUnknown() { + xxx_messageInfo_RowChange.DiscardUnknown(m) +} + +var xxx_messageInfo_RowChange proto.InternalMessageInfo + +func (m *RowChange) GetBefore() *query.Row { + if m != nil { + return m.Before + } + return nil +} + +func (m *RowChange) GetAfter() *query.Row { + if m != nil { + return m.After + } + return nil +} + +// RowEvent represent row events for one table +type RowEvent struct { + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + RowChanges []*RowChange `protobuf:"bytes,2,rep,name=row_changes,json=rowChanges,proto3" json:"row_changes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RowEvent) Reset() { *m = RowEvent{} } +func (m *RowEvent) String() string { return proto.CompactTextString(m) } +func (*RowEvent) ProtoMessage() {} +func (*RowEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{10} +} +func (m *RowEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RowEvent.Unmarshal(m, b) +} +func (m *RowEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RowEvent.Marshal(b, m, deterministic) +} +func (dst *RowEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_RowEvent.Merge(dst, src) +} +func (m *RowEvent) XXX_Size() int { + return xxx_messageInfo_RowEvent.Size(m) +} 
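As a rough sketch of how the new Rule, Filter, and OnDdl fields above compose (not taken from the patch; it assumes the generated package is imported under its usual binlogdatapb alias), a vreplication source that replicates one table through a query, matches everything else by regexp, and stops on DDL could be built like this. Filter rules are ordered and the first match wins, so the specific rule comes before the catch-all.

    package main

    import (
        "fmt"

        binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
    )

    func main() {
        source := &binlogdatapb.BinlogSource{
            Filter: &binlogdatapb.Filter{
                Rules: []*binlogdatapb.Rule{
                    // Specific rule first: replicate "customer" through a query.
                    {Match: "customer", Filter: "select * from customer"},
                    // Catch-all regexp rule; an empty filter is allowed for regexp matches.
                    {Match: "/.*/"},
                },
            },
            // Stop the stream when a DDL statement is encountered.
            OnDdl: binlogdatapb.OnDDLAction_STOP,
        }
        fmt.Println(source.String())
    }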
+func (m *RowEvent) XXX_DiscardUnknown() { + xxx_messageInfo_RowEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_RowEvent proto.InternalMessageInfo + +func (m *RowEvent) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *RowEvent) GetRowChanges() []*RowChange { + if m != nil { + return m.RowChanges + } + return nil +} + +type FieldEvent struct { + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + Fields []*query.Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldEvent) Reset() { *m = FieldEvent{} } +func (m *FieldEvent) String() string { return proto.CompactTextString(m) } +func (*FieldEvent) ProtoMessage() {} +func (*FieldEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{11} +} +func (m *FieldEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldEvent.Unmarshal(m, b) +} +func (m *FieldEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldEvent.Marshal(b, m, deterministic) +} +func (dst *FieldEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldEvent.Merge(dst, src) +} +func (m *FieldEvent) XXX_Size() int { + return xxx_messageInfo_FieldEvent.Size(m) +} +func (m *FieldEvent) XXX_DiscardUnknown() { + xxx_messageInfo_FieldEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldEvent proto.InternalMessageInfo + +func (m *FieldEvent) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *FieldEvent) GetFields() []*query.Field { + if m != nil { + return m.Fields + } + return nil +} + +// VEvent represents a vstream event +type VEvent struct { + Type VEventType `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"` + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Gtid string `protobuf:"bytes,3,opt,name=gtid,proto3" json:"gtid,omitempty"` + Ddl string `protobuf:"bytes,4,opt,name=ddl,proto3" json:"ddl,omitempty"` + RowEvent *RowEvent `protobuf:"bytes,5,opt,name=row_event,json=rowEvent,proto3" json:"row_event,omitempty"` + FieldEvent *FieldEvent `protobuf:"bytes,6,opt,name=field_event,json=fieldEvent,proto3" json:"field_event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VEvent) Reset() { *m = VEvent{} } +func (m *VEvent) String() string { return proto.CompactTextString(m) } +func (*VEvent) ProtoMessage() {} +func (*VEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{12} +} +func (m *VEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VEvent.Unmarshal(m, b) +} +func (m *VEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VEvent.Marshal(b, m, deterministic) +} +func (dst *VEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_VEvent.Merge(dst, src) +} +func (m *VEvent) XXX_Size() int { + return xxx_messageInfo_VEvent.Size(m) +} +func (m *VEvent) XXX_DiscardUnknown() { + xxx_messageInfo_VEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_VEvent proto.InternalMessageInfo + +func (m *VEvent) GetType() VEventType { + if m != nil { + return m.Type + } + return VEventType_UNKNOWN +} + +func (m *VEvent) GetTimestamp() int64 { + if m 
!= nil { + return m.Timestamp + } + return 0 +} + +func (m *VEvent) GetGtid() string { + if m != nil { + return m.Gtid + } + return "" +} + +func (m *VEvent) GetDdl() string { + if m != nil { + return m.Ddl + } + return "" +} + +func (m *VEvent) GetRowEvent() *RowEvent { + if m != nil { + return m.RowEvent + } + return nil +} + +func (m *VEvent) GetFieldEvent() *FieldEvent { + if m != nil { + return m.FieldEvent + } + return nil +} + +// VStreamRequest is the payload for VStream +type VStreamRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Position string `protobuf:"bytes,4,opt,name=position,proto3" json:"position,omitempty"` + Filter *Filter `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } +func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } +func (*VStreamRequest) ProtoMessage() {} +func (*VStreamRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{13} +} +func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) +} +func (m *VStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic) +} +func (dst *VStreamRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamRequest.Merge(dst, src) +} +func (m *VStreamRequest) XXX_Size() int { + return xxx_messageInfo_VStreamRequest.Size(m) +} +func (m *VStreamRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamRequest proto.InternalMessageInfo + +func (m *VStreamRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *VStreamRequest) GetImmediateCallerId() *query.VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *VStreamRequest) GetTarget() *query.Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *VStreamRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *VStreamRequest) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +// VStreamResponse is the response from VStream +type VStreamResponse struct { + Events []*VEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } +func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } +func (*VStreamResponse) ProtoMessage() {} +func (*VStreamResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_6d214635eb8c538c, []int{14} +} +func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) +} +func (m *VStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 
+ return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic) +} +func (dst *VStreamResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamResponse.Merge(dst, src) +} +func (m *VStreamResponse) XXX_Size() int { + return xxx_messageInfo_VStreamResponse.Size(m) +} +func (m *VStreamResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamResponse proto.InternalMessageInfo + +func (m *VStreamResponse) GetEvents() []*VEvent { + if m != nil { + return m.Events + } + return nil +} + func init() { proto.RegisterType((*Charset)(nil), "binlogdata.Charset") proto.RegisterType((*BinlogTransaction)(nil), "binlogdata.BinlogTransaction") @@ -513,52 +1046,96 @@ func init() { proto.RegisterType((*StreamKeyRangeResponse)(nil), "binlogdata.StreamKeyRangeResponse") proto.RegisterType((*StreamTablesRequest)(nil), "binlogdata.StreamTablesRequest") proto.RegisterType((*StreamTablesResponse)(nil), "binlogdata.StreamTablesResponse") + proto.RegisterType((*Rule)(nil), "binlogdata.Rule") + proto.RegisterType((*Filter)(nil), "binlogdata.Filter") proto.RegisterType((*BinlogSource)(nil), "binlogdata.BinlogSource") + proto.RegisterType((*RowChange)(nil), "binlogdata.RowChange") + proto.RegisterType((*RowEvent)(nil), "binlogdata.RowEvent") + proto.RegisterType((*FieldEvent)(nil), "binlogdata.FieldEvent") + proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent") + proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest") + proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse") + proto.RegisterEnum("binlogdata.OnDDLAction", OnDDLAction_name, OnDDLAction_value) + proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value) proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, BinlogTransaction_Statement_Category_value) } -func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_ac14f15f6b19a931) } +func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_6d214635eb8c538c) } -var fileDescriptor_binlogdata_ac14f15f6b19a931 = []byte{ - // 640 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcd, 0x6e, 0xda, 0x4a, - 0x14, 0xbe, 0xc6, 0x40, 0xec, 0xe3, 0xdc, 0x64, 0x98, 0xfc, 0x08, 0x21, 0x5d, 0x09, 0xb1, 0x09, - 0x77, 0x71, 0xcd, 0x95, 0xab, 0x3e, 0x40, 0x8c, 0xad, 0x88, 0xc4, 0x90, 0x68, 0x70, 0x36, 0xd9, - 0x58, 0xc6, 0x99, 0x12, 0x04, 0xf1, 0x38, 0x9e, 0x09, 0xaa, 0x9f, 0xa3, 0x4f, 0xd1, 0xb7, 0xe8, - 0xaa, 0x6f, 0xd2, 0xf7, 0xa8, 0x3c, 0x36, 0x86, 0xa4, 0x52, 0x9b, 0x2e, 0xba, 0x3b, 0xdf, 0x99, - 0xef, 0x9c, 0x39, 0xdf, 0x37, 0x47, 0x03, 0x68, 0xb6, 0x88, 0x57, 0x6c, 0x7e, 0x1f, 0x8a, 0xd0, - 0x4c, 0x52, 0x26, 0x18, 0x86, 0x6d, 0xa6, 0x63, 0x3c, 0x3d, 0xd3, 0x34, 0x2b, 0x0e, 0x3a, 0x07, - 0x82, 0x25, 0x6c, 0x4b, 0xec, 0x8d, 0x61, 0x6f, 0xf8, 0x10, 0xa6, 0x9c, 0x0a, 0x7c, 0x0a, 0xcd, - 0x68, 0xb5, 0xa0, 0xb1, 0x68, 0x2b, 0x5d, 0xa5, 0xdf, 0x20, 0x25, 0xc2, 0x18, 0xea, 0x11, 0x8b, - 0xe3, 0x76, 0x4d, 0x66, 0x65, 0x9c, 0x73, 0x39, 0x4d, 0xd7, 0x34, 0x6d, 0xab, 0x05, 0xb7, 0x40, - 0xbd, 0x6f, 0x2a, 0xb4, 0x6c, 0x79, 0xb5, 0x9f, 0x86, 0x31, 0x0f, 0x23, 0xb1, 0x60, 0x31, 0xbe, - 0x00, 0xe0, 0x22, 0x14, 0xf4, 0x91, 0xc6, 0x82, 0xb7, 0x95, 0xae, 0xda, 0x37, 0xac, 0x33, 0x73, - 0x67, 0xe8, 0x1f, 0x4a, 0xcc, 0xe9, 0x86, 0x4f, 0x76, 0x4a, 0xb1, 0x05, 0x06, 0x5d, 0xd3, 0x58, - 0x04, 0x82, 0x2d, 0x69, 0xdc, 0xae, 0x77, 0x95, 0xbe, 0x61, 0xb5, 
0xcc, 0x42, 0xa0, 0x9b, 0x9f, - 0xf8, 0xf9, 0x01, 0x01, 0x5a, 0xc5, 0x9d, 0xaf, 0x35, 0xd0, 0xab, 0x6e, 0xd8, 0x03, 0x2d, 0x0a, - 0x05, 0x9d, 0xb3, 0x34, 0x93, 0x32, 0x0f, 0xac, 0xff, 0xdf, 0x38, 0x88, 0x39, 0x2c, 0xeb, 0x48, - 0xd5, 0x01, 0xff, 0x07, 0x7b, 0x51, 0xe1, 0x9e, 0x74, 0xc7, 0xb0, 0x8e, 0x76, 0x9b, 0x95, 0xc6, - 0x92, 0x0d, 0x07, 0x23, 0x50, 0xf9, 0xd3, 0x4a, 0x5a, 0xb6, 0x4f, 0xf2, 0xb0, 0xf7, 0x59, 0x01, - 0x6d, 0xd3, 0x17, 0x1f, 0xc1, 0xa1, 0xed, 0x05, 0xb7, 0x13, 0xe2, 0x0e, 0xaf, 0x2f, 0x26, 0xa3, - 0x3b, 0xd7, 0x41, 0x7f, 0xe1, 0x7d, 0xd0, 0x6c, 0x2f, 0xb0, 0xdd, 0x8b, 0xd1, 0x04, 0x29, 0xf8, - 0x6f, 0xd0, 0x6d, 0x2f, 0x18, 0x5e, 0x8f, 0xc7, 0x23, 0x1f, 0xd5, 0xf0, 0x21, 0x18, 0xb6, 0x17, - 0x90, 0x6b, 0xcf, 0xb3, 0xcf, 0x87, 0x57, 0x48, 0xc5, 0x27, 0xd0, 0xb2, 0xbd, 0xc0, 0x19, 0x7b, - 0x81, 0xe3, 0xde, 0x10, 0x77, 0x78, 0xee, 0xbb, 0x0e, 0xaa, 0x63, 0x80, 0x66, 0x9e, 0x76, 0x3c, - 0xd4, 0x28, 0xe3, 0xa9, 0xeb, 0xa3, 0x66, 0xd9, 0x6e, 0x34, 0x99, 0xba, 0xc4, 0x47, 0x7b, 0x25, - 0xbc, 0xbd, 0x71, 0xce, 0x7d, 0x17, 0x69, 0x25, 0x74, 0x5c, 0xcf, 0xf5, 0x5d, 0xa4, 0x5f, 0xd6, - 0xb5, 0x1a, 0x52, 0x2f, 0xeb, 0x9a, 0x8a, 0xea, 0xbd, 0x4f, 0x0a, 0x9c, 0x4c, 0x45, 0x4a, 0xc3, - 0xc7, 0x2b, 0x9a, 0x91, 0x30, 0x9e, 0x53, 0x42, 0x9f, 0x9e, 0x29, 0x17, 0xb8, 0x03, 0x5a, 0xc2, - 0xf8, 0x22, 0xf7, 0x4e, 0x1a, 0xac, 0x93, 0x0a, 0xe3, 0x01, 0xe8, 0x4b, 0x9a, 0x05, 0x69, 0xce, - 0x2f, 0x0d, 0xc3, 0x66, 0xb5, 0x90, 0x55, 0x27, 0x6d, 0x59, 0x46, 0xbb, 0xfe, 0xaa, 0xbf, 0xf6, - 0xb7, 0xf7, 0x01, 0x4e, 0x5f, 0x0f, 0xc5, 0x13, 0x16, 0x73, 0x8a, 0x3d, 0xc0, 0x45, 0x61, 0x20, - 0xb6, 0x6f, 0x2b, 0xe7, 0x33, 0xac, 0x7f, 0x7e, 0xba, 0x00, 0xa4, 0x35, 0x7b, 0x9d, 0xea, 0x7d, - 0x84, 0xa3, 0xe2, 0x1e, 0x3f, 0x9c, 0xad, 0x28, 0x7f, 0x8b, 0xf4, 0x53, 0x68, 0x0a, 0x49, 0x6e, - 0xd7, 0xba, 0x6a, 0x5f, 0x27, 0x25, 0xfa, 0x5d, 0x85, 0xf7, 0x70, 0xfc, 0xf2, 0xe6, 0x3f, 0xa2, - 0xef, 0x8b, 0x02, 0xfb, 0x05, 0x71, 0xca, 0x9e, 0xd3, 0x88, 0xe6, 0xca, 0x96, 0x34, 0xe3, 0x49, - 0x18, 0xd1, 0x8d, 0xb2, 0x0d, 0xc6, 0xc7, 0xd0, 0xe0, 0x0f, 0x61, 0x7a, 0x2f, 0x1f, 0x54, 0x27, - 0x05, 0xc0, 0xef, 0xc1, 0x90, 0x0a, 0x45, 0x20, 0xb2, 0x84, 0x4a, 0x6d, 0x07, 0xd6, 0xf1, 0xf6, - 0xb1, 0xe5, 0xfc, 0xc2, 0xcf, 0x12, 0x4a, 0x40, 0x54, 0xf1, 0xcb, 0x0d, 0xa9, 0xbf, 0x61, 0x43, - 0xb6, 0xbe, 0x36, 0x76, 0x7d, 0xb5, 0xff, 0xbd, 0x3b, 0x5b, 0x2f, 0x04, 0xe5, 0xdc, 0x5c, 0xb0, - 0x41, 0x11, 0x0d, 0xe6, 0x6c, 0xb0, 0x16, 0x03, 0xf9, 0xef, 0x0d, 0xb6, 0x96, 0xcc, 0x9a, 0x32, - 0xf3, 0xee, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x08, 0xae, 0x13, 0x46, 0x05, 0x00, 0x00, +var fileDescriptor_binlogdata_6d214635eb8c538c = []byte{ + // 1184 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x5b, 0x6e, 0xdb, 0x56, + 0x13, 0x8e, 0x44, 0x8a, 0x12, 0x87, 0x8e, 0x4d, 0x1f, 0x5f, 0x7e, 0xc1, 0xf8, 0x03, 0x18, 0x44, + 0xdb, 0xb8, 0x06, 0x2a, 0xa7, 0xea, 0xed, 0xa9, 0x2d, 0x2c, 0x91, 0x71, 0x95, 0xd0, 0x92, 0x73, + 0xcc, 0x24, 0x45, 0x5e, 0x08, 0x9a, 0x3c, 0xb2, 0x09, 0x53, 0xa4, 0x4c, 0x1e, 0xcb, 0xd5, 0x0a, + 0xba, 0x80, 0xbe, 0x76, 0x03, 0xed, 0x42, 0xba, 0x92, 0x76, 0x1f, 0xc5, 0xb9, 0x90, 0x92, 0x1c, + 0xa0, 0x71, 0x1f, 0xfa, 0x36, 0xf7, 0x33, 0xf3, 0xcd, 0x70, 0x86, 0x60, 0x5e, 0xc4, 0x69, 0x92, + 0x5d, 0x46, 0x01, 0x0d, 0x3a, 0xd3, 0x3c, 0xa3, 0x19, 0x82, 0x85, 0x64, 0xcf, 0x98, 0xd1, 0x7c, + 0x1a, 0x0a, 0xc5, 0x9e, 0x71, 0x73, 0x4b, 0xf2, 0xb9, 0x64, 0xd6, 0x69, 0x36, 0xcd, 0x16, 0x5e, + 0xd6, 0x29, 0x34, 0xfb, 0x57, 0x41, 0x5e, 0x10, 0x8a, 0x76, 0x41, 0x0b, 0x93, 0x98, 
0xa4, 0xb4, + 0x5d, 0xdb, 0xaf, 0x1d, 0x34, 0xb0, 0xe4, 0x10, 0x02, 0x35, 0xcc, 0xd2, 0xb4, 0x5d, 0xe7, 0x52, + 0x4e, 0x33, 0xdb, 0x82, 0xe4, 0x33, 0x92, 0xb7, 0x15, 0x61, 0x2b, 0x38, 0xeb, 0x2f, 0x05, 0x36, + 0x7b, 0x3c, 0x0f, 0x2f, 0x0f, 0xd2, 0x22, 0x08, 0x69, 0x9c, 0xa5, 0xe8, 0x04, 0xa0, 0xa0, 0x01, + 0x25, 0x13, 0x92, 0xd2, 0xa2, 0x5d, 0xdb, 0x57, 0x0e, 0x8c, 0xee, 0xd3, 0xce, 0x52, 0x05, 0xef, + 0xb9, 0x74, 0xce, 0x4b, 0x7b, 0xbc, 0xe4, 0x8a, 0xba, 0x60, 0x90, 0x19, 0x49, 0xa9, 0x4f, 0xb3, + 0x6b, 0x92, 0xb6, 0xd5, 0xfd, 0xda, 0x81, 0xd1, 0xdd, 0xec, 0x88, 0x02, 0x1d, 0xa6, 0xf1, 0x98, + 0x02, 0x03, 0xa9, 0xe8, 0xbd, 0x3f, 0xea, 0xa0, 0x57, 0xd1, 0x90, 0x0b, 0xad, 0x30, 0xa0, 0xe4, + 0x32, 0xcb, 0xe7, 0xbc, 0xcc, 0xf5, 0xee, 0xb3, 0x07, 0x26, 0xd2, 0xe9, 0x4b, 0x3f, 0x5c, 0x45, + 0x40, 0x9f, 0x41, 0x33, 0x14, 0xe8, 0x71, 0x74, 0x8c, 0xee, 0xd6, 0x72, 0x30, 0x09, 0x2c, 0x2e, + 0x6d, 0x90, 0x09, 0x4a, 0x71, 0x93, 0x70, 0xc8, 0xd6, 0x30, 0x23, 0xad, 0xdf, 0x6a, 0xd0, 0x2a, + 0xe3, 0xa2, 0x2d, 0xd8, 0xe8, 0xb9, 0xfe, 0xeb, 0x21, 0x76, 0xfa, 0xa3, 0x93, 0xe1, 0xe0, 0x9d, + 0x63, 0x9b, 0x8f, 0xd0, 0x1a, 0xb4, 0x7a, 0xae, 0xdf, 0x73, 0x4e, 0x06, 0x43, 0xb3, 0x86, 0x1e, + 0x83, 0xde, 0x73, 0xfd, 0xfe, 0xe8, 0xf4, 0x74, 0xe0, 0x99, 0x75, 0xb4, 0x01, 0x46, 0xcf, 0xf5, + 0xf1, 0xc8, 0x75, 0x7b, 0xc7, 0xfd, 0x97, 0xa6, 0x82, 0x76, 0x60, 0xb3, 0xe7, 0xfa, 0xf6, 0xa9, + 0xeb, 0xdb, 0xce, 0x19, 0x76, 0xfa, 0xc7, 0x9e, 0x63, 0x9b, 0x2a, 0x02, 0xd0, 0x98, 0xd8, 0x76, + 0xcd, 0x86, 0xa4, 0xcf, 0x1d, 0xcf, 0xd4, 0x64, 0xb8, 0xc1, 0xf0, 0xdc, 0xc1, 0x9e, 0xd9, 0x94, + 0xec, 0xeb, 0x33, 0xfb, 0xd8, 0x73, 0xcc, 0x96, 0x64, 0x6d, 0xc7, 0x75, 0x3c, 0xc7, 0xd4, 0x5f, + 0xa8, 0xad, 0xba, 0xa9, 0xbc, 0x50, 0x5b, 0x8a, 0xa9, 0x5a, 0xbf, 0xd4, 0x60, 0xe7, 0x9c, 0xe6, + 0x24, 0x98, 0xbc, 0x24, 0x73, 0x1c, 0xa4, 0x97, 0x04, 0x93, 0x9b, 0x5b, 0x52, 0x50, 0xb4, 0x07, + 0xad, 0x69, 0x56, 0xc4, 0x0c, 0x3b, 0x0e, 0xb0, 0x8e, 0x2b, 0x1e, 0x1d, 0x81, 0x7e, 0x4d, 0xe6, + 0x7e, 0xce, 0xec, 0x25, 0x60, 0xa8, 0x53, 0x0d, 0x64, 0x15, 0xa9, 0x75, 0x2d, 0xa9, 0x65, 0x7c, + 0x95, 0x0f, 0xe3, 0x6b, 0x8d, 0x61, 0xf7, 0x7e, 0x52, 0xc5, 0x34, 0x4b, 0x0b, 0x82, 0x5c, 0x40, + 0xc2, 0xd1, 0xa7, 0x8b, 0xde, 0xf2, 0xfc, 0x8c, 0xee, 0x93, 0x7f, 0x1c, 0x00, 0xbc, 0x79, 0x71, + 0x5f, 0x64, 0xfd, 0x04, 0x5b, 0xe2, 0x1d, 0x2f, 0xb8, 0x48, 0x48, 0xf1, 0x90, 0xd2, 0x77, 0x41, + 0xa3, 0xdc, 0xb8, 0x5d, 0xdf, 0x57, 0x0e, 0x74, 0x2c, 0xb9, 0x7f, 0x5b, 0x61, 0x04, 0xdb, 0xab, + 0x2f, 0xff, 0x27, 0xf5, 0x7d, 0x09, 0x2a, 0xbe, 0x4d, 0x08, 0xda, 0x86, 0xc6, 0x24, 0xa0, 0xe1, + 0x95, 0xac, 0x46, 0x30, 0xac, 0x94, 0x71, 0x9c, 0x50, 0x92, 0xf3, 0x16, 0xea, 0x58, 0x72, 0xd6, + 0x33, 0xd0, 0x9e, 0x73, 0x0a, 0x7d, 0x02, 0x8d, 0xfc, 0x96, 0xd5, 0x2a, 0x3e, 0x75, 0x73, 0x39, + 0x01, 0x16, 0x18, 0x0b, 0xb5, 0xf5, 0x6b, 0x1d, 0xd6, 0x44, 0x42, 0xe7, 0xd9, 0x6d, 0x1e, 0x12, + 0x86, 0xe0, 0x35, 0x99, 0x17, 0xd3, 0x20, 0x24, 0x25, 0x82, 0x25, 0xcf, 0x92, 0x29, 0xae, 0x82, + 0x3c, 0x92, 0xaf, 0x0a, 0x06, 0x7d, 0x05, 0x06, 0x47, 0x92, 0xfa, 0x74, 0x3e, 0x25, 0x1c, 0xc3, + 0xf5, 0xee, 0xf6, 0x62, 0xa8, 0x38, 0x4e, 0xd4, 0x9b, 0x4f, 0x09, 0x06, 0x5a, 0xd1, 0xab, 0x93, + 0xa8, 0x3e, 0x60, 0x12, 0x17, 0xfd, 0x6b, 0xac, 0xf4, 0xef, 0xb0, 0x02, 0x43, 0x93, 0x51, 0x96, + 0x6a, 0x15, 0x70, 0x94, 0x00, 0xa1, 0x0e, 0x68, 0x59, 0xea, 0x47, 0x51, 0xd2, 0x6e, 0xf2, 0x34, + 0xff, 0xb7, 0x6c, 0x3b, 0x4a, 0x6d, 0xdb, 0x3d, 0x16, 0x2d, 0x69, 0x64, 0xa9, 0x1d, 0x25, 0xd6, + 0x2b, 0xd0, 0x71, 0x76, 0xd7, 0xbf, 0xe2, 0x09, 0x58, 0xa0, 0x5d, 0x90, 0x71, 0x96, 0x13, 0xd9, + 0x55, 0x90, 
0x5b, 0x0f, 0x67, 0x77, 0x58, 0x6a, 0xd0, 0x3e, 0x34, 0x82, 0x71, 0xd9, 0x98, 0x55, + 0x13, 0xa1, 0xb0, 0x02, 0x68, 0xe1, 0xec, 0x8e, 0x6f, 0x4a, 0xf4, 0x04, 0x04, 0x22, 0x7e, 0x1a, + 0x4c, 0x4a, 0xb8, 0x75, 0x2e, 0x19, 0x06, 0x13, 0x82, 0xbe, 0x06, 0x23, 0xcf, 0xee, 0xfc, 0x90, + 0x3f, 0x2f, 0xc6, 0xd6, 0xe8, 0xee, 0xac, 0xb4, 0xb2, 0x4c, 0x0e, 0x43, 0x5e, 0x92, 0x85, 0xf5, + 0x0a, 0xe0, 0x79, 0x4c, 0x92, 0xe8, 0x41, 0x8f, 0x7c, 0xc4, 0xe0, 0x23, 0x49, 0x54, 0xc6, 0x5f, + 0x93, 0x29, 0xf3, 0x08, 0x58, 0xea, 0xac, 0x3f, 0x6b, 0xa0, 0xbd, 0x11, 0xf1, 0x0e, 0x41, 0xe5, + 0x8d, 0x16, 0xbb, 0x7b, 0x77, 0x39, 0x1d, 0x61, 0xc1, 0x5b, 0xcd, 0x6d, 0xd0, 0xff, 0x41, 0xa7, + 0xf1, 0x84, 0x14, 0x34, 0x98, 0x4c, 0x39, 0x24, 0x0a, 0x5e, 0x08, 0xd8, 0x59, 0xbb, 0xa4, 0x71, + 0xc4, 0x47, 0x46, 0xc7, 0x9c, 0x66, 0x0b, 0x9a, 0xb5, 0x47, 0xe5, 0x22, 0x46, 0xa2, 0xcf, 0x41, + 0x67, 0x28, 0xf0, 0x7b, 0xd2, 0x6e, 0x70, 0x58, 0xb7, 0xef, 0x61, 0xc0, 0x9f, 0xc5, 0xad, 0xbc, + 0xc4, 0xf5, 0x1b, 0x30, 0x78, 0xde, 0xd2, 0x49, 0xcc, 0xc5, 0xee, 0xea, 0x5c, 0x94, 0xf8, 0x60, + 0x18, 0x57, 0xb4, 0xf5, 0x73, 0x1d, 0xd6, 0xdf, 0x88, 0xcf, 0xbb, 0x5c, 0x29, 0xdf, 0xc3, 0x16, + 0x19, 0x8f, 0x49, 0x48, 0xe3, 0x19, 0xf1, 0xc3, 0x20, 0x49, 0x48, 0xee, 0xc7, 0x91, 0x1c, 0x81, + 0x8d, 0x8e, 0x38, 0xf3, 0x7d, 0x2e, 0x1f, 0xd8, 0x78, 0xb3, 0xb2, 0x95, 0xa2, 0x08, 0x39, 0xb0, + 0x15, 0x4f, 0x26, 0x24, 0x8a, 0x03, 0xba, 0x1c, 0x40, 0x0c, 0xc8, 0x8e, 0x44, 0xfb, 0x8d, 0x77, + 0x12, 0x50, 0xb2, 0x08, 0x53, 0x79, 0x54, 0x61, 0x3e, 0x66, 0xe3, 0x9f, 0x5f, 0x56, 0x5b, 0xea, + 0xb1, 0xf4, 0xf4, 0xb8, 0x10, 0x4b, 0xe5, 0xca, 0x06, 0x54, 0xef, 0x6d, 0xc0, 0xc5, 0x97, 0xd2, + 0xf8, 0xd0, 0x97, 0x62, 0x7d, 0x0b, 0x1b, 0x15, 0x10, 0x72, 0xc3, 0x1d, 0x82, 0xc6, 0xf1, 0x2c, + 0x97, 0x0a, 0x7a, 0xbf, 0xf5, 0x58, 0x5a, 0x1c, 0x7e, 0x07, 0xc6, 0xd2, 0xe7, 0xc4, 0x2e, 0xde, + 0xe0, 0x64, 0x38, 0xc2, 0x8e, 0xf9, 0x08, 0xb5, 0x40, 0x3d, 0xf7, 0x46, 0x67, 0x66, 0x8d, 0x51, + 0xce, 0x8f, 0x4e, 0x5f, 0x5c, 0x51, 0x46, 0xf9, 0xd2, 0x48, 0x39, 0xfc, 0xbd, 0x06, 0xb0, 0x98, + 0x26, 0x64, 0x40, 0xf3, 0xf5, 0xf0, 0xe5, 0x70, 0xf4, 0x76, 0x28, 0x02, 0x9c, 0x78, 0x03, 0xdb, + 0xac, 0x21, 0x1d, 0x1a, 0xe2, 0x2c, 0xd7, 0xd9, 0x0b, 0xf2, 0x26, 0x2b, 0xec, 0x60, 0x57, 0x07, + 0x59, 0x45, 0x4d, 0x50, 0xaa, 0xb3, 0x2b, 0xef, 0xac, 0xc6, 0x02, 0x62, 0xe7, 0xcc, 0x3d, 0xee, + 0x3b, 0x66, 0x93, 0x29, 0xaa, 0x8b, 0x0b, 0xa0, 0x95, 0xe7, 0x96, 0x79, 0xb2, 0x23, 0x0d, 0xec, + 0x9d, 0x91, 0xf7, 0x83, 0x83, 0x4d, 0x83, 0xc9, 0xf0, 0xe8, 0xad, 0xb9, 0xc6, 0x64, 0xcf, 0x07, + 0x8e, 0x6b, 0x9b, 0x8f, 0x7b, 0x9f, 0xbe, 0x7b, 0x3a, 0x8b, 0x29, 0x29, 0x8a, 0x4e, 0x9c, 0x1d, + 0x09, 0xea, 0xe8, 0x32, 0x3b, 0x9a, 0xd1, 0x23, 0xfe, 0x87, 0x77, 0xb4, 0x80, 0xe9, 0x42, 0xe3, + 0x92, 0x2f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x68, 0xbd, 0x20, 0x05, 0x3d, 0x0a, 0x00, 0x00, } diff --git a/go/vt/proto/binlogservice/binlogservice.pb.go b/go/vt/proto/binlogservice/binlogservice.pb.go index 164dddef66..790a92f4a2 100644 --- a/go/vt/proto/binlogservice/binlogservice.pb.go +++ b/go/vt/proto/binlogservice/binlogservice.pb.go @@ -191,9 +191,9 @@ var _UpdateStream_serviceDesc = grpc.ServiceDesc{ Metadata: "binlogservice.proto", } -func init() { proto.RegisterFile("binlogservice.proto", fileDescriptor_binlogservice_0e1eb8b2f97a2dc1) } +func init() { proto.RegisterFile("binlogservice.proto", fileDescriptor_binlogservice_bfebf84e565603b8) } -var fileDescriptor_binlogservice_0e1eb8b2f97a2dc1 = []byte{ +var fileDescriptor_binlogservice_bfebf84e565603b8 = []byte{ // 177 bytes of a gzipped 
FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xca, 0xcc, 0xcb, 0xc9, 0x4f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index 7b1c092c26..f120217326 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ -49,7 +49,7 @@ func (x Level) String() string { return proto.EnumName(Level_name, int32(x)) } func (Level) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_logutil_39c26af5691dd7cd, []int{0} + return fileDescriptor_logutil_1922c06158165cc5, []int{0} } // Time represents a time stamp in nanoseconds. In go, use logutil library @@ -66,7 +66,7 @@ func (m *Time) Reset() { *m = Time{} } func (m *Time) String() string { return proto.CompactTextString(m) } func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_logutil_39c26af5691dd7cd, []int{0} + return fileDescriptor_logutil_1922c06158165cc5, []int{0} } func (m *Time) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Time.Unmarshal(m, b) @@ -116,7 +116,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_logutil_39c26af5691dd7cd, []int{1} + return fileDescriptor_logutil_1922c06158165cc5, []int{1} } func (m *Event) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Event.Unmarshal(m, b) @@ -177,9 +177,9 @@ func init() { proto.RegisterEnum("logutil.Level", Level_name, Level_value) } -func init() { proto.RegisterFile("logutil.proto", fileDescriptor_logutil_39c26af5691dd7cd) } +func init() { proto.RegisterFile("logutil.proto", fileDescriptor_logutil_1922c06158165cc5) } -var fileDescriptor_logutil_39c26af5691dd7cd = []byte{ +var fileDescriptor_logutil_1922c06158165cc5 = []byte{ // 260 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xfb, 0x40, 0x10, 0xc5, 0xff, 0xdb, 0x64, 0xff, 0xb1, 0x13, 0x5a, 0xc2, 0xe0, 0x21, 0xc7, 0x58, 0x8a, 0x04, diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index f499dce3cd..b75568fd4b 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -34,7 +34,7 @@ func (m *StartRequest) Reset() { *m = StartRequest{} } func (m *StartRequest) String() string { return proto.CompactTextString(m) } func (*StartRequest) ProtoMessage() {} func (*StartRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{0} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{0} } func (m *StartRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StartRequest.Unmarshal(m, b) @@ -71,7 +71,7 @@ func (m *StartResponse) Reset() { *m = StartResponse{} } func (m *StartResponse) String() string { return proto.CompactTextString(m) } func (*StartResponse) ProtoMessage() {} func (*StartResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{1} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{1} } func (m *StartResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StartResponse.Unmarshal(m, b) @@ -102,7 +102,7 @@ func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) } func 
(*ShutdownRequest) ProtoMessage() {} func (*ShutdownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{2} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{2} } func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShutdownRequest.Unmarshal(m, b) @@ -139,7 +139,7 @@ func (m *ShutdownResponse) Reset() { *m = ShutdownResponse{} } func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) } func (*ShutdownResponse) ProtoMessage() {} func (*ShutdownResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{3} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{3} } func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShutdownResponse.Unmarshal(m, b) @@ -169,7 +169,7 @@ func (m *RunMysqlUpgradeRequest) Reset() { *m = RunMysqlUpgradeRequest{} func (m *RunMysqlUpgradeRequest) String() string { return proto.CompactTextString(m) } func (*RunMysqlUpgradeRequest) ProtoMessage() {} func (*RunMysqlUpgradeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{4} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{4} } func (m *RunMysqlUpgradeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RunMysqlUpgradeRequest.Unmarshal(m, b) @@ -199,7 +199,7 @@ func (m *RunMysqlUpgradeResponse) Reset() { *m = RunMysqlUpgradeResponse func (m *RunMysqlUpgradeResponse) String() string { return proto.CompactTextString(m) } func (*RunMysqlUpgradeResponse) ProtoMessage() {} func (*RunMysqlUpgradeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{5} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{5} } func (m *RunMysqlUpgradeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RunMysqlUpgradeResponse.Unmarshal(m, b) @@ -229,7 +229,7 @@ func (m *ReinitConfigRequest) Reset() { *m = ReinitConfigRequest{} } func (m *ReinitConfigRequest) String() string { return proto.CompactTextString(m) } func (*ReinitConfigRequest) ProtoMessage() {} func (*ReinitConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{6} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{6} } func (m *ReinitConfigRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReinitConfigRequest.Unmarshal(m, b) @@ -259,7 +259,7 @@ func (m *ReinitConfigResponse) Reset() { *m = ReinitConfigResponse{} } func (m *ReinitConfigResponse) String() string { return proto.CompactTextString(m) } func (*ReinitConfigResponse) ProtoMessage() {} func (*ReinitConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{7} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{7} } func (m *ReinitConfigResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReinitConfigResponse.Unmarshal(m, b) @@ -289,7 +289,7 @@ func (m *RefreshConfigRequest) Reset() { *m = RefreshConfigRequest{} } func (m *RefreshConfigRequest) String() string { return proto.CompactTextString(m) } func (*RefreshConfigRequest) ProtoMessage() {} func (*RefreshConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{8} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{8} } func (m *RefreshConfigRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefreshConfigRequest.Unmarshal(m, b) @@ -319,7 +319,7 @@ func (m *RefreshConfigResponse) Reset() { *m 
= RefreshConfigResponse{} } func (m *RefreshConfigResponse) String() string { return proto.CompactTextString(m) } func (*RefreshConfigResponse) ProtoMessage() {} func (*RefreshConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{9} + return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{9} } func (m *RefreshConfigResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefreshConfigResponse.Unmarshal(m, b) @@ -556,9 +556,9 @@ var _MysqlCtl_serviceDesc = grpc.ServiceDesc{ Metadata: "mysqlctl.proto", } -func init() { proto.RegisterFile("mysqlctl.proto", fileDescriptor_mysqlctl_dc3430948664e7fa) } +func init() { proto.RegisterFile("mysqlctl.proto", fileDescriptor_mysqlctl_6cf72a3618d6fe7c) } -var fileDescriptor_mysqlctl_dc3430948664e7fa = []byte{ +var fileDescriptor_mysqlctl_6cf72a3618d6fe7c = []byte{ // 339 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x4d, 0x4f, 0xfa, 0x30, 0x1c, 0xc7, 0xff, 0x84, 0xfc, 0xcd, 0xfc, 0x09, 0xce, 0x54, 0x79, 0x6a, 0xa2, 0xe0, 0x12, 0x95, diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go index 483b516fef..34f04da29f 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -95,7 +95,7 @@ func (x MySqlFlag) String() string { return proto.EnumName(MySqlFlag_name, int32(x)) } func (MySqlFlag) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{0} + return fileDescriptor_query_b0bca749772b6512, []int{0} } // Flag allows us to qualify types by their common properties. @@ -134,7 +134,7 @@ func (x Flag) String() string { return proto.EnumName(Flag_name, int32(x)) } func (Flag) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{1} + return fileDescriptor_query_b0bca749772b6512, []int{1} } // Type defines the various supported data types in bind vars @@ -315,7 +315,7 @@ func (x Type) String() string { return proto.EnumName(Type_name, int32(x)) } func (Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{2} + return fileDescriptor_query_b0bca749772b6512, []int{2} } // TransactionState represents the state of a distributed transaction. 
@@ -345,7 +345,7 @@ func (x TransactionState) String() string {
 	return proto.EnumName(TransactionState_name, int32(x))
 }
 func (TransactionState) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_query_9111254583ad7475, []int{3}
+	return fileDescriptor_query_b0bca749772b6512, []int{3}
 }
 
 type ExecuteOptions_IncludedFields int32
@@ -371,7 +371,7 @@ func (x ExecuteOptions_IncludedFields) String() string {
 	return proto.EnumName(ExecuteOptions_IncludedFields_name, int32(x))
 }
 func (ExecuteOptions_IncludedFields) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_query_9111254583ad7475, []int{6, 0}
+	return fileDescriptor_query_b0bca749772b6512, []int{6, 0}
 }
 
 type ExecuteOptions_Workload int32
@@ -400,7 +400,7 @@ func (x ExecuteOptions_Workload) String() string {
 	return proto.EnumName(ExecuteOptions_Workload_name, int32(x))
 }
 func (ExecuteOptions_Workload) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_query_9111254583ad7475, []int{6, 1}
+	return fileDescriptor_query_b0bca749772b6512, []int{6, 1}
 }
 
 type ExecuteOptions_TransactionIsolation int32
@@ -411,6 +411,9 @@ const (
 	ExecuteOptions_READ_COMMITTED   ExecuteOptions_TransactionIsolation = 2
 	ExecuteOptions_READ_UNCOMMITTED ExecuteOptions_TransactionIsolation = 3
 	ExecuteOptions_SERIALIZABLE     ExecuteOptions_TransactionIsolation = 4
+	// This is not an "official" transaction level but it will do a
+	// START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY
+	ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY ExecuteOptions_TransactionIsolation = 5
 )
 
 var ExecuteOptions_TransactionIsolation_name = map[int32]string{
@@ -419,20 +422,22 @@ var ExecuteOptions_TransactionIsolation_name = map[int32]string{
 	2: "READ_COMMITTED",
 	3: "READ_UNCOMMITTED",
 	4: "SERIALIZABLE",
+	5: "CONSISTENT_SNAPSHOT_READ_ONLY",
 }
 var ExecuteOptions_TransactionIsolation_value = map[string]int32{
-	"DEFAULT":          0,
-	"REPEATABLE_READ":  1,
-	"READ_COMMITTED":   2,
-	"READ_UNCOMMITTED": 3,
-	"SERIALIZABLE":     4,
+	"DEFAULT":                       0,
+	"REPEATABLE_READ":               1,
+	"READ_COMMITTED":                2,
+	"READ_UNCOMMITTED":              3,
+	"SERIALIZABLE":                  4,
+	"CONSISTENT_SNAPSHOT_READ_ONLY": 5,
 }
 
 func (x ExecuteOptions_TransactionIsolation) String() string {
 	return proto.EnumName(ExecuteOptions_TransactionIsolation_name, int32(x))
 }
 func (ExecuteOptions_TransactionIsolation) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_query_9111254583ad7475, []int{6, 2}
+	return fileDescriptor_query_b0bca749772b6512, []int{6, 2}
 }
 
 // The category of one statement.
@@ -459,7 +464,7 @@ func (x StreamEvent_Statement_Category) String() string {
 	return proto.EnumName(StreamEvent_Statement_Category_name, int32(x))
 }
 func (StreamEvent_Statement_Category) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_query_9111254583ad7475, []int{12, 0, 0}
+	return fileDescriptor_query_b0bca749772b6512, []int{12, 0, 0}
 }
 
 type SplitQueryRequest_Algorithm int32
@@ -482,7 +487,7 @@ func (x SplitQueryRequest_Algorithm) String() string {
 	return proto.EnumName(SplitQueryRequest_Algorithm_name, int32(x))
 }
 func (SplitQueryRequest_Algorithm) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_query_9111254583ad7475, []int{50, 0}
+	return fileDescriptor_query_b0bca749772b6512, []int{50, 0}
 }
 
 // Target describes what the client expects the tablet is.
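The added `ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY` value lets callers request `START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY` semantics through the regular `ExecuteOptions` message. A minimal sketch of how client code could set it, assuming the standard Vitess import path and that `ExecuteOptions` carries its usual `transaction_isolation` field (the surrounding session handling is not part of this diff):

```go
package main

import (
	"fmt"

	// Assumed import path for the generated package shown in this diff.
	querypb "vitess.io/vitess/go/vt/proto/query"
)

func main() {
	// Ask for the new isolation level via the generated ExecuteOptions message.
	// Per the comment added above, this is meant to map to
	// START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY on the MySQL side.
	opts := &querypb.ExecuteOptions{
		TransactionIsolation: querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY,
	}

	// The generated enum implements Stringer, so this prints
	// "CONSISTENT_SNAPSHOT_READ_ONLY".
	fmt.Println(opts.GetTransactionIsolation())
}
```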
@@ -503,7 +508,7 @@ func (m *Target) Reset() { *m = Target{} } func (m *Target) String() string { return proto.CompactTextString(m) } func (*Target) ProtoMessage() {} func (*Target) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{0} + return fileDescriptor_query_b0bca749772b6512, []int{0} } func (m *Target) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Target.Unmarshal(m, b) @@ -571,7 +576,7 @@ func (m *VTGateCallerID) Reset() { *m = VTGateCallerID{} } func (m *VTGateCallerID) String() string { return proto.CompactTextString(m) } func (*VTGateCallerID) ProtoMessage() {} func (*VTGateCallerID) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{1} + return fileDescriptor_query_b0bca749772b6512, []int{1} } func (m *VTGateCallerID) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VTGateCallerID.Unmarshal(m, b) @@ -627,7 +632,7 @@ func (m *EventToken) Reset() { *m = EventToken{} } func (m *EventToken) String() string { return proto.CompactTextString(m) } func (*EventToken) ProtoMessage() {} func (*EventToken) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{2} + return fileDescriptor_query_b0bca749772b6512, []int{2} } func (m *EventToken) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EventToken.Unmarshal(m, b) @@ -681,7 +686,7 @@ func (m *Value) Reset() { *m = Value{} } func (m *Value) String() string { return proto.CompactTextString(m) } func (*Value) ProtoMessage() {} func (*Value) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{3} + return fileDescriptor_query_b0bca749772b6512, []int{3} } func (m *Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Value.Unmarshal(m, b) @@ -730,7 +735,7 @@ func (m *BindVariable) Reset() { *m = BindVariable{} } func (m *BindVariable) String() string { return proto.CompactTextString(m) } func (*BindVariable) ProtoMessage() {} func (*BindVariable) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{4} + return fileDescriptor_query_b0bca749772b6512, []int{4} } func (m *BindVariable) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BindVariable.Unmarshal(m, b) @@ -787,7 +792,7 @@ func (m *BoundQuery) Reset() { *m = BoundQuery{} } func (m *BoundQuery) String() string { return proto.CompactTextString(m) } func (*BoundQuery) ProtoMessage() {} func (*BoundQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{5} + return fileDescriptor_query_b0bca749772b6512, []int{5} } func (m *BoundQuery) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BoundQuery.Unmarshal(m, b) @@ -861,7 +866,7 @@ func (m *ExecuteOptions) Reset() { *m = ExecuteOptions{} } func (m *ExecuteOptions) String() string { return proto.CompactTextString(m) } func (*ExecuteOptions) ProtoMessage() {} func (*ExecuteOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{6} + return fileDescriptor_query_b0bca749772b6512, []int{6} } func (m *ExecuteOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteOptions.Unmarshal(m, b) @@ -967,7 +972,7 @@ func (m *Field) Reset() { *m = Field{} } func (m *Field) String() string { return proto.CompactTextString(m) } func (*Field) ProtoMessage() {} func (*Field) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{7} + return fileDescriptor_query_b0bca749772b6512, []int{7} } func (m *Field) XXX_Unmarshal(b []byte) error { return 
xxx_messageInfo_Field.Unmarshal(m, b) @@ -1075,7 +1080,7 @@ func (m *Row) Reset() { *m = Row{} } func (m *Row) String() string { return proto.CompactTextString(m) } func (*Row) ProtoMessage() {} func (*Row) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{8} + return fileDescriptor_query_b0bca749772b6512, []int{8} } func (m *Row) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Row.Unmarshal(m, b) @@ -1127,7 +1132,7 @@ func (m *ResultExtras) Reset() { *m = ResultExtras{} } func (m *ResultExtras) String() string { return proto.CompactTextString(m) } func (*ResultExtras) ProtoMessage() {} func (*ResultExtras) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{9} + return fileDescriptor_query_b0bca749772b6512, []int{9} } func (m *ResultExtras) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResultExtras.Unmarshal(m, b) @@ -1185,7 +1190,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} } func (m *QueryResult) String() string { return proto.CompactTextString(m) } func (*QueryResult) ProtoMessage() {} func (*QueryResult) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{10} + return fileDescriptor_query_b0bca749772b6512, []int{10} } func (m *QueryResult) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_QueryResult.Unmarshal(m, b) @@ -1254,7 +1259,7 @@ func (m *QueryWarning) Reset() { *m = QueryWarning{} } func (m *QueryWarning) String() string { return proto.CompactTextString(m) } func (*QueryWarning) ProtoMessage() {} func (*QueryWarning) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{11} + return fileDescriptor_query_b0bca749772b6512, []int{11} } func (m *QueryWarning) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_QueryWarning.Unmarshal(m, b) @@ -1305,7 +1310,7 @@ func (m *StreamEvent) Reset() { *m = StreamEvent{} } func (m *StreamEvent) String() string { return proto.CompactTextString(m) } func (*StreamEvent) ProtoMessage() {} func (*StreamEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{12} + return fileDescriptor_query_b0bca749772b6512, []int{12} } func (m *StreamEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamEvent.Unmarshal(m, b) @@ -1358,7 +1363,7 @@ func (m *StreamEvent_Statement) Reset() { *m = StreamEvent_Statement{} } func (m *StreamEvent_Statement) String() string { return proto.CompactTextString(m) } func (*StreamEvent_Statement) ProtoMessage() {} func (*StreamEvent_Statement) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{12, 0} + return fileDescriptor_query_b0bca749772b6512, []int{12, 0} } func (m *StreamEvent_Statement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamEvent_Statement.Unmarshal(m, b) @@ -1430,7 +1435,7 @@ func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteRequest) ProtoMessage() {} func (*ExecuteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{13} + return fileDescriptor_query_b0bca749772b6512, []int{13} } func (m *ExecuteRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteRequest.Unmarshal(m, b) @@ -1504,7 +1509,7 @@ func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteResponse) ProtoMessage() {} func 
(*ExecuteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{14} + return fileDescriptor_query_b0bca749772b6512, []int{14} } func (m *ExecuteResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteResponse.Unmarshal(m, b) @@ -1548,7 +1553,7 @@ func (m *ResultWithError) Reset() { *m = ResultWithError{} } func (m *ResultWithError) String() string { return proto.CompactTextString(m) } func (*ResultWithError) ProtoMessage() {} func (*ResultWithError) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{15} + return fileDescriptor_query_b0bca749772b6512, []int{15} } func (m *ResultWithError) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResultWithError.Unmarshal(m, b) @@ -1600,7 +1605,7 @@ func (m *ExecuteBatchRequest) Reset() { *m = ExecuteBatchRequest{} } func (m *ExecuteBatchRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteBatchRequest) ProtoMessage() {} func (*ExecuteBatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{16} + return fileDescriptor_query_b0bca749772b6512, []int{16} } func (m *ExecuteBatchRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteBatchRequest.Unmarshal(m, b) @@ -1681,7 +1686,7 @@ func (m *ExecuteBatchResponse) Reset() { *m = ExecuteBatchResponse{} } func (m *ExecuteBatchResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteBatchResponse) ProtoMessage() {} func (*ExecuteBatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{17} + return fileDescriptor_query_b0bca749772b6512, []int{17} } func (m *ExecuteBatchResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteBatchResponse.Unmarshal(m, b) @@ -1715,6 +1720,7 @@ type StreamExecuteRequest struct { Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` Query *BoundQuery `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` Options *ExecuteOptions `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"` + TransactionId int64 `protobuf:"varint,6,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1724,7 +1730,7 @@ func (m *StreamExecuteRequest) Reset() { *m = StreamExecuteRequest{} } func (m *StreamExecuteRequest) String() string { return proto.CompactTextString(m) } func (*StreamExecuteRequest) ProtoMessage() {} func (*StreamExecuteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{18} + return fileDescriptor_query_b0bca749772b6512, []int{18} } func (m *StreamExecuteRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteRequest.Unmarshal(m, b) @@ -1779,6 +1785,13 @@ func (m *StreamExecuteRequest) GetOptions() *ExecuteOptions { return nil } +func (m *StreamExecuteRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + // StreamExecuteResponse is the returned value from StreamExecute type StreamExecuteResponse struct { Result *QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` @@ -1791,7 +1804,7 @@ func (m *StreamExecuteResponse) Reset() { *m = StreamExecuteResponse{} } func (m *StreamExecuteResponse) String() string { return proto.CompactTextString(m) } func (*StreamExecuteResponse) ProtoMessage() {} func 
(*StreamExecuteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{19} + return fileDescriptor_query_b0bca749772b6512, []int{19} } func (m *StreamExecuteResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteResponse.Unmarshal(m, b) @@ -1833,7 +1846,7 @@ func (m *BeginRequest) Reset() { *m = BeginRequest{} } func (m *BeginRequest) String() string { return proto.CompactTextString(m) } func (*BeginRequest) ProtoMessage() {} func (*BeginRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{20} + return fileDescriptor_query_b0bca749772b6512, []int{20} } func (m *BeginRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BeginRequest.Unmarshal(m, b) @@ -1893,7 +1906,7 @@ func (m *BeginResponse) Reset() { *m = BeginResponse{} } func (m *BeginResponse) String() string { return proto.CompactTextString(m) } func (*BeginResponse) ProtoMessage() {} func (*BeginResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{21} + return fileDescriptor_query_b0bca749772b6512, []int{21} } func (m *BeginResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BeginResponse.Unmarshal(m, b) @@ -1935,7 +1948,7 @@ func (m *CommitRequest) Reset() { *m = CommitRequest{} } func (m *CommitRequest) String() string { return proto.CompactTextString(m) } func (*CommitRequest) ProtoMessage() {} func (*CommitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{22} + return fileDescriptor_query_b0bca749772b6512, []int{22} } func (m *CommitRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CommitRequest.Unmarshal(m, b) @@ -1994,7 +2007,7 @@ func (m *CommitResponse) Reset() { *m = CommitResponse{} } func (m *CommitResponse) String() string { return proto.CompactTextString(m) } func (*CommitResponse) ProtoMessage() {} func (*CommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{23} + return fileDescriptor_query_b0bca749772b6512, []int{23} } func (m *CommitResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CommitResponse.Unmarshal(m, b) @@ -2029,7 +2042,7 @@ func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } func (*RollbackRequest) ProtoMessage() {} func (*RollbackRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{24} + return fileDescriptor_query_b0bca749772b6512, []int{24} } func (m *RollbackRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RollbackRequest.Unmarshal(m, b) @@ -2088,7 +2101,7 @@ func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } func (*RollbackResponse) ProtoMessage() {} func (*RollbackResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{25} + return fileDescriptor_query_b0bca749772b6512, []int{25} } func (m *RollbackResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RollbackResponse.Unmarshal(m, b) @@ -2124,7 +2137,7 @@ func (m *PrepareRequest) Reset() { *m = PrepareRequest{} } func (m *PrepareRequest) String() string { return proto.CompactTextString(m) } func (*PrepareRequest) ProtoMessage() {} func (*PrepareRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{26} + return 
fileDescriptor_query_b0bca749772b6512, []int{26} } func (m *PrepareRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PrepareRequest.Unmarshal(m, b) @@ -2190,7 +2203,7 @@ func (m *PrepareResponse) Reset() { *m = PrepareResponse{} } func (m *PrepareResponse) String() string { return proto.CompactTextString(m) } func (*PrepareResponse) ProtoMessage() {} func (*PrepareResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{27} + return fileDescriptor_query_b0bca749772b6512, []int{27} } func (m *PrepareResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PrepareResponse.Unmarshal(m, b) @@ -2225,7 +2238,7 @@ func (m *CommitPreparedRequest) Reset() { *m = CommitPreparedRequest{} } func (m *CommitPreparedRequest) String() string { return proto.CompactTextString(m) } func (*CommitPreparedRequest) ProtoMessage() {} func (*CommitPreparedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{28} + return fileDescriptor_query_b0bca749772b6512, []int{28} } func (m *CommitPreparedRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CommitPreparedRequest.Unmarshal(m, b) @@ -2284,7 +2297,7 @@ func (m *CommitPreparedResponse) Reset() { *m = CommitPreparedResponse{} func (m *CommitPreparedResponse) String() string { return proto.CompactTextString(m) } func (*CommitPreparedResponse) ProtoMessage() {} func (*CommitPreparedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{29} + return fileDescriptor_query_b0bca749772b6512, []int{29} } func (m *CommitPreparedResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CommitPreparedResponse.Unmarshal(m, b) @@ -2320,7 +2333,7 @@ func (m *RollbackPreparedRequest) Reset() { *m = RollbackPreparedRequest func (m *RollbackPreparedRequest) String() string { return proto.CompactTextString(m) } func (*RollbackPreparedRequest) ProtoMessage() {} func (*RollbackPreparedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{30} + return fileDescriptor_query_b0bca749772b6512, []int{30} } func (m *RollbackPreparedRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RollbackPreparedRequest.Unmarshal(m, b) @@ -2386,7 +2399,7 @@ func (m *RollbackPreparedResponse) Reset() { *m = RollbackPreparedRespon func (m *RollbackPreparedResponse) String() string { return proto.CompactTextString(m) } func (*RollbackPreparedResponse) ProtoMessage() {} func (*RollbackPreparedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{31} + return fileDescriptor_query_b0bca749772b6512, []int{31} } func (m *RollbackPreparedResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RollbackPreparedResponse.Unmarshal(m, b) @@ -2422,7 +2435,7 @@ func (m *CreateTransactionRequest) Reset() { *m = CreateTransactionReque func (m *CreateTransactionRequest) String() string { return proto.CompactTextString(m) } func (*CreateTransactionRequest) ProtoMessage() {} func (*CreateTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{32} + return fileDescriptor_query_b0bca749772b6512, []int{32} } func (m *CreateTransactionRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateTransactionRequest.Unmarshal(m, b) @@ -2488,7 +2501,7 @@ func (m *CreateTransactionResponse) Reset() { *m = CreateTransactionResp func (m *CreateTransactionResponse) String() string { return proto.CompactTextString(m) } func 
(*CreateTransactionResponse) ProtoMessage() {} func (*CreateTransactionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{33} + return fileDescriptor_query_b0bca749772b6512, []int{33} } func (m *CreateTransactionResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateTransactionResponse.Unmarshal(m, b) @@ -2524,7 +2537,7 @@ func (m *StartCommitRequest) Reset() { *m = StartCommitRequest{} } func (m *StartCommitRequest) String() string { return proto.CompactTextString(m) } func (*StartCommitRequest) ProtoMessage() {} func (*StartCommitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{34} + return fileDescriptor_query_b0bca749772b6512, []int{34} } func (m *StartCommitRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StartCommitRequest.Unmarshal(m, b) @@ -2590,7 +2603,7 @@ func (m *StartCommitResponse) Reset() { *m = StartCommitResponse{} } func (m *StartCommitResponse) String() string { return proto.CompactTextString(m) } func (*StartCommitResponse) ProtoMessage() {} func (*StartCommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{35} + return fileDescriptor_query_b0bca749772b6512, []int{35} } func (m *StartCommitResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StartCommitResponse.Unmarshal(m, b) @@ -2626,7 +2639,7 @@ func (m *SetRollbackRequest) Reset() { *m = SetRollbackRequest{} } func (m *SetRollbackRequest) String() string { return proto.CompactTextString(m) } func (*SetRollbackRequest) ProtoMessage() {} func (*SetRollbackRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{36} + return fileDescriptor_query_b0bca749772b6512, []int{36} } func (m *SetRollbackRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetRollbackRequest.Unmarshal(m, b) @@ -2692,7 +2705,7 @@ func (m *SetRollbackResponse) Reset() { *m = SetRollbackResponse{} } func (m *SetRollbackResponse) String() string { return proto.CompactTextString(m) } func (*SetRollbackResponse) ProtoMessage() {} func (*SetRollbackResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{37} + return fileDescriptor_query_b0bca749772b6512, []int{37} } func (m *SetRollbackResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetRollbackResponse.Unmarshal(m, b) @@ -2727,7 +2740,7 @@ func (m *ConcludeTransactionRequest) Reset() { *m = ConcludeTransactionR func (m *ConcludeTransactionRequest) String() string { return proto.CompactTextString(m) } func (*ConcludeTransactionRequest) ProtoMessage() {} func (*ConcludeTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{38} + return fileDescriptor_query_b0bca749772b6512, []int{38} } func (m *ConcludeTransactionRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ConcludeTransactionRequest.Unmarshal(m, b) @@ -2786,7 +2799,7 @@ func (m *ConcludeTransactionResponse) Reset() { *m = ConcludeTransaction func (m *ConcludeTransactionResponse) String() string { return proto.CompactTextString(m) } func (*ConcludeTransactionResponse) ProtoMessage() {} func (*ConcludeTransactionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{39} + return fileDescriptor_query_b0bca749772b6512, []int{39} } func (m *ConcludeTransactionResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ConcludeTransactionResponse.Unmarshal(m, b) @@ 
-2821,7 +2834,7 @@ func (m *ReadTransactionRequest) Reset() { *m = ReadTransactionRequest{} func (m *ReadTransactionRequest) String() string { return proto.CompactTextString(m) } func (*ReadTransactionRequest) ProtoMessage() {} func (*ReadTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{40} + return fileDescriptor_query_b0bca749772b6512, []int{40} } func (m *ReadTransactionRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadTransactionRequest.Unmarshal(m, b) @@ -2881,7 +2894,7 @@ func (m *ReadTransactionResponse) Reset() { *m = ReadTransactionResponse func (m *ReadTransactionResponse) String() string { return proto.CompactTextString(m) } func (*ReadTransactionResponse) ProtoMessage() {} func (*ReadTransactionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{41} + return fileDescriptor_query_b0bca749772b6512, []int{41} } func (m *ReadTransactionResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadTransactionResponse.Unmarshal(m, b) @@ -2924,7 +2937,7 @@ func (m *BeginExecuteRequest) Reset() { *m = BeginExecuteRequest{} } func (m *BeginExecuteRequest) String() string { return proto.CompactTextString(m) } func (*BeginExecuteRequest) ProtoMessage() {} func (*BeginExecuteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{42} + return fileDescriptor_query_b0bca749772b6512, []int{42} } func (m *BeginExecuteRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BeginExecuteRequest.Unmarshal(m, b) @@ -2997,7 +3010,7 @@ func (m *BeginExecuteResponse) Reset() { *m = BeginExecuteResponse{} } func (m *BeginExecuteResponse) String() string { return proto.CompactTextString(m) } func (*BeginExecuteResponse) ProtoMessage() {} func (*BeginExecuteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{43} + return fileDescriptor_query_b0bca749772b6512, []int{43} } func (m *BeginExecuteResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BeginExecuteResponse.Unmarshal(m, b) @@ -3055,7 +3068,7 @@ func (m *BeginExecuteBatchRequest) Reset() { *m = BeginExecuteBatchReque func (m *BeginExecuteBatchRequest) String() string { return proto.CompactTextString(m) } func (*BeginExecuteBatchRequest) ProtoMessage() {} func (*BeginExecuteBatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{44} + return fileDescriptor_query_b0bca749772b6512, []int{44} } func (m *BeginExecuteBatchRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BeginExecuteBatchRequest.Unmarshal(m, b) @@ -3135,7 +3148,7 @@ func (m *BeginExecuteBatchResponse) Reset() { *m = BeginExecuteBatchResp func (m *BeginExecuteBatchResponse) String() string { return proto.CompactTextString(m) } func (*BeginExecuteBatchResponse) ProtoMessage() {} func (*BeginExecuteBatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{45} + return fileDescriptor_query_b0bca749772b6512, []int{45} } func (m *BeginExecuteBatchResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BeginExecuteBatchResponse.Unmarshal(m, b) @@ -3192,7 +3205,7 @@ func (m *MessageStreamRequest) Reset() { *m = MessageStreamRequest{} } func (m *MessageStreamRequest) String() string { return proto.CompactTextString(m) } func (*MessageStreamRequest) ProtoMessage() {} func (*MessageStreamRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_query_9111254583ad7475, []int{46} + return fileDescriptor_query_b0bca749772b6512, []int{46} } func (m *MessageStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MessageStreamRequest.Unmarshal(m, b) @@ -3252,7 +3265,7 @@ func (m *MessageStreamResponse) Reset() { *m = MessageStreamResponse{} } func (m *MessageStreamResponse) String() string { return proto.CompactTextString(m) } func (*MessageStreamResponse) ProtoMessage() {} func (*MessageStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{47} + return fileDescriptor_query_b0bca749772b6512, []int{47} } func (m *MessageStreamResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MessageStreamResponse.Unmarshal(m, b) @@ -3296,7 +3309,7 @@ func (m *MessageAckRequest) Reset() { *m = MessageAckRequest{} } func (m *MessageAckRequest) String() string { return proto.CompactTextString(m) } func (*MessageAckRequest) ProtoMessage() {} func (*MessageAckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{48} + return fileDescriptor_query_b0bca749772b6512, []int{48} } func (m *MessageAckRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MessageAckRequest.Unmarshal(m, b) @@ -3366,7 +3379,7 @@ func (m *MessageAckResponse) Reset() { *m = MessageAckResponse{} } func (m *MessageAckResponse) String() string { return proto.CompactTextString(m) } func (*MessageAckResponse) ProtoMessage() {} func (*MessageAckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{49} + return fileDescriptor_query_b0bca749772b6512, []int{49} } func (m *MessageAckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MessageAckResponse.Unmarshal(m, b) @@ -3414,7 +3427,7 @@ func (m *SplitQueryRequest) Reset() { *m = SplitQueryRequest{} } func (m *SplitQueryRequest) String() string { return proto.CompactTextString(m) } func (*SplitQueryRequest) ProtoMessage() {} func (*SplitQueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{50} + return fileDescriptor_query_b0bca749772b6512, []int{50} } func (m *SplitQueryRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SplitQueryRequest.Unmarshal(m, b) @@ -3505,7 +3518,7 @@ func (m *QuerySplit) Reset() { *m = QuerySplit{} } func (m *QuerySplit) String() string { return proto.CompactTextString(m) } func (*QuerySplit) ProtoMessage() {} func (*QuerySplit) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{51} + return fileDescriptor_query_b0bca749772b6512, []int{51} } func (m *QuerySplit) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_QuerySplit.Unmarshal(m, b) @@ -3552,7 +3565,7 @@ func (m *SplitQueryResponse) Reset() { *m = SplitQueryResponse{} } func (m *SplitQueryResponse) String() string { return proto.CompactTextString(m) } func (*SplitQueryResponse) ProtoMessage() {} func (*SplitQueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{52} + return fileDescriptor_query_b0bca749772b6512, []int{52} } func (m *SplitQueryResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SplitQueryResponse.Unmarshal(m, b) @@ -3590,7 +3603,7 @@ func (m *StreamHealthRequest) Reset() { *m = StreamHealthRequest{} } func (m *StreamHealthRequest) String() string { return proto.CompactTextString(m) } func (*StreamHealthRequest) ProtoMessage() {} func (*StreamHealthRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_query_9111254583ad7475, []int{53} + return fileDescriptor_query_b0bca749772b6512, []int{53} } func (m *StreamHealthRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamHealthRequest.Unmarshal(m, b) @@ -3649,7 +3662,7 @@ func (m *RealtimeStats) Reset() { *m = RealtimeStats{} } func (m *RealtimeStats) String() string { return proto.CompactTextString(m) } func (*RealtimeStats) ProtoMessage() {} func (*RealtimeStats) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{54} + return fileDescriptor_query_b0bca749772b6512, []int{54} } func (m *RealtimeStats) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RealtimeStats.Unmarshal(m, b) @@ -3737,7 +3750,7 @@ func (m *AggregateStats) Reset() { *m = AggregateStats{} } func (m *AggregateStats) String() string { return proto.CompactTextString(m) } func (*AggregateStats) ProtoMessage() {} func (*AggregateStats) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{55} + return fileDescriptor_query_b0bca749772b6512, []int{55} } func (m *AggregateStats) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AggregateStats.Unmarshal(m, b) @@ -3849,7 +3862,7 @@ func (m *StreamHealthResponse) Reset() { *m = StreamHealthResponse{} } func (m *StreamHealthResponse) String() string { return proto.CompactTextString(m) } func (*StreamHealthResponse) ProtoMessage() {} func (*StreamHealthResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{56} + return fileDescriptor_query_b0bca749772b6512, []int{56} } func (m *StreamHealthResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamHealthResponse.Unmarshal(m, b) @@ -3933,7 +3946,7 @@ func (m *UpdateStreamRequest) Reset() { *m = UpdateStreamRequest{} } func (m *UpdateStreamRequest) String() string { return proto.CompactTextString(m) } func (*UpdateStreamRequest) ProtoMessage() {} func (*UpdateStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{57} + return fileDescriptor_query_b0bca749772b6512, []int{57} } func (m *UpdateStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpdateStreamRequest.Unmarshal(m, b) @@ -4000,7 +4013,7 @@ func (m *UpdateStreamResponse) Reset() { *m = UpdateStreamResponse{} } func (m *UpdateStreamResponse) String() string { return proto.CompactTextString(m) } func (*UpdateStreamResponse) ProtoMessage() {} func (*UpdateStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{58} + return fileDescriptor_query_b0bca749772b6512, []int{58} } func (m *UpdateStreamResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpdateStreamResponse.Unmarshal(m, b) @@ -4042,7 +4055,7 @@ func (m *TransactionMetadata) Reset() { *m = TransactionMetadata{} } func (m *TransactionMetadata) String() string { return proto.CompactTextString(m) } func (*TransactionMetadata) ProtoMessage() {} func (*TransactionMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_query_9111254583ad7475, []int{59} + return fileDescriptor_query_b0bca749772b6512, []int{59} } func (m *TransactionMetadata) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TransactionMetadata.Unmarshal(m, b) @@ -4164,210 +4177,212 @@ func init() { proto.RegisterEnum("query.SplitQueryRequest_Algorithm", SplitQueryRequest_Algorithm_name, SplitQueryRequest_Algorithm_value) } -func init() { proto.RegisterFile("query.proto", fileDescriptor_query_9111254583ad7475) } +func init() { 
proto.RegisterFile("query.proto", fileDescriptor_query_b0bca749772b6512) } -var fileDescriptor_query_9111254583ad7475 = []byte{ - // 3231 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4b, 0x73, 0x1b, 0xc7, - 0x76, 0xd6, 0xe0, 0x41, 0x02, 0x07, 0x04, 0xd8, 0x6c, 0x90, 0x12, 0x44, 0xf9, 0xda, 0xcc, 0xdc, - 0xab, 0x7b, 0x19, 0xde, 0x1b, 0x4a, 0xa6, 0x64, 0x45, 0xb1, 0x1d, 0x47, 0x43, 0x70, 0x28, 0xc3, - 0xc2, 0x4b, 0x8d, 0x81, 0x64, 0xa9, 0x5c, 0x35, 0x35, 0x04, 0x5a, 0xe0, 0x14, 0x07, 0x33, 0xd0, - 0xcc, 0x80, 0x14, 0x77, 0x4a, 0x1c, 0xe7, 0xfd, 0x70, 0x9e, 0x8e, 0x93, 0x8a, 0x93, 0xaa, 0xec, - 0xf3, 0x1b, 0x52, 0xf9, 0x01, 0xd9, 0x65, 0x91, 0x64, 0x91, 0x45, 0x2a, 0x95, 0x45, 0xaa, 0x5c, - 0x59, 0x65, 0x91, 0x45, 0x2a, 0xd5, 0x8f, 0x19, 0x0c, 0x48, 0xe8, 0x61, 0xe5, 0x6e, 0x28, 0x7b, - 0xd7, 0x7d, 0xce, 0xe9, 0xc7, 0xf7, 0x9d, 0x33, 0xa7, 0x7b, 0xba, 0x1b, 0x0a, 0x8f, 0xc7, 0xd4, - 0x3f, 0xde, 0x1c, 0xf9, 0x5e, 0xe8, 0xe1, 0x2c, 0xaf, 0xac, 0x96, 0x42, 0x6f, 0xe4, 0xf5, 0xad, - 0xd0, 0x12, 0xe2, 0xd5, 0xc2, 0x61, 0xe8, 0x8f, 0x7a, 0xa2, 0xa2, 0x7e, 0xa6, 0xc0, 0x9c, 0x61, - 0xf9, 0x03, 0x1a, 0xe2, 0x55, 0xc8, 0x1d, 0xd0, 0xe3, 0x60, 0x64, 0xf5, 0x68, 0x45, 0x59, 0x53, - 0xd6, 0xf3, 0x24, 0xae, 0xe3, 0x65, 0xc8, 0x06, 0xfb, 0x96, 0xdf, 0xaf, 0xa4, 0xb8, 0x42, 0x54, - 0xf0, 0x3b, 0x50, 0x08, 0xad, 0x3d, 0x87, 0x86, 0x66, 0x78, 0x3c, 0xa2, 0x95, 0xf4, 0x9a, 0xb2, - 0x5e, 0xda, 0x5a, 0xde, 0x8c, 0xc7, 0x33, 0xb8, 0xd2, 0x38, 0x1e, 0x51, 0x02, 0x61, 0x5c, 0xc6, - 0x18, 0x32, 0x3d, 0xea, 0x38, 0x95, 0x0c, 0xef, 0x8b, 0x97, 0xd5, 0x1d, 0x28, 0xdd, 0x33, 0x6e, - 0x5b, 0x21, 0xad, 0x5a, 0x8e, 0x43, 0xfd, 0xda, 0x0e, 0x9b, 0xce, 0x38, 0xa0, 0xbe, 0x6b, 0x0d, - 0xe3, 0xe9, 0x44, 0x75, 0x7c, 0x1e, 0xe6, 0x06, 0xbe, 0x37, 0x1e, 0x05, 0x95, 0xd4, 0x5a, 0x7a, - 0x3d, 0x4f, 0x64, 0x4d, 0xfd, 0x04, 0x40, 0x3f, 0xa4, 0x6e, 0x68, 0x78, 0x07, 0xd4, 0xc5, 0x6f, - 0x40, 0x3e, 0xb4, 0x87, 0x34, 0x08, 0xad, 0xe1, 0x88, 0x77, 0x91, 0x26, 0x13, 0xc1, 0x33, 0x20, - 0xad, 0x42, 0x6e, 0xe4, 0x05, 0x76, 0x68, 0x7b, 0x2e, 0xc7, 0x93, 0x27, 0x71, 0x5d, 0xfd, 0x00, - 0xb2, 0xf7, 0x2c, 0x67, 0x4c, 0xf1, 0x5b, 0x90, 0xe1, 0x80, 0x15, 0x0e, 0xb8, 0xb0, 0x29, 0x48, - 0xe7, 0x38, 0xb9, 0x82, 0xf5, 0x7d, 0xc8, 0x2c, 0x79, 0xdf, 0x0b, 0x44, 0x54, 0xd4, 0x03, 0x58, - 0xd8, 0xb6, 0xdd, 0xfe, 0x3d, 0xcb, 0xb7, 0x19, 0x19, 0xaf, 0xd8, 0x0d, 0xfe, 0x01, 0xcc, 0xf1, - 0x42, 0x50, 0x49, 0xaf, 0xa5, 0xd7, 0x0b, 0x5b, 0x0b, 0xb2, 0x21, 0x9f, 0x1b, 0x91, 0x3a, 0xf5, - 0xef, 0x15, 0x80, 0x6d, 0x6f, 0xec, 0xf6, 0xef, 0x32, 0x25, 0x46, 0x90, 0x0e, 0x1e, 0x3b, 0x92, - 0x48, 0x56, 0xc4, 0x77, 0xa0, 0xb4, 0x67, 0xbb, 0x7d, 0xf3, 0x50, 0x4e, 0x47, 0x70, 0x59, 0xd8, - 0xfa, 0x81, 0xec, 0x6e, 0xd2, 0x78, 0x33, 0x39, 0xeb, 0x40, 0x77, 0x43, 0xff, 0x98, 0x14, 0xf7, - 0x92, 0xb2, 0xd5, 0x2e, 0xe0, 0xd3, 0x46, 0x6c, 0xd0, 0x03, 0x7a, 0x1c, 0x0d, 0x7a, 0x40, 0x8f, - 0xf1, 0xcf, 0x26, 0x11, 0x15, 0xb6, 0xca, 0xd1, 0x58, 0x89, 0xb6, 0x12, 0xe6, 0xbb, 0xa9, 0x9b, - 0x8a, 0xfa, 0x9f, 0x59, 0x28, 0xe9, 0x4f, 0x68, 0x6f, 0x1c, 0xd2, 0xd6, 0x88, 0xf9, 0x20, 0xc0, - 0x9b, 0x50, 0xb6, 0xdd, 0x9e, 0x33, 0xee, 0x53, 0x93, 0x32, 0x57, 0x9b, 0x21, 0xf3, 0x35, 0xef, - 0x2f, 0x47, 0x96, 0xa4, 0x2a, 0x11, 0x04, 0x1a, 0x94, 0x7b, 0xde, 0x70, 0x64, 0xf9, 0xd3, 0xf6, - 0x69, 0x3e, 0xfe, 0x92, 0x1c, 0x7f, 0x62, 0x4f, 0x96, 0xa4, 0x75, 0xa2, 0x8b, 0x06, 0x2c, 0xca, - 0x7e, 0xfb, 0xe6, 0x23, 0x9b, 0x3a, 0xfd, 0x80, 0x87, 0x6e, 0x29, 0xa6, 0x6a, 0x7a, 0x8a, 0x9b, - 0x35, 0x69, 0xbc, 0xcb, 0x6d, 0x49, 0xc9, 
0x9e, 0xaa, 0xe3, 0x0d, 0x58, 0xea, 0x39, 0x36, 0x9b, - 0xca, 0x23, 0x46, 0xb1, 0xe9, 0x7b, 0x47, 0x41, 0x25, 0xcb, 0xe7, 0xbf, 0x28, 0x14, 0xbb, 0x4c, - 0x4e, 0xbc, 0xa3, 0x00, 0xbf, 0x0b, 0xb9, 0x23, 0xcf, 0x3f, 0x70, 0x3c, 0xab, 0x5f, 0x99, 0xe3, - 0x63, 0xbe, 0x39, 0x7b, 0xcc, 0xfb, 0xd2, 0x8a, 0xc4, 0xf6, 0x78, 0x1d, 0x50, 0xf0, 0xd8, 0x31, - 0x03, 0xea, 0xd0, 0x5e, 0x68, 0x3a, 0xf6, 0xd0, 0x0e, 0x2b, 0x39, 0xfe, 0x15, 0x94, 0x82, 0xc7, - 0x4e, 0x87, 0x8b, 0xeb, 0x4c, 0x8a, 0x4d, 0x58, 0x09, 0x7d, 0xcb, 0x0d, 0xac, 0x1e, 0xeb, 0xcc, - 0xb4, 0x03, 0xcf, 0xb1, 0xf8, 0x17, 0x90, 0xe7, 0x43, 0x6e, 0xcc, 0x1e, 0xd2, 0x98, 0x34, 0xa9, - 0x45, 0x2d, 0xc8, 0x72, 0x38, 0x43, 0x8a, 0xdf, 0x86, 0x95, 0xe0, 0xc0, 0x1e, 0x99, 0xbc, 0x1f, - 0x73, 0xe4, 0x58, 0xae, 0xd9, 0xb3, 0x7a, 0xfb, 0xb4, 0x02, 0x1c, 0x36, 0x66, 0x4a, 0x1e, 0x6a, - 0x6d, 0xc7, 0x72, 0xab, 0x4c, 0xa3, 0xbe, 0x07, 0xa5, 0x69, 0x1e, 0xf1, 0x12, 0x14, 0x8d, 0x07, - 0x6d, 0xdd, 0xd4, 0x9a, 0x3b, 0x66, 0x53, 0x6b, 0xe8, 0xe8, 0x1c, 0x2e, 0x42, 0x9e, 0x8b, 0x5a, - 0xcd, 0xfa, 0x03, 0xa4, 0xe0, 0x79, 0x48, 0x6b, 0xf5, 0x3a, 0x4a, 0xa9, 0x37, 0x21, 0x17, 0x11, - 0x82, 0x17, 0xa1, 0xd0, 0x6d, 0x76, 0xda, 0x7a, 0xb5, 0xb6, 0x5b, 0xd3, 0x77, 0xd0, 0x39, 0x9c, - 0x83, 0x4c, 0xab, 0x6e, 0xb4, 0x91, 0x22, 0x4a, 0x5a, 0x1b, 0xa5, 0x58, 0xcb, 0x9d, 0x6d, 0x0d, - 0xa5, 0xd5, 0x10, 0x96, 0x67, 0xe1, 0xc2, 0x05, 0x98, 0xdf, 0xd1, 0x77, 0xb5, 0x6e, 0xdd, 0x40, - 0xe7, 0x70, 0x19, 0x16, 0x89, 0xde, 0xd6, 0x35, 0x43, 0xdb, 0xae, 0xeb, 0x26, 0xd1, 0xb5, 0x1d, - 0xa4, 0x60, 0x0c, 0x25, 0x56, 0x32, 0xab, 0xad, 0x46, 0xa3, 0x66, 0x18, 0xfa, 0x0e, 0x4a, 0xe1, - 0x65, 0x40, 0x5c, 0xd6, 0x6d, 0x4e, 0xa4, 0x69, 0x8c, 0x60, 0xa1, 0xa3, 0x93, 0x9a, 0x56, 0xaf, - 0x3d, 0x64, 0x1d, 0xa0, 0xcc, 0x47, 0x99, 0x9c, 0x82, 0x52, 0xea, 0x17, 0x29, 0xc8, 0x72, 0xac, - 0x2c, 0x43, 0x26, 0xf2, 0x1e, 0x2f, 0xc7, 0xd9, 0x22, 0xf5, 0x9c, 0x6c, 0xc1, 0x93, 0xac, 0xcc, - 0x5b, 0xa2, 0x82, 0x2f, 0x41, 0xde, 0xf3, 0x07, 0xa6, 0xd0, 0x88, 0x8c, 0x9b, 0xf3, 0xfc, 0x01, - 0x4f, 0xcd, 0x2c, 0xdb, 0xb1, 0x44, 0xbd, 0x67, 0x05, 0x94, 0x47, 0x60, 0x9e, 0xc4, 0x75, 0x7c, - 0x11, 0x98, 0x9d, 0xc9, 0xe7, 0x31, 0xc7, 0x75, 0xf3, 0x9e, 0x3f, 0x68, 0xb2, 0xa9, 0x7c, 0x1f, - 0x8a, 0x3d, 0xcf, 0x19, 0x0f, 0x5d, 0xd3, 0xa1, 0xee, 0x20, 0xdc, 0xaf, 0xcc, 0xaf, 0x29, 0xeb, - 0x45, 0xb2, 0x20, 0x84, 0x75, 0x2e, 0xc3, 0x15, 0x98, 0xef, 0xed, 0x5b, 0x7e, 0x40, 0x45, 0xd4, - 0x15, 0x49, 0x54, 0xe5, 0xa3, 0xd2, 0x9e, 0x3d, 0xb4, 0x9c, 0x80, 0x47, 0x58, 0x91, 0xc4, 0x75, - 0x06, 0xe2, 0x91, 0x63, 0x0d, 0x02, 0x1e, 0x19, 0x45, 0x22, 0x2a, 0xea, 0xcf, 0x43, 0x9a, 0x78, - 0x47, 0xac, 0x4b, 0x31, 0x60, 0x50, 0x51, 0xd6, 0xd2, 0xeb, 0x98, 0x44, 0x55, 0xb6, 0x20, 0xc8, - 0x9c, 0x28, 0x52, 0x65, 0x94, 0x05, 0x3f, 0x81, 0x05, 0x42, 0x83, 0xb1, 0x13, 0xea, 0x4f, 0x42, - 0xdf, 0x0a, 0xf0, 0x16, 0x14, 0x92, 0x59, 0x40, 0x79, 0x56, 0x16, 0x00, 0x3a, 0xf9, 0xfc, 0x2b, - 0x30, 0xff, 0xc8, 0xa7, 0xc1, 0x3e, 0xf5, 0x65, 0x96, 0x89, 0xaa, 0x2c, 0xc7, 0x16, 0x78, 0xd8, - 0x8a, 0x31, 0x58, 0x66, 0x96, 0xf9, 0x41, 0x99, 0xca, 0xcc, 0xdc, 0xa9, 0x44, 0xea, 0x18, 0x7b, - 0xec, 0x93, 0x37, 0xad, 0x47, 0x8f, 0x68, 0x2f, 0xa4, 0x62, 0x01, 0xca, 0x90, 0x05, 0x26, 0xd4, - 0xa4, 0x8c, 0xb9, 0xcd, 0x76, 0x03, 0xea, 0x87, 0xa6, 0xdd, 0xe7, 0x0e, 0xcd, 0x90, 0x9c, 0x10, - 0xd4, 0xfa, 0xf8, 0x4d, 0xc8, 0xf0, 0xa4, 0x91, 0xe1, 0xa3, 0x80, 0x1c, 0x85, 0x78, 0x47, 0x84, - 0xcb, 0xf1, 0x8f, 0x61, 0x8e, 0x72, 0xbc, 0xdc, 0xa9, 0x93, 0x34, 0x9b, 0xa4, 0x82, 0x48, 0x13, - 0xf5, 0x7d, 0x58, 0xe0, 0x18, 0xee, 0x5b, 0xbe, 0x6b, 0xbb, 0x03, 
0xbe, 0x3a, 0x7b, 0x7d, 0x11, - 0x7b, 0x45, 0xc2, 0xcb, 0x8c, 0x82, 0x21, 0x0d, 0x02, 0x6b, 0x40, 0xe5, 0x6a, 0x19, 0x55, 0xd5, - 0xbf, 0x4e, 0x43, 0xa1, 0x13, 0xfa, 0xd4, 0x1a, 0x72, 0xf6, 0xf0, 0xfb, 0x00, 0x41, 0x68, 0x85, - 0x74, 0x48, 0xdd, 0x30, 0xa2, 0xe1, 0x0d, 0x39, 0x7c, 0xc2, 0x6e, 0xb3, 0x13, 0x19, 0x91, 0x84, - 0xfd, 0x49, 0xf7, 0xa4, 0x5e, 0xc2, 0x3d, 0xab, 0x5f, 0xa5, 0x20, 0x1f, 0xf7, 0x86, 0x35, 0xc8, - 0xf5, 0xac, 0x90, 0x0e, 0x3c, 0xff, 0x58, 0xae, 0xab, 0x97, 0x9f, 0x37, 0xfa, 0x66, 0x55, 0x1a, - 0x93, 0xb8, 0x19, 0xfe, 0x1e, 0x88, 0xcd, 0x8a, 0x08, 0x7d, 0x81, 0x37, 0xcf, 0x25, 0x3c, 0xf8, - 0xdf, 0x05, 0x3c, 0xf2, 0xed, 0xa1, 0xe5, 0x1f, 0x9b, 0x07, 0xf4, 0x38, 0x5a, 0x10, 0xd2, 0x33, - 0x1c, 0x8e, 0xa4, 0xdd, 0x1d, 0x7a, 0x2c, 0x53, 0xd8, 0xcd, 0xe9, 0xb6, 0x32, 0x64, 0x4f, 0xbb, - 0x31, 0xd1, 0x92, 0xaf, 0xea, 0x41, 0xb4, 0x7e, 0x67, 0x79, 0x74, 0xb3, 0xa2, 0xfa, 0x23, 0xc8, - 0x45, 0x93, 0xc7, 0x79, 0xc8, 0xea, 0xbe, 0xef, 0xf9, 0xe8, 0x1c, 0xcf, 0x64, 0x8d, 0xba, 0x48, - 0x86, 0x3b, 0x3b, 0x2c, 0x19, 0xfe, 0x5d, 0x2a, 0x5e, 0x44, 0x09, 0x7d, 0x3c, 0xa6, 0x41, 0x88, - 0x7f, 0x09, 0xca, 0x94, 0x47, 0x9a, 0x7d, 0x48, 0xcd, 0x1e, 0xdf, 0x71, 0xb1, 0x38, 0x13, 0x9f, - 0xc3, 0xe2, 0xa6, 0xd8, 0x20, 0x46, 0x3b, 0x31, 0xb2, 0x14, 0xdb, 0x4a, 0x51, 0x1f, 0xeb, 0x50, - 0xb6, 0x87, 0x43, 0xda, 0xb7, 0xad, 0x30, 0xd9, 0x81, 0x70, 0xd8, 0x4a, 0xb4, 0x21, 0x99, 0xda, - 0xd0, 0x91, 0xa5, 0xb8, 0x45, 0xdc, 0xcd, 0x65, 0x98, 0x0b, 0xf9, 0xe6, 0x53, 0xae, 0xc7, 0xc5, - 0x28, 0xab, 0x71, 0x21, 0x91, 0x4a, 0xfc, 0x23, 0x10, 0x5b, 0x59, 0x9e, 0xbf, 0x26, 0x01, 0x31, - 0xd9, 0xa1, 0x10, 0xa1, 0xc7, 0x97, 0xa1, 0x34, 0xb5, 0x90, 0xf5, 0x39, 0x61, 0x69, 0x52, 0x4c, - 0xae, 0x4a, 0x7d, 0x7c, 0x05, 0xe6, 0x3d, 0xb1, 0x88, 0xf1, 0xcc, 0x36, 0x99, 0xf1, 0xf4, 0x0a, - 0x47, 0x22, 0x2b, 0xf5, 0x17, 0x61, 0x31, 0x66, 0x30, 0x18, 0x79, 0x6e, 0x40, 0xf1, 0x06, 0xcc, - 0xf9, 0xfc, 0x73, 0x92, 0xac, 0x61, 0xd9, 0x45, 0x22, 0x1f, 0x10, 0x69, 0xa1, 0xf6, 0x61, 0x51, - 0x48, 0xee, 0xdb, 0xe1, 0x3e, 0x77, 0x14, 0xbe, 0x0c, 0x59, 0xca, 0x0a, 0x27, 0x38, 0x27, 0xed, - 0x2a, 0xd7, 0x13, 0xa1, 0x4d, 0x8c, 0x92, 0x7a, 0xe1, 0x28, 0xff, 0x95, 0x82, 0xb2, 0x9c, 0xe5, - 0xb6, 0x15, 0xf6, 0xf6, 0xcf, 0xa8, 0xb3, 0x7f, 0x0c, 0xf3, 0x4c, 0x6e, 0xc7, 0x1f, 0xc6, 0x0c, - 0x77, 0x47, 0x16, 0xcc, 0xe1, 0x56, 0x60, 0x26, 0xbc, 0x2b, 0x37, 0x52, 0x45, 0x2b, 0x48, 0x2c, - 0xe3, 0x33, 0xe2, 0x62, 0xee, 0x05, 0x71, 0x31, 0xff, 0x52, 0x71, 0xb1, 0x03, 0xcb, 0xd3, 0x8c, - 0xcb, 0xe0, 0xf8, 0x09, 0xcc, 0x0b, 0xa7, 0x44, 0x29, 0x70, 0x96, 0xdf, 0x22, 0x13, 0xf5, 0xaf, - 0x52, 0xb0, 0x2c, 0xb3, 0xd3, 0xb7, 0xe3, 0x33, 0x4d, 0xf0, 0x9c, 0x7d, 0x29, 0x9e, 0xab, 0xb0, - 0x72, 0x82, 0xa0, 0x57, 0xf8, 0x0a, 0xbf, 0x56, 0x60, 0x61, 0x9b, 0x0e, 0x6c, 0xf7, 0x8c, 0xd2, - 0x9b, 0x60, 0x2d, 0xf3, 0x52, 0xac, 0xdd, 0x80, 0xa2, 0xc4, 0x2b, 0xd9, 0x3a, 0xfd, 0x19, 0x28, - 0x33, 0x3e, 0x03, 0xf5, 0xdf, 0x15, 0x28, 0x56, 0xbd, 0xe1, 0xd0, 0x0e, 0xcf, 0x28, 0x53, 0xa7, - 0x71, 0x66, 0x66, 0xe1, 0x44, 0x50, 0x8a, 0x60, 0x0a, 0x82, 0xd4, 0xff, 0x50, 0x60, 0x91, 0x78, - 0x8e, 0xb3, 0x67, 0xf5, 0x0e, 0x5e, 0x6f, 0xec, 0x18, 0xd0, 0x04, 0xa8, 0x44, 0xff, 0x3f, 0x0a, - 0x94, 0xda, 0x3e, 0x65, 0x7f, 0xbf, 0xaf, 0x35, 0x78, 0xb6, 0xc5, 0xed, 0x87, 0x72, 0x73, 0x90, - 0x27, 0xbc, 0xac, 0x2e, 0xc1, 0x62, 0x8c, 0x5d, 0xf2, 0xf1, 0xcf, 0x0a, 0xac, 0x88, 0x00, 0x91, - 0x9a, 0xfe, 0x19, 0xa5, 0x25, 0xc2, 0x9b, 0x49, 0xe0, 0xad, 0xc0, 0xf9, 0x93, 0xd8, 0x24, 0xec, - 0x4f, 0x53, 0x70, 0x21, 0x8a, 0x8d, 0x33, 0x0e, 0xfc, 0xff, 0x11, 0x0f, 0xab, 0x50, 0x39, 
0x4d, - 0x82, 0x64, 0xe8, 0xf3, 0x14, 0x54, 0xaa, 0x3e, 0xb5, 0x42, 0x9a, 0xd8, 0x64, 0xbc, 0x3e, 0xb1, - 0x81, 0xdf, 0x86, 0x85, 0x91, 0xe5, 0x87, 0x76, 0xcf, 0x1e, 0x59, 0xec, 0x37, 0x2e, 0xcb, 0xf7, - 0x30, 0x27, 0x3a, 0x98, 0x32, 0x51, 0x2f, 0xc1, 0xc5, 0x19, 0x8c, 0x48, 0xbe, 0xfe, 0x57, 0x01, - 0xdc, 0x09, 0x2d, 0x3f, 0xfc, 0x16, 0xac, 0x2a, 0x33, 0x83, 0x69, 0x05, 0xca, 0x53, 0xf8, 0x93, - 0xbc, 0xd0, 0xf0, 0x5b, 0xb1, 0xe2, 0x3c, 0x93, 0x97, 0x24, 0x7e, 0xc9, 0xcb, 0xbf, 0x2a, 0xb0, - 0x5a, 0xf5, 0xc4, 0xe9, 0xdf, 0x6b, 0xf9, 0x85, 0xa9, 0xdf, 0x83, 0x4b, 0x33, 0x01, 0x4a, 0x02, - 0xfe, 0x45, 0x81, 0xf3, 0x84, 0x5a, 0xfd, 0xd7, 0x13, 0xfc, 0x5d, 0xb8, 0x70, 0x0a, 0x9c, 0xdc, - 0xa1, 0xde, 0x80, 0xdc, 0x90, 0x86, 0x56, 0xdf, 0x0a, 0x2d, 0x09, 0x69, 0x35, 0xea, 0x77, 0x62, - 0xdd, 0x90, 0x16, 0x24, 0xb6, 0x55, 0xbf, 0x4a, 0x41, 0x99, 0xef, 0x75, 0xbf, 0xfb, 0x83, 0x9a, - 0xfd, 0x2f, 0xf0, 0xb9, 0x02, 0xcb, 0xd3, 0x04, 0xc5, 0xff, 0x04, 0x3f, 0xed, 0x83, 0x88, 0x19, - 0x09, 0x21, 0x3d, 0x6b, 0x0b, 0xfa, 0x0f, 0x29, 0xa8, 0x24, 0xa7, 0xf4, 0xdd, 0xa1, 0xc5, 0xf4, - 0xa1, 0xc5, 0x37, 0x3e, 0xa5, 0xfa, 0x42, 0x81, 0x8b, 0x33, 0x08, 0xfd, 0x66, 0x8e, 0x4e, 0x1c, - 0x5d, 0xa4, 0x5e, 0x78, 0x74, 0xf1, 0xb2, 0xae, 0xfe, 0x27, 0x05, 0x96, 0x1b, 0xe2, 0xc4, 0x58, - 0xfc, 0xc7, 0x9f, 0xdd, 0x6c, 0xc6, 0x0f, 0x85, 0x33, 0x93, 0x7b, 0x19, 0xb5, 0x0a, 0x2b, 0x27, - 0xa0, 0xbd, 0xc2, 0xd9, 0xc4, 0x7f, 0x2b, 0xb0, 0x24, 0x7b, 0xd1, 0xce, 0xec, 0x46, 0x60, 0x06, - 0x3b, 0xf8, 0x4d, 0x48, 0xdb, 0xfd, 0x68, 0x07, 0x39, 0x7d, 0x53, 0xcd, 0x14, 0xea, 0x2d, 0xc0, - 0x49, 0xdc, 0xaf, 0x40, 0xdd, 0x3f, 0xa6, 0x61, 0xa9, 0x33, 0x72, 0xec, 0x50, 0x2a, 0x5f, 0xef, - 0xc4, 0xff, 0x33, 0xb0, 0x10, 0x30, 0xb0, 0xa6, 0xb8, 0x6b, 0xe3, 0xc4, 0xe6, 0x49, 0x81, 0xcb, - 0xaa, 0x5c, 0x84, 0xdf, 0x82, 0x42, 0x64, 0x32, 0x76, 0x43, 0x79, 0xd2, 0x09, 0xd2, 0x62, 0xec, - 0x86, 0xf8, 0x3a, 0x5c, 0x70, 0xc7, 0x43, 0x7e, 0xef, 0x6c, 0x8e, 0xa8, 0x1f, 0xdd, 0xca, 0x5a, - 0x7e, 0x74, 0x3f, 0x5c, 0x76, 0xc7, 0x43, 0xe2, 0x1d, 0x05, 0x6d, 0xea, 0x8b, 0x5b, 0x59, 0xcb, - 0x0f, 0xf1, 0x2d, 0xc8, 0x5b, 0xce, 0xc0, 0xf3, 0xed, 0x70, 0x7f, 0x28, 0x2f, 0x86, 0xd5, 0xe8, - 0x6a, 0xe5, 0x24, 0xfd, 0x9b, 0x5a, 0x64, 0x49, 0x26, 0x8d, 0xd4, 0x9f, 0x40, 0x3e, 0x96, 0x63, - 0x04, 0x0b, 0xfa, 0xdd, 0xae, 0x56, 0x37, 0x3b, 0xed, 0x7a, 0xcd, 0xe8, 0x88, 0xcb, 0xdc, 0xdd, - 0x6e, 0xbd, 0x6e, 0x76, 0xaa, 0x5a, 0x13, 0x29, 0x2a, 0x01, 0xe0, 0x5d, 0xf2, 0xce, 0x27, 0x04, - 0x29, 0x2f, 0x20, 0xe8, 0x12, 0xe4, 0x7d, 0xef, 0x48, 0x62, 0x4f, 0x71, 0x38, 0x39, 0xdf, 0x3b, - 0xe2, 0xc8, 0x55, 0x0d, 0x70, 0x72, 0xae, 0x32, 0xda, 0x12, 0xc9, 0x5b, 0x99, 0x4a, 0xde, 0x93, - 0xf1, 0xe3, 0xe4, 0x2d, 0xb6, 0xf2, 0xec, 0x3b, 0xff, 0x90, 0x5a, 0x4e, 0x18, 0xad, 0x57, 0xea, - 0xdf, 0xa4, 0xa0, 0x48, 0x98, 0xc4, 0x1e, 0xd2, 0x4e, 0x68, 0x85, 0x01, 0xf3, 0xd4, 0x3e, 0x37, - 0x31, 0x27, 0x69, 0x37, 0x4f, 0x0a, 0x42, 0x26, 0x2e, 0x01, 0xb6, 0x60, 0x25, 0xa0, 0x3d, 0xcf, - 0xed, 0x07, 0xe6, 0x1e, 0xdd, 0xb7, 0xdd, 0xbe, 0x39, 0xb4, 0x82, 0x50, 0xde, 0x33, 0x16, 0x49, - 0x59, 0x2a, 0xb7, 0xb9, 0xae, 0xc1, 0x55, 0xf8, 0x2a, 0x2c, 0xef, 0xd9, 0xae, 0xe3, 0x0d, 0xcc, - 0x91, 0x63, 0x1d, 0x53, 0x3f, 0x90, 0x50, 0x59, 0x78, 0x65, 0x09, 0x16, 0xba, 0xb6, 0x50, 0x09, - 0x77, 0x3f, 0x84, 0x8d, 0x99, 0xa3, 0x98, 0x8f, 0x6c, 0x27, 0xa4, 0x3e, 0xed, 0x9b, 0x3e, 0x1d, - 0x39, 0x76, 0x4f, 0x5c, 0xf9, 0x8b, 0xbd, 0xfb, 0x0f, 0x67, 0x0c, 0xbd, 0x2b, 0xcd, 0xc9, 0xc4, - 0x9a, 0xb1, 0xdd, 0x1b, 0x8d, 0xcd, 0x31, 0xbf, 0x1a, 0x64, 0xab, 0x98, 0x42, 0x72, 0xbd, 0xd1, - 0xb8, 0xcb, 0xea, 
0x18, 0x41, 0xfa, 0xf1, 0x48, 0x2c, 0x5e, 0x0a, 0x61, 0x45, 0xf5, 0x6b, 0x05, - 0x4a, 0xda, 0x60, 0xe0, 0xd3, 0x81, 0x15, 0x4a, 0x9a, 0xae, 0xc2, 0xb2, 0xa0, 0xe4, 0xd8, 0x94, - 0x6f, 0x89, 0x04, 0x1e, 0x45, 0xe0, 0x91, 0x3a, 0xf1, 0x92, 0x28, 0x0a, 0xdf, 0xf3, 0x63, 0x77, - 0x66, 0x9b, 0x14, 0x6f, 0xb3, 0x1c, 0x6b, 0x93, 0xad, 0x7e, 0x01, 0x2e, 0xce, 0x66, 0x61, 0x68, - 0x8b, 0xd7, 0x20, 0x45, 0x72, 0x7e, 0x06, 0xe8, 0x86, 0xed, 0x3e, 0xa7, 0xa9, 0xf5, 0x84, 0xf3, - 0xf5, 0x8c, 0xa6, 0xd6, 0x13, 0xf5, 0xdf, 0xe2, 0xa3, 0xfd, 0x28, 0x5c, 0xe2, 0xd5, 0x38, 0xca, - 0x0b, 0xca, 0xf3, 0xf2, 0x42, 0x05, 0xe6, 0x03, 0xea, 0x1f, 0xda, 0xee, 0x20, 0xba, 0x7b, 0x96, - 0x55, 0xdc, 0x81, 0x1f, 0x4a, 0xec, 0xf4, 0x49, 0x48, 0x7d, 0xd7, 0x72, 0x9c, 0x63, 0x53, 0x1c, - 0x54, 0xb8, 0x21, 0xed, 0x9b, 0x93, 0x97, 0x4f, 0x62, 0x45, 0xfe, 0xbe, 0xb0, 0xd6, 0x63, 0x63, - 0x12, 0xdb, 0x1a, 0xf1, 0x9b, 0xa8, 0xf7, 0xa0, 0xe4, 0xcb, 0x20, 0x36, 0x03, 0xe6, 0x1e, 0x99, - 0x8f, 0x96, 0xe3, 0x0b, 0xe4, 0x44, 0x84, 0x93, 0xa2, 0x3f, 0x15, 0xf0, 0x1f, 0xc0, 0xa2, 0x15, - 0xf9, 0x56, 0xb6, 0x9e, 0xde, 0xb7, 0x4c, 0x7b, 0x9e, 0x94, 0xac, 0xe9, 0x48, 0xb8, 0x09, 0x0b, - 0x12, 0x91, 0xe5, 0xd8, 0xd6, 0x64, 0x63, 0x7b, 0xe2, 0x39, 0x99, 0xc6, 0x94, 0x44, 0x3e, 0x3c, - 0xe3, 0x15, 0xf6, 0x1f, 0x5d, 0xee, 0x8e, 0xfa, 0xbc, 0xa7, 0x33, 0xbc, 0xbb, 0x48, 0xbe, 0x3d, - 0xcb, 0x4c, 0xbf, 0x3d, 0x9b, 0x7e, 0xcb, 0x96, 0x3d, 0xf1, 0x96, 0x4d, 0xbd, 0x05, 0xcb, 0xd3, - 0xf8, 0x65, 0x94, 0xad, 0x43, 0x96, 0xdf, 0x94, 0x9f, 0x58, 0x46, 0x13, 0x57, 0xe1, 0x44, 0x18, - 0xa8, 0x7f, 0xab, 0x40, 0x79, 0xc6, 0x2f, 0x56, 0xfc, 0xff, 0xa6, 0x24, 0x8e, 0x87, 0x7e, 0x0e, - 0xb2, 0xfc, 0xce, 0x5e, 0x3e, 0x45, 0xb9, 0x70, 0xfa, 0x0f, 0x8d, 0xdf, 0xaf, 0x13, 0x61, 0xc5, - 0x12, 0x21, 0x0f, 0xa8, 0x1e, 0x3f, 0x1f, 0x8a, 0x76, 0x88, 0x05, 0x26, 0x13, 0x47, 0x46, 0xa7, - 0x0f, 0x9c, 0x32, 0x2f, 0x3c, 0x70, 0xda, 0xf8, 0xc3, 0x34, 0xe4, 0x1b, 0xc7, 0x9d, 0xc7, 0xce, - 0xae, 0x63, 0x0d, 0xf8, 0x05, 0x78, 0xa3, 0x6d, 0x3c, 0x40, 0xe7, 0xf0, 0x12, 0x14, 0x9b, 0x2d, - 0xc3, 0x6c, 0xb2, 0xa5, 0x64, 0xb7, 0xae, 0xdd, 0x46, 0x0a, 0x5b, 0x6b, 0xda, 0xa4, 0x66, 0xde, - 0xd1, 0x1f, 0x08, 0x49, 0x0a, 0x97, 0x61, 0xb1, 0xdb, 0xac, 0xdd, 0xed, 0xea, 0x13, 0x61, 0x06, - 0xaf, 0xc0, 0x52, 0xa3, 0x5b, 0x37, 0x6a, 0xed, 0x7a, 0x42, 0x9c, 0x63, 0xeb, 0xd2, 0x76, 0xbd, - 0xb5, 0x2d, 0xaa, 0x88, 0xf5, 0xdf, 0x6d, 0x76, 0x6a, 0xb7, 0x9b, 0xfa, 0x8e, 0x10, 0xad, 0x31, - 0xd1, 0x43, 0x9d, 0xb4, 0x76, 0x6b, 0xd1, 0x90, 0xb7, 0x30, 0x82, 0xc2, 0x76, 0xad, 0xa9, 0x11, - 0xd9, 0xcb, 0x53, 0x05, 0x97, 0x20, 0xaf, 0x37, 0xbb, 0x0d, 0x59, 0x4f, 0xe1, 0x0a, 0x94, 0xb5, - 0xae, 0xd1, 0x32, 0x6b, 0xcd, 0x2a, 0xd1, 0x1b, 0x7a, 0xd3, 0x90, 0x9a, 0x0c, 0x2e, 0x43, 0xc9, - 0xa8, 0x35, 0xf4, 0x8e, 0xa1, 0x35, 0xda, 0x52, 0xc8, 0x66, 0x91, 0xeb, 0xe8, 0x91, 0x0d, 0xc2, - 0xab, 0xb0, 0xd2, 0x6c, 0x99, 0xf2, 0x49, 0x92, 0x79, 0x4f, 0xab, 0x77, 0x75, 0xa9, 0x5b, 0xc3, - 0x17, 0x00, 0xb7, 0x9a, 0x66, 0xb7, 0xbd, 0xa3, 0x19, 0xba, 0xd9, 0x6c, 0xdd, 0x97, 0x8a, 0x5b, - 0xb8, 0x04, 0xb9, 0xc9, 0x0c, 0x9e, 0x32, 0x16, 0x8a, 0x6d, 0x8d, 0x18, 0x13, 0xb0, 0x4f, 0x9f, - 0x32, 0xb2, 0xe0, 0x36, 0x69, 0x75, 0xdb, 0x13, 0xb3, 0x25, 0x28, 0x48, 0xb2, 0xa4, 0x28, 0xc3, - 0x44, 0xdb, 0xb5, 0x66, 0x35, 0x9e, 0xdf, 0xd3, 0xdc, 0x6a, 0x0a, 0x29, 0x1b, 0x07, 0x90, 0xe1, - 0xee, 0xc8, 0x41, 0xa6, 0xd9, 0x6a, 0xea, 0xe8, 0x1c, 0x5e, 0x04, 0xa8, 0x75, 0x6a, 0x4d, 0x43, - 0xbf, 0x4d, 0xb4, 0x3a, 0x83, 0xcd, 0x05, 0x11, 0x81, 0x0c, 0xed, 0x02, 0xcc, 0xd7, 0x3a, 0xbb, - 0xf5, 0x96, 0x66, 0x48, 0x98, 0xb5, 0xce, 
0xdd, 0x6e, 0xcb, 0x60, 0x4a, 0x84, 0x0b, 0x30, 0x57, - 0xeb, 0x18, 0xfa, 0xc7, 0x06, 0xc3, 0xc5, 0x75, 0x82, 0x55, 0xf4, 0xf4, 0xd6, 0xc6, 0x97, 0x69, - 0xc8, 0xf0, 0xf7, 0xa4, 0x45, 0xc8, 0x73, 0x6f, 0x1b, 0x0f, 0xda, 0x6c, 0xc8, 0x3c, 0x64, 0x6a, - 0x4d, 0xe3, 0x26, 0xfa, 0xe5, 0x14, 0x06, 0xc8, 0x76, 0x79, 0xf9, 0x57, 0xe6, 0x58, 0xb9, 0xd6, - 0x34, 0xde, 0xbe, 0x81, 0x3e, 0x4d, 0xb1, 0x6e, 0xbb, 0xa2, 0xf2, 0xab, 0x91, 0x62, 0xeb, 0x3a, - 0xfa, 0x2c, 0x56, 0x6c, 0x5d, 0x47, 0xbf, 0x16, 0x29, 0xae, 0x6d, 0xa1, 0x5f, 0x8f, 0x15, 0xd7, - 0xb6, 0xd0, 0x6f, 0x44, 0x8a, 0x1b, 0xd7, 0xd1, 0x6f, 0xc6, 0x8a, 0x1b, 0xd7, 0xd1, 0x6f, 0xcd, - 0x31, 0x2c, 0x1c, 0xc9, 0xb5, 0x2d, 0xf4, 0xdb, 0xb9, 0xb8, 0x76, 0xe3, 0x3a, 0xfa, 0x9d, 0x1c, - 0xf3, 0x7f, 0xec, 0x55, 0xf4, 0xbb, 0x88, 0x4d, 0x93, 0x39, 0x08, 0xfd, 0x1e, 0x2f, 0x32, 0x15, - 0xfa, 0x7d, 0xc4, 0x30, 0x32, 0x29, 0xaf, 0x7e, 0xce, 0x35, 0x0f, 0x74, 0x8d, 0xa0, 0x3f, 0x98, - 0x13, 0x2f, 0xd0, 0xaa, 0xb5, 0x86, 0x56, 0x47, 0x98, 0xb7, 0x60, 0xac, 0xfc, 0xd1, 0x55, 0x56, - 0x64, 0xe1, 0x89, 0xfe, 0xb8, 0xcd, 0x06, 0xbc, 0xa7, 0x91, 0xea, 0x87, 0x1a, 0x41, 0x7f, 0x72, - 0x95, 0x0d, 0x78, 0x4f, 0x23, 0x92, 0xaf, 0x3f, 0x6d, 0x33, 0x43, 0xae, 0xfa, 0xe2, 0x2a, 0x9b, - 0xb4, 0x94, 0xff, 0x59, 0x1b, 0xe7, 0x20, 0xbd, 0x5d, 0x33, 0xd0, 0x97, 0x7c, 0x34, 0x16, 0xa2, - 0xe8, 0xcf, 0x11, 0x13, 0x76, 0x74, 0x03, 0xfd, 0x05, 0x13, 0x66, 0x8d, 0x6e, 0xbb, 0xae, 0xa3, - 0x37, 0xd8, 0xe4, 0x6e, 0xeb, 0xad, 0x86, 0x6e, 0x90, 0x07, 0xe8, 0x2f, 0xb9, 0xf9, 0x47, 0x9d, - 0x56, 0x13, 0x7d, 0x85, 0x70, 0x09, 0x40, 0xff, 0xb8, 0x4d, 0xf4, 0x4e, 0xa7, 0xd6, 0x6a, 0xa2, - 0xb7, 0x36, 0x76, 0x01, 0x9d, 0x4c, 0x07, 0x0c, 0x40, 0xb7, 0x79, 0xa7, 0xd9, 0xba, 0xdf, 0x44, - 0xe7, 0x58, 0xa5, 0x4d, 0xf4, 0xb6, 0x46, 0x74, 0xa4, 0x60, 0x80, 0x39, 0xf1, 0x3e, 0x0e, 0xa5, - 0xf0, 0x02, 0xe4, 0x48, 0xab, 0x5e, 0xdf, 0xd6, 0xaa, 0x77, 0x50, 0x7a, 0xfb, 0x1d, 0x58, 0xb4, - 0xbd, 0xcd, 0x43, 0x3b, 0xa4, 0x41, 0x20, 0x5e, 0x2c, 0x3f, 0x54, 0x65, 0xcd, 0xf6, 0xae, 0x88, - 0xd2, 0x95, 0x81, 0x77, 0xe5, 0x30, 0xbc, 0xc2, 0xb5, 0x57, 0x78, 0xc6, 0xd8, 0x9b, 0xe3, 0x95, - 0x6b, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xda, 0x0b, 0x47, 0xfb, 0x0f, 0x2d, 0x00, 0x00, +var fileDescriptor_query_b0bca749772b6512 = []byte{ + // 3259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x73, 0x1b, 0xc9, + 0x75, 0xd7, 0xe0, 0x8b, 0xc0, 0x03, 0x01, 0x36, 0x1b, 0xa4, 0x84, 0xe5, 0x7e, 0xd1, 0x63, 0xaf, + 0xcd, 0xd0, 0x0e, 0xa5, 0xe5, 0xca, 0x8a, 0xb2, 0x76, 0x1c, 0x0d, 0xc1, 0xa1, 0x16, 0x16, 0x30, + 0x80, 0x1a, 0x03, 0xc9, 0xda, 0x72, 0xd5, 0xd4, 0x10, 0x68, 0x81, 0x53, 0x1c, 0xcc, 0x40, 0x33, + 0x03, 0x52, 0xbc, 0x29, 0x71, 0x9c, 0xef, 0xc4, 0x9b, 0xcf, 0x8d, 0x93, 0xca, 0x56, 0xaa, 0x72, + 0xcf, 0xdf, 0x90, 0xca, 0x21, 0xc7, 0xdc, 0x72, 0x48, 0x72, 0xc8, 0x21, 0x95, 0xca, 0xcd, 0x95, + 0x53, 0x0e, 0x39, 0xa4, 0x52, 0xfd, 0x31, 0x83, 0x01, 0x89, 0x95, 0x64, 0x25, 0x17, 0x6a, 0x7d, + 0xeb, 0x7e, 0xef, 0xf5, 0xc7, 0xef, 0xf7, 0xde, 0xbc, 0xee, 0xe9, 0x6e, 0x28, 0x3f, 0x99, 0xd2, + 0xe0, 0x6c, 0x67, 0x12, 0xf8, 0x91, 0x8f, 0xf3, 0xbc, 0xb2, 0x51, 0x8d, 0xfc, 0x89, 0x3f, 0xb4, + 0x23, 0x5b, 0x88, 0x37, 0xca, 0x27, 0x51, 0x30, 0x19, 0x88, 0x8a, 0xfa, 0x43, 0x05, 0x0a, 0xa6, + 0x1d, 0x8c, 0x68, 0x84, 0x37, 0xa0, 0x78, 0x4c, 0xcf, 0xc2, 0x89, 0x3d, 0xa0, 0x75, 0x65, 0x53, + 0xd9, 0x2a, 0x91, 0xa4, 0x8e, 0xd7, 0x20, 0x1f, 0x1e, 0xd9, 0xc1, 0xb0, 0x9e, 0xe1, 0x0a, 0x51, + 0xc1, 0xdf, 0x84, 0x72, 0x64, 0x1f, 0xba, 0x34, 0xb2, 0xa2, 0xb3, 
0x09, 0xad, 0x67, 0x37, 0x95, + 0xad, 0xea, 0xee, 0xda, 0x4e, 0x32, 0x9e, 0xc9, 0x95, 0xe6, 0xd9, 0x84, 0x12, 0x88, 0x92, 0x32, + 0xc6, 0x90, 0x1b, 0x50, 0xd7, 0xad, 0xe7, 0x78, 0x5f, 0xbc, 0xac, 0xee, 0x43, 0xf5, 0x81, 0x79, + 0xd7, 0x8e, 0x68, 0xc3, 0x76, 0x5d, 0x1a, 0x34, 0xf7, 0xd9, 0x74, 0xa6, 0x21, 0x0d, 0x3c, 0x7b, + 0x9c, 0x4c, 0x27, 0xae, 0xe3, 0xab, 0x50, 0x18, 0x05, 0xfe, 0x74, 0x12, 0xd6, 0x33, 0x9b, 0xd9, + 0xad, 0x12, 0x91, 0x35, 0xf5, 0xfb, 0x00, 0xfa, 0x09, 0xf5, 0x22, 0xd3, 0x3f, 0xa6, 0x1e, 0x7e, + 0x0b, 0x4a, 0x91, 0x33, 0xa6, 0x61, 0x64, 0x8f, 0x27, 0xbc, 0x8b, 0x2c, 0x99, 0x09, 0x3e, 0x07, + 0xd2, 0x06, 0x14, 0x27, 0x7e, 0xe8, 0x44, 0x8e, 0xef, 0x71, 0x3c, 0x25, 0x92, 0xd4, 0xd5, 0xef, + 0x40, 0xfe, 0x81, 0xed, 0x4e, 0x29, 0x7e, 0x17, 0x72, 0x1c, 0xb0, 0xc2, 0x01, 0x97, 0x77, 0x04, + 0xe9, 0x1c, 0x27, 0x57, 0xb0, 0xbe, 0x4f, 0x98, 0x25, 0xef, 0x7b, 0x99, 0x88, 0x8a, 0x7a, 0x0c, + 0xcb, 0x7b, 0x8e, 0x37, 0x7c, 0x60, 0x07, 0x0e, 0x23, 0xe3, 0x15, 0xbb, 0xc1, 0x5f, 0x81, 0x02, + 0x2f, 0x84, 0xf5, 0xec, 0x66, 0x76, 0xab, 0xbc, 0xbb, 0x2c, 0x1b, 0xf2, 0xb9, 0x11, 0xa9, 0x53, + 0xff, 0x4e, 0x01, 0xd8, 0xf3, 0xa7, 0xde, 0xf0, 0x3e, 0x53, 0x62, 0x04, 0xd9, 0xf0, 0x89, 0x2b, + 0x89, 0x64, 0x45, 0x7c, 0x0f, 0xaa, 0x87, 0x8e, 0x37, 0xb4, 0x4e, 0xe4, 0x74, 0x04, 0x97, 0xe5, + 0xdd, 0xaf, 0xc8, 0xee, 0x66, 0x8d, 0x77, 0xd2, 0xb3, 0x0e, 0x75, 0x2f, 0x0a, 0xce, 0x48, 0xe5, + 0x30, 0x2d, 0xdb, 0xe8, 0x03, 0xbe, 0x68, 0xc4, 0x06, 0x3d, 0xa6, 0x67, 0xf1, 0xa0, 0xc7, 0xf4, + 0x0c, 0xff, 0x5c, 0x1a, 0x51, 0x79, 0xb7, 0x16, 0x8f, 0x95, 0x6a, 0x2b, 0x61, 0x7e, 0x98, 0xb9, + 0xad, 0xa8, 0x3f, 0x2a, 0x40, 0x55, 0x7f, 0x4a, 0x07, 0xd3, 0x88, 0x76, 0x26, 0xcc, 0x07, 0x21, + 0xde, 0x81, 0x9a, 0xe3, 0x0d, 0xdc, 0xe9, 0x90, 0x5a, 0x94, 0xb9, 0xda, 0x8a, 0x98, 0xaf, 0x79, + 0x7f, 0x45, 0xb2, 0x2a, 0x55, 0xa9, 0x20, 0xd0, 0xa0, 0x36, 0xf0, 0xc7, 0x13, 0x3b, 0x98, 0xb7, + 0xcf, 0xf2, 0xf1, 0x57, 0xe5, 0xf8, 0x33, 0x7b, 0xb2, 0x2a, 0xad, 0x53, 0x5d, 0xb4, 0x61, 0x45, + 0xf6, 0x3b, 0xb4, 0x1e, 0x3b, 0xd4, 0x1d, 0x86, 0x3c, 0x74, 0xab, 0x09, 0x55, 0xf3, 0x53, 0xdc, + 0x69, 0x4a, 0xe3, 0x03, 0x6e, 0x4b, 0xaa, 0xce, 0x5c, 0x1d, 0x6f, 0xc3, 0xea, 0xc0, 0x75, 0xd8, + 0x54, 0x1e, 0x33, 0x8a, 0xad, 0xc0, 0x3f, 0x0d, 0xeb, 0x79, 0x3e, 0xff, 0x15, 0xa1, 0x38, 0x60, + 0x72, 0xe2, 0x9f, 0x86, 0xf8, 0x43, 0x28, 0x9e, 0xfa, 0xc1, 0xb1, 0xeb, 0xdb, 0xc3, 0x7a, 0x81, + 0x8f, 0xf9, 0xce, 0xe2, 0x31, 0x1f, 0x4a, 0x2b, 0x92, 0xd8, 0xe3, 0x2d, 0x40, 0xe1, 0x13, 0xd7, + 0x0a, 0xa9, 0x4b, 0x07, 0x91, 0xe5, 0x3a, 0x63, 0x27, 0xaa, 0x17, 0xf9, 0x57, 0x50, 0x0d, 0x9f, + 0xb8, 0x3d, 0x2e, 0x6e, 0x31, 0x29, 0xb6, 0x60, 0x3d, 0x0a, 0x6c, 0x2f, 0xb4, 0x07, 0xac, 0x33, + 0xcb, 0x09, 0x7d, 0xd7, 0xe6, 0x5f, 0x40, 0x89, 0x0f, 0xb9, 0xbd, 0x78, 0x48, 0x73, 0xd6, 0xa4, + 0x19, 0xb7, 0x20, 0x6b, 0xd1, 0x02, 0x29, 0x7e, 0x1f, 0xd6, 0xc3, 0x63, 0x67, 0x62, 0xf1, 0x7e, + 0xac, 0x89, 0x6b, 0x7b, 0xd6, 0xc0, 0x1e, 0x1c, 0xd1, 0x3a, 0x70, 0xd8, 0x98, 0x29, 0x79, 0xa8, + 0x75, 0x5d, 0xdb, 0x6b, 0x30, 0x8d, 0xfa, 0x2d, 0xa8, 0xce, 0xf3, 0x88, 0x57, 0xa1, 0x62, 0x3e, + 0xea, 0xea, 0x96, 0x66, 0xec, 0x5b, 0x86, 0xd6, 0xd6, 0xd1, 0x15, 0x5c, 0x81, 0x12, 0x17, 0x75, + 0x8c, 0xd6, 0x23, 0xa4, 0xe0, 0x25, 0xc8, 0x6a, 0xad, 0x16, 0xca, 0xa8, 0xb7, 0xa1, 0x18, 0x13, + 0x82, 0x57, 0xa0, 0xdc, 0x37, 0x7a, 0x5d, 0xbd, 0xd1, 0x3c, 0x68, 0xea, 0xfb, 0xe8, 0x0a, 0x2e, + 0x42, 0xae, 0xd3, 0x32, 0xbb, 0x48, 0x11, 0x25, 0xad, 0x8b, 0x32, 0xac, 0xe5, 0xfe, 0x9e, 0x86, + 0xb2, 0xea, 0xa7, 0x0a, 0xac, 0x2d, 0x02, 0x86, 0xcb, 0xb0, 0xb4, 0xaf, 0x1f, 0x68, 0xfd, 
0x96, + 0x89, 0xae, 0xe0, 0x1a, 0xac, 0x10, 0xbd, 0xab, 0x6b, 0xa6, 0xb6, 0xd7, 0xd2, 0x2d, 0xa2, 0x6b, + 0xfb, 0x48, 0xc1, 0x18, 0xaa, 0xac, 0x64, 0x35, 0x3a, 0xed, 0x76, 0xd3, 0x34, 0xf5, 0x7d, 0x94, + 0xc1, 0x6b, 0x80, 0xb8, 0xac, 0x6f, 0xcc, 0xa4, 0x59, 0x8c, 0x60, 0xb9, 0xa7, 0x93, 0xa6, 0xd6, + 0x6a, 0x7e, 0xcc, 0x3a, 0x40, 0x39, 0xfc, 0x25, 0x78, 0xbb, 0xd1, 0x31, 0x7a, 0xcd, 0x9e, 0xa9, + 0x1b, 0xa6, 0xd5, 0x33, 0xb4, 0x6e, 0xef, 0xa3, 0x8e, 0xc9, 0x7b, 0x16, 0xe0, 0xf2, 0xdf, 0xcd, + 0x15, 0x15, 0x94, 0x51, 0x3f, 0xcd, 0x40, 0x9e, 0xf3, 0xc1, 0xb2, 0x68, 0x2a, 0x37, 0xf2, 0x72, + 0x92, 0x51, 0x32, 0xcf, 0xc9, 0x28, 0x3c, 0x11, 0xcb, 0xdc, 0x26, 0x2a, 0xf8, 0x4d, 0x28, 0xf9, + 0xc1, 0xc8, 0x12, 0x1a, 0x91, 0x95, 0x8b, 0x7e, 0x30, 0xe2, 0xe9, 0x9b, 0x65, 0x44, 0x96, 0xcc, + 0x0f, 0xed, 0x90, 0xf2, 0x28, 0x2d, 0x91, 0xa4, 0x8e, 0xdf, 0x00, 0x66, 0x67, 0xf1, 0x79, 0x14, + 0xb8, 0x6e, 0xc9, 0x0f, 0x46, 0x06, 0x9b, 0xca, 0x97, 0xa1, 0x32, 0xf0, 0xdd, 0xe9, 0xd8, 0xb3, + 0x5c, 0xea, 0x8d, 0xa2, 0xa3, 0xfa, 0xd2, 0xa6, 0xb2, 0x55, 0x21, 0xcb, 0x42, 0xd8, 0xe2, 0x32, + 0x5c, 0x87, 0xa5, 0xc1, 0x91, 0x1d, 0x84, 0x54, 0x44, 0x66, 0x85, 0xc4, 0x55, 0x3e, 0x2a, 0x1d, + 0x38, 0x63, 0xdb, 0x0d, 0x79, 0x14, 0x56, 0x48, 0x52, 0x67, 0x20, 0x1e, 0xbb, 0xf6, 0x28, 0xe4, + 0xd1, 0x53, 0x21, 0xa2, 0xa2, 0xfe, 0x02, 0x64, 0x89, 0x7f, 0xca, 0xba, 0x14, 0x03, 0x86, 0x75, + 0x65, 0x33, 0xbb, 0x85, 0x49, 0x5c, 0x65, 0x8b, 0x86, 0xcc, 0x9b, 0x22, 0x9d, 0xc6, 0x99, 0xf2, + 0xfb, 0xb0, 0x4c, 0x68, 0x38, 0x75, 0x23, 0xfd, 0x69, 0x14, 0xd8, 0x21, 0xde, 0x85, 0x72, 0x3a, + 0x53, 0x28, 0x9f, 0x97, 0x29, 0x80, 0xce, 0x52, 0x44, 0x1d, 0x96, 0x1e, 0x07, 0x34, 0x3c, 0xa2, + 0x81, 0xcc, 0x44, 0x71, 0x95, 0xe5, 0xe1, 0x32, 0x0f, 0x6d, 0x31, 0x06, 0xcb, 0xde, 0x32, 0x87, + 0x28, 0x73, 0xd9, 0x9b, 0x3b, 0x95, 0x48, 0x1d, 0x63, 0x8f, 0xa5, 0x05, 0xcb, 0x7e, 0xfc, 0x98, + 0x0e, 0x22, 0x2a, 0x16, 0xa9, 0x1c, 0x59, 0x66, 0x42, 0x4d, 0xca, 0x98, 0xdb, 0x1c, 0x2f, 0xa4, + 0x41, 0x64, 0x39, 0x43, 0xee, 0xd0, 0x1c, 0x29, 0x0a, 0x41, 0x73, 0x88, 0xdf, 0x81, 0x1c, 0x4f, + 0x2c, 0x39, 0x3e, 0x0a, 0xc8, 0x51, 0x88, 0x7f, 0x4a, 0xb8, 0x1c, 0x7f, 0x1d, 0x0a, 0x94, 0xe3, + 0xe5, 0x4e, 0x9d, 0xa5, 0xe2, 0x34, 0x15, 0x44, 0x9a, 0xa8, 0xdf, 0x86, 0x65, 0x8e, 0xe1, 0xa1, + 0x1d, 0x78, 0x8e, 0x37, 0xe2, 0x2b, 0xb8, 0x3f, 0x14, 0xb1, 0x57, 0x21, 0xbc, 0xcc, 0x28, 0x18, + 0xd3, 0x30, 0xb4, 0x47, 0x54, 0xae, 0xa8, 0x71, 0x55, 0xfd, 0xab, 0x2c, 0x94, 0x7b, 0x51, 0x40, + 0xed, 0x31, 0x67, 0x0f, 0x7f, 0x1b, 0x20, 0x8c, 0xec, 0x88, 0x8e, 0xa9, 0x17, 0xc5, 0x34, 0xbc, + 0x25, 0x87, 0x4f, 0xd9, 0xed, 0xf4, 0x62, 0x23, 0x92, 0xb2, 0x3f, 0xef, 0x9e, 0xcc, 0x4b, 0xb8, + 0x67, 0xe3, 0xb3, 0x0c, 0x94, 0x92, 0xde, 0xb0, 0x06, 0xc5, 0x81, 0x1d, 0xd1, 0x91, 0x1f, 0x9c, + 0xc9, 0xb5, 0xf7, 0xbd, 0xe7, 0x8d, 0xbe, 0xd3, 0x90, 0xc6, 0x24, 0x69, 0x86, 0xdf, 0x06, 0xb1, + 0xa1, 0x11, 0xa1, 0x2f, 0xf0, 0x96, 0xb8, 0x84, 0x07, 0xff, 0x87, 0x80, 0x27, 0x81, 0x33, 0xb6, + 0x83, 0x33, 0xeb, 0x98, 0x9e, 0xc5, 0x8b, 0x46, 0x76, 0x81, 0xc3, 0x91, 0xb4, 0xbb, 0x47, 0xcf, + 0x64, 0x9a, 0xbb, 0x3d, 0xdf, 0x56, 0x86, 0xec, 0x45, 0x37, 0xa6, 0x5a, 0xf2, 0x95, 0x3f, 0x8c, + 0xd7, 0xf8, 0x3c, 0x8f, 0x6e, 0x56, 0x54, 0xbf, 0x06, 0xc5, 0x78, 0xf2, 0xb8, 0x04, 0x79, 0x3d, + 0x08, 0xfc, 0x00, 0x5d, 0xe1, 0xd9, 0xae, 0xdd, 0x12, 0x09, 0x73, 0x7f, 0x9f, 0x25, 0xcc, 0xbf, + 0xcd, 0x24, 0x0b, 0x2d, 0xa1, 0x4f, 0xa6, 0x34, 0x8c, 0xf0, 0x2f, 0x43, 0x8d, 0xf2, 0x48, 0x73, + 0x4e, 0xa8, 0x35, 0xe0, 0xbb, 0x32, 0x16, 0x67, 0xe2, 0x73, 0x58, 0xd9, 0x11, 0x9b, 0xc8, 0x78, + 0xb7, 0x46, 0x56, 
0x13, 0x5b, 0x29, 0x1a, 0x62, 0x1d, 0x6a, 0xce, 0x78, 0x4c, 0x87, 0x8e, 0x1d, + 0xa5, 0x3b, 0x10, 0x0e, 0x5b, 0x8f, 0x37, 0x2d, 0x73, 0x9b, 0x3e, 0xb2, 0x9a, 0xb4, 0x48, 0xba, + 0x79, 0x0f, 0x0a, 0x11, 0xdf, 0xa0, 0xca, 0x35, 0xbb, 0x12, 0x67, 0x35, 0x2e, 0x24, 0x52, 0x89, + 0xbf, 0x06, 0x62, 0xbb, 0xcb, 0xf3, 0xd7, 0x2c, 0x20, 0x66, 0xbb, 0x18, 0x22, 0xf4, 0xf8, 0x3d, + 0xa8, 0xce, 0x2d, 0x76, 0x43, 0x4e, 0x58, 0x96, 0x54, 0xd2, 0x2b, 0xd7, 0x10, 0x5f, 0x87, 0x25, + 0x5f, 0x2c, 0x74, 0x3c, 0xb3, 0xcd, 0x66, 0x3c, 0xbf, 0x0a, 0x92, 0xd8, 0x4a, 0xfd, 0x25, 0x58, + 0x49, 0x18, 0x0c, 0x27, 0xbe, 0x17, 0x52, 0xbc, 0x0d, 0x85, 0x80, 0x7f, 0x4e, 0x92, 0x35, 0x2c, + 0xbb, 0x48, 0xe5, 0x03, 0x22, 0x2d, 0xd4, 0x21, 0xac, 0x08, 0xc9, 0x43, 0x27, 0x3a, 0xe2, 0x8e, + 0xc2, 0xef, 0x41, 0x9e, 0xb2, 0xc2, 0x39, 0xce, 0x49, 0xb7, 0xc1, 0xf5, 0x44, 0x68, 0x53, 0xa3, + 0x64, 0x5e, 0x38, 0xca, 0x7f, 0x66, 0xa0, 0x26, 0x67, 0xb9, 0x67, 0x47, 0x83, 0xa3, 0x4b, 0xea, + 0xec, 0xaf, 0xc3, 0x12, 0x93, 0x3b, 0xc9, 0x87, 0xb1, 0xc0, 0xdd, 0xb1, 0x05, 0x73, 0xb8, 0x1d, + 0x5a, 0x29, 0xef, 0xca, 0xcd, 0x56, 0xc5, 0x0e, 0x53, 0x2b, 0xfd, 0x82, 0xb8, 0x28, 0xbc, 0x20, + 0x2e, 0x96, 0x5e, 0x2a, 0x2e, 0xf6, 0x61, 0x6d, 0x9e, 0x71, 0x19, 0x1c, 0xdf, 0x80, 0x25, 0xe1, + 0x94, 0x38, 0x05, 0x2e, 0xf2, 0x5b, 0x6c, 0xa2, 0xfe, 0x7d, 0x06, 0xd6, 0x64, 0x76, 0xfa, 0x62, + 0x7c, 0xa6, 0x29, 0x9e, 0xf3, 0x2f, 0xc3, 0xf3, 0x4b, 0xfa, 0x4f, 0x6d, 0xc0, 0xfa, 0x39, 0x1e, + 0x5f, 0xe1, 0x63, 0xfd, 0x89, 0x02, 0xcb, 0x7b, 0x74, 0xe4, 0x78, 0x97, 0xd4, 0x0b, 0x29, 0x72, + 0x73, 0x2f, 0x15, 0xc4, 0xb7, 0xa0, 0x22, 0xf1, 0x4a, 0xb6, 0x2e, 0xb2, 0xad, 0x2c, 0x62, 0xfb, + 0xdf, 0x15, 0xa8, 0x34, 0xfc, 0xf1, 0xd8, 0x89, 0x2e, 0x29, 0x53, 0x17, 0x71, 0xe6, 0x16, 0xe1, + 0x44, 0x50, 0x8d, 0x61, 0x0a, 0x82, 0xd4, 0xff, 0x50, 0x60, 0x85, 0xf8, 0xae, 0x7b, 0x68, 0x0f, + 0x8e, 0x5f, 0x6f, 0xec, 0x18, 0xd0, 0x0c, 0xa8, 0x44, 0xff, 0xdf, 0x0a, 0x54, 0xbb, 0x01, 0x65, + 0x3f, 0xd2, 0xaf, 0x35, 0x78, 0xb6, 0x13, 0x1e, 0x46, 0x72, 0x0f, 0x51, 0x22, 0xbc, 0xac, 0xae, + 0xc2, 0x4a, 0x82, 0x5d, 0xf2, 0xf1, 0xcf, 0x0a, 0xac, 0x8b, 0x00, 0x91, 0x9a, 0xe1, 0x25, 0xa5, + 0x25, 0xc6, 0x9b, 0x4b, 0xe1, 0xad, 0xc3, 0xd5, 0xf3, 0xd8, 0x24, 0xec, 0x1f, 0x64, 0xe0, 0x5a, + 0x1c, 0x1b, 0x97, 0x1c, 0xf8, 0xff, 0x21, 0x1e, 0x36, 0xa0, 0x7e, 0x91, 0x04, 0xc9, 0xd0, 0x27, + 0x19, 0xa8, 0x37, 0x02, 0x6a, 0x47, 0x34, 0xb5, 0x17, 0x79, 0x7d, 0x62, 0x03, 0xbf, 0x0f, 0xcb, + 0x13, 0x3b, 0x88, 0x9c, 0x81, 0x33, 0xb1, 0xd9, 0xdf, 0x5e, 0x9e, 0x6f, 0x75, 0xce, 0x75, 0x30, + 0x67, 0xa2, 0xbe, 0x09, 0x6f, 0x2c, 0x60, 0x44, 0xf2, 0xf5, 0x3f, 0x0a, 0xe0, 0x5e, 0x64, 0x07, + 0xd1, 0x17, 0x60, 0x55, 0x59, 0x18, 0x4c, 0xeb, 0x50, 0x9b, 0xc3, 0x9f, 0xe6, 0x85, 0x46, 0x5f, + 0x88, 0x15, 0xe7, 0x73, 0x79, 0x49, 0xe3, 0x97, 0xbc, 0xfc, 0xab, 0x02, 0x1b, 0x0d, 0x5f, 0x1c, + 0x24, 0xbe, 0x96, 0x5f, 0x98, 0xfa, 0x36, 0xbc, 0xb9, 0x10, 0xa0, 0x24, 0xe0, 0x5f, 0x14, 0xb8, + 0x4a, 0xa8, 0x3d, 0x7c, 0x3d, 0xc1, 0xdf, 0x87, 0x6b, 0x17, 0xc0, 0xc9, 0x1d, 0xea, 0x2d, 0x28, + 0x8e, 0x69, 0x64, 0x0f, 0xed, 0xc8, 0x96, 0x90, 0x36, 0xe2, 0x7e, 0x67, 0xd6, 0x6d, 0x69, 0x41, + 0x12, 0x5b, 0xf5, 0xb3, 0x0c, 0xd4, 0xf8, 0x5e, 0xf7, 0x67, 0x3f, 0x5a, 0x8b, 0xff, 0x05, 0x3e, + 0x51, 0x60, 0x6d, 0x9e, 0xa0, 0xe4, 0x9f, 0xe0, 0xff, 0xfb, 0xbc, 0x62, 0x41, 0x42, 0xc8, 0x2e, + 0xda, 0x82, 0xfe, 0x43, 0x06, 0xea, 0xe9, 0x29, 0xfd, 0xec, 0x6c, 0x63, 0xfe, 0x6c, 0xe3, 0xa7, + 0x3e, 0xcc, 0xfa, 0x54, 0x81, 0x37, 0x16, 0x10, 0xfa, 0xd3, 0x39, 0x3a, 0x75, 0xc2, 0x91, 0x79, + 0xe1, 0x09, 0xc7, 0xcb, 0xba, 0xfa, 0x9f, 
0x14, 0x58, 0x6b, 0x8b, 0x83, 0x65, 0xf1, 0x1f, 0x7f, + 0x79, 0xb3, 0x19, 0x3f, 0x3b, 0xce, 0xcd, 0xae, 0x6f, 0xd4, 0x06, 0xac, 0x9f, 0x83, 0xf6, 0x0a, + 0x67, 0x13, 0xff, 0xa5, 0xc0, 0xaa, 0xec, 0x45, 0xbb, 0xb4, 0x1b, 0x81, 0x05, 0xec, 0xe0, 0x77, + 0x20, 0xeb, 0x0c, 0xe3, 0x1d, 0xe4, 0xfc, 0xa5, 0x37, 0x53, 0xa8, 0x77, 0x00, 0xa7, 0x71, 0xbf, + 0x02, 0x75, 0xff, 0x98, 0x85, 0xd5, 0xde, 0xc4, 0x75, 0x22, 0xa9, 0x7c, 0xbd, 0x13, 0xff, 0x97, + 0x60, 0x39, 0x64, 0x60, 0x2d, 0x71, 0x25, 0xc7, 0x89, 0x2d, 0x91, 0x32, 0x97, 0x35, 0xb8, 0x08, + 0xbf, 0x0b, 0xe5, 0xd8, 0x64, 0xea, 0x45, 0xf2, 0x40, 0x0d, 0xa4, 0xc5, 0xd4, 0x8b, 0xf0, 0x4d, + 0xb8, 0xe6, 0x4d, 0xc7, 0xfc, 0x0a, 0xdb, 0x9a, 0xd0, 0x20, 0xbe, 0xe0, 0xb5, 0x83, 0xf8, 0xaa, + 0xb9, 0xe6, 0x4d, 0xc7, 0xc4, 0x3f, 0x0d, 0xbb, 0x34, 0x10, 0x17, 0xbc, 0x76, 0x10, 0xe1, 0x3b, + 0x50, 0xb2, 0xdd, 0x91, 0x1f, 0x38, 0xd1, 0xd1, 0x58, 0xde, 0x31, 0xab, 0xf1, 0x0d, 0xcc, 0x79, + 0xfa, 0x77, 0xb4, 0xd8, 0x92, 0xcc, 0x1a, 0xa9, 0xdf, 0x80, 0x52, 0x22, 0xc7, 0x08, 0x96, 0xf5, + 0xfb, 0x7d, 0xad, 0x65, 0xf5, 0xba, 0xad, 0xa6, 0xd9, 0x13, 0xf7, 0xc2, 0x07, 0xfd, 0x56, 0xcb, + 0xea, 0x35, 0x34, 0x03, 0x29, 0x2a, 0x01, 0xe0, 0x5d, 0xf2, 0xce, 0x67, 0x04, 0x29, 0x2f, 0x20, + 0xe8, 0x4d, 0x28, 0x05, 0xfe, 0xa9, 0xc4, 0x9e, 0xe1, 0x70, 0x8a, 0x81, 0x7f, 0xca, 0x91, 0xab, + 0x1a, 0xe0, 0xf4, 0x5c, 0x65, 0xb4, 0xa5, 0x92, 0xb7, 0x32, 0x97, 0xbc, 0x67, 0xe3, 0x27, 0xc9, + 0x5b, 0x6c, 0xe5, 0xd9, 0x77, 0xfe, 0x11, 0xb5, 0xdd, 0x28, 0x5e, 0xaf, 0xd4, 0xbf, 0xce, 0x40, + 0x85, 0x30, 0x89, 0x33, 0xa6, 0xbd, 0xc8, 0x8e, 0x42, 0xe6, 0xa9, 0x23, 0x6e, 0x62, 0xcd, 0xd2, + 0x6e, 0x89, 0x94, 0x85, 0x4c, 0xdc, 0x15, 0xec, 0xc2, 0x7a, 0x48, 0x07, 0xbe, 0x37, 0x0c, 0xad, + 0x43, 0x7a, 0xe4, 0x78, 0x43, 0x6b, 0x6c, 0x87, 0x91, 0xbc, 0x8e, 0xac, 0x90, 0x9a, 0x54, 0xee, + 0x71, 0x5d, 0x9b, 0xab, 0xf0, 0x0d, 0x58, 0x3b, 0x74, 0x3c, 0xd7, 0x1f, 0x59, 0x13, 0xd7, 0x3e, + 0xa3, 0x41, 0x28, 0xa1, 0xb2, 0xf0, 0xca, 0x13, 0x2c, 0x74, 0x5d, 0xa1, 0x12, 0xee, 0xfe, 0x18, + 0xb6, 0x17, 0x8e, 0x62, 0x3d, 0x76, 0xdc, 0x88, 0x06, 0x74, 0x68, 0x05, 0x74, 0xe2, 0x3a, 0x03, + 0xf1, 0x7a, 0x40, 0xec, 0xdd, 0xbf, 0xba, 0x60, 0xe8, 0x03, 0x69, 0x4e, 0x66, 0xd6, 0x8c, 0xed, + 0xc1, 0x64, 0x6a, 0x4d, 0xf9, 0x0d, 0x22, 0x5b, 0xc5, 0x14, 0x52, 0x1c, 0x4c, 0xa6, 0x7d, 0x56, + 0xc7, 0x08, 0xb2, 0x4f, 0x26, 0x62, 0xf1, 0x52, 0x08, 0x2b, 0xaa, 0x3f, 0x51, 0xa0, 0xaa, 0x8d, + 0x46, 0x01, 0x1d, 0xd9, 0x91, 0xa4, 0xe9, 0x06, 0xac, 0x09, 0x4a, 0xce, 0x2c, 0xf9, 0x2c, 0x49, + 0xe0, 0x51, 0x04, 0x1e, 0xa9, 0x13, 0x8f, 0x92, 0xe2, 0xf0, 0xbd, 0x3a, 0xf5, 0x16, 0xb6, 0xc9, + 0xf0, 0x36, 0x6b, 0x89, 0x36, 0xdd, 0xea, 0x17, 0xe1, 0x8d, 0xc5, 0x2c, 0x8c, 0x1d, 0xf1, 0xb0, + 0xa4, 0x42, 0xae, 0x2e, 0x00, 0xdd, 0x76, 0xbc, 0xe7, 0x34, 0xb5, 0x9f, 0x72, 0xbe, 0x3e, 0xa7, + 0xa9, 0xfd, 0x54, 0xfd, 0xb7, 0xe4, 0x06, 0x20, 0x0e, 0x97, 0x64, 0x35, 0x8e, 0xf3, 0x82, 0xf2, + 0xbc, 0xbc, 0x50, 0x87, 0xa5, 0x90, 0x06, 0x27, 0x8e, 0x37, 0x8a, 0xaf, 0xa8, 0x65, 0x15, 0xf7, + 0xe0, 0xab, 0x12, 0x3b, 0x7d, 0x1a, 0xd1, 0xc0, 0xb3, 0x5d, 0xf7, 0xcc, 0x12, 0x07, 0x15, 0x5e, + 0x44, 0x87, 0xd6, 0xec, 0x11, 0x95, 0x58, 0x91, 0xbf, 0x2c, 0xac, 0xf5, 0xc4, 0x98, 0x24, 0xb6, + 0x66, 0xf2, 0xbc, 0xea, 0x5b, 0x50, 0x0d, 0x64, 0x10, 0x5b, 0x21, 0x73, 0x8f, 0xcc, 0x47, 0x6b, + 0xc9, 0x3d, 0x73, 0x2a, 0xc2, 0x49, 0x25, 0x98, 0x0b, 0xf8, 0xef, 0xc0, 0x8a, 0x1d, 0xfb, 0x56, + 0xb6, 0x9e, 0xdf, 0xb7, 0xcc, 0x7b, 0x9e, 0x54, 0xed, 0xf9, 0x48, 0xb8, 0x0d, 0xcb, 0x12, 0x91, + 0xed, 0x3a, 0xf6, 0x6c, 0x63, 0x7b, 0xee, 0x65, 0x9a, 0xc6, 0x94, 
0x44, 0xbe, 0x61, 0xe3, 0x15, + 0xf6, 0x1f, 0x5d, 0xeb, 0x4f, 0x86, 0xbc, 0xa7, 0x4b, 0xbc, 0xbb, 0x48, 0x3f, 0x63, 0xcb, 0xcd, + 0x3f, 0x63, 0x9b, 0x7f, 0x16, 0x97, 0x3f, 0xf7, 0x2c, 0x4e, 0xbd, 0x03, 0x6b, 0xf3, 0xf8, 0x65, + 0x94, 0x6d, 0x41, 0x9e, 0x5f, 0xa8, 0x9f, 0x5b, 0x46, 0x53, 0x37, 0xe6, 0x44, 0x18, 0xa8, 0x7f, + 0xa3, 0x40, 0x6d, 0xc1, 0x2f, 0x56, 0xf2, 0xff, 0xa6, 0xa4, 0x8e, 0x87, 0x7e, 0x1e, 0xf2, 0xfc, + 0x6a, 0x5f, 0xbe, 0x58, 0xb9, 0x76, 0xf1, 0x0f, 0x8d, 0x5f, 0xc3, 0x13, 0x61, 0xc5, 0x12, 0x21, + 0x0f, 0xa8, 0x01, 0x3f, 0x1f, 0x8a, 0x77, 0x88, 0x65, 0x26, 0x13, 0x47, 0x46, 0x17, 0x0f, 0x9c, + 0x72, 0x2f, 0x3c, 0x70, 0xda, 0xfe, 0xc3, 0x2c, 0x94, 0xda, 0x67, 0xbd, 0x27, 0xee, 0x81, 0x6b, + 0x8f, 0xf8, 0x3d, 0x79, 0xbb, 0x6b, 0x3e, 0x42, 0x57, 0xf0, 0x2a, 0x54, 0x8c, 0x8e, 0x69, 0x19, + 0x6c, 0x29, 0x39, 0x68, 0x69, 0x77, 0x91, 0xc2, 0xd6, 0x9a, 0x2e, 0x69, 0x5a, 0xf7, 0xf4, 0x47, + 0x42, 0x92, 0xc1, 0x35, 0x58, 0xe9, 0x1b, 0xcd, 0xfb, 0x7d, 0x7d, 0x26, 0xcc, 0xe1, 0x75, 0x58, + 0x6d, 0xf7, 0x5b, 0x66, 0xb3, 0xdb, 0x4a, 0x89, 0x8b, 0x6c, 0x5d, 0xda, 0x6b, 0x75, 0xf6, 0x44, + 0x15, 0xb1, 0xfe, 0xfb, 0x46, 0xaf, 0x79, 0xd7, 0xd0, 0xf7, 0x85, 0x68, 0x93, 0x89, 0x3e, 0xd6, + 0x49, 0xe7, 0xa0, 0x19, 0x0f, 0x79, 0x07, 0x23, 0x28, 0xef, 0x35, 0x0d, 0x8d, 0xc8, 0x5e, 0x9e, + 0x29, 0xb8, 0x0a, 0x25, 0xdd, 0xe8, 0xb7, 0x65, 0x3d, 0x83, 0xeb, 0x50, 0xd3, 0xfa, 0x66, 0xc7, + 0x6a, 0x1a, 0x0d, 0xa2, 0xb7, 0x75, 0xc3, 0x94, 0x9a, 0x1c, 0xae, 0x41, 0xd5, 0x6c, 0xb6, 0xf5, + 0x9e, 0xa9, 0xb5, 0xbb, 0x52, 0xc8, 0x66, 0x51, 0xec, 0xe9, 0xb1, 0x0d, 0xc2, 0x1b, 0xb0, 0x6e, + 0x74, 0x2c, 0xf9, 0xb8, 0xc9, 0x7a, 0xa0, 0xb5, 0xfa, 0xba, 0xd4, 0x6d, 0xe2, 0x6b, 0x80, 0x3b, + 0x86, 0xd5, 0xef, 0xee, 0x6b, 0xa6, 0x6e, 0x19, 0x9d, 0x87, 0x52, 0x71, 0x07, 0x57, 0xa1, 0x38, + 0x9b, 0xc1, 0x33, 0xc6, 0x42, 0xa5, 0xab, 0x11, 0x73, 0x06, 0xf6, 0xd9, 0x33, 0x46, 0x16, 0xdc, + 0x25, 0x9d, 0x7e, 0x77, 0x66, 0xb6, 0x0a, 0x65, 0x49, 0x96, 0x14, 0xe5, 0x98, 0x68, 0xaf, 0x69, + 0x34, 0x92, 0xf9, 0x3d, 0x2b, 0x6e, 0x64, 0x90, 0xb2, 0x7d, 0x0c, 0x39, 0xee, 0x8e, 0x22, 0xe4, + 0x8c, 0x8e, 0xa1, 0xa3, 0x2b, 0x78, 0x05, 0xa0, 0xd9, 0x6b, 0x1a, 0xa6, 0x7e, 0x97, 0x68, 0x2d, + 0x06, 0x9b, 0x0b, 0x62, 0x02, 0x19, 0xda, 0x65, 0x58, 0x6a, 0xf6, 0x0e, 0x5a, 0x1d, 0xcd, 0x94, + 0x30, 0x9b, 0xbd, 0xfb, 0xfd, 0x8e, 0xc9, 0x94, 0x08, 0x97, 0xa1, 0xd0, 0xec, 0x99, 0xfa, 0xf7, + 0x4c, 0x86, 0x8b, 0xeb, 0x04, 0xab, 0xe8, 0xd9, 0x9d, 0xed, 0x1f, 0x67, 0x21, 0xc7, 0x9f, 0xa6, + 0x56, 0xa0, 0xc4, 0xbd, 0x6d, 0x3e, 0xea, 0xb2, 0x21, 0x4b, 0x90, 0x6b, 0x1a, 0xe6, 0x6d, 0xf4, + 0x2b, 0x19, 0x0c, 0x90, 0xef, 0xf3, 0xf2, 0xaf, 0x16, 0x58, 0xb9, 0x69, 0x98, 0xef, 0xdf, 0x42, + 0x3f, 0xc8, 0xb0, 0x6e, 0xfb, 0xa2, 0xf2, 0x6b, 0xb1, 0x62, 0xf7, 0x26, 0xfa, 0x61, 0xa2, 0xd8, + 0xbd, 0x89, 0x7e, 0x3d, 0x56, 0x7c, 0xb0, 0x8b, 0x7e, 0x23, 0x51, 0x7c, 0xb0, 0x8b, 0x7e, 0x33, + 0x56, 0xdc, 0xba, 0x89, 0x7e, 0x2b, 0x51, 0xdc, 0xba, 0x89, 0x7e, 0xbb, 0xc0, 0xb0, 0x70, 0x24, + 0x1f, 0xec, 0xa2, 0xdf, 0x29, 0x26, 0xb5, 0x5b, 0x37, 0xd1, 0xef, 0x16, 0x99, 0xff, 0x13, 0xaf, + 0xa2, 0xdf, 0x43, 0x6c, 0x9a, 0xcc, 0x41, 0xe8, 0xf7, 0x79, 0x91, 0xa9, 0xd0, 0x8f, 0x10, 0xc3, + 0xc8, 0xa4, 0xbc, 0xfa, 0x09, 0xd7, 0x3c, 0xd2, 0x35, 0x82, 0xfe, 0xa0, 0x20, 0xde, 0xb2, 0x35, + 0x9a, 0x6d, 0xad, 0x85, 0x30, 0x6f, 0xc1, 0x58, 0xf9, 0xa3, 0x1b, 0xac, 0xc8, 0xc2, 0x13, 0xfd, + 0x71, 0x97, 0x0d, 0xf8, 0x40, 0x23, 0x8d, 0x8f, 0x34, 0x82, 0xfe, 0xe4, 0x06, 0x1b, 0xf0, 0x81, + 0x46, 0x24, 0x5f, 0x7f, 0xda, 0x65, 0x86, 0x5c, 0xf5, 0xe9, 0x0d, 0x36, 0x69, 0x29, 0xff, 
0xb3, + 0x2e, 0x2e, 0x42, 0x76, 0xaf, 0x69, 0xa2, 0x1f, 0xf3, 0xd1, 0x58, 0x88, 0xa2, 0x3f, 0x47, 0x4c, + 0xd8, 0xd3, 0x4d, 0xf4, 0x17, 0x4c, 0x98, 0x37, 0xfb, 0xdd, 0x96, 0x8e, 0xde, 0x62, 0x93, 0xbb, + 0xab, 0x77, 0xda, 0xba, 0x49, 0x1e, 0xa1, 0xbf, 0xe4, 0xe6, 0xdf, 0xed, 0x75, 0x0c, 0xf4, 0x19, + 0xc2, 0x55, 0x00, 0xfd, 0x7b, 0x5d, 0xa2, 0xf7, 0x7a, 0xcd, 0x8e, 0x81, 0xde, 0xdd, 0x3e, 0x00, + 0x74, 0x3e, 0x1d, 0x30, 0x00, 0x7d, 0xe3, 0x9e, 0xd1, 0x79, 0x68, 0xa0, 0x2b, 0xac, 0xd2, 0x25, + 0x7a, 0x57, 0x23, 0x3a, 0x52, 0x30, 0x40, 0x41, 0xbc, 0xb4, 0x43, 0x19, 0xbc, 0x0c, 0x45, 0xd2, + 0x69, 0xb5, 0xf6, 0xb4, 0xc6, 0x3d, 0x94, 0xdd, 0xfb, 0x26, 0xac, 0x38, 0xfe, 0xce, 0x89, 0x13, + 0xd1, 0x30, 0x14, 0x8f, 0x9f, 0x3f, 0x56, 0x65, 0xcd, 0xf1, 0xaf, 0x8b, 0xd2, 0xf5, 0x91, 0x7f, + 0xfd, 0x24, 0xba, 0xce, 0xb5, 0xd7, 0x79, 0xc6, 0x38, 0x2c, 0xf0, 0xca, 0x07, 0xff, 0x1b, 0x00, + 0x00, 0xff, 0xff, 0x0e, 0x62, 0xd9, 0x82, 0x5a, 0x2d, 0x00, 0x00, } diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index c6c7ad3be1..7a0bce4f40 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -6,6 +6,7 @@ package queryservice // import "vitess.io/vitess/go/vt/proto/queryservice" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" +import binlogdata "vitess.io/vitess/go/vt/proto/binlogdata" import query "vitess.io/vitess/go/vt/proto/query" import ( @@ -85,6 +86,8 @@ type QueryClient interface { StreamHealth(ctx context.Context, in *query.StreamHealthRequest, opts ...grpc.CallOption) (Query_StreamHealthClient, error) // UpdateStream asks the server to return a stream of the updates that have been applied to its database. UpdateStream(ctx context.Context, in *query.UpdateStreamRequest, opts ...grpc.CallOption) (Query_UpdateStreamClient, error) + // VStream streams vreplication events. + VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error) } type queryClient struct { @@ -376,6 +379,38 @@ func (x *queryUpdateStreamClient) Recv() (*query.UpdateStreamResponse, error) { return m, nil } +func (c *queryClient) VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[4], "/queryservice.Query/VStream", opts...) + if err != nil { + return nil, err + } + x := &queryVStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_VStreamClient interface { + Recv() (*binlogdata.VStreamResponse, error) + grpc.ClientStream +} + +type queryVStreamClient struct { + grpc.ClientStream +} + +func (x *queryVStreamClient) Recv() (*binlogdata.VStreamResponse, error) { + m := new(binlogdata.VStreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // QueryServer is the server API for Query service. type QueryServer interface { // Execute executes the specified SQL query (might be in a @@ -427,6 +462,8 @@ type QueryServer interface { StreamHealth(*query.StreamHealthRequest, Query_StreamHealthServer) error // UpdateStream asks the server to return a stream of the updates that have been applied to its database. UpdateStream(*query.UpdateStreamRequest, Query_UpdateStreamServer) error + // VStream streams vreplication events. 
+ VStream(*binlogdata.VStreamRequest, Query_VStreamServer) error } func RegisterQueryServer(s *grpc.Server, srv QueryServer) { @@ -823,6 +860,27 @@ func (x *queryUpdateStreamServer) Send(m *query.UpdateStreamResponse) error { return x.ServerStream.SendMsg(m) } +func _Query_VStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.VStreamRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).VStream(m, &queryVStreamServer{stream}) +} + +type Query_VStreamServer interface { + Send(*binlogdata.VStreamResponse) error + grpc.ServerStream +} + +type queryVStreamServer struct { + grpc.ServerStream +} + +func (x *queryVStreamServer) Send(m *binlogdata.VStreamResponse) error { + return x.ServerStream.SendMsg(m) +} + var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "queryservice.Query", HandlerType: (*QueryServer)(nil), @@ -917,45 +975,51 @@ var _Query_serviceDesc = grpc.ServiceDesc{ Handler: _Query_UpdateStream_Handler, ServerStreams: true, }, + { + StreamName: "VStream", + Handler: _Query_VStream_Handler, + ServerStreams: true, + }, }, Metadata: "queryservice.proto", } -func init() { proto.RegisterFile("queryservice.proto", fileDescriptor_queryservice_81e549fbfb878a8d) } +func init() { proto.RegisterFile("queryservice.proto", fileDescriptor_queryservice_17509881eb07629d) } -var fileDescriptor_queryservice_81e549fbfb878a8d = []byte{ - // 519 bytes of a gzipped FileDescriptorProto +var fileDescriptor_queryservice_17509881eb07629d = []byte{ + // 544 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x95, 0xdf, 0x6b, 0xd4, 0x40, - 0x10, 0xc7, 0xf5, 0xa1, 0xad, 0x4c, 0xe3, 0xaf, 0xad, 0x55, 0x9b, 0xd6, 0xb6, 0xf6, 0x4d, 0x84, - 0x46, 0x54, 0x10, 0x0a, 0x3e, 0xf4, 0x82, 0xa2, 0x14, 0x7f, 0xdd, 0x59, 0x10, 0x1f, 0x84, 0x6d, - 0x6e, 0x38, 0x43, 0x73, 0x49, 0xba, 0xbb, 0x77, 0xe8, 0x5f, 0xe4, 0xbf, 0x29, 0x66, 0x33, 0x93, - 0xdd, 0xbd, 0xc4, 0xb7, 0xce, 0xf7, 0x3b, 0xf3, 0x61, 0x6e, 0xa7, 0x33, 0x01, 0x71, 0xb5, 0x40, - 0xf5, 0x5b, 0xa3, 0x5a, 0xe6, 0x19, 0x1e, 0xd7, 0xaa, 0x32, 0x95, 0x88, 0x5c, 0x2d, 0xde, 0x6c, - 0x22, 0x6b, 0x3d, 0xff, 0x13, 0xc1, 0xda, 0x97, 0x7f, 0xb1, 0x38, 0x81, 0x8d, 0x37, 0xbf, 0x30, - 0x5b, 0x18, 0x14, 0xdb, 0xc7, 0x36, 0xa5, 0x8d, 0xc7, 0x78, 0xb5, 0x40, 0x6d, 0xe2, 0xfb, 0xa1, - 0xac, 0xeb, 0xaa, 0xd4, 0x78, 0x74, 0x4d, 0xbc, 0x87, 0xa8, 0x15, 0x47, 0xd2, 0x64, 0x3f, 0x45, - 0xec, 0x67, 0x36, 0x22, 0x51, 0x76, 0x7b, 0x3d, 0x46, 0x7d, 0x84, 0x9b, 0x13, 0xa3, 0x50, 0xce, - 0xa9, 0x19, 0xca, 0xf7, 0x54, 0x82, 0xed, 0xf5, 0x9b, 0x44, 0x7b, 0x76, 0x5d, 0xbc, 0x84, 0xb5, - 0x11, 0xce, 0xf2, 0x52, 0x6c, 0xb5, 0xa9, 0x4d, 0x44, 0xf5, 0xf7, 0x7c, 0x91, 0xbb, 0x78, 0x05, - 0xeb, 0x69, 0x35, 0x9f, 0xe7, 0x46, 0x50, 0x86, 0x0d, 0xa9, 0x6e, 0x3b, 0x50, 0xb9, 0xf0, 0x35, - 0xdc, 0x18, 0x57, 0x45, 0x71, 0x21, 0xb3, 0x4b, 0x41, 0xef, 0x45, 0x02, 0x15, 0x3f, 0x58, 0xd1, - 0xb9, 0xfc, 0x04, 0x36, 0x3e, 0x2b, 0xac, 0xa5, 0xea, 0x86, 0xd0, 0xc6, 0xe1, 0x10, 0x58, 0xe6, - 0xda, 0x4f, 0x70, 0xcb, 0xb6, 0xd3, 0x5a, 0x53, 0xb1, 0xe7, 0x75, 0x49, 0x32, 0x91, 0x1e, 0x0d, - 0xb8, 0x0c, 0x3c, 0x87, 0x3b, 0xd4, 0x22, 0x23, 0xf7, 0x83, 0xde, 0x43, 0xe8, 0xc1, 0xa0, 0xcf, - 0xd8, 0x6f, 0x70, 0x37, 0x55, 0x28, 0x0d, 0x7e, 0x55, 0xb2, 0xd4, 0x32, 0x33, 0x79, 0x55, 0x0a, - 0xaa, 0x5b, 0x71, 0x08, 0x7c, 0x38, 0x9c, 0xc0, 0xe4, 0xb7, 0xb0, 0x39, 0x31, 0x52, 0x99, 0x76, - 0x74, 0x3b, 0xfc, 0xcf, 0xc1, 0x1a, 0xd1, 0xe2, 0x3e, 0xcb, 0xe3, 0xa0, 0xe1, 0x39, 0x32, 0xa7, - 0xd3, 0x56, 
0x38, 0xae, 0xc5, 0x9c, 0x1f, 0xb0, 0x95, 0x56, 0x65, 0x56, 0x2c, 0xa6, 0xde, 0x6f, - 0x7d, 0xcc, 0x0f, 0xbf, 0xe2, 0x11, 0xf7, 0xe8, 0x7f, 0x29, 0xcc, 0x1f, 0xc3, 0xed, 0x31, 0xca, - 0xa9, 0xcb, 0xa6, 0xa1, 0x06, 0x3a, 0x71, 0xf7, 0x87, 0x6c, 0x77, 0x95, 0x9b, 0x65, 0xa0, 0xf5, - 0x8b, 0xdd, 0x0d, 0x09, 0xb6, 0x6f, 0xb7, 0xd7, 0x73, 0x07, 0xed, 0x3a, 0xf6, 0x34, 0x1c, 0xf4, - 0xd4, 0x78, 0xf7, 0xe1, 0x70, 0x38, 0xc1, 0x3d, 0x12, 0x1f, 0x50, 0x6b, 0x39, 0x43, 0xbb, 0xf8, - 0x7c, 0x24, 0x3c, 0x35, 0x3c, 0x12, 0x81, 0xe9, 0x1c, 0x89, 0x14, 0xa0, 0x35, 0x4f, 0xb3, 0x4b, - 0xf1, 0xd0, 0xcf, 0x3f, 0xed, 0xc6, 0xbd, 0xd3, 0xe3, 0x70, 0x53, 0x29, 0xc0, 0xa4, 0x2e, 0x72, - 0x63, 0xcf, 0x29, 0x41, 0x3a, 0x29, 0x84, 0xb8, 0x0e, 0x43, 0xce, 0x20, 0xb2, 0xfd, 0xbd, 0x43, - 0x59, 0x98, 0xee, 0x92, 0xba, 0x62, 0xf8, 0xfc, 0xbe, 0xe7, 0xfc, 0xac, 0x33, 0x88, 0xce, 0xeb, - 0xa9, 0x34, 0xf4, 0x4a, 0x04, 0x73, 0xc5, 0x10, 0xe6, 0x7b, 0x1d, 0x6c, 0xf4, 0xf4, 0xfb, 0x93, - 0x65, 0x6e, 0x50, 0xeb, 0xe3, 0xbc, 0x4a, 0xec, 0x5f, 0xc9, 0xac, 0x4a, 0x96, 0x26, 0x69, 0xbe, - 0x24, 0x89, 0xfb, 0x8d, 0xb9, 0x58, 0x6f, 0xb4, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x51, - 0x5a, 0xbc, 0xc0, 0x8e, 0x06, 0x00, 0x00, + 0x10, 0xc7, 0xf5, 0xa1, 0xad, 0x4c, 0x4f, 0xad, 0x5b, 0xab, 0x36, 0xad, 0x6d, 0xed, 0x9b, 0x08, + 0x17, 0x51, 0x41, 0x28, 0xf8, 0xd0, 0x0b, 0x16, 0xa5, 0xf8, 0xeb, 0xce, 0x16, 0xf1, 0x41, 0xd8, + 0x4b, 0x86, 0x33, 0x34, 0x97, 0x4d, 0x93, 0xbd, 0x43, 0xff, 0x6a, 0xff, 0x05, 0x31, 0x9b, 0x99, + 0xec, 0xee, 0x25, 0xbe, 0xdd, 0x7e, 0xbf, 0x33, 0x1f, 0x26, 0x3b, 0x37, 0xb3, 0x20, 0xae, 0x17, + 0x58, 0xfe, 0xae, 0xb0, 0x5c, 0xa6, 0x31, 0x0e, 0x8b, 0x52, 0x69, 0x25, 0x06, 0xb6, 0x16, 0x6c, + 0xd6, 0x27, 0x63, 0x05, 0x5b, 0xd3, 0x34, 0xcf, 0xd4, 0x2c, 0x91, 0x5a, 0x1a, 0xe5, 0xc5, 0x9f, + 0x01, 0xac, 0x7d, 0xf9, 0x17, 0x21, 0x4e, 0x60, 0xe3, 0xed, 0x2f, 0x8c, 0x17, 0x1a, 0xc5, 0xce, + 0xd0, 0x24, 0x35, 0xe7, 0x31, 0x5e, 0x2f, 0xb0, 0xd2, 0xc1, 0x03, 0x5f, 0xae, 0x0a, 0x95, 0x57, + 0x78, 0x7c, 0x43, 0xbc, 0x87, 0x41, 0x23, 0x8e, 0xa4, 0x8e, 0x7f, 0x8a, 0xc0, 0x8d, 0xac, 0x45, + 0xa2, 0xec, 0x75, 0x7a, 0x8c, 0xfa, 0x08, 0xb7, 0x27, 0xba, 0x44, 0x39, 0xa7, 0x62, 0x28, 0xde, + 0x51, 0x09, 0xb6, 0xdf, 0x6d, 0x12, 0xed, 0xf9, 0x4d, 0xf1, 0x0a, 0xd6, 0x46, 0x38, 0x4b, 0x73, + 0xb1, 0xdd, 0x84, 0xd6, 0x27, 0xca, 0xbf, 0xef, 0x8a, 0x5c, 0xc5, 0x6b, 0x58, 0x8f, 0xd4, 0x7c, + 0x9e, 0x6a, 0x41, 0x11, 0xe6, 0x48, 0x79, 0x3b, 0x9e, 0xca, 0x89, 0x6f, 0xe0, 0xd6, 0x58, 0x65, + 0xd9, 0x54, 0xc6, 0x57, 0x82, 0xee, 0x8b, 0x04, 0x4a, 0x7e, 0xb8, 0xa2, 0x73, 0xfa, 0x09, 0x6c, + 0x7c, 0x2e, 0xb1, 0x90, 0x65, 0xdb, 0x84, 0xe6, 0xec, 0x37, 0x81, 0x65, 0xce, 0xfd, 0x04, 0x77, + 0x4c, 0x39, 0x8d, 0x95, 0x88, 0x7d, 0xa7, 0x4a, 0x92, 0x89, 0xf4, 0xb8, 0xc7, 0x65, 0xe0, 0x05, + 0x6c, 0x51, 0x89, 0x8c, 0x3c, 0xf0, 0x6a, 0xf7, 0xa1, 0x87, 0xbd, 0x3e, 0x63, 0xbf, 0xc1, 0xbd, + 0xa8, 0x44, 0xa9, 0xf1, 0x6b, 0x29, 0xf3, 0x4a, 0xc6, 0x3a, 0x55, 0xb9, 0xa0, 0xbc, 0x15, 0x87, + 0xc0, 0x47, 0xfd, 0x01, 0x4c, 0x3e, 0x83, 0xcd, 0x89, 0x96, 0xa5, 0x6e, 0x5a, 0xb7, 0xcb, 0x7f, + 0x0e, 0xd6, 0x88, 0x16, 0x74, 0x59, 0x0e, 0x07, 0x35, 0xf7, 0x91, 0x39, 0xad, 0xb6, 0xc2, 0xb1, + 0x2d, 0xe6, 0xfc, 0x80, 0xed, 0x48, 0xe5, 0x71, 0xb6, 0x48, 0x9c, 0x6f, 0x7d, 0xc2, 0x17, 0xbf, + 0xe2, 0x11, 0xf7, 0xf8, 0x7f, 0x21, 0xcc, 0x1f, 0xc3, 0xdd, 0x31, 0xca, 0xc4, 0x66, 0x53, 0x53, + 0x3d, 0x9d, 0xb8, 0x07, 0x7d, 0xb6, 0x3d, 0xca, 0xf5, 0x30, 0xd0, 0xf8, 0x05, 0xf6, 0x84, 0x78, + 0xd3, 0xb7, 0xd7, 0xe9, 0xd9, 0x8d, 0xb6, 0x1d, 0xb3, 0x1a, 0x0e, 0x3b, 0x72, 0x9c, 0xfd, 
0x70, + 0xd4, 0x1f, 0x60, 0x2f, 0x89, 0x0f, 0x58, 0x55, 0x72, 0x86, 0x66, 0xf0, 0x79, 0x49, 0x38, 0xaa, + 0xbf, 0x24, 0x3c, 0xd3, 0x5a, 0x12, 0x11, 0x40, 0x63, 0x9e, 0xc6, 0x57, 0xe2, 0x91, 0x1b, 0x7f, + 0xda, 0xb6, 0x7b, 0xb7, 0xc3, 0xe1, 0xa2, 0x22, 0x80, 0x49, 0x91, 0xa5, 0xda, 0xac, 0x53, 0x82, + 0xb4, 0x92, 0x0f, 0xb1, 0x1d, 0x86, 0x9c, 0xc3, 0xc0, 0xd4, 0xf7, 0x0e, 0x65, 0xa6, 0xdb, 0x4d, + 0x6a, 0x8b, 0xfe, 0xf5, 0xbb, 0x9e, 0xf5, 0x59, 0xe7, 0x30, 0xb8, 0x28, 0x12, 0xa9, 0xe9, 0x96, + 0x08, 0x66, 0x8b, 0x3e, 0xcc, 0xf5, 0x2c, 0xd8, 0x19, 0x6c, 0x5c, 0x32, 0xc7, 0x7a, 0x47, 0x2e, + 0x7d, 0x4e, 0x97, 0xd7, 0x72, 0x46, 0xcf, 0xbe, 0x3f, 0x5d, 0xa6, 0x1a, 0xab, 0x6a, 0x98, 0xaa, + 0xd0, 0xfc, 0x0a, 0x67, 0x2a, 0x5c, 0xea, 0xb0, 0x7e, 0x91, 0x42, 0xfb, 0xf5, 0x9a, 0xae, 0xd7, + 0xda, 0xcb, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x47, 0x8e, 0x80, 0xe8, 0x06, 0x00, 0x00, } diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index e308aa9230..5104477b7c 100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -37,7 +37,7 @@ func (m *Status) Reset() { *m = Status{} } func (m *Status) String() string { return proto.CompactTextString(m) } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_replicationdata_535db925ee5677f7, []int{0} + return fileDescriptor_replicationdata_1dfa1a45cfa5e522, []int{0} } func (m *Status) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Status.Unmarshal(m, b) @@ -111,10 +111,10 @@ func init() { } func init() { - proto.RegisterFile("replicationdata.proto", fileDescriptor_replicationdata_535db925ee5677f7) + proto.RegisterFile("replicationdata.proto", fileDescriptor_replicationdata_1dfa1a45cfa5e522) } -var fileDescriptor_replicationdata_535db925ee5677f7 = []byte{ +var fileDescriptor_replicationdata_1dfa1a45cfa5e522 = []byte{ // 264 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0xc1, 0x4a, 0x03, 0x31, 0x10, 0x86, 0xd9, 0x6a, 0xd7, 0x1a, 0xd1, 0x6a, 0xb4, 0x10, 0xbc, 0xb8, 0x78, 0x5a, 0x44, 0x36, diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go index 0b80755500..917663e342 100644 --- a/go/vt/proto/tableacl/tableacl.pb.go +++ b/go/vt/proto/tableacl/tableacl.pb.go @@ -35,7 +35,7 @@ func (m *TableGroupSpec) Reset() { *m = TableGroupSpec{} } func (m *TableGroupSpec) String() string { return proto.CompactTextString(m) } func (*TableGroupSpec) ProtoMessage() {} func (*TableGroupSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_tableacl_38276fd243b9c307, []int{0} + return fileDescriptor_tableacl_82b5f1376534b35e, []int{0} } func (m *TableGroupSpec) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TableGroupSpec.Unmarshal(m, b) @@ -101,7 +101,7 @@ func (m *Config) Reset() { *m = Config{} } func (m *Config) String() string { return proto.CompactTextString(m) } func (*Config) ProtoMessage() {} func (*Config) Descriptor() ([]byte, []int) { - return fileDescriptor_tableacl_38276fd243b9c307, []int{1} + return fileDescriptor_tableacl_82b5f1376534b35e, []int{1} } func (m *Config) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Config.Unmarshal(m, b) @@ -133,9 +133,9 @@ func init() { proto.RegisterType((*Config)(nil), "tableacl.Config") } -func init() { proto.RegisterFile("tableacl.proto", fileDescriptor_tableacl_38276fd243b9c307) } +func init() { 
proto.RegisterFile("tableacl.proto", fileDescriptor_tableacl_82b5f1376534b35e) } -var fileDescriptor_tableacl_38276fd243b9c307 = []byte{ +var fileDescriptor_tableacl_82b5f1376534b35e = []byte{ // 232 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xc1, 0x4b, 0xc3, 0x30, 0x14, 0xc6, 0x89, 0x9d, 0xd5, 0xbd, 0xc9, 0x0e, 0x41, 0x34, 0xc7, 0x32, 0x10, 0x7b, 0x6a, 0x40, diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index d780c3bd0e..432b78cad9 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -46,7 +46,7 @@ func (m *TableDefinition) Reset() { *m = TableDefinition{} } func (m *TableDefinition) String() string { return proto.CompactTextString(m) } func (*TableDefinition) ProtoMessage() {} func (*TableDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{0} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{0} } func (m *TableDefinition) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TableDefinition.Unmarshal(m, b) @@ -128,7 +128,7 @@ func (m *SchemaDefinition) Reset() { *m = SchemaDefinition{} } func (m *SchemaDefinition) String() string { return proto.CompactTextString(m) } func (*SchemaDefinition) ProtoMessage() {} func (*SchemaDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{1} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{1} } func (m *SchemaDefinition) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SchemaDefinition.Unmarshal(m, b) @@ -183,7 +183,7 @@ func (m *SchemaChangeResult) Reset() { *m = SchemaChangeResult{} } func (m *SchemaChangeResult) String() string { return proto.CompactTextString(m) } func (*SchemaChangeResult) ProtoMessage() {} func (*SchemaChangeResult) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{2} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{2} } func (m *SchemaChangeResult) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SchemaChangeResult.Unmarshal(m, b) @@ -234,7 +234,7 @@ func (m *UserPermission) Reset() { *m = UserPermission{} } func (m *UserPermission) String() string { return proto.CompactTextString(m) } func (*UserPermission) ProtoMessage() {} func (*UserPermission) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{3} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{3} } func (m *UserPermission) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UserPermission.Unmarshal(m, b) @@ -298,7 +298,7 @@ func (m *DbPermission) Reset() { *m = DbPermission{} } func (m *DbPermission) String() string { return proto.CompactTextString(m) } func (*DbPermission) ProtoMessage() {} func (*DbPermission) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{4} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{4} } func (m *DbPermission) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DbPermission.Unmarshal(m, b) @@ -360,7 +360,7 @@ func (m *Permissions) Reset() { *m = Permissions{} } func (m *Permissions) String() string { return proto.CompactTextString(m) } func (*Permissions) ProtoMessage() {} func (*Permissions) Descriptor() ([]byte, []int) { - return 
fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{5} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{5} } func (m *Permissions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Permissions.Unmarshal(m, b) @@ -405,7 +405,7 @@ func (m *PingRequest) Reset() { *m = PingRequest{} } func (m *PingRequest) String() string { return proto.CompactTextString(m) } func (*PingRequest) ProtoMessage() {} func (*PingRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{6} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{6} } func (m *PingRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PingRequest.Unmarshal(m, b) @@ -443,7 +443,7 @@ func (m *PingResponse) Reset() { *m = PingResponse{} } func (m *PingResponse) String() string { return proto.CompactTextString(m) } func (*PingResponse) ProtoMessage() {} func (*PingResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{7} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{7} } func (m *PingResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PingResponse.Unmarshal(m, b) @@ -482,7 +482,7 @@ func (m *SleepRequest) Reset() { *m = SleepRequest{} } func (m *SleepRequest) String() string { return proto.CompactTextString(m) } func (*SleepRequest) ProtoMessage() {} func (*SleepRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{8} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{8} } func (m *SleepRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SleepRequest.Unmarshal(m, b) @@ -519,7 +519,7 @@ func (m *SleepResponse) Reset() { *m = SleepResponse{} } func (m *SleepResponse) String() string { return proto.CompactTextString(m) } func (*SleepResponse) ProtoMessage() {} func (*SleepResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{9} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{9} } func (m *SleepResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SleepResponse.Unmarshal(m, b) @@ -552,7 +552,7 @@ func (m *ExecuteHookRequest) Reset() { *m = ExecuteHookRequest{} } func (m *ExecuteHookRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteHookRequest) ProtoMessage() {} func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{10} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{10} } func (m *ExecuteHookRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteHookRequest.Unmarshal(m, b) @@ -606,7 +606,7 @@ func (m *ExecuteHookResponse) Reset() { *m = ExecuteHookResponse{} } func (m *ExecuteHookResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteHookResponse) ProtoMessage() {} func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{11} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{11} } func (m *ExecuteHookResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteHookResponse.Unmarshal(m, b) @@ -660,7 +660,7 @@ func (m *GetSchemaRequest) Reset() { *m = GetSchemaRequest{} } func (m *GetSchemaRequest) String() string { return proto.CompactTextString(m) } func (*GetSchemaRequest) ProtoMessage() {} func (*GetSchemaRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{12} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{12} } func (m *GetSchemaRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSchemaRequest.Unmarshal(m, b) @@ -712,7 +712,7 @@ func (m *GetSchemaResponse) Reset() { *m = GetSchemaResponse{} } func (m *GetSchemaResponse) String() string { return proto.CompactTextString(m) } func (*GetSchemaResponse) ProtoMessage() {} func (*GetSchemaResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{13} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{13} } func (m *GetSchemaResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSchemaResponse.Unmarshal(m, b) @@ -749,7 +749,7 @@ func (m *GetPermissionsRequest) Reset() { *m = GetPermissionsRequest{} } func (m *GetPermissionsRequest) String() string { return proto.CompactTextString(m) } func (*GetPermissionsRequest) ProtoMessage() {} func (*GetPermissionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{14} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{14} } func (m *GetPermissionsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetPermissionsRequest.Unmarshal(m, b) @@ -780,7 +780,7 @@ func (m *GetPermissionsResponse) Reset() { *m = GetPermissionsResponse{} func (m *GetPermissionsResponse) String() string { return proto.CompactTextString(m) } func (*GetPermissionsResponse) ProtoMessage() {} func (*GetPermissionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{15} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{15} } func (m *GetPermissionsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetPermissionsResponse.Unmarshal(m, b) @@ -817,7 +817,7 @@ func (m *SetReadOnlyRequest) Reset() { *m = SetReadOnlyRequest{} } func (m *SetReadOnlyRequest) String() string { return proto.CompactTextString(m) } func (*SetReadOnlyRequest) ProtoMessage() {} func (*SetReadOnlyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{16} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{16} } func (m *SetReadOnlyRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetReadOnlyRequest.Unmarshal(m, b) @@ -847,7 +847,7 @@ func (m *SetReadOnlyResponse) Reset() { *m = SetReadOnlyResponse{} } func (m *SetReadOnlyResponse) String() string { return proto.CompactTextString(m) } func (*SetReadOnlyResponse) ProtoMessage() {} func (*SetReadOnlyResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{17} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{17} } func (m *SetReadOnlyResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetReadOnlyResponse.Unmarshal(m, b) @@ -877,7 +877,7 @@ func (m *SetReadWriteRequest) Reset() { *m = SetReadWriteRequest{} } func (m *SetReadWriteRequest) String() string { return proto.CompactTextString(m) } func (*SetReadWriteRequest) ProtoMessage() {} func (*SetReadWriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{18} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{18} } func (m *SetReadWriteRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetReadWriteRequest.Unmarshal(m, b) @@ -907,7 +907,7 @@ func (m 
*SetReadWriteResponse) Reset() { *m = SetReadWriteResponse{} } func (m *SetReadWriteResponse) String() string { return proto.CompactTextString(m) } func (*SetReadWriteResponse) ProtoMessage() {} func (*SetReadWriteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{19} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{19} } func (m *SetReadWriteResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetReadWriteResponse.Unmarshal(m, b) @@ -938,7 +938,7 @@ func (m *ChangeTypeRequest) Reset() { *m = ChangeTypeRequest{} } func (m *ChangeTypeRequest) String() string { return proto.CompactTextString(m) } func (*ChangeTypeRequest) ProtoMessage() {} func (*ChangeTypeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{20} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{20} } func (m *ChangeTypeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChangeTypeRequest.Unmarshal(m, b) @@ -975,7 +975,7 @@ func (m *ChangeTypeResponse) Reset() { *m = ChangeTypeResponse{} } func (m *ChangeTypeResponse) String() string { return proto.CompactTextString(m) } func (*ChangeTypeResponse) ProtoMessage() {} func (*ChangeTypeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{21} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{21} } func (m *ChangeTypeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChangeTypeResponse.Unmarshal(m, b) @@ -1005,7 +1005,7 @@ func (m *RefreshStateRequest) Reset() { *m = RefreshStateRequest{} } func (m *RefreshStateRequest) String() string { return proto.CompactTextString(m) } func (*RefreshStateRequest) ProtoMessage() {} func (*RefreshStateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{22} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{22} } func (m *RefreshStateRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefreshStateRequest.Unmarshal(m, b) @@ -1035,7 +1035,7 @@ func (m *RefreshStateResponse) Reset() { *m = RefreshStateResponse{} } func (m *RefreshStateResponse) String() string { return proto.CompactTextString(m) } func (*RefreshStateResponse) ProtoMessage() {} func (*RefreshStateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{23} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{23} } func (m *RefreshStateResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefreshStateResponse.Unmarshal(m, b) @@ -1065,7 +1065,7 @@ func (m *RunHealthCheckRequest) Reset() { *m = RunHealthCheckRequest{} } func (m *RunHealthCheckRequest) String() string { return proto.CompactTextString(m) } func (*RunHealthCheckRequest) ProtoMessage() {} func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{24} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{24} } func (m *RunHealthCheckRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RunHealthCheckRequest.Unmarshal(m, b) @@ -1095,7 +1095,7 @@ func (m *RunHealthCheckResponse) Reset() { *m = RunHealthCheckResponse{} func (m *RunHealthCheckResponse) String() string { return proto.CompactTextString(m) } func (*RunHealthCheckResponse) ProtoMessage() {} func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { - return 
fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{25} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{25} } func (m *RunHealthCheckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RunHealthCheckResponse.Unmarshal(m, b) @@ -1126,7 +1126,7 @@ func (m *IgnoreHealthErrorRequest) Reset() { *m = IgnoreHealthErrorReque func (m *IgnoreHealthErrorRequest) String() string { return proto.CompactTextString(m) } func (*IgnoreHealthErrorRequest) ProtoMessage() {} func (*IgnoreHealthErrorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{26} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{26} } func (m *IgnoreHealthErrorRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_IgnoreHealthErrorRequest.Unmarshal(m, b) @@ -1163,7 +1163,7 @@ func (m *IgnoreHealthErrorResponse) Reset() { *m = IgnoreHealthErrorResp func (m *IgnoreHealthErrorResponse) String() string { return proto.CompactTextString(m) } func (*IgnoreHealthErrorResponse) ProtoMessage() {} func (*IgnoreHealthErrorResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{27} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{27} } func (m *IgnoreHealthErrorResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_IgnoreHealthErrorResponse.Unmarshal(m, b) @@ -1197,7 +1197,7 @@ func (m *ReloadSchemaRequest) Reset() { *m = ReloadSchemaRequest{} } func (m *ReloadSchemaRequest) String() string { return proto.CompactTextString(m) } func (*ReloadSchemaRequest) ProtoMessage() {} func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{28} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{28} } func (m *ReloadSchemaRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReloadSchemaRequest.Unmarshal(m, b) @@ -1234,7 +1234,7 @@ func (m *ReloadSchemaResponse) Reset() { *m = ReloadSchemaResponse{} } func (m *ReloadSchemaResponse) String() string { return proto.CompactTextString(m) } func (*ReloadSchemaResponse) ProtoMessage() {} func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{29} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{29} } func (m *ReloadSchemaResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReloadSchemaResponse.Unmarshal(m, b) @@ -1265,7 +1265,7 @@ func (m *PreflightSchemaRequest) Reset() { *m = PreflightSchemaRequest{} func (m *PreflightSchemaRequest) String() string { return proto.CompactTextString(m) } func (*PreflightSchemaRequest) ProtoMessage() {} func (*PreflightSchemaRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{30} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{30} } func (m *PreflightSchemaRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PreflightSchemaRequest.Unmarshal(m, b) @@ -1305,7 +1305,7 @@ func (m *PreflightSchemaResponse) Reset() { *m = PreflightSchemaResponse func (m *PreflightSchemaResponse) String() string { return proto.CompactTextString(m) } func (*PreflightSchemaResponse) ProtoMessage() {} func (*PreflightSchemaResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{31} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{31} } func (m *PreflightSchemaResponse) XXX_Unmarshal(b 
[]byte) error {
 	return xxx_messageInfo_PreflightSchemaResponse.Unmarshal(m, b)
@@ -1347,7 +1347,7 @@ func (m *ApplySchemaRequest) Reset() { *m = ApplySchemaRequest{} }
 func (m *ApplySchemaRequest) String() string { return proto.CompactTextString(m) }
 func (*ApplySchemaRequest) ProtoMessage() {}
 func (*ApplySchemaRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{32}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{32}
 }
 func (m *ApplySchemaRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ApplySchemaRequest.Unmarshal(m, b)
@@ -1414,7 +1414,7 @@ func (m *ApplySchemaResponse) Reset() { *m = ApplySchemaResponse{} }
 func (m *ApplySchemaResponse) String() string { return proto.CompactTextString(m) }
 func (*ApplySchemaResponse) ProtoMessage() {}
 func (*ApplySchemaResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{33}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{33}
 }
 func (m *ApplySchemaResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ApplySchemaResponse.Unmarshal(m, b)
@@ -1448,6 +1448,126 @@ func (m *ApplySchemaResponse) GetAfterSchema() *SchemaDefinition {
 	return nil
 }
 
+type LockTablesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LockTablesRequest) Reset()         { *m = LockTablesRequest{} }
+func (m *LockTablesRequest) String() string { return proto.CompactTextString(m) }
+func (*LockTablesRequest) ProtoMessage()    {}
+func (*LockTablesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{34}
+}
+func (m *LockTablesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LockTablesRequest.Unmarshal(m, b)
+}
+func (m *LockTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LockTablesRequest.Marshal(b, m, deterministic)
+}
+func (dst *LockTablesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LockTablesRequest.Merge(dst, src)
+}
+func (m *LockTablesRequest) XXX_Size() int {
+	return xxx_messageInfo_LockTablesRequest.Size(m)
+}
+func (m *LockTablesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LockTablesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LockTablesRequest proto.InternalMessageInfo
+
+type LockTablesResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LockTablesResponse) Reset()         { *m = LockTablesResponse{} }
+func (m *LockTablesResponse) String() string { return proto.CompactTextString(m) }
+func (*LockTablesResponse) ProtoMessage()    {}
+func (*LockTablesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{35}
+}
+func (m *LockTablesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LockTablesResponse.Unmarshal(m, b)
+}
+func (m *LockTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LockTablesResponse.Marshal(b, m, deterministic)
+}
+func (dst *LockTablesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LockTablesResponse.Merge(dst, src)
+}
+func (m *LockTablesResponse) XXX_Size() int {
+	return xxx_messageInfo_LockTablesResponse.Size(m)
+}
+func (m *LockTablesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LockTablesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LockTablesResponse proto.InternalMessageInfo
+
+type UnlockTablesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UnlockTablesRequest) Reset()         { *m = UnlockTablesRequest{} }
+func (m *UnlockTablesRequest) String() string { return proto.CompactTextString(m) }
+func (*UnlockTablesRequest) ProtoMessage()    {}
+func (*UnlockTablesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{36}
+}
+func (m *UnlockTablesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UnlockTablesRequest.Unmarshal(m, b)
+}
+func (m *UnlockTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UnlockTablesRequest.Marshal(b, m, deterministic)
+}
+func (dst *UnlockTablesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UnlockTablesRequest.Merge(dst, src)
+}
+func (m *UnlockTablesRequest) XXX_Size() int {
+	return xxx_messageInfo_UnlockTablesRequest.Size(m)
+}
+func (m *UnlockTablesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UnlockTablesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnlockTablesRequest proto.InternalMessageInfo
+
+type UnlockTablesResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UnlockTablesResponse) Reset()         { *m = UnlockTablesResponse{} }
+func (m *UnlockTablesResponse) String() string { return proto.CompactTextString(m) }
+func (*UnlockTablesResponse) ProtoMessage()    {}
+func (*UnlockTablesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{37}
+}
+func (m *UnlockTablesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UnlockTablesResponse.Unmarshal(m, b)
+}
+func (m *UnlockTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UnlockTablesResponse.Marshal(b, m, deterministic)
+}
+func (dst *UnlockTablesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UnlockTablesResponse.Merge(dst, src)
+}
+func (m *UnlockTablesResponse) XXX_Size() int {
+	return xxx_messageInfo_UnlockTablesResponse.Size(m)
+}
+func (m *UnlockTablesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_UnlockTablesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnlockTablesResponse proto.InternalMessageInfo
+
 type ExecuteFetchAsDbaRequest struct {
 	Query  []byte `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
 	DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"`
@@ -1463,7 +1583,7 @@ func (m *ExecuteFetchAsDbaRequest) Reset() { *m = ExecuteFetchAsDbaReque
 func (m *ExecuteFetchAsDbaRequest) String() string { return proto.CompactTextString(m) }
 func (*ExecuteFetchAsDbaRequest) ProtoMessage() {}
 func (*ExecuteFetchAsDbaRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{34}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{38}
 }
 func (m *ExecuteFetchAsDbaRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ExecuteFetchAsDbaRequest.Unmarshal(m, b)
@@ -1529,7 +1649,7 @@ func (m *ExecuteFetchAsDbaResponse) Reset() { *m = ExecuteFetchAsDbaResp
 func (m *ExecuteFetchAsDbaResponse) String() string { return proto.CompactTextString(m) }
 func (*ExecuteFetchAsDbaResponse) ProtoMessage() {}
 func (*ExecuteFetchAsDbaResponse) Descriptor() ([]byte, []int) {
-
return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{35} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{39} } func (m *ExecuteFetchAsDbaResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteFetchAsDbaResponse.Unmarshal(m, b) @@ -1570,7 +1690,7 @@ func (m *ExecuteFetchAsAllPrivsRequest) Reset() { *m = ExecuteFetchAsAll func (m *ExecuteFetchAsAllPrivsRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteFetchAsAllPrivsRequest) ProtoMessage() {} func (*ExecuteFetchAsAllPrivsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{36} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{40} } func (m *ExecuteFetchAsAllPrivsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Unmarshal(m, b) @@ -1629,7 +1749,7 @@ func (m *ExecuteFetchAsAllPrivsResponse) Reset() { *m = ExecuteFetchAsAl func (m *ExecuteFetchAsAllPrivsResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteFetchAsAllPrivsResponse) ProtoMessage() {} func (*ExecuteFetchAsAllPrivsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{37} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{41} } func (m *ExecuteFetchAsAllPrivsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Unmarshal(m, b) @@ -1668,7 +1788,7 @@ func (m *ExecuteFetchAsAppRequest) Reset() { *m = ExecuteFetchAsAppReque func (m *ExecuteFetchAsAppRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteFetchAsAppRequest) ProtoMessage() {} func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{38} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{42} } func (m *ExecuteFetchAsAppRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteFetchAsAppRequest.Unmarshal(m, b) @@ -1713,7 +1833,7 @@ func (m *ExecuteFetchAsAppResponse) Reset() { *m = ExecuteFetchAsAppResp func (m *ExecuteFetchAsAppResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteFetchAsAppResponse) ProtoMessage() {} func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{39} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{43} } func (m *ExecuteFetchAsAppResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteFetchAsAppResponse.Unmarshal(m, b) @@ -1750,7 +1870,7 @@ func (m *SlaveStatusRequest) Reset() { *m = SlaveStatusRequest{} } func (m *SlaveStatusRequest) String() string { return proto.CompactTextString(m) } func (*SlaveStatusRequest) ProtoMessage() {} func (*SlaveStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{40} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{44} } func (m *SlaveStatusRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SlaveStatusRequest.Unmarshal(m, b) @@ -1781,7 +1901,7 @@ func (m *SlaveStatusResponse) Reset() { *m = SlaveStatusResponse{} } func (m *SlaveStatusResponse) String() string { return proto.CompactTextString(m) } func (*SlaveStatusResponse) ProtoMessage() {} func (*SlaveStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{41} + return 
fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{45} } func (m *SlaveStatusResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SlaveStatusResponse.Unmarshal(m, b) @@ -1818,7 +1938,7 @@ func (m *MasterPositionRequest) Reset() { *m = MasterPositionRequest{} } func (m *MasterPositionRequest) String() string { return proto.CompactTextString(m) } func (*MasterPositionRequest) ProtoMessage() {} func (*MasterPositionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{42} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{46} } func (m *MasterPositionRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MasterPositionRequest.Unmarshal(m, b) @@ -1849,7 +1969,7 @@ func (m *MasterPositionResponse) Reset() { *m = MasterPositionResponse{} func (m *MasterPositionResponse) String() string { return proto.CompactTextString(m) } func (*MasterPositionResponse) ProtoMessage() {} func (*MasterPositionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{43} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{47} } func (m *MasterPositionResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MasterPositionResponse.Unmarshal(m, b) @@ -1886,7 +2006,7 @@ func (m *StopSlaveRequest) Reset() { *m = StopSlaveRequest{} } func (m *StopSlaveRequest) String() string { return proto.CompactTextString(m) } func (*StopSlaveRequest) ProtoMessage() {} func (*StopSlaveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{44} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{48} } func (m *StopSlaveRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StopSlaveRequest.Unmarshal(m, b) @@ -1916,7 +2036,7 @@ func (m *StopSlaveResponse) Reset() { *m = StopSlaveResponse{} } func (m *StopSlaveResponse) String() string { return proto.CompactTextString(m) } func (*StopSlaveResponse) ProtoMessage() {} func (*StopSlaveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{45} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{49} } func (m *StopSlaveResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StopSlaveResponse.Unmarshal(m, b) @@ -1948,7 +2068,7 @@ func (m *StopSlaveMinimumRequest) Reset() { *m = StopSlaveMinimumRequest func (m *StopSlaveMinimumRequest) String() string { return proto.CompactTextString(m) } func (*StopSlaveMinimumRequest) ProtoMessage() {} func (*StopSlaveMinimumRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{46} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{50} } func (m *StopSlaveMinimumRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StopSlaveMinimumRequest.Unmarshal(m, b) @@ -1993,7 +2113,7 @@ func (m *StopSlaveMinimumResponse) Reset() { *m = StopSlaveMinimumRespon func (m *StopSlaveMinimumResponse) String() string { return proto.CompactTextString(m) } func (*StopSlaveMinimumResponse) ProtoMessage() {} func (*StopSlaveMinimumResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{47} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{51} } func (m *StopSlaveMinimumResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StopSlaveMinimumResponse.Unmarshal(m, b) @@ -2030,7 +2150,7 @@ func (m *StartSlaveRequest) Reset() { 
*m = StartSlaveRequest{} }
 func (m *StartSlaveRequest) String() string { return proto.CompactTextString(m) }
 func (*StartSlaveRequest) ProtoMessage() {}
 func (*StartSlaveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{48}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{52}
 }
 func (m *StartSlaveRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StartSlaveRequest.Unmarshal(m, b)
@@ -2060,7 +2180,7 @@ func (m *StartSlaveResponse) Reset() { *m = StartSlaveResponse{} }
 func (m *StartSlaveResponse) String() string { return proto.CompactTextString(m) }
 func (*StartSlaveResponse) ProtoMessage() {}
 func (*StartSlaveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{49}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{53}
 }
 func (m *StartSlaveResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StartSlaveResponse.Unmarshal(m, b)
@@ -2080,6 +2200,82 @@ func (m *StartSlaveResponse) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_StartSlaveResponse proto.InternalMessageInfo
 
+type StartSlaveUntilAfterRequest struct {
+	Position             string   `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"`
+	WaitTimeout          int64    `protobuf:"varint,2,opt,name=wait_timeout,json=waitTimeout,proto3" json:"wait_timeout,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StartSlaveUntilAfterRequest) Reset()         { *m = StartSlaveUntilAfterRequest{} }
+func (m *StartSlaveUntilAfterRequest) String() string { return proto.CompactTextString(m) }
+func (*StartSlaveUntilAfterRequest) ProtoMessage()    {}
+func (*StartSlaveUntilAfterRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{54}
+}
+func (m *StartSlaveUntilAfterRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StartSlaveUntilAfterRequest.Unmarshal(m, b)
+}
+func (m *StartSlaveUntilAfterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StartSlaveUntilAfterRequest.Marshal(b, m, deterministic)
+}
+func (dst *StartSlaveUntilAfterRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StartSlaveUntilAfterRequest.Merge(dst, src)
+}
+func (m *StartSlaveUntilAfterRequest) XXX_Size() int {
+	return xxx_messageInfo_StartSlaveUntilAfterRequest.Size(m)
+}
+func (m *StartSlaveUntilAfterRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StartSlaveUntilAfterRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartSlaveUntilAfterRequest proto.InternalMessageInfo
+
+func (m *StartSlaveUntilAfterRequest) GetPosition() string {
+	if m != nil {
+		return m.Position
+	}
+	return ""
+}
+
+func (m *StartSlaveUntilAfterRequest) GetWaitTimeout() int64 {
+	if m != nil {
+		return m.WaitTimeout
+	}
+	return 0
+}
+
+type StartSlaveUntilAfterResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StartSlaveUntilAfterResponse) Reset()         { *m = StartSlaveUntilAfterResponse{} }
+func (m *StartSlaveUntilAfterResponse) String() string { return proto.CompactTextString(m) }
+func (*StartSlaveUntilAfterResponse) ProtoMessage()    {}
+func (*StartSlaveUntilAfterResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{55}
+}
+func (m *StartSlaveUntilAfterResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StartSlaveUntilAfterResponse.Unmarshal(m, b)
+}
+func (m *StartSlaveUntilAfterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StartSlaveUntilAfterResponse.Marshal(b, m, deterministic)
+}
+func (dst *StartSlaveUntilAfterResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StartSlaveUntilAfterResponse.Merge(dst, src)
+}
+func (m *StartSlaveUntilAfterResponse) XXX_Size() int {
+	return xxx_messageInfo_StartSlaveUntilAfterResponse.Size(m)
+}
+func (m *StartSlaveUntilAfterResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StartSlaveUntilAfterResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartSlaveUntilAfterResponse proto.InternalMessageInfo
+
 type TabletExternallyReparentedRequest struct {
 	// external_id is an string value that may be provided by an external
 	// agent for tracking purposes. The tablet will emit this string in
@@ -2094,7 +2290,7 @@ func (m *TabletExternallyReparentedRequest) Reset() { *m = TabletExterna
 func (m *TabletExternallyReparentedRequest) String() string { return proto.CompactTextString(m) }
 func (*TabletExternallyReparentedRequest) ProtoMessage() {}
 func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{50}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{56}
 }
 func (m *TabletExternallyReparentedRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TabletExternallyReparentedRequest.Unmarshal(m, b)
@@ -2131,7 +2327,7 @@ func (m *TabletExternallyReparentedResponse) Reset() { *m = TabletExtern
 func (m *TabletExternallyReparentedResponse) String() string { return proto.CompactTextString(m) }
 func (*TabletExternallyReparentedResponse) ProtoMessage() {}
 func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{51}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{57}
 }
 func (m *TabletExternallyReparentedResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TabletExternallyReparentedResponse.Unmarshal(m, b)
@@ -2161,7 +2357,7 @@ func (m *TabletExternallyElectedRequest) Reset() { *m = TabletExternally
 func (m *TabletExternallyElectedRequest) String() string { return proto.CompactTextString(m) }
 func (*TabletExternallyElectedRequest) ProtoMessage() {}
 func (*TabletExternallyElectedRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{52}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{58}
 }
 func (m *TabletExternallyElectedRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TabletExternallyElectedRequest.Unmarshal(m, b)
@@ -2191,7 +2387,7 @@ func (m *TabletExternallyElectedResponse) Reset() { *m = TabletExternall
 func (m *TabletExternallyElectedResponse) String() string { return proto.CompactTextString(m) }
 func (*TabletExternallyElectedResponse) ProtoMessage() {}
 func (*TabletExternallyElectedResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{53}
+	return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{59}
 }
 func (m *TabletExternallyElectedResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TabletExternallyElectedResponse.Unmarshal(m, b)
@@ -2221,7 +2417,7 @@ func (m *GetSlavesRequest) Reset() { *m = GetSlavesRequest{} }
 func (m *GetSlavesRequest) String() string { return proto.CompactTextString(m) }
 func (*GetSlavesRequest)
ProtoMessage() {} func (*GetSlavesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{54} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{60} } func (m *GetSlavesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSlavesRequest.Unmarshal(m, b) @@ -2252,7 +2448,7 @@ func (m *GetSlavesResponse) Reset() { *m = GetSlavesResponse{} } func (m *GetSlavesResponse) String() string { return proto.CompactTextString(m) } func (*GetSlavesResponse) ProtoMessage() {} func (*GetSlavesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{55} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{61} } func (m *GetSlavesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSlavesResponse.Unmarshal(m, b) @@ -2289,7 +2485,7 @@ func (m *ResetReplicationRequest) Reset() { *m = ResetReplicationRequest func (m *ResetReplicationRequest) String() string { return proto.CompactTextString(m) } func (*ResetReplicationRequest) ProtoMessage() {} func (*ResetReplicationRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{56} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{62} } func (m *ResetReplicationRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResetReplicationRequest.Unmarshal(m, b) @@ -2319,7 +2515,7 @@ func (m *ResetReplicationResponse) Reset() { *m = ResetReplicationRespon func (m *ResetReplicationResponse) String() string { return proto.CompactTextString(m) } func (*ResetReplicationResponse) ProtoMessage() {} func (*ResetReplicationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{57} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{63} } func (m *ResetReplicationResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResetReplicationResponse.Unmarshal(m, b) @@ -2350,7 +2546,7 @@ func (m *VReplicationExecRequest) Reset() { *m = VReplicationExecRequest func (m *VReplicationExecRequest) String() string { return proto.CompactTextString(m) } func (*VReplicationExecRequest) ProtoMessage() {} func (*VReplicationExecRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{58} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{64} } func (m *VReplicationExecRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VReplicationExecRequest.Unmarshal(m, b) @@ -2388,7 +2584,7 @@ func (m *VReplicationExecResponse) Reset() { *m = VReplicationExecRespon func (m *VReplicationExecResponse) String() string { return proto.CompactTextString(m) } func (*VReplicationExecResponse) ProtoMessage() {} func (*VReplicationExecResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{59} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{65} } func (m *VReplicationExecResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VReplicationExecResponse.Unmarshal(m, b) @@ -2427,7 +2623,7 @@ func (m *VReplicationWaitForPosRequest) Reset() { *m = VReplicationWaitF func (m *VReplicationWaitForPosRequest) String() string { return proto.CompactTextString(m) } func (*VReplicationWaitForPosRequest) ProtoMessage() {} func (*VReplicationWaitForPosRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{60} + return 
fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{66} } func (m *VReplicationWaitForPosRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VReplicationWaitForPosRequest.Unmarshal(m, b) @@ -2471,7 +2667,7 @@ func (m *VReplicationWaitForPosResponse) Reset() { *m = VReplicationWait func (m *VReplicationWaitForPosResponse) String() string { return proto.CompactTextString(m) } func (*VReplicationWaitForPosResponse) ProtoMessage() {} func (*VReplicationWaitForPosResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{61} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{67} } func (m *VReplicationWaitForPosResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VReplicationWaitForPosResponse.Unmarshal(m, b) @@ -2501,7 +2697,7 @@ func (m *InitMasterRequest) Reset() { *m = InitMasterRequest{} } func (m *InitMasterRequest) String() string { return proto.CompactTextString(m) } func (*InitMasterRequest) ProtoMessage() {} func (*InitMasterRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{62} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{68} } func (m *InitMasterRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_InitMasterRequest.Unmarshal(m, b) @@ -2532,7 +2728,7 @@ func (m *InitMasterResponse) Reset() { *m = InitMasterResponse{} } func (m *InitMasterResponse) String() string { return proto.CompactTextString(m) } func (*InitMasterResponse) ProtoMessage() {} func (*InitMasterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{63} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{69} } func (m *InitMasterResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_InitMasterResponse.Unmarshal(m, b) @@ -2573,7 +2769,7 @@ func (m *PopulateReparentJournalRequest) Reset() { *m = PopulateReparent func (m *PopulateReparentJournalRequest) String() string { return proto.CompactTextString(m) } func (*PopulateReparentJournalRequest) ProtoMessage() {} func (*PopulateReparentJournalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{64} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{70} } func (m *PopulateReparentJournalRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PopulateReparentJournalRequest.Unmarshal(m, b) @@ -2631,7 +2827,7 @@ func (m *PopulateReparentJournalResponse) Reset() { *m = PopulateReparen func (m *PopulateReparentJournalResponse) String() string { return proto.CompactTextString(m) } func (*PopulateReparentJournalResponse) ProtoMessage() {} func (*PopulateReparentJournalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{65} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{71} } func (m *PopulateReparentJournalResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PopulateReparentJournalResponse.Unmarshal(m, b) @@ -2664,7 +2860,7 @@ func (m *InitSlaveRequest) Reset() { *m = InitSlaveRequest{} } func (m *InitSlaveRequest) String() string { return proto.CompactTextString(m) } func (*InitSlaveRequest) ProtoMessage() {} func (*InitSlaveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{66} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{72} } func (m *InitSlaveRequest) XXX_Unmarshal(b []byte) error { return 
xxx_messageInfo_InitSlaveRequest.Unmarshal(m, b) @@ -2715,7 +2911,7 @@ func (m *InitSlaveResponse) Reset() { *m = InitSlaveResponse{} } func (m *InitSlaveResponse) String() string { return proto.CompactTextString(m) } func (*InitSlaveResponse) ProtoMessage() {} func (*InitSlaveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{67} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{73} } func (m *InitSlaveResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_InitSlaveResponse.Unmarshal(m, b) @@ -2745,7 +2941,7 @@ func (m *DemoteMasterRequest) Reset() { *m = DemoteMasterRequest{} } func (m *DemoteMasterRequest) String() string { return proto.CompactTextString(m) } func (*DemoteMasterRequest) ProtoMessage() {} func (*DemoteMasterRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{68} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{74} } func (m *DemoteMasterRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DemoteMasterRequest.Unmarshal(m, b) @@ -2776,7 +2972,7 @@ func (m *DemoteMasterResponse) Reset() { *m = DemoteMasterResponse{} } func (m *DemoteMasterResponse) String() string { return proto.CompactTextString(m) } func (*DemoteMasterResponse) ProtoMessage() {} func (*DemoteMasterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{69} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{75} } func (m *DemoteMasterResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DemoteMasterResponse.Unmarshal(m, b) @@ -2814,7 +3010,7 @@ func (m *PromoteSlaveWhenCaughtUpRequest) Reset() { *m = PromoteSlaveWhe func (m *PromoteSlaveWhenCaughtUpRequest) String() string { return proto.CompactTextString(m) } func (*PromoteSlaveWhenCaughtUpRequest) ProtoMessage() {} func (*PromoteSlaveWhenCaughtUpRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{70} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{76} } func (m *PromoteSlaveWhenCaughtUpRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PromoteSlaveWhenCaughtUpRequest.Unmarshal(m, b) @@ -2852,7 +3048,7 @@ func (m *PromoteSlaveWhenCaughtUpResponse) Reset() { *m = PromoteSlaveWh func (m *PromoteSlaveWhenCaughtUpResponse) String() string { return proto.CompactTextString(m) } func (*PromoteSlaveWhenCaughtUpResponse) ProtoMessage() {} func (*PromoteSlaveWhenCaughtUpResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{71} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{77} } func (m *PromoteSlaveWhenCaughtUpResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PromoteSlaveWhenCaughtUpResponse.Unmarshal(m, b) @@ -2889,7 +3085,7 @@ func (m *SlaveWasPromotedRequest) Reset() { *m = SlaveWasPromotedRequest func (m *SlaveWasPromotedRequest) String() string { return proto.CompactTextString(m) } func (*SlaveWasPromotedRequest) ProtoMessage() {} func (*SlaveWasPromotedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{72} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{78} } func (m *SlaveWasPromotedRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SlaveWasPromotedRequest.Unmarshal(m, b) @@ -2919,7 +3115,7 @@ func (m *SlaveWasPromotedResponse) Reset() { *m = SlaveWasPromotedRespon 
func (m *SlaveWasPromotedResponse) String() string { return proto.CompactTextString(m) } func (*SlaveWasPromotedResponse) ProtoMessage() {} func (*SlaveWasPromotedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{73} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{79} } func (m *SlaveWasPromotedResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SlaveWasPromotedResponse.Unmarshal(m, b) @@ -2952,7 +3148,7 @@ func (m *SetMasterRequest) Reset() { *m = SetMasterRequest{} } func (m *SetMasterRequest) String() string { return proto.CompactTextString(m) } func (*SetMasterRequest) ProtoMessage() {} func (*SetMasterRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{74} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{80} } func (m *SetMasterRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetMasterRequest.Unmarshal(m, b) @@ -3003,7 +3199,7 @@ func (m *SetMasterResponse) Reset() { *m = SetMasterResponse{} } func (m *SetMasterResponse) String() string { return proto.CompactTextString(m) } func (*SetMasterResponse) ProtoMessage() {} func (*SetMasterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{75} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{81} } func (m *SetMasterResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetMasterResponse.Unmarshal(m, b) @@ -3035,7 +3231,7 @@ func (m *SlaveWasRestartedRequest) Reset() { *m = SlaveWasRestartedReque func (m *SlaveWasRestartedRequest) String() string { return proto.CompactTextString(m) } func (*SlaveWasRestartedRequest) ProtoMessage() {} func (*SlaveWasRestartedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{76} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{82} } func (m *SlaveWasRestartedRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SlaveWasRestartedRequest.Unmarshal(m, b) @@ -3072,7 +3268,7 @@ func (m *SlaveWasRestartedResponse) Reset() { *m = SlaveWasRestartedResp func (m *SlaveWasRestartedResponse) String() string { return proto.CompactTextString(m) } func (*SlaveWasRestartedResponse) ProtoMessage() {} func (*SlaveWasRestartedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{77} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{83} } func (m *SlaveWasRestartedResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SlaveWasRestartedResponse.Unmarshal(m, b) @@ -3102,7 +3298,7 @@ func (m *StopReplicationAndGetStatusRequest) Reset() { *m = StopReplicat func (m *StopReplicationAndGetStatusRequest) String() string { return proto.CompactTextString(m) } func (*StopReplicationAndGetStatusRequest) ProtoMessage() {} func (*StopReplicationAndGetStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{78} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{84} } func (m *StopReplicationAndGetStatusRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StopReplicationAndGetStatusRequest.Unmarshal(m, b) @@ -3133,7 +3329,7 @@ func (m *StopReplicationAndGetStatusResponse) Reset() { *m = StopReplica func (m *StopReplicationAndGetStatusResponse) String() string { return proto.CompactTextString(m) } func (*StopReplicationAndGetStatusResponse) ProtoMessage() 
{} func (*StopReplicationAndGetStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{79} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{85} } func (m *StopReplicationAndGetStatusResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StopReplicationAndGetStatusResponse.Unmarshal(m, b) @@ -3170,7 +3366,7 @@ func (m *PromoteSlaveRequest) Reset() { *m = PromoteSlaveRequest{} } func (m *PromoteSlaveRequest) String() string { return proto.CompactTextString(m) } func (*PromoteSlaveRequest) ProtoMessage() {} func (*PromoteSlaveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{80} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{86} } func (m *PromoteSlaveRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PromoteSlaveRequest.Unmarshal(m, b) @@ -3201,7 +3397,7 @@ func (m *PromoteSlaveResponse) Reset() { *m = PromoteSlaveResponse{} } func (m *PromoteSlaveResponse) String() string { return proto.CompactTextString(m) } func (*PromoteSlaveResponse) ProtoMessage() {} func (*PromoteSlaveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{81} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{87} } func (m *PromoteSlaveResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PromoteSlaveResponse.Unmarshal(m, b) @@ -3239,7 +3435,7 @@ func (m *BackupRequest) Reset() { *m = BackupRequest{} } func (m *BackupRequest) String() string { return proto.CompactTextString(m) } func (*BackupRequest) ProtoMessage() {} func (*BackupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{82} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{88} } func (m *BackupRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BackupRequest.Unmarshal(m, b) @@ -3277,7 +3473,7 @@ func (m *BackupResponse) Reset() { *m = BackupResponse{} } func (m *BackupResponse) String() string { return proto.CompactTextString(m) } func (*BackupResponse) ProtoMessage() {} func (*BackupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{83} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{89} } func (m *BackupResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BackupResponse.Unmarshal(m, b) @@ -3314,7 +3510,7 @@ func (m *RestoreFromBackupRequest) Reset() { *m = RestoreFromBackupReque func (m *RestoreFromBackupRequest) String() string { return proto.CompactTextString(m) } func (*RestoreFromBackupRequest) ProtoMessage() {} func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{84} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{90} } func (m *RestoreFromBackupRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RestoreFromBackupRequest.Unmarshal(m, b) @@ -3345,7 +3541,7 @@ func (m *RestoreFromBackupResponse) Reset() { *m = RestoreFromBackupResp func (m *RestoreFromBackupResponse) String() string { return proto.CompactTextString(m) } func (*RestoreFromBackupResponse) ProtoMessage() {} func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f, []int{85} + return fileDescriptor_tabletmanagerdata_9e0123608316bc1a, []int{91} } func (m *RestoreFromBackupResponse) XXX_Unmarshal(b []byte) 
error {
 	return xxx_messageInfo_RestoreFromBackupResponse.Unmarshal(m, b)
@@ -3410,6 +3606,10 @@ func init() {
 	proto.RegisterType((*PreflightSchemaResponse)(nil), "tabletmanagerdata.PreflightSchemaResponse")
 	proto.RegisterType((*ApplySchemaRequest)(nil), "tabletmanagerdata.ApplySchemaRequest")
 	proto.RegisterType((*ApplySchemaResponse)(nil), "tabletmanagerdata.ApplySchemaResponse")
+	proto.RegisterType((*LockTablesRequest)(nil), "tabletmanagerdata.LockTablesRequest")
+	proto.RegisterType((*LockTablesResponse)(nil), "tabletmanagerdata.LockTablesResponse")
+	proto.RegisterType((*UnlockTablesRequest)(nil), "tabletmanagerdata.UnlockTablesRequest")
+	proto.RegisterType((*UnlockTablesResponse)(nil), "tabletmanagerdata.UnlockTablesResponse")
 	proto.RegisterType((*ExecuteFetchAsDbaRequest)(nil), "tabletmanagerdata.ExecuteFetchAsDbaRequest")
 	proto.RegisterType((*ExecuteFetchAsDbaResponse)(nil), "tabletmanagerdata.ExecuteFetchAsDbaResponse")
 	proto.RegisterType((*ExecuteFetchAsAllPrivsRequest)(nil), "tabletmanagerdata.ExecuteFetchAsAllPrivsRequest")
@@ -3426,6 +3626,8 @@ func init() {
 	proto.RegisterType((*StopSlaveMinimumResponse)(nil), "tabletmanagerdata.StopSlaveMinimumResponse")
 	proto.RegisterType((*StartSlaveRequest)(nil), "tabletmanagerdata.StartSlaveRequest")
 	proto.RegisterType((*StartSlaveResponse)(nil), "tabletmanagerdata.StartSlaveResponse")
+	proto.RegisterType((*StartSlaveUntilAfterRequest)(nil), "tabletmanagerdata.StartSlaveUntilAfterRequest")
+	proto.RegisterType((*StartSlaveUntilAfterResponse)(nil), "tabletmanagerdata.StartSlaveUntilAfterResponse")
 	proto.RegisterType((*TabletExternallyReparentedRequest)(nil), "tabletmanagerdata.TabletExternallyReparentedRequest")
 	proto.RegisterType((*TabletExternallyReparentedResponse)(nil), "tabletmanagerdata.TabletExternallyReparentedResponse")
 	proto.RegisterType((*TabletExternallyElectedRequest)(nil), "tabletmanagerdata.TabletExternallyElectedRequest")
@@ -3465,134 +3667,138 @@ func init() {
 }
 
 func init() {
-	proto.RegisterFile("tabletmanagerdata.proto", fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f)
+	proto.RegisterFile("tabletmanagerdata.proto", fileDescriptor_tabletmanagerdata_9e0123608316bc1a)
 }
 
-var fileDescriptor_tabletmanagerdata_7a10c2dfddcc994f = []byte{
-	// 1999 bytes of a gzipped FileDescriptorProto
-	[... gzipped FileDescriptorProto bytes elided ...]
+var fileDescriptor_tabletmanagerdata_9e0123608316bc1a = []byte{
+	// 2050 bytes of a gzipped FileDescriptorProto
+	[... gzipped FileDescriptorProto bytes elided ...]
0x7b, 0x1b, 0xa6, 0x23, 0x86, 0xbd, 0x0d, 0xc5, 0x88, 0x39, 0x27, 0xd4, 0xc3, 0xba, 0x59, 0x15, + 0x21, 0xd6, 0x00, 0x14, 0x86, 0xe4, 0xd2, 0x35, 0xb6, 0x53, 0x39, 0x19, 0x96, 0x9d, 0xaa, 0x3c, + 0x70, 0xc6, 0xf8, 0xf4, 0x02, 0xb4, 0xf0, 0xa1, 0x16, 0xa0, 0xc5, 0x5b, 0x2e, 0x40, 0xff, 0xc8, + 0x41, 0x3d, 0x13, 0xbd, 0xce, 0xf1, 0xcf, 0x6f, 0x55, 0xab, 0x43, 0xed, 0x84, 0x78, 0x17, 0x6a, + 0xea, 0xa5, 0xad, 0xd1, 0x00, 0xcb, 0x04, 0xc7, 0x8d, 0xf7, 0x2a, 0x0a, 0xa7, 0x98, 0xd7, 0xa1, + 0x91, 0x85, 0x35, 0xfb, 0x3f, 0x73, 0xd0, 0xd2, 0x4f, 0xc4, 0x31, 0xe6, 0x5e, 0x6f, 0x9f, 0x1d, + 0x76, 0x46, 0x75, 0xd0, 0x80, 0x45, 0xf9, 0xa3, 0x44, 0x26, 0xa0, 0xe4, 0x28, 0xc2, 0x6a, 0xc2, + 0x1d, 0xbf, 0xe3, 0xca, 0xa7, 0x51, 0xbf, 0x0e, 0x7e, 0xe7, 0x7b, 0xf1, 0x38, 0x6e, 0xc0, 0x72, + 0x1f, 0x5d, 0xb9, 0x94, 0x5c, 0x32, 0xbd, 0x0c, 0xde, 0xe9, 0xa3, 0x2b, 0x87, 0x5c, 0x32, 0xb9, + 0xa8, 0x07, 0x4c, 0x6e, 0xe0, 0x9d, 0x20, 0x0a, 0x49, 0x97, 0xc9, 0xeb, 0x5f, 0x76, 0x2a, 0x1a, + 0x7e, 0xa2, 0x50, 0xd1, 0x6b, 0x54, 0xb6, 0x91, 0x79, 0xb9, 0xcb, 0x4e, 0x89, 0x1a, 0xbd, 0x65, + 0x3f, 0x85, 0x8d, 0x19, 0x3e, 0xeb, 0xdb, 0x7b, 0x04, 0x4b, 0xaa, 0x35, 0xf4, 0xb5, 0x59, 0x6d, + 0xf5, 0xc3, 0xea, 0x07, 0xf1, 0xaf, 0x6e, 0x03, 0xcd, 0x61, 0xff, 0x39, 0x07, 0x1f, 0x65, 0x35, + 0xed, 0x87, 0xa1, 0x58, 0xc0, 0xd8, 0x87, 0x4f, 0xc1, 0x54, 0x64, 0x0b, 0x33, 0x22, 0x3b, 0x81, + 0xad, 0x79, 0xfe, 0xdc, 0x22, 0xbc, 0x6f, 0x27, 0xef, 0x76, 0x3f, 0x8e, 0xdf, 0x1d, 0x98, 0xe9, + 0x7f, 0x3e, 0xe3, 0xff, 0x74, 0xd2, 0xa5, 0xb2, 0x5b, 0x78, 0x25, 0x1e, 0xb6, 0x10, 0x0d, 0xb0, + 0xda, 0x35, 0xd2, 0x02, 0x3d, 0x86, 0x7a, 0x06, 0xd5, 0x8a, 0x77, 0xc5, 0xc6, 0x31, 0xda, 0x52, + 0x8a, 0x7b, 0xcd, 0xf6, 0xe4, 0x2f, 0x61, 0x2d, 0xa0, 0xd9, 0xc4, 0x4b, 0xf2, 0x1d, 0x62, 0x1c, + 0xd3, 0x74, 0x32, 0xa7, 0x06, 0x3e, 0x87, 0xf5, 0xc9, 0x03, 0x6d, 0x63, 0x13, 0x96, 0x27, 0x46, + 0xfb, 0x88, 0xb6, 0x2d, 0xa8, 0x9e, 0x71, 0x12, 0x4b, 0xd7, 0x52, 0x4d, 0x75, 0xa8, 0x19, 0x98, + 0x6e, 0xa4, 0x3f, 0x42, 0x73, 0x04, 0x7e, 0x17, 0x44, 0x41, 0x3f, 0xe9, 0x1b, 0xcb, 0xe8, 0x3c, + 0xfd, 0xd6, 0x3d, 0x90, 0xcf, 0x88, 0xcb, 0x83, 0x3e, 0x4e, 0xf7, 0xad, 0x82, 0x53, 0x14, 0xd8, + 0x4b, 0x05, 0xd9, 0x5f, 0x40, 0x6b, 0x5a, 0xf3, 0x35, 0x5c, 0x97, 0x6e, 0x22, 0xca, 0x33, 0xbe, + 0x8b, 0xe4, 0x1b, 0xa0, 0x76, 0xfe, 0x4f, 0x70, 0x77, 0x8c, 0xbe, 0x8a, 0x78, 0x10, 0xee, 0x8b, + 0xe9, 0xf3, 0x81, 0x02, 0xd8, 0x82, 0x5f, 0xcc, 0xd6, 0xae, 0xad, 0x1f, 0xc2, 0x3d, 0xb5, 0x5b, + 0x1c, 0x5d, 0x89, 0x37, 0x1a, 0x85, 0x62, 0xb1, 0x89, 0x11, 0xc5, 0x11, 0xc7, 0x7e, 0xea, 0x83, + 0xdc, 0x59, 0xd5, 0xb1, 0x1b, 0xa4, 0xfb, 0x3f, 0xa4, 0xd0, 0x73, 0xdf, 0x7e, 0x00, 0xf6, 0xbb, + 0xb4, 0x68, 0x5b, 0x3b, 0xb0, 0x35, 0xc9, 0x75, 0x14, 0x62, 0x6f, 0x6c, 0xc8, 0xbe, 0x07, 0xdb, + 0x73, 0x39, 0xb4, 0x12, 0x4b, 0xad, 0xbb, 0x22, 0x9c, 0x51, 0xfd, 0xfe, 0x4a, 0xad, 0xa2, 0x1a, + 0xd3, 0xd7, 0xd3, 0x80, 0x45, 0xe4, 0xfb, 0x34, 0x7d, 0xe0, 0x15, 0x61, 0x6f, 0x40, 0xd3, 0xc1, + 0x4c, 0xec, 0x65, 0xa3, 0x4a, 0x4e, 0xb5, 0x6c, 0x42, 0x6b, 0xfa, 0x48, 0x5b, 0xdd, 0x85, 0xe6, + 0x6b, 0x03, 0x17, 0xcd, 0x38, 0xb3, 0x99, 0x57, 0x74, 0x33, 0xdb, 0xc7, 0xd0, 0x9a, 0x16, 0xb8, + 0xd5, 0x18, 0xf9, 0xc8, 0xd4, 0xf3, 0x06, 0x05, 0xfc, 0x98, 0x88, 0x36, 0x4a, 0xcd, 0x57, 0x20, + 0xaf, 0xaf, 0xa4, 0xe0, 0xe4, 0x03, 0x3f, 0x53, 0x2f, 0xf9, 0x89, 0xaa, 0xdc, 0x81, 0xad, 0x79, + 0xca, 0x74, 0x9c, 0x75, 0xa8, 0x3d, 0x8f, 0x02, 0xae, 0x9a, 0x35, 0x4d, 0xcc, 0xa7, 0x60, 0x99, + 0xe0, 0x35, 0xca, 0xff, 0xa7, 0x1c, 0x6c, 0x9d, 0x92, 0x38, 0x09, 0xe5, 0x9e, 0xa9, 0x0a, 0xe1, + 0x1b, 0x92, 0x88, 0x1b, 
0x4d, 0xfd, 0xfe, 0x25, 0xac, 0x8a, 0xb2, 0x75, 0x3d, 0x8a, 0x11, 0xc7, + 0xbe, 0x1b, 0xa5, 0xbf, 0x85, 0xca, 0x02, 0x3e, 0x50, 0xe8, 0xf7, 0x4c, 0xd4, 0x1e, 0xf2, 0x84, + 0x52, 0x73, 0xe4, 0x83, 0x82, 0xe4, 0xd8, 0xff, 0x12, 0x4a, 0x7d, 0xe9, 0x99, 0x8b, 0xc2, 0x00, + 0xa9, 0xd1, 0x5f, 0xdc, 0x5b, 0x9b, 0xdc, 0x9d, 0xf7, 0xc5, 0xa1, 0x53, 0x54, 0xac, 0x92, 0xb0, + 0x1e, 0x43, 0xc3, 0x18, 0x68, 0xe3, 0x15, 0x73, 0x41, 0xda, 0xa8, 0x1b, 0x67, 0xa3, 0x4d, 0xf3, + 0x1e, 0x6c, 0xcf, 0x8d, 0x4b, 0xa7, 0xf0, 0xaf, 0x39, 0xa8, 0x8a, 0x74, 0x99, 0xad, 0x6f, 0xfd, + 0x06, 0x96, 0x14, 0xb7, 0xbe, 0xf2, 0x39, 0xee, 0x69, 0xa6, 0xb9, 0x9e, 0xe5, 0xe7, 0x7a, 0x36, + 0x2b, 0x9f, 0x85, 0x19, 0xf9, 0x4c, 0x6f, 0x38, 0x3b, 0x83, 0xd6, 0xa0, 0x7e, 0x88, 0xfb, 0x84, + 0xe3, 0xec, 0xc5, 0xef, 0x41, 0x23, 0x0b, 0x5f, 0xe3, 0xea, 0xbf, 0x86, 0xed, 0x53, 0x4a, 0x84, + 0x90, 0x34, 0xf1, 0xa6, 0x87, 0xa3, 0x03, 0x94, 0x74, 0x7b, 0xfc, 0x55, 0x7c, 0x8d, 0x91, 0x66, + 0xff, 0x1e, 0x76, 0xe6, 0x8b, 0x5f, 0xc3, 0xfc, 0x06, 0x34, 0x95, 0x20, 0x62, 0x5a, 0x8f, 0x6f, + 0xf4, 0xf7, 0xf4, 0x91, 0x4e, 0xc0, 0x5f, 0x72, 0x50, 0x3d, 0xc3, 0xd9, 0xba, 0xbf, 0xe9, 0xa5, + 0xcd, 0xb8, 0x81, 0xfc, 0xac, 0x8a, 0x7e, 0x04, 0x35, 0xb9, 0xc2, 0xbb, 0x4c, 0x0c, 0x66, 0x97, + 0x09, 0x9f, 0xf4, 0xe6, 0xbe, 0x2a, 0x0f, 0xc6, 0x03, 0x5b, 0xbe, 0x23, 0x78, 0xa2, 0xf3, 0xec, + 0xe7, 0xe3, 0x40, 0x1c, 0x2c, 0x95, 0x8c, 0x47, 0xf5, 0xcd, 0x7c, 0x16, 0x3f, 0xc9, 0x66, 0xa8, + 0xd2, 0x76, 0x1e, 0x80, 0x2d, 0x1e, 0x3f, 0x63, 0x62, 0xec, 0x47, 0xbe, 0x18, 0xb4, 0x99, 0xe5, + 0xe1, 0x35, 0xdc, 0x7f, 0x27, 0xd7, 0x6d, 0x97, 0x89, 0x35, 0xa8, 0x9b, 0x95, 0x60, 0xd4, 0x64, + 0x16, 0xbe, 0x46, 0x51, 0x3c, 0x86, 0xf2, 0x13, 0xe4, 0x5d, 0x24, 0xa3, 0x0a, 0xdc, 0x81, 0xa2, + 0x47, 0x22, 0x2f, 0xa1, 0x14, 0x47, 0xde, 0x50, 0x0f, 0x1e, 0x13, 0xb2, 0xbf, 0x80, 0x4a, 0x2a, + 0xa2, 0x0d, 0x3c, 0x80, 0x45, 0x3c, 0x18, 0x27, 0xb6, 0xd2, 0x4e, 0xff, 0xff, 0xff, 0x48, 0xa0, + 0x8e, 0x3a, 0xd4, 0x8f, 0x08, 0x27, 0x14, 0x1f, 0x53, 0xd2, 0xcf, 0x58, 0xb5, 0xf7, 0x61, 0x63, + 0xc6, 0xd9, 0x4d, 0xd4, 0x3f, 0xf9, 0xf4, 0xc7, 0xf6, 0x20, 0xe0, 0x98, 0xb1, 0x76, 0x40, 0x76, + 0xd5, 0xd7, 0x6e, 0x97, 0xec, 0x0e, 0xf8, 0xae, 0xfc, 0x2b, 0xc4, 0xee, 0xd4, 0x8f, 0x9b, 0xce, + 0x92, 0x3c, 0xf8, 0xec, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x30, 0x7f, 0x51, 0x40, 0x0f, 0x19, + 0x00, 0x00, } diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go index 11555ab3a4..41169028ed 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -56,6 +56,8 @@ type TabletManagerClient interface { ReloadSchema(ctx context.Context, in *tabletmanagerdata.ReloadSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReloadSchemaResponse, error) PreflightSchema(ctx context.Context, in *tabletmanagerdata.PreflightSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PreflightSchemaResponse, error) ApplySchema(ctx context.Context, in *tabletmanagerdata.ApplySchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ApplySchemaResponse, error) + LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) + UnlockTables(ctx context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error) ExecuteFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsDbaRequest, opts 
...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) ExecuteFetchAsAllPrivs(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) ExecuteFetchAsApp(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAppRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) @@ -70,6 +72,9 @@ type TabletManagerClient interface { StopSlaveMinimum(ctx context.Context, in *tabletmanagerdata.StopSlaveMinimumRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopSlaveMinimumResponse, error) // StartSlave starts the mysql replication StartSlave(ctx context.Context, in *tabletmanagerdata.StartSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveResponse, error) + // StartSlave starts the mysql replication until and including + // the provided position + StartSlaveUntilAfter(ctx context.Context, in *tabletmanagerdata.StartSlaveUntilAfterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error) // TabletExternallyReparented tells a tablet that its underlying MySQL is // currently the master. It is only used in environments (tabletmanagerdata.such as Vitess+MoB) // in which MySQL is reparented by some agent external to Vitess, and then @@ -265,6 +270,24 @@ func (c *tabletManagerClient) ApplySchema(ctx context.Context, in *tabletmanager return out, nil } +func (c *tabletManagerClient) LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) { + out := new(tabletmanagerdata.LockTablesResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/LockTables", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) UnlockTables(ctx context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error) { + out := new(tabletmanagerdata.UnlockTablesResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/UnlockTables", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) ExecuteFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsDbaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) { out := new(tabletmanagerdata.ExecuteFetchAsDbaResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ExecuteFetchAsDba", in, out, opts...) @@ -337,6 +360,15 @@ func (c *tabletManagerClient) StartSlave(ctx context.Context, in *tabletmanagerd return out, nil } +func (c *tabletManagerClient) StartSlaveUntilAfter(ctx context.Context, in *tabletmanagerdata.StartSlaveUntilAfterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error) { + out := new(tabletmanagerdata.StartSlaveUntilAfterResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StartSlaveUntilAfter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) TabletExternallyReparented(ctx context.Context, in *tabletmanagerdata.TabletExternallyReparentedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.TabletExternallyReparentedResponse, error) { out := new(tabletmanagerdata.TabletExternallyReparentedResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/TabletExternallyReparented", in, out, opts...) 
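Each new client stub added above follows the same generated shape as the existing ones: allocate the response message, then call c.cc.Invoke with the fully qualified method name. The sketch below is only an illustration of driving the new LockTables/UnlockTables pair straight from the generated TabletManagerClient; the tablet address, dial options and empty request messages are assumptions, and in the Vitess codebase these RPCs are normally reached through the higher-level tabletmanager client wrapper rather than directly.

// Hypothetical sketch: calling the newly generated LockTables/UnlockTables
// stubs directly. The address is a placeholder and errors from the deferred
// UnlockTables call are ignored for brevity.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
	tabletmanagerservicepb "vitess.io/vitess/go/vt/proto/tabletmanagerservice"
)

func main() {
	// Placeholder address for a tablet's gRPC port.
	conn, err := grpc.Dial("tablet-host:16001", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := tabletmanagerservicepb.NewTabletManagerClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Assumption: the request messages carry no fields in this version of the proto.
	if _, err := client.LockTables(ctx, &tabletmanagerdatapb.LockTablesRequest{}); err != nil {
		log.Fatal(err)
	}
	defer client.UnlockTables(ctx, &tabletmanagerdatapb.UnlockTablesRequest{})
}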
@@ -567,6 +599,8 @@ type TabletManagerServer interface { ReloadSchema(context.Context, *tabletmanagerdata.ReloadSchemaRequest) (*tabletmanagerdata.ReloadSchemaResponse, error) PreflightSchema(context.Context, *tabletmanagerdata.PreflightSchemaRequest) (*tabletmanagerdata.PreflightSchemaResponse, error) ApplySchema(context.Context, *tabletmanagerdata.ApplySchemaRequest) (*tabletmanagerdata.ApplySchemaResponse, error) + LockTables(context.Context, *tabletmanagerdata.LockTablesRequest) (*tabletmanagerdata.LockTablesResponse, error) + UnlockTables(context.Context, *tabletmanagerdata.UnlockTablesRequest) (*tabletmanagerdata.UnlockTablesResponse, error) ExecuteFetchAsDba(context.Context, *tabletmanagerdata.ExecuteFetchAsDbaRequest) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) ExecuteFetchAsAllPrivs(context.Context, *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) ExecuteFetchAsApp(context.Context, *tabletmanagerdata.ExecuteFetchAsAppRequest) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) @@ -581,6 +615,9 @@ type TabletManagerServer interface { StopSlaveMinimum(context.Context, *tabletmanagerdata.StopSlaveMinimumRequest) (*tabletmanagerdata.StopSlaveMinimumResponse, error) // StartSlave starts the mysql replication StartSlave(context.Context, *tabletmanagerdata.StartSlaveRequest) (*tabletmanagerdata.StartSlaveResponse, error) + // StartSlave starts the mysql replication until and including + // the provided position + StartSlaveUntilAfter(context.Context, *tabletmanagerdata.StartSlaveUntilAfterRequest) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error) // TabletExternallyReparented tells a tablet that its underlying MySQL is // currently the master. It is only used in environments (tabletmanagerdata.such as Vitess+MoB) // in which MySQL is reparented by some agent external to Vitess, and then @@ -898,6 +935,42 @@ func _TabletManager_ApplySchema_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _TabletManager_LockTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.LockTablesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).LockTables(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/LockTables", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).LockTables(ctx, req.(*tabletmanagerdata.LockTablesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_UnlockTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.UnlockTablesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).UnlockTables(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/UnlockTables", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).UnlockTables(ctx, req.(*tabletmanagerdata.UnlockTablesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_ExecuteFetchAsDba_Handler(srv interface{}, ctx context.Context, 
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.ExecuteFetchAsDbaRequest) if err := dec(in); err != nil { @@ -1042,6 +1115,24 @@ func _TabletManager_StartSlave_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _TabletManager_StartSlaveUntilAfter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StartSlaveUntilAfterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StartSlaveUntilAfter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StartSlaveUntilAfter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StartSlaveUntilAfter(ctx, req.(*tabletmanagerdata.StartSlaveUntilAfterRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_TabletExternallyReparented_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.TabletExternallyReparentedRequest) if err := dec(in); err != nil { @@ -1432,6 +1523,14 @@ var _TabletManager_serviceDesc = grpc.ServiceDesc{ MethodName: "ApplySchema", Handler: _TabletManager_ApplySchema_Handler, }, + { + MethodName: "LockTables", + Handler: _TabletManager_LockTables_Handler, + }, + { + MethodName: "UnlockTables", + Handler: _TabletManager_UnlockTables_Handler, + }, { MethodName: "ExecuteFetchAsDba", Handler: _TabletManager_ExecuteFetchAsDba_Handler, @@ -1464,6 +1563,10 @@ var _TabletManager_serviceDesc = grpc.ServiceDesc{ MethodName: "StartSlave", Handler: _TabletManager_StartSlave_Handler, }, + { + MethodName: "StartSlaveUntilAfter", + Handler: _TabletManager_StartSlaveUntilAfter_Handler, + }, { MethodName: "TabletExternallyReparented", Handler: _TabletManager_TabletExternallyReparented_Handler, @@ -1545,69 +1648,73 @@ var _TabletManager_serviceDesc = grpc.ServiceDesc{ } func init() { - proto.RegisterFile("tabletmanagerservice.proto", fileDescriptor_tabletmanagerservice_a64e2f6154f58360) + proto.RegisterFile("tabletmanagerservice.proto", fileDescriptor_tabletmanagerservice_d0dfb5502bc9cb1c) } -var fileDescriptor_tabletmanagerservice_a64e2f6154f58360 = []byte{ - // 956 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xdf, 0x8f, 0x1b, 0x35, - 0x10, 0xc7, 0x89, 0x04, 0x95, 0x30, 0x3f, 0x6b, 0x21, 0x8a, 0x0e, 0x09, 0x28, 0x6d, 0xf9, 0xd1, - 0xa2, 0x4b, 0xaf, 0x47, 0x79, 0x4f, 0xaf, 0x77, 0xed, 0x21, 0x4e, 0x84, 0xa4, 0x70, 0x08, 0x24, - 0x24, 0x5f, 0x32, 0xcd, 0x2e, 0xb7, 0x59, 0x1b, 0xdb, 0x1b, 0xdd, 0x3d, 0x21, 0x21, 0xf1, 0x84, - 0xc4, 0x1b, 0xff, 0x2f, 0xf2, 0xee, 0xda, 0x19, 0x27, 0xb3, 0x4e, 0xf2, 0x76, 0xca, 0xf7, 0x33, - 0x33, 0xf6, 0x78, 0x66, 0xec, 0x5b, 0xb6, 0x67, 0xc5, 0x45, 0x01, 0x76, 0x2e, 0x4a, 0x31, 0x03, - 0x6d, 0x40, 0x2f, 0xf2, 0x09, 0xec, 0x2b, 0x2d, 0xad, 0xe4, 0xef, 0x51, 0xda, 0xde, 0xad, 0xe8, - 0xd7, 0xa9, 0xb0, 0xa2, 0xc1, 0x1f, 0xfd, 0x77, 0x9b, 0xbd, 0xf5, 0xa2, 0xd6, 0xce, 0x1a, 0x8d, - 0x9f, 0xb2, 0x57, 0x87, 0x79, 0x39, 0xe3, 0x1f, 0xed, 0xaf, 0xdb, 0x38, 0x61, 0x04, 0x7f, 0x54, - 0x60, 0xec, 0xde, 0xc7, 0x9d, 0xba, 0x51, 0xb2, 0x34, 0xf0, 0xe9, 0x2b, 0xfc, 0x3b, 0xf6, 0xda, - 0xb8, 0x00, 0x50, 
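On the server side, each generated _TabletManager_*_Handler decodes the request and either calls the TabletManagerServer method directly or hands it to the configured unary interceptor, passing a grpc.UnaryServerInfo whose FullMethod matches the MethodName entries just added to _TabletManager_serviceDesc. The sketch below is not Vitess code; it only shows how those FullMethod strings (for example "/tabletmanagerservice.TabletManager/LockTables") surface to middleware once an interceptor is installed.

// Minimal interceptor sketch: logs the FullMethod set by the generated
// handlers, including the new LockTables, UnlockTables and
// StartSlaveUntilAfter RPCs.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func loggingInterceptor(
	ctx context.Context,
	req interface{},
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler,
) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("rpc=%s duration=%v err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Installing the interceptor at server construction time routes every
	// unary call declared in the service descriptor through it.
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}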
0x9c, 0x62, 0x6b, 0xc5, 0x3b, 0xfb, 0xa4, 0x1b, 0x08, 0xde, 0x7e, 0x63, 0x6f, - 0x1c, 0x5f, 0xc1, 0xa4, 0xb2, 0xf0, 0x5c, 0xca, 0x4b, 0x7e, 0x8f, 0x30, 0x41, 0xba, 0xf7, 0xfc, - 0xd9, 0x26, 0x2c, 0xf8, 0xff, 0x99, 0xbd, 0xfe, 0x0c, 0xec, 0x78, 0x92, 0xc1, 0x5c, 0xf0, 0x3b, - 0x84, 0x59, 0x50, 0xbd, 0xef, 0xbb, 0x69, 0x28, 0x78, 0x9e, 0xb1, 0xb7, 0x9f, 0x81, 0x1d, 0x82, - 0x9e, 0xe7, 0xc6, 0xe4, 0xb2, 0x34, 0xfc, 0x0b, 0xda, 0x12, 0x21, 0x3e, 0xc6, 0x97, 0x5b, 0x90, - 0x38, 0x45, 0x63, 0xb0, 0x23, 0x10, 0xd3, 0xef, 0xcb, 0xe2, 0x9a, 0x4c, 0x11, 0xd2, 0x53, 0x29, - 0x8a, 0xb0, 0xe0, 0x5f, 0xb0, 0x37, 0x5b, 0xe1, 0x5c, 0xe7, 0x16, 0x78, 0xc2, 0xb2, 0x06, 0x7c, - 0x84, 0xcf, 0x37, 0x72, 0x21, 0xc4, 0xaf, 0x8c, 0x1d, 0x65, 0xa2, 0x9c, 0xc1, 0x8b, 0x6b, 0x05, - 0x9c, 0xca, 0xf0, 0x52, 0xf6, 0xee, 0xef, 0x6d, 0xa0, 0xf0, 0xfa, 0x47, 0xf0, 0x52, 0x83, 0xc9, - 0xc6, 0x56, 0x74, 0xac, 0x1f, 0x03, 0xa9, 0xf5, 0xc7, 0x1c, 0x3e, 0xeb, 0x51, 0x55, 0x3e, 0x07, - 0x51, 0xd8, 0xec, 0x28, 0x83, 0xc9, 0x25, 0x79, 0xd6, 0x31, 0x92, 0x3a, 0xeb, 0x55, 0x32, 0x04, - 0x52, 0xec, 0xe6, 0xe9, 0xac, 0x94, 0x1a, 0x1a, 0xf9, 0x58, 0x6b, 0xa9, 0xf9, 0x03, 0xc2, 0xc3, - 0x1a, 0xe5, 0xc3, 0x7d, 0xb5, 0x1d, 0x1c, 0x67, 0xaf, 0x90, 0x62, 0xda, 0xf6, 0x08, 0x9d, 0xbd, - 0x25, 0x90, 0xce, 0x1e, 0xe6, 0x42, 0x88, 0xdf, 0xd9, 0x3b, 0x43, 0x0d, 0x2f, 0x8b, 0x7c, 0x96, - 0xf9, 0x4e, 0xa4, 0x92, 0xb2, 0xc2, 0xf8, 0x40, 0xf7, 0xb7, 0x41, 0x71, 0xb3, 0x0c, 0x94, 0x2a, - 0xae, 0xdb, 0x38, 0x54, 0x11, 0x21, 0x3d, 0xd5, 0x2c, 0x11, 0x86, 0x0f, 0xa8, 0x1d, 0x34, 0x27, - 0x60, 0x27, 0xd9, 0xc0, 0x3c, 0xbd, 0x10, 0xe4, 0x01, 0xad, 0x51, 0xa9, 0x03, 0x22, 0xe0, 0x10, - 0xf1, 0x4f, 0xf6, 0x7e, 0x2c, 0x0f, 0x8a, 0x62, 0xa8, 0xf3, 0x85, 0xe1, 0x0f, 0x37, 0x7a, 0xf2, - 0xa8, 0x8f, 0x7d, 0xb0, 0x83, 0x45, 0xf7, 0x96, 0x07, 0x4a, 0x6d, 0xb1, 0xe5, 0x81, 0x52, 0xdb, - 0x6f, 0xb9, 0x86, 0xa3, 0x89, 0x57, 0x88, 0x05, 0xb8, 0x36, 0xac, 0x0c, 0x3d, 0xf1, 0x96, 0x7a, - 0x72, 0xe2, 0x61, 0x0c, 0xb7, 0xf3, 0x99, 0x30, 0x16, 0xf4, 0x50, 0x9a, 0xdc, 0xe6, 0xb2, 0x24, - 0xdb, 0x39, 0x46, 0x52, 0xed, 0xbc, 0x4a, 0xe2, 0xdb, 0x67, 0x6c, 0xa5, 0xaa, 0x57, 0x41, 0xde, - 0x3e, 0x41, 0x4d, 0xdd, 0x3e, 0x08, 0x0a, 0x9e, 0xe7, 0xec, 0xdd, 0xf0, 0xf3, 0x59, 0x5e, 0xe6, - 0xf3, 0x6a, 0xce, 0xef, 0xa7, 0x6c, 0x5b, 0xc8, 0xc7, 0x79, 0xb0, 0x15, 0x8b, 0x07, 0xf8, 0xd8, - 0x0a, 0x6d, 0x9b, 0x9d, 0xd0, 0x8b, 0xf4, 0x72, 0x6a, 0x80, 0x63, 0x2a, 0x38, 0xff, 0xa7, 0xc7, - 0xf6, 0x9a, 0xe7, 0xca, 0xf1, 0x95, 0x05, 0x5d, 0x8a, 0xc2, 0xdd, 0x4f, 0x4a, 0x68, 0x28, 0x2d, - 0x4c, 0xf9, 0xd7, 0x84, 0x9f, 0x6e, 0xdc, 0x47, 0x7f, 0xbc, 0xa3, 0x55, 0x58, 0xcd, 0x5f, 0x3d, - 0x76, 0x6b, 0x15, 0x3c, 0x2e, 0x60, 0xe2, 0x96, 0x72, 0xb0, 0x85, 0xd3, 0x96, 0xf5, 0xeb, 0x78, - 0xb4, 0x8b, 0xc9, 0xea, 0xb3, 0xc5, 0x25, 0xca, 0x74, 0x3e, 0x5b, 0x6a, 0x75, 0xd3, 0xb3, 0xa5, - 0x85, 0x70, 0xe1, 0xfc, 0x34, 0x02, 0x55, 0xe4, 0x13, 0xe1, 0x8a, 0xd5, 0xb5, 0x21, 0x59, 0x38, - 0xab, 0x50, 0xaa, 0x70, 0xd6, 0x59, 0x3c, 0xbd, 0xb0, 0x7a, 0x2e, 0x72, 0x7b, 0x22, 0x5d, 0xab, - 0x90, 0xd3, 0x8b, 0x46, 0x53, 0xd3, 0xab, 0xcb, 0x02, 0xef, 0x77, 0x04, 0xc6, 0x3d, 0x4b, 0x02, - 0x47, 0xee, 0x77, 0x15, 0x4a, 0xed, 0x77, 0x9d, 0xc5, 0x8d, 0x72, 0x5a, 0xe6, 0xb6, 0x99, 0x08, - 0x64, 0xa3, 0x2c, 0xe5, 0x54, 0xa3, 0x60, 0x2a, 0x2a, 0xcd, 0xa1, 0x54, 0x55, 0x51, 0xbf, 0x4e, - 0x9a, 0xda, 0xfd, 0x56, 0x56, 0xae, 0x88, 0xc8, 0xd2, 0xec, 0x60, 0x53, 0xa5, 0xd9, 0x69, 0x82, - 0x4b, 0xd3, 0x2d, 0xae, 0x7b, 0xa6, 0x05, 0x35, 0x55, 0x9a, 0x08, 0xc2, 0x4f, 0x91, 0xa7, 0x30, - 0x97, 0x16, 0xda, 0xec, 0x51, 0x03, 0x1d, 
0x03, 0xa9, 0xa7, 0x48, 0xcc, 0x85, 0x10, 0x7f, 0xf7, - 0xd8, 0x07, 0x43, 0x2d, 0x9d, 0x56, 0x47, 0x3f, 0xcf, 0xa0, 0x3c, 0x12, 0xd5, 0x2c, 0xb3, 0x3f, - 0x2a, 0x4e, 0xe6, 0xa3, 0x03, 0xf6, 0xb1, 0x0f, 0x77, 0xb2, 0x89, 0xc6, 0x77, 0x2d, 0x0b, 0xd3, - 0xd2, 0x53, 0x7a, 0x7c, 0xaf, 0x40, 0xc9, 0xf1, 0xbd, 0xc6, 0x46, 0xf7, 0x10, 0xf8, 0xa2, 0xbc, - 0x43, 0xbf, 0xdb, 0xe3, 0x9c, 0xde, 0x4d, 0x43, 0xf8, 0x71, 0xe0, 0xe3, 0x8e, 0xc0, 0xb8, 0xe9, - 0x0e, 0x53, 0x9e, 0x5a, 0x5d, 0xa0, 0x52, 0x8f, 0x03, 0x02, 0x0e, 0x11, 0xff, 0xed, 0xb1, 0x0f, - 0xdd, 0x4d, 0x85, 0xfa, 0x6f, 0x50, 0x4e, 0xdd, 0xa8, 0x6b, 0x5e, 0x0b, 0x8f, 0x3b, 0x6e, 0xb6, - 0x0e, 0xde, 0x2f, 0xe3, 0x9b, 0x5d, 0xcd, 0x70, 0xd9, 0xe2, 0x13, 0x27, 0xcb, 0x16, 0x03, 0xa9, - 0xb2, 0x8d, 0xb9, 0x10, 0xe2, 0x07, 0x76, 0xe3, 0x89, 0x98, 0x5c, 0x56, 0x8a, 0x53, 0xff, 0x53, - 0x37, 0x92, 0x77, 0x7b, 0x3b, 0x41, 0x78, 0x87, 0x0f, 0x7b, 0x5c, 0xb3, 0x9b, 0x2e, 0xbb, 0x52, - 0xc3, 0x89, 0x96, 0xf3, 0xd6, 0x7b, 0xc7, 0xb0, 0x8b, 0xa9, 0xd4, 0xc1, 0x11, 0xf0, 0x32, 0xe6, - 0x93, 0xc3, 0x5f, 0x0e, 0x16, 0xb9, 0x05, 0x63, 0xf6, 0x73, 0xd9, 0x6f, 0xfe, 0xea, 0xcf, 0x64, - 0x7f, 0x61, 0xfb, 0xf5, 0x77, 0x8b, 0x3e, 0xf5, 0x95, 0xe3, 0xe2, 0x46, 0xad, 0x1d, 0xfe, 0x1f, - 0x00, 0x00, 0xff, 0xff, 0x2e, 0xeb, 0x66, 0x65, 0x20, 0x11, 0x00, 0x00, +var fileDescriptor_tabletmanagerservice_d0dfb5502bc9cb1c = []byte{ + // 1012 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x5b, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0xb1, 0x04, 0x95, 0x18, 0xae, 0x1d, 0x55, 0x14, 0x05, 0x89, 0x5b, 0x5a, 0x2e, 0x2d, + 0x8a, 0x9b, 0x86, 0xf2, 0xee, 0xa6, 0x49, 0x1b, 0xd4, 0x08, 0x63, 0x37, 0x04, 0x81, 0x84, 0x34, + 0xb1, 0x4f, 0xbc, 0x4b, 0xd6, 0x3b, 0xc3, 0xcc, 0xac, 0x95, 0x3c, 0x21, 0x21, 0xf1, 0x84, 0xc4, + 0x67, 0xe3, 0x23, 0xa1, 0xbd, 0xcc, 0xec, 0x19, 0xfb, 0xec, 0xd8, 0x7e, 0x8b, 0xfc, 0xff, 0x9d, + 0xcb, 0x9c, 0x39, 0x73, 0x66, 0xb2, 0x6c, 0xc7, 0x8a, 0x8b, 0x0c, 0xec, 0x5c, 0xe4, 0x62, 0x06, + 0xda, 0x80, 0x5e, 0xa4, 0x13, 0xd8, 0x53, 0x5a, 0x5a, 0xc9, 0xef, 0x50, 0xda, 0xce, 0xdd, 0xe0, + 0xd7, 0xa9, 0xb0, 0xa2, 0xc6, 0x1f, 0xff, 0xb7, 0xcb, 0xde, 0x79, 0x55, 0x69, 0xa7, 0xb5, 0xc6, + 0x4f, 0xd8, 0xeb, 0xc3, 0x34, 0x9f, 0xf1, 0x8f, 0xf7, 0x56, 0x6d, 0x4a, 0x61, 0x04, 0x7f, 0x14, + 0x60, 0xec, 0xce, 0x27, 0x9d, 0xba, 0x51, 0x32, 0x37, 0xf0, 0xf9, 0x6b, 0xfc, 0x25, 0x7b, 0x63, + 0x9c, 0x01, 0x28, 0x4e, 0xb1, 0x95, 0xe2, 0x9c, 0x7d, 0xda, 0x0d, 0x78, 0x6f, 0xbf, 0xb1, 0xb7, + 0x8e, 0xae, 0x61, 0x52, 0x58, 0x78, 0x21, 0xe5, 0x15, 0xbf, 0x4f, 0x98, 0x20, 0xdd, 0x79, 0xfe, + 0x62, 0x1d, 0xe6, 0xfd, 0xff, 0xcc, 0xde, 0x7c, 0x0e, 0x76, 0x3c, 0x49, 0x60, 0x2e, 0xf8, 0x2e, + 0x61, 0xe6, 0x55, 0xe7, 0xfb, 0x5e, 0x1c, 0xf2, 0x9e, 0x67, 0xec, 0xdd, 0xe7, 0x60, 0x87, 0xa0, + 0xe7, 0xa9, 0x31, 0xa9, 0xcc, 0x0d, 0xff, 0x8a, 0xb6, 0x44, 0x88, 0x8b, 0xf1, 0xf5, 0x06, 0x24, + 0x2e, 0xd1, 0x18, 0xec, 0x08, 0xc4, 0xf4, 0x87, 0x3c, 0xbb, 0x21, 0x4b, 0x84, 0xf4, 0x58, 0x89, + 0x02, 0xcc, 0xfb, 0x17, 0xec, 0xed, 0x46, 0x38, 0xd7, 0xa9, 0x05, 0x1e, 0xb1, 0xac, 0x00, 0x17, + 0xe1, 0xcb, 0xb5, 0x9c, 0x0f, 0xf1, 0x2b, 0x63, 0x87, 0x89, 0xc8, 0x67, 0xf0, 0xea, 0x46, 0x01, + 0xa7, 0x2a, 0xdc, 0xca, 0xce, 0xfd, 0xfd, 0x35, 0x14, 0xce, 0x7f, 0x04, 0x97, 0x1a, 0x4c, 0x32, + 0xb6, 0xa2, 0x23, 0x7f, 0x0c, 0xc4, 0xf2, 0x0f, 0x39, 0xbc, 0xd7, 0xa3, 0x22, 0x7f, 0x01, 0x22, + 0xb3, 0xc9, 0x61, 0x02, 0x93, 0x2b, 0x72, 0xaf, 0x43, 0x24, 0xb6, 0xd7, 0xcb, 0xa4, 0x0f, 0xa4, + 0xd8, 0xed, 0x93, 0x59, 0x2e, 0x35, 0xd4, 0xf2, 0x91, 0xd6, 0x52, 0xf3, 
0x87, 0x84, 0x87, 0x15, + 0xca, 0x85, 0xfb, 0x66, 0x33, 0x38, 0xac, 0x5e, 0x26, 0xc5, 0xb4, 0x39, 0x23, 0x74, 0xf5, 0x5a, + 0x20, 0x5e, 0x3d, 0xcc, 0xf9, 0x10, 0xbf, 0xb3, 0xf7, 0x86, 0x1a, 0x2e, 0xb3, 0x74, 0x96, 0xb8, + 0x93, 0x48, 0x15, 0x65, 0x89, 0x71, 0x81, 0x1e, 0x6c, 0x82, 0xe2, 0xc3, 0x32, 0x50, 0x2a, 0xbb, + 0x69, 0xe2, 0x50, 0x4d, 0x84, 0xf4, 0xd8, 0x61, 0x09, 0x30, 0xdc, 0xc9, 0x2f, 0xe5, 0xe4, 0xaa, + 0x9a, 0xae, 0x86, 0xec, 0xe4, 0x56, 0x8e, 0x75, 0x32, 0xa6, 0xf0, 0x5e, 0x9c, 0xe5, 0x59, 0xeb, + 0x9e, 0x4a, 0x0b, 0x03, 0xb1, 0xbd, 0x08, 0x39, 0xdc, 0x60, 0xcd, 0xa0, 0x3c, 0x06, 0x3b, 0x49, + 0x06, 0xe6, 0xd9, 0x85, 0x20, 0x1b, 0x6c, 0x85, 0x8a, 0x35, 0x18, 0x01, 0xfb, 0x88, 0x7f, 0xb2, + 0x0f, 0x42, 0x79, 0x90, 0x65, 0x43, 0x9d, 0x2e, 0x0c, 0x7f, 0xb4, 0xd6, 0x93, 0x43, 0x5d, 0xec, + 0xfd, 0x2d, 0x2c, 0xba, 0x97, 0x3c, 0x50, 0x6a, 0x83, 0x25, 0x0f, 0x94, 0xda, 0x7c, 0xc9, 0x15, + 0x1c, 0x4c, 0xec, 0x4c, 0x2c, 0xa0, 0x1c, 0x23, 0x85, 0xa1, 0x27, 0x76, 0xab, 0x47, 0x27, 0x36, + 0xc6, 0xf0, 0x38, 0x3a, 0x15, 0xc6, 0x82, 0x1e, 0x4a, 0x93, 0xda, 0x54, 0xe6, 0xe4, 0x38, 0x0a, + 0x91, 0xd8, 0x38, 0x5a, 0x26, 0xf1, 0xed, 0x39, 0xb6, 0x52, 0x55, 0x59, 0x90, 0xb7, 0xa7, 0x57, + 0x63, 0xb7, 0x27, 0x82, 0xbc, 0xe7, 0x39, 0x7b, 0xdf, 0xff, 0x7c, 0x9a, 0xe6, 0xe9, 0xbc, 0x98, + 0xf3, 0x07, 0x31, 0xdb, 0x06, 0x72, 0x71, 0x1e, 0x6e, 0xc4, 0xe2, 0x63, 0x3b, 0xb6, 0x42, 0xdb, + 0x7a, 0x25, 0x74, 0x92, 0x4e, 0x8e, 0x1d, 0x5b, 0x4c, 0x79, 0xe7, 0x37, 0xec, 0x4e, 0xfb, 0xfb, + 0x59, 0x6e, 0xd3, 0x6c, 0x70, 0x69, 0x41, 0xf3, 0xbd, 0xa8, 0x83, 0x16, 0x74, 0x01, 0xfb, 0x1b, + 0xf3, 0x3e, 0xf4, 0x3f, 0x3d, 0xb6, 0x53, 0xbf, 0xf4, 0x8e, 0xae, 0x2d, 0xe8, 0x5c, 0x64, 0xe5, + 0xd5, 0xae, 0x84, 0x86, 0xdc, 0xc2, 0x94, 0x7f, 0x4b, 0x78, 0xec, 0xc6, 0x5d, 0x1e, 0x4f, 0xb6, + 0xb4, 0xf2, 0xd9, 0xfc, 0xd5, 0x63, 0x77, 0x97, 0xc1, 0xa3, 0x0c, 0x26, 0x65, 0x2a, 0xfb, 0x1b, + 0x38, 0x6d, 0x58, 0x97, 0xc7, 0xe3, 0x6d, 0x4c, 0x96, 0x5f, 0x7c, 0x65, 0xc9, 0x4c, 0xe7, 0x8b, + 0xaf, 0x52, 0xd7, 0xbd, 0xf8, 0x1a, 0x08, 0xf7, 0xec, 0x4f, 0x23, 0x50, 0x59, 0x3a, 0x11, 0xe5, + 0x39, 0x29, 0x27, 0x00, 0xd9, 0xb3, 0xcb, 0x50, 0xac, 0x67, 0x57, 0x59, 0x3c, 0x38, 0xb1, 0x7a, + 0x2e, 0x52, 0x7b, 0x2c, 0xcb, 0x53, 0x4a, 0x0e, 0x4e, 0x1a, 0x8d, 0x0d, 0xce, 0x2e, 0x0b, 0xbc, + 0xde, 0x11, 0x98, 0xf2, 0x45, 0xe7, 0x39, 0x72, 0xbd, 0xcb, 0x50, 0x6c, 0xbd, 0xab, 0x2c, 0x3e, + 0xa3, 0x27, 0x79, 0x6a, 0xeb, 0x61, 0x44, 0x9e, 0xd1, 0x56, 0x8e, 0x9d, 0x51, 0x4c, 0x05, 0xad, + 0x39, 0x94, 0xaa, 0xc8, 0xaa, 0x87, 0x5d, 0xdd, 0xbb, 0xdf, 0xcb, 0xa2, 0x6c, 0x22, 0xb2, 0x35, + 0x3b, 0xd8, 0x58, 0x6b, 0x76, 0x9a, 0xe0, 0xd6, 0x2c, 0x93, 0xeb, 0x1e, 0xa7, 0x5e, 0x8d, 0xb5, + 0x26, 0x82, 0xf0, 0xcb, 0xe1, 0x19, 0xcc, 0xa5, 0x85, 0xa6, 0x7a, 0xd4, 0x5d, 0x82, 0x81, 0xd8, + 0xcb, 0x21, 0xe4, 0x7c, 0x88, 0xbf, 0x7b, 0xec, 0xc3, 0xa1, 0x96, 0xa5, 0x56, 0x45, 0x3f, 0x4f, + 0x20, 0x3f, 0x14, 0xc5, 0x2c, 0xb1, 0x67, 0x8a, 0x93, 0xf5, 0xe8, 0x80, 0x5d, 0xec, 0x83, 0xad, + 0x6c, 0x82, 0x9b, 0xa3, 0x92, 0x85, 0x69, 0xe8, 0x29, 0x7d, 0x73, 0x2c, 0x41, 0xd1, 0x9b, 0x63, + 0x85, 0x0d, 0xae, 0x40, 0x70, 0x4d, 0xb9, 0x4b, 0xff, 0xcb, 0x13, 0xd6, 0xf4, 0x5e, 0x1c, 0xc2, + 0xef, 0x12, 0x17, 0x77, 0x04, 0xa6, 0x9c, 0xf3, 0x30, 0xe5, 0xb1, 0xec, 0x3c, 0x15, 0x7b, 0x97, + 0x10, 0xb0, 0x8f, 0xf8, 0x6f, 0x8f, 0x7d, 0x54, 0x5e, 0x92, 0xe8, 0xfc, 0x0d, 0xf2, 0x69, 0x39, + 0xea, 0xea, 0x87, 0xca, 0x93, 0x8e, 0x4b, 0xb5, 0x83, 0x77, 0x69, 0x7c, 0xb7, 0xad, 0x19, 0x6e, + 0x5b, 0xbc, 0xe3, 0x64, 0xdb, 0x62, 0x20, 0xd6, 0xb6, 0x21, 0xe7, 0x43, 0xfc, 0xc8, 0x6e, 0x3d, + 
0x15, 0x93, 0xab, 0x42, 0x71, 0xea, 0x73, 0x44, 0x2d, 0x39, 0xb7, 0x9f, 0x45, 0x08, 0xe7, 0xf0, + 0x51, 0x8f, 0x6b, 0x76, 0xbb, 0xac, 0xae, 0xd4, 0x70, 0xac, 0xe5, 0xbc, 0xf1, 0xde, 0x31, 0xec, + 0x42, 0x2a, 0xb6, 0x71, 0x04, 0xdc, 0xc6, 0x7c, 0x7a, 0xf0, 0xcb, 0xfe, 0x22, 0xb5, 0x60, 0xcc, + 0x5e, 0x2a, 0xfb, 0xf5, 0x5f, 0xfd, 0x99, 0xec, 0x2f, 0x6c, 0xbf, 0xfa, 0xe4, 0xd3, 0xa7, 0x3e, + 0x10, 0x5d, 0xdc, 0xaa, 0xb4, 0x83, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x46, 0xe6, 0xae, + 0x5b, 0x12, 0x00, 0x00, } diff --git a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go index d1bd1620f4..74788363a4 100644 --- a/go/vt/proto/throttlerdata/throttlerdata.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -29,7 +29,7 @@ func (m *MaxRatesRequest) Reset() { *m = MaxRatesRequest{} } func (m *MaxRatesRequest) String() string { return proto.CompactTextString(m) } func (*MaxRatesRequest) ProtoMessage() {} func (*MaxRatesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{0} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{0} } func (m *MaxRatesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MaxRatesRequest.Unmarshal(m, b) @@ -63,7 +63,7 @@ func (m *MaxRatesResponse) Reset() { *m = MaxRatesResponse{} } func (m *MaxRatesResponse) String() string { return proto.CompactTextString(m) } func (*MaxRatesResponse) ProtoMessage() {} func (*MaxRatesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{1} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{1} } func (m *MaxRatesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MaxRatesResponse.Unmarshal(m, b) @@ -102,7 +102,7 @@ func (m *SetMaxRateRequest) Reset() { *m = SetMaxRateRequest{} } func (m *SetMaxRateRequest) String() string { return proto.CompactTextString(m) } func (*SetMaxRateRequest) ProtoMessage() {} func (*SetMaxRateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{2} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{2} } func (m *SetMaxRateRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetMaxRateRequest.Unmarshal(m, b) @@ -142,7 +142,7 @@ func (m *SetMaxRateResponse) Reset() { *m = SetMaxRateResponse{} } func (m *SetMaxRateResponse) String() string { return proto.CompactTextString(m) } func (*SetMaxRateResponse) ProtoMessage() {} func (*SetMaxRateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{3} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{3} } func (m *SetMaxRateResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetMaxRateResponse.Unmarshal(m, b) @@ -259,7 +259,7 @@ func (m *Configuration) Reset() { *m = Configuration{} } func (m *Configuration) String() string { return proto.CompactTextString(m) } func (*Configuration) ProtoMessage() {} func (*Configuration) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{4} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{4} } func (m *Configuration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Configuration.Unmarshal(m, b) @@ -391,7 +391,7 @@ func (m *GetConfigurationRequest) Reset() { *m = GetConfigurationRequest func (m *GetConfigurationRequest) String() string { return proto.CompactTextString(m) } func (*GetConfigurationRequest) ProtoMessage() {} func 
(*GetConfigurationRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{5} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{5} } func (m *GetConfigurationRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetConfigurationRequest.Unmarshal(m, b) @@ -432,7 +432,7 @@ func (m *GetConfigurationResponse) Reset() { *m = GetConfigurationRespon func (m *GetConfigurationResponse) String() string { return proto.CompactTextString(m) } func (*GetConfigurationResponse) ProtoMessage() {} func (*GetConfigurationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{6} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{6} } func (m *GetConfigurationResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetConfigurationResponse.Unmarshal(m, b) @@ -478,7 +478,7 @@ func (m *UpdateConfigurationRequest) Reset() { *m = UpdateConfigurationR func (m *UpdateConfigurationRequest) String() string { return proto.CompactTextString(m) } func (*UpdateConfigurationRequest) ProtoMessage() {} func (*UpdateConfigurationRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{7} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{7} } func (m *UpdateConfigurationRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpdateConfigurationRequest.Unmarshal(m, b) @@ -532,7 +532,7 @@ func (m *UpdateConfigurationResponse) Reset() { *m = UpdateConfiguration func (m *UpdateConfigurationResponse) String() string { return proto.CompactTextString(m) } func (*UpdateConfigurationResponse) ProtoMessage() {} func (*UpdateConfigurationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{8} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{8} } func (m *UpdateConfigurationResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpdateConfigurationResponse.Unmarshal(m, b) @@ -573,7 +573,7 @@ func (m *ResetConfigurationRequest) Reset() { *m = ResetConfigurationReq func (m *ResetConfigurationRequest) String() string { return proto.CompactTextString(m) } func (*ResetConfigurationRequest) ProtoMessage() {} func (*ResetConfigurationRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{9} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{9} } func (m *ResetConfigurationRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResetConfigurationRequest.Unmarshal(m, b) @@ -613,7 +613,7 @@ func (m *ResetConfigurationResponse) Reset() { *m = ResetConfigurationRe func (m *ResetConfigurationResponse) String() string { return proto.CompactTextString(m) } func (*ResetConfigurationResponse) ProtoMessage() {} func (*ResetConfigurationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{10} + return fileDescriptor_throttlerdata_d10a8d735853021e, []int{10} } func (m *ResetConfigurationResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResetConfigurationResponse.Unmarshal(m, b) @@ -656,9 +656,9 @@ func init() { proto.RegisterType((*ResetConfigurationResponse)(nil), "throttlerdata.ResetConfigurationResponse") } -func init() { proto.RegisterFile("throttlerdata.proto", fileDescriptor_throttlerdata_7d084fd3a7704c85) } +func init() { proto.RegisterFile("throttlerdata.proto", fileDescriptor_throttlerdata_d10a8d735853021e) } -var 
fileDescriptor_throttlerdata_7d084fd3a7704c85 = []byte{ +var fileDescriptor_throttlerdata_d10a8d735853021e = []byte{ // 734 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5f, 0x4f, 0x03, 0x45, 0x10, 0xcf, 0x51, 0x8a, 0x30, 0xa5, 0x40, 0x17, 0x84, 0xa3, 0x18, 0x53, 0x2f, 0x31, 0x36, 0x8d, diff --git a/go/vt/proto/throttlerservice/throttlerservice.pb.go b/go/vt/proto/throttlerservice/throttlerservice.pb.go index ecbe7cdac2..988d592e40 100644 --- a/go/vt/proto/throttlerservice/throttlerservice.pb.go +++ b/go/vt/proto/throttlerservice/throttlerservice.pb.go @@ -255,10 +255,10 @@ var _Throttler_serviceDesc = grpc.ServiceDesc{ } func init() { - proto.RegisterFile("throttlerservice.proto", fileDescriptor_throttlerservice_151ce3faa7ac0b15) + proto.RegisterFile("throttlerservice.proto", fileDescriptor_throttlerservice_8b1d9f2a5de89835) } -var fileDescriptor_throttlerservice_151ce3faa7ac0b15 = []byte{ +var fileDescriptor_throttlerservice_8b1d9f2a5de89835 = []byte{ // 241 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x3d, 0x4b, 0xc4, 0x40, 0x10, 0x86, 0x05, 0x41, 0x74, 0xaa, 0x63, 0x0f, 0x2c, 0xae, 0xf0, 0xab, 0x50, 0x4f, 0x30, 0x0b, diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index d3903dbfd8..504f39b0ea 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -48,7 +48,7 @@ func (x KeyspaceIdType) String() string { return proto.EnumName(KeyspaceIdType_name, int32(x)) } func (KeyspaceIdType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{0} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{0} } // TabletType represents the type of a given tablet. 
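The remaining .pb.go files in this diff change only in one mechanical way: the hash suffix of each fileDescriptor_<proto>_<hash> variable is recomputed when the protos are regenerated, so the variable (and every Descriptor() reference to it) is renamed in throttlerdata, throttlerservice, topodata, vschema, vtctldata, vtctlservice and vtgate even though their descriptor bytes are otherwise unchanged. Each such variable holds a gzip-compressed FileDescriptorProto; the sketch below shows one way to decompress and inspect one. The specific variable named in the commented call is just an example from this diff, and the call would have to live inside the generated package since the variable is unexported.

// Sketch: decoding a generated fileDescriptor_* byte slice to list the
// services and methods it declares.
package main

import (
	"bytes"
	"compress/gzip"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func dumpDescriptor(gz []byte) {
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		log.Fatal(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fd := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fd); err != nil {
		log.Fatal(err)
	}
	log.Printf("file=%s package=%s", fd.GetName(), fd.GetPackage())
	for _, svc := range fd.GetService() {
		for _, m := range svc.GetMethod() {
			log.Printf("  %s.%s", svc.GetName(), m.GetName())
		}
	}
}

func main() {
	// dumpDescriptor(fileDescriptor_throttlerdata_d10a8d735853021e) // only valid inside the generated package
	_ = dumpDescriptor
}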
@@ -117,7 +117,7 @@ func (x TabletType) String() string { return proto.EnumName(TabletType_name, int32(x)) } func (TabletType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{1} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{1} } // KeyRange describes a range of sharding keys, when range-based @@ -134,7 +134,7 @@ func (m *KeyRange) Reset() { *m = KeyRange{} } func (m *KeyRange) String() string { return proto.CompactTextString(m) } func (*KeyRange) ProtoMessage() {} func (*KeyRange) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{0} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{0} } func (m *KeyRange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_KeyRange.Unmarshal(m, b) @@ -184,7 +184,7 @@ func (m *TabletAlias) Reset() { *m = TabletAlias{} } func (m *TabletAlias) String() string { return proto.CompactTextString(m) } func (*TabletAlias) ProtoMessage() {} func (*TabletAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{1} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{1} } func (m *TabletAlias) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TabletAlias.Unmarshal(m, b) @@ -260,7 +260,7 @@ func (m *Tablet) Reset() { *m = Tablet{} } func (m *Tablet) String() string { return proto.CompactTextString(m) } func (*Tablet) ProtoMessage() {} func (*Tablet) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{2} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{2} } func (m *Tablet) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Tablet.Unmarshal(m, b) @@ -394,7 +394,7 @@ func (m *Shard) Reset() { *m = Shard{} } func (m *Shard) String() string { return proto.CompactTextString(m) } func (*Shard) ProtoMessage() {} func (*Shard) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{3} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{3} } func (m *Shard) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Shard.Unmarshal(m, b) @@ -469,7 +469,7 @@ func (m *Shard_ServedType) Reset() { *m = Shard_ServedType{} } func (m *Shard_ServedType) String() string { return proto.CompactTextString(m) } func (*Shard_ServedType) ProtoMessage() {} func (*Shard_ServedType) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{3, 0} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{3, 0} } func (m *Shard_ServedType) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Shard_ServedType.Unmarshal(m, b) @@ -526,7 +526,7 @@ func (m *Shard_SourceShard) Reset() { *m = Shard_SourceShard{} } func (m *Shard_SourceShard) String() string { return proto.CompactTextString(m) } func (*Shard_SourceShard) ProtoMessage() {} func (*Shard_SourceShard) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{3, 1} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{3, 1} } func (m *Shard_SourceShard) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Shard_SourceShard.Unmarshal(m, b) @@ -601,7 +601,7 @@ func (m *Shard_TabletControl) Reset() { *m = Shard_TabletControl{} } func (m *Shard_TabletControl) String() string { return proto.CompactTextString(m) } func (*Shard_TabletControl) ProtoMessage() {} func (*Shard_TabletControl) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{3, 2} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{3, 2} } func (m 
*Shard_TabletControl) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Shard_TabletControl.Unmarshal(m, b) @@ -676,7 +676,7 @@ func (m *Keyspace) Reset() { *m = Keyspace{} } func (m *Keyspace) String() string { return proto.CompactTextString(m) } func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{4} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{4} } func (m *Keyspace) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Keyspace.Unmarshal(m, b) @@ -735,7 +735,7 @@ func (m *Keyspace_ServedFrom) Reset() { *m = Keyspace_ServedFrom{} } func (m *Keyspace_ServedFrom) String() string { return proto.CompactTextString(m) } func (*Keyspace_ServedFrom) ProtoMessage() {} func (*Keyspace_ServedFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{4, 0} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{4, 0} } func (m *Keyspace_ServedFrom) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Keyspace_ServedFrom.Unmarshal(m, b) @@ -791,7 +791,7 @@ func (m *ShardReplication) Reset() { *m = ShardReplication{} } func (m *ShardReplication) String() string { return proto.CompactTextString(m) } func (*ShardReplication) ProtoMessage() {} func (*ShardReplication) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{5} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{5} } func (m *ShardReplication) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShardReplication.Unmarshal(m, b) @@ -830,7 +830,7 @@ func (m *ShardReplication_Node) Reset() { *m = ShardReplication_Node{} } func (m *ShardReplication_Node) String() string { return proto.CompactTextString(m) } func (*ShardReplication_Node) ProtoMessage() {} func (*ShardReplication_Node) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{5, 0} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{5, 0} } func (m *ShardReplication_Node) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShardReplication_Node.Unmarshal(m, b) @@ -871,7 +871,7 @@ func (m *ShardReference) Reset() { *m = ShardReference{} } func (m *ShardReference) String() string { return proto.CompactTextString(m) } func (*ShardReference) ProtoMessage() {} func (*ShardReference) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{6} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{6} } func (m *ShardReference) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShardReference.Unmarshal(m, b) @@ -922,7 +922,7 @@ func (m *SrvKeyspace) Reset() { *m = SrvKeyspace{} } func (m *SrvKeyspace) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace) ProtoMessage() {} func (*SrvKeyspace) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{7} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{7} } func (m *SrvKeyspace) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SrvKeyspace.Unmarshal(m, b) @@ -984,7 +984,7 @@ func (m *SrvKeyspace_KeyspacePartition) Reset() { *m = SrvKeyspace_Keysp func (m *SrvKeyspace_KeyspacePartition) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {} func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{7, 0} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{7, 0} } func (m 
*SrvKeyspace_KeyspacePartition) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Unmarshal(m, b) @@ -1034,7 +1034,7 @@ func (m *SrvKeyspace_ServedFrom) Reset() { *m = SrvKeyspace_ServedFrom{} func (m *SrvKeyspace_ServedFrom) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace_ServedFrom) ProtoMessage() {} func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{7, 1} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{7, 1} } func (m *SrvKeyspace_ServedFrom) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SrvKeyspace_ServedFrom.Unmarshal(m, b) @@ -1092,7 +1092,7 @@ func (m *CellInfo) Reset() { *m = CellInfo{} } func (m *CellInfo) String() string { return proto.CompactTextString(m) } func (*CellInfo) ProtoMessage() {} func (*CellInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_topodata_23985cc74c86747c, []int{8} + return fileDescriptor_topodata_693bf5422a92a7f4, []int{8} } func (m *CellInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CellInfo.Unmarshal(m, b) @@ -1156,9 +1156,9 @@ func init() { proto.RegisterEnum("topodata.TabletType", TabletType_name, TabletType_value) } -func init() { proto.RegisterFile("topodata.proto", fileDescriptor_topodata_23985cc74c86747c) } +func init() { proto.RegisterFile("topodata.proto", fileDescriptor_topodata_693bf5422a92a7f4) } -var fileDescriptor_topodata_23985cc74c86747c = []byte{ +var fileDescriptor_topodata_693bf5422a92a7f4 = []byte{ // 1162 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x6f, 0x8f, 0xda, 0x46, 0x13, 0x7f, 0x0c, 0x86, 0x33, 0x63, 0x8e, 0x38, 0xfb, 0x24, 0x95, 0xe5, 0x2a, 0x2a, 0x42, 0x8a, diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index e6436628cd..4a3aa16b0d 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -34,7 +34,7 @@ func (m *Keyspace) Reset() { *m = Keyspace{} } func (m *Keyspace) String() string { return proto.CompactTextString(m) } func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { - return fileDescriptor_vschema_5ecfaf46981fe072, []int{0} + return fileDescriptor_vschema_58a865bec489dd60, []int{0} } func (m *Keyspace) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Keyspace.Unmarshal(m, b) @@ -99,7 +99,7 @@ func (m *Vindex) Reset() { *m = Vindex{} } func (m *Vindex) String() string { return proto.CompactTextString(m) } func (*Vindex) ProtoMessage() {} func (*Vindex) Descriptor() ([]byte, []int) { - return fileDescriptor_vschema_5ecfaf46981fe072, []int{1} + return fileDescriptor_vschema_58a865bec489dd60, []int{1} } func (m *Vindex) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Vindex.Unmarshal(m, b) @@ -170,7 +170,7 @@ func (m *Table) Reset() { *m = Table{} } func (m *Table) String() string { return proto.CompactTextString(m) } func (*Table) ProtoMessage() {} func (*Table) Descriptor() ([]byte, []int) { - return fileDescriptor_vschema_5ecfaf46981fe072, []int{2} + return fileDescriptor_vschema_58a865bec489dd60, []int{2} } func (m *Table) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Table.Unmarshal(m, b) @@ -249,7 +249,7 @@ func (m *ColumnVindex) Reset() { *m = ColumnVindex{} } func (m *ColumnVindex) String() string { return proto.CompactTextString(m) } func (*ColumnVindex) ProtoMessage() {} func (*ColumnVindex) Descriptor() ([]byte, []int) { - return 
fileDescriptor_vschema_5ecfaf46981fe072, []int{3} + return fileDescriptor_vschema_58a865bec489dd60, []int{3} } func (m *ColumnVindex) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ColumnVindex.Unmarshal(m, b) @@ -304,7 +304,7 @@ func (m *AutoIncrement) Reset() { *m = AutoIncrement{} } func (m *AutoIncrement) String() string { return proto.CompactTextString(m) } func (*AutoIncrement) ProtoMessage() {} func (*AutoIncrement) Descriptor() ([]byte, []int) { - return fileDescriptor_vschema_5ecfaf46981fe072, []int{4} + return fileDescriptor_vschema_58a865bec489dd60, []int{4} } func (m *AutoIncrement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AutoIncrement.Unmarshal(m, b) @@ -351,7 +351,7 @@ func (m *Column) Reset() { *m = Column{} } func (m *Column) String() string { return proto.CompactTextString(m) } func (*Column) ProtoMessage() {} func (*Column) Descriptor() ([]byte, []int) { - return fileDescriptor_vschema_5ecfaf46981fe072, []int{5} + return fileDescriptor_vschema_58a865bec489dd60, []int{5} } func (m *Column) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Column.Unmarshal(m, b) @@ -398,7 +398,7 @@ func (m *SrvVSchema) Reset() { *m = SrvVSchema{} } func (m *SrvVSchema) String() string { return proto.CompactTextString(m) } func (*SrvVSchema) ProtoMessage() {} func (*SrvVSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_vschema_5ecfaf46981fe072, []int{6} + return fileDescriptor_vschema_58a865bec489dd60, []int{6} } func (m *SrvVSchema) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SrvVSchema.Unmarshal(m, b) @@ -439,9 +439,9 @@ func init() { proto.RegisterMapType((map[string]*Keyspace)(nil), "vschema.SrvVSchema.KeyspacesEntry") } -func init() { proto.RegisterFile("vschema.proto", fileDescriptor_vschema_5ecfaf46981fe072) } +func init() { proto.RegisterFile("vschema.proto", fileDescriptor_vschema_58a865bec489dd60) } -var fileDescriptor_vschema_5ecfaf46981fe072 = []byte{ +var fileDescriptor_vschema_58a865bec489dd60 = []byte{ // 562 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0x41, 0x6f, 0xd3, 0x4c, 0x10, 0x95, 0x93, 0xc6, 0x4d, 0xc6, 0x5f, 0xd2, 0x8f, 0x55, 0x29, 0xc6, 0x08, 0x35, 0xb2, 0x0a, diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 849b81f15e..6f3860b8ef 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -33,7 +33,7 @@ func (m *ExecuteVtctlCommandRequest) Reset() { *m = ExecuteVtctlCommandR func (m *ExecuteVtctlCommandRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteVtctlCommandRequest) ProtoMessage() {} func (*ExecuteVtctlCommandRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtctldata_1ba3ba7c409e0e97, []int{0} + return fileDescriptor_vtctldata_116d6c451a061272, []int{0} } func (m *ExecuteVtctlCommandRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteVtctlCommandRequest.Unmarshal(m, b) @@ -79,7 +79,7 @@ func (m *ExecuteVtctlCommandResponse) Reset() { *m = ExecuteVtctlCommand func (m *ExecuteVtctlCommandResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteVtctlCommandResponse) ProtoMessage() {} func (*ExecuteVtctlCommandResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtctldata_1ba3ba7c409e0e97, []int{1} + return fileDescriptor_vtctldata_116d6c451a061272, []int{1} } func (m *ExecuteVtctlCommandResponse) XXX_Unmarshal(b []byte) error { return 
xxx_messageInfo_ExecuteVtctlCommandResponse.Unmarshal(m, b) @@ -111,9 +111,9 @@ func init() { proto.RegisterType((*ExecuteVtctlCommandResponse)(nil), "vtctldata.ExecuteVtctlCommandResponse") } -func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_vtctldata_1ba3ba7c409e0e97) } +func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_vtctldata_116d6c451a061272) } -var fileDescriptor_vtctldata_1ba3ba7c409e0e97 = []byte{ +var fileDescriptor_vtctldata_116d6c451a061272 = []byte{ // 200 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0xd1, 0x4a, 0x87, 0x30, 0x14, 0x06, 0x70, 0xd6, 0xbf, 0x82, 0xff, 0x42, 0x83, 0x5d, 0x89, 0xdd, 0x88, 0x54, 0xec, 0xca, diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go index 4214d5d15a..9dd938fd48 100644 --- a/go/vt/proto/vtctlservice/vtctlservice.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -123,9 +123,9 @@ var _Vtctl_serviceDesc = grpc.ServiceDesc{ Metadata: "vtctlservice.proto", } -func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor_vtctlservice_a3582c3eb674ce30) } +func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor_vtctlservice_af4114a311e29c50) } -var fileDescriptor_vtctlservice_a3582c3eb674ce30 = []byte{ +var fileDescriptor_vtctlservice_af4114a311e29c50 = []byte{ // 146 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e, 0xc9, 0x29, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go index 51e445dc61..ae204d1e8a 100644 --- a/go/vt/proto/vtgate/vtgate.pb.go +++ b/go/vt/proto/vtgate/vtgate.pb.go @@ -53,7 +53,7 @@ func (x TransactionMode) String() string { return proto.EnumName(TransactionMode_name, int32(x)) } func (TransactionMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{0} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{0} } // Session objects are exchanged like cookies through various @@ -98,7 +98,7 @@ func (m *Session) Reset() { *m = Session{} } func (m *Session) String() string { return proto.CompactTextString(m) } func (*Session) ProtoMessage() {} func (*Session) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{0} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{0} } func (m *Session) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Session.Unmarshal(m, b) @@ -186,7 +186,7 @@ func (m *Session_ShardSession) Reset() { *m = Session_ShardSession{} } func (m *Session_ShardSession) String() string { return proto.CompactTextString(m) } func (*Session_ShardSession) ProtoMessage() {} func (*Session_ShardSession) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{0, 0} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{0, 0} } func (m *Session_ShardSession) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Session_ShardSession.Unmarshal(m, b) @@ -244,7 +244,7 @@ func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteRequest) ProtoMessage() {} func (*ExecuteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{1} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{1} } func (m *ExecuteRequest) 
XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteRequest.Unmarshal(m, b) @@ -332,7 +332,7 @@ func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteResponse) ProtoMessage() {} func (*ExecuteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{2} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{2} } func (m *ExecuteResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteResponse.Unmarshal(m, b) @@ -402,7 +402,7 @@ func (m *ExecuteShardsRequest) Reset() { *m = ExecuteShardsRequest{} } func (m *ExecuteShardsRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteShardsRequest) ProtoMessage() {} func (*ExecuteShardsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{3} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{3} } func (m *ExecuteShardsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteShardsRequest.Unmarshal(m, b) @@ -497,7 +497,7 @@ func (m *ExecuteShardsResponse) Reset() { *m = ExecuteShardsResponse{} } func (m *ExecuteShardsResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteShardsResponse) ProtoMessage() {} func (*ExecuteShardsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{4} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{4} } func (m *ExecuteShardsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteShardsResponse.Unmarshal(m, b) @@ -568,7 +568,7 @@ func (m *ExecuteKeyspaceIdsRequest) Reset() { *m = ExecuteKeyspaceIdsReq func (m *ExecuteKeyspaceIdsRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteKeyspaceIdsRequest) ProtoMessage() {} func (*ExecuteKeyspaceIdsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{5} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{5} } func (m *ExecuteKeyspaceIdsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteKeyspaceIdsRequest.Unmarshal(m, b) @@ -663,7 +663,7 @@ func (m *ExecuteKeyspaceIdsResponse) Reset() { *m = ExecuteKeyspaceIdsRe func (m *ExecuteKeyspaceIdsResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteKeyspaceIdsResponse) ProtoMessage() {} func (*ExecuteKeyspaceIdsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{6} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{6} } func (m *ExecuteKeyspaceIdsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteKeyspaceIdsResponse.Unmarshal(m, b) @@ -734,7 +734,7 @@ func (m *ExecuteKeyRangesRequest) Reset() { *m = ExecuteKeyRangesRequest func (m *ExecuteKeyRangesRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteKeyRangesRequest) ProtoMessage() {} func (*ExecuteKeyRangesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{7} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{7} } func (m *ExecuteKeyRangesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteKeyRangesRequest.Unmarshal(m, b) @@ -829,7 +829,7 @@ func (m *ExecuteKeyRangesResponse) Reset() { *m = ExecuteKeyRangesRespon func (m *ExecuteKeyRangesResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteKeyRangesResponse) ProtoMessage() {} func (*ExecuteKeyRangesResponse) 
Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{8} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{8} } func (m *ExecuteKeyRangesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteKeyRangesResponse.Unmarshal(m, b) @@ -902,7 +902,7 @@ func (m *ExecuteEntityIdsRequest) Reset() { *m = ExecuteEntityIdsRequest func (m *ExecuteEntityIdsRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteEntityIdsRequest) ProtoMessage() {} func (*ExecuteEntityIdsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{9} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{9} } func (m *ExecuteEntityIdsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteEntityIdsRequest.Unmarshal(m, b) @@ -1001,7 +1001,7 @@ func (m *ExecuteEntityIdsRequest_EntityId) Reset() { *m = ExecuteEntityI func (m *ExecuteEntityIdsRequest_EntityId) String() string { return proto.CompactTextString(m) } func (*ExecuteEntityIdsRequest_EntityId) ProtoMessage() {} func (*ExecuteEntityIdsRequest_EntityId) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{9, 0} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{9, 0} } func (m *ExecuteEntityIdsRequest_EntityId) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteEntityIdsRequest_EntityId.Unmarshal(m, b) @@ -1061,7 +1061,7 @@ func (m *ExecuteEntityIdsResponse) Reset() { *m = ExecuteEntityIdsRespon func (m *ExecuteEntityIdsResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteEntityIdsResponse) ProtoMessage() {} func (*ExecuteEntityIdsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{10} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{10} } func (m *ExecuteEntityIdsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteEntityIdsResponse.Unmarshal(m, b) @@ -1126,7 +1126,7 @@ func (m *ExecuteBatchRequest) Reset() { *m = ExecuteBatchRequest{} } func (m *ExecuteBatchRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteBatchRequest) ProtoMessage() {} func (*ExecuteBatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{11} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{11} } func (m *ExecuteBatchRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteBatchRequest.Unmarshal(m, b) @@ -1214,7 +1214,7 @@ func (m *ExecuteBatchResponse) Reset() { *m = ExecuteBatchResponse{} } func (m *ExecuteBatchResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteBatchResponse) ProtoMessage() {} func (*ExecuteBatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{12} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{12} } func (m *ExecuteBatchResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteBatchResponse.Unmarshal(m, b) @@ -1274,7 +1274,7 @@ func (m *BoundShardQuery) Reset() { *m = BoundShardQuery{} } func (m *BoundShardQuery) String() string { return proto.CompactTextString(m) } func (*BoundShardQuery) ProtoMessage() {} func (*BoundShardQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{13} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{13} } func (m *BoundShardQuery) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BoundShardQuery.Unmarshal(m, b) @@ -1342,7 +1342,7 @@ func (m 
*ExecuteBatchShardsRequest) Reset() { *m = ExecuteBatchShardsReq func (m *ExecuteBatchShardsRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteBatchShardsRequest) ProtoMessage() {} func (*ExecuteBatchShardsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{14} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{14} } func (m *ExecuteBatchShardsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteBatchShardsRequest.Unmarshal(m, b) @@ -1423,7 +1423,7 @@ func (m *ExecuteBatchShardsResponse) Reset() { *m = ExecuteBatchShardsRe func (m *ExecuteBatchShardsResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteBatchShardsResponse) ProtoMessage() {} func (*ExecuteBatchShardsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{15} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{15} } func (m *ExecuteBatchShardsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteBatchShardsResponse.Unmarshal(m, b) @@ -1484,7 +1484,7 @@ func (m *BoundKeyspaceIdQuery) Reset() { *m = BoundKeyspaceIdQuery{} } func (m *BoundKeyspaceIdQuery) String() string { return proto.CompactTextString(m) } func (*BoundKeyspaceIdQuery) ProtoMessage() {} func (*BoundKeyspaceIdQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{16} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{16} } func (m *BoundKeyspaceIdQuery) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BoundKeyspaceIdQuery.Unmarshal(m, b) @@ -1551,7 +1551,7 @@ func (m *ExecuteBatchKeyspaceIdsRequest) Reset() { *m = ExecuteBatchKeys func (m *ExecuteBatchKeyspaceIdsRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteBatchKeyspaceIdsRequest) ProtoMessage() {} func (*ExecuteBatchKeyspaceIdsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{17} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{17} } func (m *ExecuteBatchKeyspaceIdsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteBatchKeyspaceIdsRequest.Unmarshal(m, b) @@ -1632,7 +1632,7 @@ func (m *ExecuteBatchKeyspaceIdsResponse) Reset() { *m = ExecuteBatchKey func (m *ExecuteBatchKeyspaceIdsResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteBatchKeyspaceIdsResponse) ProtoMessage() {} func (*ExecuteBatchKeyspaceIdsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{18} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{18} } func (m *ExecuteBatchKeyspaceIdsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteBatchKeyspaceIdsResponse.Unmarshal(m, b) @@ -1696,7 +1696,7 @@ func (m *StreamExecuteRequest) Reset() { *m = StreamExecuteRequest{} } func (m *StreamExecuteRequest) String() string { return proto.CompactTextString(m) } func (*StreamExecuteRequest) ProtoMessage() {} func (*StreamExecuteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{19} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{19} } func (m *StreamExecuteRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteRequest.Unmarshal(m, b) @@ -1775,7 +1775,7 @@ func (m *StreamExecuteResponse) Reset() { *m = StreamExecuteResponse{} } func (m *StreamExecuteResponse) String() string { return proto.CompactTextString(m) } func (*StreamExecuteResponse) ProtoMessage() {} func 
(*StreamExecuteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{20} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{20} } func (m *StreamExecuteResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteResponse.Unmarshal(m, b) @@ -1826,7 +1826,7 @@ func (m *StreamExecuteShardsRequest) Reset() { *m = StreamExecuteShardsR func (m *StreamExecuteShardsRequest) String() string { return proto.CompactTextString(m) } func (*StreamExecuteShardsRequest) ProtoMessage() {} func (*StreamExecuteShardsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{21} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{21} } func (m *StreamExecuteShardsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteShardsRequest.Unmarshal(m, b) @@ -1903,7 +1903,7 @@ func (m *StreamExecuteShardsResponse) Reset() { *m = StreamExecuteShards func (m *StreamExecuteShardsResponse) String() string { return proto.CompactTextString(m) } func (*StreamExecuteShardsResponse) ProtoMessage() {} func (*StreamExecuteShardsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{22} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{22} } func (m *StreamExecuteShardsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteShardsResponse.Unmarshal(m, b) @@ -1955,7 +1955,7 @@ func (m *StreamExecuteKeyspaceIdsRequest) Reset() { *m = StreamExecuteKe func (m *StreamExecuteKeyspaceIdsRequest) String() string { return proto.CompactTextString(m) } func (*StreamExecuteKeyspaceIdsRequest) ProtoMessage() {} func (*StreamExecuteKeyspaceIdsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{23} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{23} } func (m *StreamExecuteKeyspaceIdsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteKeyspaceIdsRequest.Unmarshal(m, b) @@ -2032,7 +2032,7 @@ func (m *StreamExecuteKeyspaceIdsResponse) Reset() { *m = StreamExecuteK func (m *StreamExecuteKeyspaceIdsResponse) String() string { return proto.CompactTextString(m) } func (*StreamExecuteKeyspaceIdsResponse) ProtoMessage() {} func (*StreamExecuteKeyspaceIdsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{24} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{24} } func (m *StreamExecuteKeyspaceIdsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteKeyspaceIdsResponse.Unmarshal(m, b) @@ -2084,7 +2084,7 @@ func (m *StreamExecuteKeyRangesRequest) Reset() { *m = StreamExecuteKeyR func (m *StreamExecuteKeyRangesRequest) String() string { return proto.CompactTextString(m) } func (*StreamExecuteKeyRangesRequest) ProtoMessage() {} func (*StreamExecuteKeyRangesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{25} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{25} } func (m *StreamExecuteKeyRangesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteKeyRangesRequest.Unmarshal(m, b) @@ -2161,7 +2161,7 @@ func (m *StreamExecuteKeyRangesResponse) Reset() { *m = StreamExecuteKey func (m *StreamExecuteKeyRangesResponse) String() string { return proto.CompactTextString(m) } func (*StreamExecuteKeyRangesResponse) ProtoMessage() {} func (*StreamExecuteKeyRangesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{26} + 
return fileDescriptor_vtgate_8f5c6038eac4796e, []int{26} } func (m *StreamExecuteKeyRangesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamExecuteKeyRangesResponse.Unmarshal(m, b) @@ -2207,7 +2207,7 @@ func (m *BeginRequest) Reset() { *m = BeginRequest{} } func (m *BeginRequest) String() string { return proto.CompactTextString(m) } func (*BeginRequest) ProtoMessage() {} func (*BeginRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{27} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{27} } func (m *BeginRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BeginRequest.Unmarshal(m, b) @@ -2254,7 +2254,7 @@ func (m *BeginResponse) Reset() { *m = BeginResponse{} } func (m *BeginResponse) String() string { return proto.CompactTextString(m) } func (*BeginResponse) ProtoMessage() {} func (*BeginResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{28} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{28} } func (m *BeginResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BeginResponse.Unmarshal(m, b) @@ -2302,7 +2302,7 @@ func (m *CommitRequest) Reset() { *m = CommitRequest{} } func (m *CommitRequest) String() string { return proto.CompactTextString(m) } func (*CommitRequest) ProtoMessage() {} func (*CommitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{29} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{29} } func (m *CommitRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CommitRequest.Unmarshal(m, b) @@ -2354,7 +2354,7 @@ func (m *CommitResponse) Reset() { *m = CommitResponse{} } func (m *CommitResponse) String() string { return proto.CompactTextString(m) } func (*CommitResponse) ProtoMessage() {} func (*CommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{30} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{30} } func (m *CommitResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CommitResponse.Unmarshal(m, b) @@ -2390,7 +2390,7 @@ func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } func (*RollbackRequest) ProtoMessage() {} func (*RollbackRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{31} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{31} } func (m *RollbackRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RollbackRequest.Unmarshal(m, b) @@ -2435,7 +2435,7 @@ func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } func (*RollbackResponse) ProtoMessage() {} func (*RollbackResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{32} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{32} } func (m *RollbackResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RollbackResponse.Unmarshal(m, b) @@ -2471,7 +2471,7 @@ func (m *ResolveTransactionRequest) Reset() { *m = ResolveTransactionReq func (m *ResolveTransactionRequest) String() string { return proto.CompactTextString(m) } func (*ResolveTransactionRequest) ProtoMessage() {} func (*ResolveTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{33} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{33} } func (m 
*ResolveTransactionRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResolveTransactionRequest.Unmarshal(m, b) @@ -2527,7 +2527,7 @@ func (m *MessageStreamRequest) Reset() { *m = MessageStreamRequest{} } func (m *MessageStreamRequest) String() string { return proto.CompactTextString(m) } func (*MessageStreamRequest) ProtoMessage() {} func (*MessageStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{34} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{34} } func (m *MessageStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MessageStreamRequest.Unmarshal(m, b) @@ -2602,7 +2602,7 @@ func (m *MessageAckRequest) Reset() { *m = MessageAckRequest{} } func (m *MessageAckRequest) String() string { return proto.CompactTextString(m) } func (*MessageAckRequest) ProtoMessage() {} func (*MessageAckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{35} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{35} } func (m *MessageAckRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MessageAckRequest.Unmarshal(m, b) @@ -2666,7 +2666,7 @@ func (m *IdKeyspaceId) Reset() { *m = IdKeyspaceId{} } func (m *IdKeyspaceId) String() string { return proto.CompactTextString(m) } func (*IdKeyspaceId) ProtoMessage() {} func (*IdKeyspaceId) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{36} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{36} } func (m *IdKeyspaceId) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_IdKeyspaceId.Unmarshal(m, b) @@ -2719,7 +2719,7 @@ func (m *MessageAckKeyspaceIdsRequest) Reset() { *m = MessageAckKeyspace func (m *MessageAckKeyspaceIdsRequest) String() string { return proto.CompactTextString(m) } func (*MessageAckKeyspaceIdsRequest) ProtoMessage() {} func (*MessageAckKeyspaceIdsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{37} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{37} } func (m *MessageAckKeyspaceIdsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MessageAckKeyspaceIdsRequest.Unmarshal(m, b) @@ -2778,7 +2778,7 @@ func (m *ResolveTransactionResponse) Reset() { *m = ResolveTransactionRe func (m *ResolveTransactionResponse) String() string { return proto.CompactTextString(m) } func (*ResolveTransactionResponse) ProtoMessage() {} func (*ResolveTransactionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{38} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{38} } func (m *ResolveTransactionResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResolveTransactionResponse.Unmarshal(m, b) @@ -2803,7 +2803,7 @@ var xxx_messageInfo_ResolveTransactionResponse proto.InternalMessageInfo // SplitQuery takes a "SELECT" query and generates a list of queries called // "query-parts". Each query-part consists of the original query with an // added WHERE clause that restricts the query-part to operate only on -// rows whose values in the the columns listed in the "split_column" field +// rows whose values in the columns listed in the "split_column" field // of the request (see below) are in a particular range. 
// // It is guaranteed that the set of rows obtained from @@ -2896,7 +2896,7 @@ func (m *SplitQueryRequest) Reset() { *m = SplitQueryRequest{} } func (m *SplitQueryRequest) String() string { return proto.CompactTextString(m) } func (*SplitQueryRequest) ProtoMessage() {} func (*SplitQueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{39} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{39} } func (m *SplitQueryRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SplitQueryRequest.Unmarshal(m, b) @@ -2985,7 +2985,7 @@ func (m *SplitQueryResponse) Reset() { *m = SplitQueryResponse{} } func (m *SplitQueryResponse) String() string { return proto.CompactTextString(m) } func (*SplitQueryResponse) ProtoMessage() {} func (*SplitQueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{40} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{40} } func (m *SplitQueryResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SplitQueryResponse.Unmarshal(m, b) @@ -3026,7 +3026,7 @@ func (m *SplitQueryResponse_KeyRangePart) Reset() { *m = SplitQueryRespo func (m *SplitQueryResponse_KeyRangePart) String() string { return proto.CompactTextString(m) } func (*SplitQueryResponse_KeyRangePart) ProtoMessage() {} func (*SplitQueryResponse_KeyRangePart) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{40, 0} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{40, 0} } func (m *SplitQueryResponse_KeyRangePart) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SplitQueryResponse_KeyRangePart.Unmarshal(m, b) @@ -3074,7 +3074,7 @@ func (m *SplitQueryResponse_ShardPart) Reset() { *m = SplitQueryResponse func (m *SplitQueryResponse_ShardPart) String() string { return proto.CompactTextString(m) } func (*SplitQueryResponse_ShardPart) ProtoMessage() {} func (*SplitQueryResponse_ShardPart) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{40, 1} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{40, 1} } func (m *SplitQueryResponse_ShardPart) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SplitQueryResponse_ShardPart.Unmarshal(m, b) @@ -3127,7 +3127,7 @@ func (m *SplitQueryResponse_Part) Reset() { *m = SplitQueryResponse_Part func (m *SplitQueryResponse_Part) String() string { return proto.CompactTextString(m) } func (*SplitQueryResponse_Part) ProtoMessage() {} func (*SplitQueryResponse_Part) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{40, 2} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{40, 2} } func (m *SplitQueryResponse_Part) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SplitQueryResponse_Part.Unmarshal(m, b) @@ -3188,7 +3188,7 @@ func (m *GetSrvKeyspaceRequest) Reset() { *m = GetSrvKeyspaceRequest{} } func (m *GetSrvKeyspaceRequest) String() string { return proto.CompactTextString(m) } func (*GetSrvKeyspaceRequest) ProtoMessage() {} func (*GetSrvKeyspaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{41} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{41} } func (m *GetSrvKeyspaceRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSrvKeyspaceRequest.Unmarshal(m, b) @@ -3228,7 +3228,7 @@ func (m *GetSrvKeyspaceResponse) Reset() { *m = GetSrvKeyspaceResponse{} func (m *GetSrvKeyspaceResponse) String() string { return proto.CompactTextString(m) } func (*GetSrvKeyspaceResponse) 
ProtoMessage() {} func (*GetSrvKeyspaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{42} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{42} } func (m *GetSrvKeyspaceResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSrvKeyspaceResponse.Unmarshal(m, b) @@ -3286,7 +3286,7 @@ func (m *UpdateStreamRequest) Reset() { *m = UpdateStreamRequest{} } func (m *UpdateStreamRequest) String() string { return proto.CompactTextString(m) } func (*UpdateStreamRequest) ProtoMessage() {} func (*UpdateStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{43} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{43} } func (m *UpdateStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpdateStreamRequest.Unmarshal(m, b) @@ -3374,7 +3374,7 @@ func (m *UpdateStreamResponse) Reset() { *m = UpdateStreamResponse{} } func (m *UpdateStreamResponse) String() string { return proto.CompactTextString(m) } func (*UpdateStreamResponse) ProtoMessage() {} func (*UpdateStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtgate_071b9c990aff35bf, []int{44} + return fileDescriptor_vtgate_8f5c6038eac4796e, []int{44} } func (m *UpdateStreamResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpdateStreamResponse.Unmarshal(m, b) @@ -3462,9 +3462,9 @@ func init() { proto.RegisterEnum("vtgate.TransactionMode", TransactionMode_name, TransactionMode_value) } -func init() { proto.RegisterFile("vtgate.proto", fileDescriptor_vtgate_071b9c990aff35bf) } +func init() { proto.RegisterFile("vtgate.proto", fileDescriptor_vtgate_8f5c6038eac4796e) } -var fileDescriptor_vtgate_071b9c990aff35bf = []byte{ +var fileDescriptor_vtgate_8f5c6038eac4796e = []byte{ // 1883 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4f, 0x8f, 0x23, 0x47, 0x15, 0xa7, 0xbb, 0xfd, 0xf7, 0xf9, 0xef, 0xd6, 0x78, 0x77, 0x1d, 0x67, 0xd8, 0x99, 0x74, 0x18, diff --git a/go/vt/proto/vtgateservice/vtgateservice.pb.go b/go/vt/proto/vtgateservice/vtgateservice.pb.go index 7d9302b07f..3a0bc073da 100644 --- a/go/vt/proto/vtgateservice/vtgateservice.pb.go +++ b/go/vt/proto/vtgateservice/vtgateservice.pb.go @@ -1069,9 +1069,9 @@ var _Vitess_serviceDesc = grpc.ServiceDesc{ Metadata: "vtgateservice.proto", } -func init() { proto.RegisterFile("vtgateservice.proto", fileDescriptor_vtgateservice_7815d679f21c0eb2) } +func init() { proto.RegisterFile("vtgateservice.proto", fileDescriptor_vtgateservice_6694e3d05903167c) } -var fileDescriptor_vtgateservice_7815d679f21c0eb2 = []byte{ +var fileDescriptor_vtgateservice_6694e3d05903167c = []byte{ // 579 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x95, 0xdb, 0x6f, 0xd3, 0x30, 0x14, 0xc6, 0xe1, 0x81, 0x82, 0x0e, 0xed, 0x84, 0xbc, 0xad, 0xdb, 0xca, 0x75, 0x05, 0x36, 0xc4, diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index a65e873587..a9b53a78ef 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -168,7 +168,7 @@ func (x Code) String() string { return proto.EnumName(Code_name, int32(x)) } func (Code) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_vtrpc_63266364cf161411, []int{0} + return fileDescriptor_vtrpc_88a14d8f1bc03cf5, []int{0} } // LegacyErrorCode is the enum values for Errors. This type is deprecated. 
@@ -276,7 +276,7 @@ func (x LegacyErrorCode) String() string { return proto.EnumName(LegacyErrorCode_name, int32(x)) } func (LegacyErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_vtrpc_63266364cf161411, []int{1} + return fileDescriptor_vtrpc_88a14d8f1bc03cf5, []int{1} } // CallerID is passed along RPCs to identify the originating client @@ -311,7 +311,7 @@ func (m *CallerID) Reset() { *m = CallerID{} } func (m *CallerID) String() string { return proto.CompactTextString(m) } func (*CallerID) ProtoMessage() {} func (*CallerID) Descriptor() ([]byte, []int) { - return fileDescriptor_vtrpc_63266364cf161411, []int{0} + return fileDescriptor_vtrpc_88a14d8f1bc03cf5, []int{0} } func (m *CallerID) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CallerID.Unmarshal(m, b) @@ -369,7 +369,7 @@ func (m *RPCError) Reset() { *m = RPCError{} } func (m *RPCError) String() string { return proto.CompactTextString(m) } func (*RPCError) ProtoMessage() {} func (*RPCError) Descriptor() ([]byte, []int) { - return fileDescriptor_vtrpc_63266364cf161411, []int{1} + return fileDescriptor_vtrpc_88a14d8f1bc03cf5, []int{1} } func (m *RPCError) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RPCError.Unmarshal(m, b) @@ -417,9 +417,9 @@ func init() { proto.RegisterEnum("vtrpc.LegacyErrorCode", LegacyErrorCode_name, LegacyErrorCode_value) } -func init() { proto.RegisterFile("vtrpc.proto", fileDescriptor_vtrpc_63266364cf161411) } +func init() { proto.RegisterFile("vtrpc.proto", fileDescriptor_vtrpc_88a14d8f1bc03cf5) } -var fileDescriptor_vtrpc_63266364cf161411 = []byte{ +var fileDescriptor_vtrpc_88a14d8f1bc03cf5 = []byte{ // 605 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0x4d, 0x4f, 0x1b, 0x3b, 0x14, 0x86, 0xc9, 0x07, 0xf9, 0x38, 0x13, 0x88, 0x31, 0x5f, 0xe1, 0x5e, 0xae, 0xee, 0x55, 0x56, diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index ffb667b13f..b53058b7dc 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -37,7 +37,7 @@ func (m *Shard) Reset() { *m = Shard{} } func (m *Shard) String() string { return proto.CompactTextString(m) } func (*Shard) ProtoMessage() {} func (*Shard) Descriptor() ([]byte, []int) { - return fileDescriptor_vttest_beaece7261b82562, []int{0} + return fileDescriptor_vttest_6aca346ef34e15cf, []int{0} } func (m *Shard) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Shard.Unmarshal(m, b) @@ -96,7 +96,7 @@ func (m *Keyspace) Reset() { *m = Keyspace{} } func (m *Keyspace) String() string { return proto.CompactTextString(m) } func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { - return fileDescriptor_vttest_beaece7261b82562, []int{1} + return fileDescriptor_vttest_6aca346ef34e15cf, []int{1} } func (m *Keyspace) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Keyspace.Unmarshal(m, b) @@ -180,7 +180,7 @@ func (m *VTTestTopology) Reset() { *m = VTTestTopology{} } func (m *VTTestTopology) String() string { return proto.CompactTextString(m) } func (*VTTestTopology) ProtoMessage() {} func (*VTTestTopology) Descriptor() ([]byte, []int) { - return fileDescriptor_vttest_beaece7261b82562, []int{2} + return fileDescriptor_vttest_6aca346ef34e15cf, []int{2} } func (m *VTTestTopology) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VTTestTopology.Unmarshal(m, b) @@ -220,9 +220,9 @@ func init() { proto.RegisterType((*VTTestTopology)(nil), "vttest.VTTestTopology") } -func init() { 
proto.RegisterFile("vttest.proto", fileDescriptor_vttest_beaece7261b82562) } +func init() { proto.RegisterFile("vttest.proto", fileDescriptor_vttest_6aca346ef34e15cf) } -var fileDescriptor_vttest_beaece7261b82562 = []byte{ +var fileDescriptor_vttest_6aca346ef34e15cf = []byte{ // 322 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xcb, 0x6a, 0xe3, 0x40, 0x10, 0x44, 0xb6, 0xa5, 0x5d, 0xb7, 0x1f, 0x98, 0xc1, 0x87, 0xb9, 0xad, 0xd7, 0xc6, 0xa0, 0x93, diff --git a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go index 4944c62ba3..4213557157 100644 --- a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go +++ b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go @@ -31,7 +31,7 @@ func (m *ExecuteVtworkerCommandRequest) Reset() { *m = ExecuteVtworkerCo func (m *ExecuteVtworkerCommandRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteVtworkerCommandRequest) ProtoMessage() {} func (*ExecuteVtworkerCommandRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_vtworkerdata_9c94bc714251542b, []int{0} + return fileDescriptor_vtworkerdata_cae0940479e6aeb1, []int{0} } func (m *ExecuteVtworkerCommandRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteVtworkerCommandRequest.Unmarshal(m, b) @@ -70,7 +70,7 @@ func (m *ExecuteVtworkerCommandResponse) Reset() { *m = ExecuteVtworkerC func (m *ExecuteVtworkerCommandResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteVtworkerCommandResponse) ProtoMessage() {} func (*ExecuteVtworkerCommandResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_vtworkerdata_9c94bc714251542b, []int{1} + return fileDescriptor_vtworkerdata_cae0940479e6aeb1, []int{1} } func (m *ExecuteVtworkerCommandResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExecuteVtworkerCommandResponse.Unmarshal(m, b) @@ -102,9 +102,9 @@ func init() { proto.RegisterType((*ExecuteVtworkerCommandResponse)(nil), "vtworkerdata.ExecuteVtworkerCommandResponse") } -func init() { proto.RegisterFile("vtworkerdata.proto", fileDescriptor_vtworkerdata_9c94bc714251542b) } +func init() { proto.RegisterFile("vtworkerdata.proto", fileDescriptor_vtworkerdata_cae0940479e6aeb1) } -var fileDescriptor_vtworkerdata_9c94bc714251542b = []byte{ +var fileDescriptor_vtworkerdata_cae0940479e6aeb1 = []byte{ // 175 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x29, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x4a, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, diff --git a/go/vt/proto/vtworkerservice/vtworkerservice.pb.go b/go/vt/proto/vtworkerservice/vtworkerservice.pb.go index 015b0945e9..bc38e2eca4 100644 --- a/go/vt/proto/vtworkerservice/vtworkerservice.pb.go +++ b/go/vt/proto/vtworkerservice/vtworkerservice.pb.go @@ -128,10 +128,10 @@ var _Vtworker_serviceDesc = grpc.ServiceDesc{ } func init() { - proto.RegisterFile("vtworkerservice.proto", fileDescriptor_vtworkerservice_0d36cb3c7cfddd2b) + proto.RegisterFile("vtworkerservice.proto", fileDescriptor_vtworkerservice_4efa3310356e3c00) } -var fileDescriptor_vtworkerservice_0d36cb3c7cfddd2b = []byte{ +var fileDescriptor_vtworkerservice_4efa3310356e3c00 = []byte{ // 151 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x2b, 0x29, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, diff --git 
a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go index 5b0fb41e1c..8c173f294b 100644 --- a/go/vt/proto/workflow/workflow.pb.go +++ b/go/vt/proto/workflow/workflow.pb.go @@ -45,7 +45,7 @@ func (x WorkflowState) String() string { return proto.EnumName(WorkflowState_name, int32(x)) } func (WorkflowState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_workflow_cc5eebeb403313d8, []int{0} + return fileDescriptor_workflow_daba593a7423a6c7, []int{0} } type TaskState int32 @@ -71,7 +71,7 @@ func (x TaskState) String() string { return proto.EnumName(TaskState_name, int32(x)) } func (TaskState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_workflow_cc5eebeb403313d8, []int{1} + return fileDescriptor_workflow_daba593a7423a6c7, []int{1} } // Workflow is the persisted state of a long-running workflow. @@ -119,7 +119,7 @@ func (m *Workflow) Reset() { *m = Workflow{} } func (m *Workflow) String() string { return proto.CompactTextString(m) } func (*Workflow) ProtoMessage() {} func (*Workflow) Descriptor() ([]byte, []int) { - return fileDescriptor_workflow_cc5eebeb403313d8, []int{0} + return fileDescriptor_workflow_daba593a7423a6c7, []int{0} } func (m *Workflow) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Workflow.Unmarshal(m, b) @@ -223,7 +223,7 @@ func (m *WorkflowCheckpoint) Reset() { *m = WorkflowCheckpoint{} } func (m *WorkflowCheckpoint) String() string { return proto.CompactTextString(m) } func (*WorkflowCheckpoint) ProtoMessage() {} func (*WorkflowCheckpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_workflow_cc5eebeb403313d8, []int{1} + return fileDescriptor_workflow_daba593a7423a6c7, []int{1} } func (m *WorkflowCheckpoint) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_WorkflowCheckpoint.Unmarshal(m, b) @@ -279,7 +279,7 @@ func (m *Task) Reset() { *m = Task{} } func (m *Task) String() string { return proto.CompactTextString(m) } func (*Task) ProtoMessage() {} func (*Task) Descriptor() ([]byte, []int) { - return fileDescriptor_workflow_cc5eebeb403313d8, []int{2} + return fileDescriptor_workflow_daba593a7423a6c7, []int{2} } func (m *Task) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Task.Unmarshal(m, b) @@ -338,9 +338,9 @@ func init() { proto.RegisterEnum("workflow.TaskState", TaskState_name, TaskState_value) } -func init() { proto.RegisterFile("workflow.proto", fileDescriptor_workflow_cc5eebeb403313d8) } +func init() { proto.RegisterFile("workflow.proto", fileDescriptor_workflow_daba593a7423a6c7) } -var fileDescriptor_workflow_cc5eebeb403313d8 = []byte{ +var fileDescriptor_workflow_daba593a7423a6c7 = []byte{ // 517 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x6f, 0x8b, 0xd3, 0x4e, 0x10, 0xfe, 0x25, 0x6d, 0xae, 0xe9, 0xa4, 0x97, 0x2b, 0xf3, 0x3b, 0x30, 0x16, 0xd4, 0x5a, 0x94, diff --git a/go/vt/schemamanager/schemaswap/schema_swap.go b/go/vt/schemamanager/schemaswap/schema_swap.go index 6b39ccfb21..2e8e232b54 100644 --- a/go/vt/schemamanager/schemaswap/schema_swap.go +++ b/go/vt/schemamanager/schemaswap/schema_swap.go @@ -459,7 +459,7 @@ func (schemaSwap *Swap) stopAllHealthWatchers() { } // initializeSwap starts the schema swap process. If there is already a schema swap process started -// the the method just picks up that. Otherwise it starts a new one and writes into the database that +// the method just picks up that. Otherwise it starts a new one and writes into the database that // the process was started. 
func (schemaSwap *Swap) initializeSwap() error { var waitGroup sync.WaitGroup diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go index 1c459cee5b..418a63ab01 100644 --- a/go/vt/sqlparser/analyzer.go +++ b/go/vt/sqlparser/analyzer.go @@ -31,6 +31,7 @@ import ( ) // These constants are used to identify the SQL statement type. +// Changing this list will require reviewing all calls to Preview. const ( StmtSelect = iota StmtStream diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index df3dfd75dd..1fa69deceb 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -1031,8 +1031,8 @@ type ColumnType struct { // Generic field options. NotNull BoolVal Autoincrement BoolVal - Default *SQLVal - OnUpdate *SQLVal + Default Expr + OnUpdate Expr Comment *SQLVal // Numeric field options @@ -1170,6 +1170,8 @@ func (ct *ColumnType) SQLType() querypb.Type { return sqltypes.Uint64 } return sqltypes.Int64 + case keywordStrings[BOOL], keywordStrings[BOOLEAN]: + return sqltypes.Uint8 case keywordStrings[TEXT]: return sqltypes.Text case keywordStrings[TINYTEXT]: @@ -2151,34 +2153,36 @@ type Expr interface { SQLNode } -func (*AndExpr) iExpr() {} -func (*OrExpr) iExpr() {} -func (*NotExpr) iExpr() {} -func (*ParenExpr) iExpr() {} -func (*ComparisonExpr) iExpr() {} -func (*RangeCond) iExpr() {} -func (*IsExpr) iExpr() {} -func (*ExistsExpr) iExpr() {} -func (*SQLVal) iExpr() {} -func (*NullVal) iExpr() {} -func (BoolVal) iExpr() {} -func (*ColName) iExpr() {} -func (ValTuple) iExpr() {} -func (*Subquery) iExpr() {} -func (ListArg) iExpr() {} -func (*BinaryExpr) iExpr() {} -func (*UnaryExpr) iExpr() {} -func (*IntervalExpr) iExpr() {} -func (*CollateExpr) iExpr() {} -func (*FuncExpr) iExpr() {} -func (*CaseExpr) iExpr() {} -func (*ValuesFuncExpr) iExpr() {} -func (*ConvertExpr) iExpr() {} -func (*SubstrExpr) iExpr() {} -func (*ConvertUsingExpr) iExpr() {} -func (*MatchExpr) iExpr() {} -func (*GroupConcatExpr) iExpr() {} -func (*Default) iExpr() {} +func (*AndExpr) iExpr() {} +func (*OrExpr) iExpr() {} +func (*NotExpr) iExpr() {} +func (*ParenExpr) iExpr() {} +func (*ComparisonExpr) iExpr() {} +func (*RangeCond) iExpr() {} +func (*IsExpr) iExpr() {} +func (*ExistsExpr) iExpr() {} +func (*SQLVal) iExpr() {} +func (*NullVal) iExpr() {} +func (BoolVal) iExpr() {} +func (*ColName) iExpr() {} +func (ValTuple) iExpr() {} +func (*Subquery) iExpr() {} +func (ListArg) iExpr() {} +func (*BinaryExpr) iExpr() {} +func (*UnaryExpr) iExpr() {} +func (*IntervalExpr) iExpr() {} +func (*CollateExpr) iExpr() {} +func (*FuncExpr) iExpr() {} +func (*TimestampFuncExpr) iExpr() {} +func (*CurTimeFuncExpr) iExpr() {} +func (*CaseExpr) iExpr() {} +func (*ValuesFuncExpr) iExpr() {} +func (*ConvertExpr) iExpr() {} +func (*SubstrExpr) iExpr() {} +func (*ConvertUsingExpr) iExpr() {} +func (*MatchExpr) iExpr() {} +func (*GroupConcatExpr) iExpr() {} +func (*Default) iExpr() {} // ReplaceExpr finds the from expression from root // and replaces it with to. If from matches root, @@ -2379,6 +2383,32 @@ func (node *ComparisonExpr) replace(from, to Expr) bool { return replaceExprs(from, to, &node.Left, &node.Right, &node.Escape) } +// IsImpossible returns true if the comparison in the expression can never evaluate to true. +// Note that this is not currently exhaustive to ALL impossible comparisons. 
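An aside on the comment added to analyzer.go above, which warns that changing the Stmt* constant list requires reviewing all calls to Preview: Preview is the lightweight classifier that maps raw SQL to these constants without building a full AST, so callers typically switch on its result. The sketch below shows that call pattern only; the vitess.io import path and the example queries are assumptions for illustration, not part of this patch.

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	for _, sql := range []string{"select 1 from t", "insert into t values (1)", "set autocommit = 1"} {
		// Preview classifies the statement cheaply; its result is compared
		// against the StmtSelect/StmtInsert/... constants defined in analyzer.go.
		switch sqlparser.Preview(sql) {
		case sqlparser.StmtSelect:
			fmt.Println(sql, "-> read path")
		case sqlparser.StmtInsert:
			fmt.Println(sql, "-> write path")
		default:
			fmt.Println(sql, "-> other")
		}
	}
}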
+func (node *ComparisonExpr) IsImpossible() bool { + var left, right *SQLVal + var ok bool + if left, ok = node.Left.(*SQLVal); !ok { + return false + } + if right, ok = node.Right.(*SQLVal); !ok { + return false + } + if node.Operator == NotEqualStr && left.Type == right.Type { + if len(left.Val) != len(right.Val) { + return false + } + + for i := range left.Val { + if left.Val[i] != right.Val[i] { + return false + } + } + return true + } + return false +} + // RangeCond represents a BETWEEN or a NOT BETWEEN expression. type RangeCond struct { Operator string @@ -2842,6 +2872,66 @@ func (node *IntervalExpr) replace(from, to Expr) bool { return replaceExprs(from, to, &node.Expr) } +// TimestampFuncExpr represents the function and arguments for TIMESTAMP{ADD,DIFF} functions. +type TimestampFuncExpr struct { + Name string + Expr1 Expr + Expr2 Expr + Unit string +} + +// Format formats the node. +func (node *TimestampFuncExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%s(%s, %v, %v)", node.Name, node.Unit, node.Expr1, node.Expr2) +} + +func (node *TimestampFuncExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr1, + node.Expr2, + ) +} + +func (node *TimestampFuncExpr) replace(from, to Expr) bool { + if replaceExprs(from, to, &node.Expr1) { + return true + } + if replaceExprs(from, to, &node.Expr2) { + return true + } + return false +} + +// CurTimeFuncExpr represents the function and arguments for CURRENT DATE/TIME functions +// supported functions are documented in the grammar +type CurTimeFuncExpr struct { + Name ColIdent + Fsp Expr // fractional seconds precision, integer from 0 to 6 +} + +// Format formats the node. +func (node *CurTimeFuncExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%s(%v)", node.Name.String(), node.Fsp) +} + +func (node *CurTimeFuncExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Fsp, + ) +} + +func (node *CurTimeFuncExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Fsp) +} + // CollateExpr represents dynamic collate operator. 
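A minimal usage sketch for the ComparisonExpr.IsImpossible helper implemented above, showing the kind of short-circuit a caller such as a query planner could perform. It is not part of the patch; it assumes the package's exported Parse entry point, the Select/Where/ComparisonExpr AST types, and the vitess.io import path.

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// Two identical string literals compared with "!=" can never be true,
	// which is exactly the literal-vs-literal case IsImpossible detects.
	stmt, err := sqlparser.Parse("select 1 from t where 'a' != 'a'")
	if err != nil {
		panic(err)
	}
	sel := stmt.(*sqlparser.Select)
	if cmp, ok := sel.Where.Expr.(*sqlparser.ComparisonExpr); ok {
		fmt.Println("impossible predicate:", cmp.IsImpossible()) // true
	}
}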
type CollateExpr struct { Expr Expr diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go index 08bd05d96d..cf66c25400 100644 --- a/go/vt/sqlparser/ast_test.go +++ b/go/vt/sqlparser/ast_test.go @@ -412,6 +412,35 @@ func TestIsAggregate(t *testing.T) { } } +func TestIsImpossible(t *testing.T) { + f := ComparisonExpr{ + Operator: NotEqualStr, + Left: newIntVal("1"), + Right: newIntVal("1"), + } + if !f.IsImpossible() { + t.Error("IsImpossible: false, want true") + } + + f = ComparisonExpr{ + Operator: EqualStr, + Left: newIntVal("1"), + Right: newIntVal("1"), + } + if f.IsImpossible() { + t.Error("IsImpossible: true, want false") + } + + f = ComparisonExpr{ + Operator: NotEqualStr, + Left: newIntVal("1"), + Right: newIntVal("2"), + } + if f.IsImpossible() { + t.Error("IsImpossible: true, want false") + } +} + func TestReplaceExpr(t *testing.T) { tcases := []struct { in, out string diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index bdafd2061a..1b71b1d85c 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -425,8 +425,50 @@ var ( input: "select /* function with distinct */ count(distinct a) from t", }, { input: "select /* if as func */ 1 from t where a = if(b)", + }, { + input: "select /* current_timestamp */ current_timestamp() from t", }, { input: "select /* current_timestamp as func */ current_timestamp() from t", + }, { + input: "select /* current_timestamp with fsp */ current_timestamp(3) from t", + }, { + input: "select /* current_date */ current_date() from t", + }, { + input: "select /* current_date as func */ current_date() from t", + }, { + input: "select /* current_time */ current_time() from t", + }, { + input: "select /* current_time as func */ current_time() from t", + }, { + input: "select /* current_time with fsp */ current_time(1) from t", + }, { + input: "select /* utc_timestamp */ utc_timestamp() from t", + }, { + input: "select /* utc_timestamp as func */ utc_timestamp() from t", + }, { + input: "select /* utc_timestamp with fsp */ utc_timestamp(0) from t", + }, { + input: "select /* utc_time */ utc_time() from t", + }, { + input: "select /* utc_time as func */ utc_time() from t", + }, { + input: "select /* utc_time with fsp */ utc_time(4) from t", + }, { + input: "select /* utc_date */ utc_date() from t", + }, { + input: "select /* utc_date as func */ utc_date() from t", + }, { + input: "select /* localtime */ localtime() from t", + }, { + input: "select /* localtime as func */ localtime() from t", + }, { + input: "select /* localtime with fsp */ localtime(5) from t", + }, { + input: "select /* localtimestamp */ localtimestamp() from t", + }, { + input: "select /* localtimestamp as func */ localtimestamp() from t", + }, { + input: "select /* localtimestamp with fsp */ localtimestamp(7) from t", }, { input: "select /* mod as func */ a from tab where mod(b, 2) = 0", }, { @@ -532,6 +574,12 @@ var ( input: "select /* interval */ adddate('2008-01-02', interval 31 day) from t", }, { input: "select /* interval keyword */ adddate('2008-01-02', interval 1 year) from t", + }, { + input: "select /* TIMESTAMPADD */ TIMESTAMPADD(MINUTE, 1, '2008-01-04') from t", + output: "select /* TIMESTAMPADD */ timestampadd(MINUTE, 1, '2008-01-04') from t", + }, { + input: "select /* TIMESTAMPDIFF */ TIMESTAMPDIFF(MINUTE, '2008-01-02', '2008-01-04') from t", + output: "select /* TIMESTAMPDIFF */ timestampdiff(MINUTE, '2008-01-02', '2008-01-04') from t", }, { input: "select /* dual */ 1 from dual", }, { @@ -918,6 +966,9 @@ 
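The parser tests added above exercise the two new AST nodes: TimestampFuncExpr for TIMESTAMPADD/TIMESTAMPDIFF and CurTimeFuncExpr for the CURRENT_*/UTC_*/LOCALTIME* functions with an optional fractional-seconds argument. Below is a hedged sketch of inspecting those nodes after parsing; it assumes the grammar wires these tokens to the new node types as the tests suggest, and the import path and type assertions are illustrative rather than guaranteed by this diff.

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	stmt, err := sqlparser.Parse(
		"select timestampdiff(minute, '2008-01-02', '2008-01-04'), current_timestamp(3) from t")
	if err != nil {
		panic(err)
	}
	sel := stmt.(*sqlparser.Select)

	// First select expression: a TimestampFuncExpr carrying the unit and both operands.
	ts := sel.SelectExprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.TimestampFuncExpr)
	fmt.Println(ts.Name, ts.Unit)

	// Second select expression: a CurTimeFuncExpr whose Fsp field holds the
	// fractional-seconds precision argument.
	ct := sel.SelectExprs[1].(*sqlparser.AliasedExpr).Expr.(*sqlparser.CurTimeFuncExpr)
	fmt.Println(ct.Name.String(), sqlparser.String(ct.Fsp))
}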
var ( }, { input: "create table a (a int, b char, c garbage)", output: "create table a", + }, { + input: "create table a (b1 bool not null primary key, b2 boolean not null)", + output: "create table a (\n\tb1 bool not null primary key,\n\tb2 boolean not null\n)", }, { input: "alter vschema create vindex hash_vdx using hash", }, { @@ -1573,6 +1624,8 @@ func TestKeywords(t *testing.T) { output: "select current_timestamp() from dual", }, { input: "update t set a = current_timestamp()", + }, { + input: "update t set a = current_timestamp(5)", }, { input: "select a, current_date from t", output: "select a, current_date() from t", @@ -1581,18 +1634,26 @@ func TestKeywords(t *testing.T) { output: "insert into t(a, b) values (current_date(), current_date())", }, { input: "select * from t where a > utc_timestmp()", + }, { + input: "select * from t where a > utc_timestamp(4)", }, { input: "update t set b = utc_timestamp + 5", output: "update t set b = utc_timestamp() + 5", }, { - input: "select utc_time, utc_date", - output: "select utc_time(), utc_date() from dual", + input: "select utc_time, utc_date, utc_time(6)", + output: "select utc_time(), utc_date(), utc_time(6) from dual", }, { input: "select 1 from dual where localtime > utc_time", output: "select 1 from dual where localtime() > utc_time()", + }, { + input: "select 1 from dual where localtime(2) > utc_time(1)", + output: "select 1 from dual where localtime(2) > utc_time(1)", }, { input: "update t set a = localtimestamp(), b = utc_timestamp", output: "update t set a = localtimestamp(), b = utc_timestamp()", + }, { + input: "update t set a = localtimestamp(10), b = utc_timestamp(13)", + output: "update t set a = localtimestamp(10), b = utc_timestamp(13)", }, { input: "insert into t(a) values (unix_timestamp)", }, { @@ -1877,28 +1938,6 @@ func TestCreateTable(t *testing.T) { " col_multipolygon2 multipolygon not null\n" + ")", - // test defaults - "create table t (\n" + - " i1 int default 1,\n" + - " i2 int default null,\n" + - " f1 float default 1.23,\n" + - " s1 varchar default 'c',\n" + - " s2 varchar default 'this is a string',\n" + - " s3 varchar default null,\n" + - " s4 timestamp default current_timestamp,\n" + - " s5 bit(1) default B'0'\n" + - ")", - - // test key field options - "create table t (\n" + - " id int auto_increment primary key,\n" + - " username varchar unique key,\n" + - " email varchar unique,\n" + - " full_name varchar key,\n" + - " time1 timestamp on update current_timestamp,\n" + - " time2 timestamp default current_timestamp on update current_timestamp\n" + - ")", - // test defining indexes separately "create table t (\n" + " id int auto_increment,\n" + @@ -2004,6 +2043,13 @@ func TestCreateTable(t *testing.T) { " stats_sample_pages 1,\n" + " tablespace tablespace_name storage disk,\n" + " tablespace tablespace_name\n", + + // boolean columns + "create table t (\n" + + " bi bigint not null primary key,\n" + + " b1 bool not null,\n" + + " b2 boolean\n" + + ")", } for _, sql := range validSQL { sql = strings.TrimSpace(sql) @@ -2049,19 +2095,169 @@ func TestCreateTable(t *testing.T) { " unique key by_username2 (username) key_block_size 8,\n" + " unique by_username3 (username) key_block_size 4\n" + ")", + }, { + // test defaults + input: "create table t (\n" + + " i1 int default 1,\n" + + " i2 int default null,\n" + + " f1 float default 1.23,\n" + + " s1 varchar default 'c',\n" + + " s2 varchar default 'this is a string',\n" + + " s3 varchar default null,\n" + + " s4 timestamp default current_timestamp,\n" + + " s5 bit(1) 
default B'0'\n" + + ")", + output: "create table t (\n" + + " i1 int default 1,\n" + + " i2 int default null,\n" + + " f1 float default 1.23,\n" + + " s1 varchar default 'c',\n" + + " s2 varchar default 'this is a string',\n" + + " s3 varchar default null,\n" + + " s4 timestamp default current_timestamp(),\n" + + " s5 bit(1) default B'0'\n" + + ")", + }, { + // test key field options + input: "create table t (\n" + + " id int auto_increment primary key,\n" + + " username varchar unique key,\n" + + " email varchar unique,\n" + + " full_name varchar key,\n" + + " time1 timestamp on update current_timestamp,\n" + + " time2 timestamp default current_timestamp on update current_timestamp\n" + + ")", + output: "create table t (\n" + + " id int auto_increment primary key,\n" + + " username varchar unique key,\n" + + " email varchar unique,\n" + + " full_name varchar key,\n" + + " time1 timestamp on update current_timestamp(),\n" + + " time2 timestamp default current_timestamp() on update current_timestamp()\n" + + ")", }, { // test current_timestamp with and without () input: "create table t (\n" + " time1 timestamp default current_timestamp,\n" + " time2 timestamp default current_timestamp(),\n" + " time3 timestamp default current_timestamp on update current_timestamp,\n" + - " time4 timestamp default current_timestamp() on update current_timestamp()\n" + + " time4 timestamp default current_timestamp() on update current_timestamp(),\n" + + " time5 timestamp(3) default current_timestamp(3) on update current_timestamp(3)\n" + ")", output: "create table t (\n" + - " time1 timestamp default current_timestamp,\n" + - " time2 timestamp default current_timestamp,\n" + - " time3 timestamp default current_timestamp on update current_timestamp,\n" + - " time4 timestamp default current_timestamp on update current_timestamp\n" + + " time1 timestamp default current_timestamp(),\n" + + " time2 timestamp default current_timestamp(),\n" + + " time3 timestamp default current_timestamp() on update current_timestamp(),\n" + + " time4 timestamp default current_timestamp() on update current_timestamp(),\n" + + " time5 timestamp(3) default current_timestamp(3) on update current_timestamp(3)\n" + + ")", + }, { + // test utc_timestamp with and without () + input: "create table t (\n" + + " time1 timestamp default utc_timestamp,\n" + + " time2 timestamp default utc_timestamp(),\n" + + " time3 timestamp default utc_timestamp on update utc_timestamp,\n" + + " time4 timestamp default utc_timestamp() on update utc_timestamp(),\n" + + " time5 timestamp(4) default utc_timestamp(4) on update utc_timestamp(4)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default utc_timestamp(),\n" + + " time2 timestamp default utc_timestamp(),\n" + + " time3 timestamp default utc_timestamp() on update utc_timestamp(),\n" + + " time4 timestamp default utc_timestamp() on update utc_timestamp(),\n" + + " time5 timestamp(4) default utc_timestamp(4) on update utc_timestamp(4)\n" + + ")", + }, { + // test utc_time with and without () + input: "create table t (\n" + + " time1 timestamp default utc_time,\n" + + " time2 timestamp default utc_time(),\n" + + " time3 timestamp default utc_time on update utc_time,\n" + + " time4 timestamp default utc_time() on update utc_time(),\n" + + " time5 timestamp(5) default utc_time(5) on update utc_time(5)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default utc_time(),\n" + + " time2 timestamp default utc_time(),\n" + + " time3 timestamp default utc_time() on update 
utc_time(),\n" + + " time4 timestamp default utc_time() on update utc_time(),\n" + + " time5 timestamp(5) default utc_time(5) on update utc_time(5)\n" + + ")", + }, { + // test utc_date with and without () + input: "create table t (\n" + + " time1 timestamp default utc_date,\n" + + " time2 timestamp default utc_date(),\n" + + " time3 timestamp default utc_date on update utc_date,\n" + + " time4 timestamp default utc_date() on update utc_date()\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default utc_date(),\n" + + " time2 timestamp default utc_date(),\n" + + " time3 timestamp default utc_date() on update utc_date(),\n" + + " time4 timestamp default utc_date() on update utc_date()\n" + + ")", + }, { + // test localtime with and without () + input: "create table t (\n" + + " time1 timestamp default localtime,\n" + + " time2 timestamp default localtime(),\n" + + " time3 timestamp default localtime on update localtime,\n" + + " time4 timestamp default localtime() on update localtime(),\n" + + " time5 timestamp(6) default localtime(6) on update localtime(6)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default localtime(),\n" + + " time2 timestamp default localtime(),\n" + + " time3 timestamp default localtime() on update localtime(),\n" + + " time4 timestamp default localtime() on update localtime(),\n" + + " time5 timestamp(6) default localtime(6) on update localtime(6)\n" + + ")", + }, { + // test localtimestamp with and without () + input: "create table t (\n" + + " time1 timestamp default localtimestamp,\n" + + " time2 timestamp default localtimestamp(),\n" + + " time3 timestamp default localtimestamp on update localtimestamp,\n" + + " time4 timestamp default localtimestamp() on update localtimestamp(),\n" + + " time5 timestamp(1) default localtimestamp(1) on update localtimestamp(1)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default localtimestamp(),\n" + + " time2 timestamp default localtimestamp(),\n" + + " time3 timestamp default localtimestamp() on update localtimestamp(),\n" + + " time4 timestamp default localtimestamp() on update localtimestamp(),\n" + + " time5 timestamp(1) default localtimestamp(1) on update localtimestamp(1)\n" + + ")", + }, { + // test current_date with and without () + input: "create table t (\n" + + " time1 timestamp default current_date,\n" + + " time2 timestamp default current_date(),\n" + + " time3 timestamp default current_date on update current_date,\n" + + " time4 timestamp default current_date() on update current_date()\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default current_date(),\n" + + " time2 timestamp default current_date(),\n" + + " time3 timestamp default current_date() on update current_date(),\n" + + " time4 timestamp default current_date() on update current_date()\n" + + ")", + }, { + // test current_time with and without () + input: "create table t (\n" + + " time1 timestamp default current_time,\n" + + " time2 timestamp default current_time(),\n" + + " time3 timestamp default current_time on update current_time,\n" + + " time4 timestamp default current_time() on update current_time(),\n" + + " time5 timestamp(2) default current_time(2) on update current_time(2)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default current_time(),\n" + + " time2 timestamp default current_time(),\n" + + " time3 timestamp default current_time() on update current_time(),\n" + + " time4 timestamp default current_time() on update current_time(),\n" + + " time5 
timestamp(2) default current_time(2) on update current_time(2)\n" + ")", }, } diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index 7575306054..446f7e3851 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -65,6 +65,7 @@ type yySymType struct { expr Expr exprs Exprs boolVal BoolVal + sqlVal *SQLVal colTuple ColTuple values Values valTuple ValTuple @@ -85,7 +86,7 @@ type yySymType struct { TableSpec *TableSpec columnType ColumnType colKeyOpt ColumnKeyOption - optVal *SQLVal + optVal Expr LengthScaleOption LengthScaleOption columnDefinition *ColumnDefinition indexDefinition *IndexDefinition @@ -343,14 +344,16 @@ const SUBSTR = 57579 const SUBSTRING = 57580 const GROUP_CONCAT = 57581 const SEPARATOR = 57582 -const MATCH = 57583 -const AGAINST = 57584 -const BOOLEAN = 57585 -const LANGUAGE = 57586 -const WITH = 57587 -const QUERY = 57588 -const EXPANSION = 57589 -const UNUSED = 57590 +const TIMESTAMPADD = 57583 +const TIMESTAMPDIFF = 57584 +const MATCH = 57585 +const AGAINST = 57586 +const BOOLEAN = 57587 +const LANGUAGE = 57588 +const WITH = 57589 +const QUERY = 57590 +const EXPANSION = 57591 +const UNUSED = 57592 var yyToknames = [...]string{ "$end", @@ -610,6 +613,8 @@ var yyToknames = [...]string{ "SUBSTRING", "GROUP_CONCAT", "SEPARATOR", + "TIMESTAMPADD", + "TIMESTAMPDIFF", "MATCH", "AGAINST", "BOOLEAN", @@ -635,1306 +640,1369 @@ var yyExca = [...]int{ 5, 29, -2, 4, -1, 37, - 159, 300, - 160, 300, - -2, 290, - -1, 267, - 112, 636, - -2, 632, + 159, 295, + 160, 295, + -2, 285, -1, 268, - 112, 637, - -2, 633, - -1, 331, - 82, 807, + 112, 640, + -2, 636, + -1, 269, + 112, 641, + -2, 637, + -1, 334, + 82, 814, -2, 60, - -1, 332, - 82, 764, + -1, 335, + 82, 771, -2, 61, - -1, 337, - 82, 743, - -2, 598, - -1, 339, - 82, 785, - -2, 600, - -1, 597, - 1, 352, - 5, 352, - 12, 352, - 13, 352, - 14, 352, - 15, 352, - 17, 352, - 19, 352, - 30, 352, - 31, 352, - 42, 352, - 43, 352, - 44, 352, - 45, 352, - 46, 352, - 48, 352, - 49, 352, - 52, 352, - 53, 352, - 55, 352, - 56, 352, - 266, 352, - -2, 370, - -1, 600, + -1, 340, + 82, 750, + -2, 602, + -1, 342, + 82, 792, + -2, 604, + -1, 611, + 1, 347, + 5, 347, + 12, 347, + 13, 347, + 14, 347, + 15, 347, + 17, 347, + 19, 347, + 30, 347, + 31, 347, + 42, 347, + 43, 347, + 44, 347, + 45, 347, + 46, 347, + 48, 347, + 49, 347, + 52, 347, + 53, 347, + 55, 347, + 56, 347, + 268, 347, + -2, 365, + -1, 614, 53, 43, 55, 43, -2, 45, - -1, 740, - 112, 639, - -2, 635, - -1, 958, + -1, 756, + 112, 643, + -2, 639, + -1, 977, 5, 30, - -2, 436, - -1, 987, + -2, 431, + -1, 1009, 5, 29, - -2, 572, - -1, 1240, - 5, 30, - -2, 573, - -1, 1295, - 5, 29, - -2, 575, - -1, 1376, - 5, 30, -2, 576, + -1, 1261, + 5, 30, + -2, 577, + -1, 1317, + 5, 29, + -2, 579, + -1, 1398, + 5, 30, + -2, 580, } const yyPrivate = 57344 -const yyLast = 12140 +const yyLast = 12766 var yyAct = [...]int{ - 268, 1411, 1401, 1197, 1364, 1079, 272, 990, 562, 844, - 1008, 1274, 1132, 1166, 991, 1260, 57, 821, 246, 1133, - 1129, 1307, 840, 887, 873, 853, 923, 843, 1139, 1033, - 1014, 1102, 81, 561, 3, 819, 208, 336, 1145, 208, - 765, 775, 705, 1059, 1050, 857, 950, 298, 808, 823, - 772, 742, 793, 593, 610, 237, 494, 500, 609, 594, - 255, 330, 883, 514, 325, 801, 435, 208, 81, 327, - 932, 506, 208, 56, 208, 1404, 774, 1388, 1399, 1374, - 1396, 1198, 1387, 1373, 867, 245, 1124, 1234, 440, 1318, - 468, 203, 199, 200, 201, 1161, 1162, 1160, 270, 61, - 238, 239, 240, 241, 834, 259, 244, 576, 488, 1340, - 527, 526, 536, 537, 529, 530, 531, 532, 533, 534, - 535, 528, 243, 906, 538, 63, 
64, 65, 66, 67, - 1174, 1175, 1176, 835, 836, 242, 1041, 905, 1179, 1177, - 1021, 484, 611, 1020, 612, 195, 1022, 197, 866, 485, - 482, 483, 218, 1263, 453, 874, 470, 1280, 472, 1217, - 1215, 236, 477, 478, 680, 910, 487, 1082, 1081, 678, - 1398, 1395, 1365, 1078, 904, 802, 231, 1356, 858, 1419, - 1103, 454, 1415, 442, 1308, 197, 1009, 1011, 469, 471, - 1083, 684, 671, 860, 860, 1345, 208, 1310, 679, 208, - 860, 599, 1155, 1154, 1153, 208, 438, 681, 445, 210, - 198, 208, 202, 1316, 81, 1243, 81, 1105, 81, 81, - 1183, 81, 1034, 81, 901, 898, 899, 211, 897, 436, - 917, 841, 81, 916, 214, 550, 551, 205, 1089, 968, - 196, 1075, 222, 217, 944, 714, 518, 1077, 528, 460, - 1107, 538, 1111, 450, 1106, 538, 1104, 513, 1354, 908, - 911, 1109, 81, 1010, 436, 1309, 502, 706, 326, 711, - 1108, 1184, 70, 437, 220, 439, 467, 963, 1066, 1341, - 230, 1326, 1143, 1110, 1112, 503, 613, 874, 1413, 859, - 859, 1414, 1126, 1412, 1372, 903, 859, 434, 490, 491, - 925, 856, 854, 511, 855, 794, 212, 1064, 71, 852, - 858, 299, 51, 1178, 1317, 1315, 447, 902, 448, 513, - 794, 449, 977, 208, 208, 208, 441, 512, 511, 81, - 1039, 673, 1359, 224, 215, 81, 225, 226, 227, 229, - 508, 228, 234, 1076, 513, 1074, 216, 219, 707, 213, - 233, 232, 504, 512, 511, 1420, 456, 457, 458, 592, - 907, 749, 1378, 51, 531, 532, 533, 534, 535, 528, - 513, 251, 538, 909, 1065, 747, 748, 746, 924, 1070, - 1067, 1060, 1068, 1063, 863, 512, 511, 1061, 1062, 601, - 864, 261, 1128, 1270, 1421, 717, 718, 446, 607, 1380, - 452, 1069, 513, 443, 444, 54, 459, 732, 734, 735, - 1269, 194, 461, 733, 1054, 745, 274, 1053, 466, 578, - 579, 580, 581, 582, 583, 584, 529, 530, 531, 532, - 533, 534, 535, 528, 1042, 208, 538, 1355, 1291, 962, - 81, 961, 1267, 512, 511, 208, 208, 81, 713, 22, - 1086, 208, 1352, 766, 208, 767, 1200, 208, 512, 511, - 513, 208, 1051, 81, 81, 941, 942, 943, 81, 81, - 81, 81, 81, 81, 1034, 513, 322, 323, 81, 81, - 1023, 1029, 1024, 1313, 1397, 712, 333, 526, 536, 537, - 529, 530, 531, 532, 533, 534, 535, 528, 693, 768, - 538, 690, 512, 511, 297, 689, 81, 1383, 493, 250, - 208, 1313, 1368, 1313, 493, 604, 81, 674, 685, 513, - 672, 719, 1313, 1346, 591, 465, 600, 465, 669, 465, - 465, 691, 465, 462, 465, 455, 79, 1313, 1312, 1258, - 1257, 1245, 493, 465, 527, 526, 536, 537, 529, 530, - 531, 532, 533, 534, 535, 528, 740, 605, 538, 603, - 81, 1242, 493, 51, 493, 721, 1190, 1189, 1381, 743, - 1186, 1187, 335, 1186, 1185, 1323, 784, 788, 547, 736, - 1322, 549, 795, 288, 287, 290, 291, 292, 293, 738, - 1319, 208, 289, 294, 956, 493, 1180, 951, 779, 208, - 208, 805, 493, 208, 208, 777, 493, 81, 1015, 560, - 861, 564, 565, 566, 567, 568, 569, 570, 571, 572, - 81, 575, 577, 577, 577, 577, 577, 577, 577, 577, - 585, 586, 587, 588, 1092, 598, 621, 798, 829, 497, - 501, 1142, 780, 781, 791, 804, 675, 676, 790, 769, - 770, 805, 682, 620, 619, 326, 519, 58, 688, 875, - 876, 877, 797, 1015, 799, 800, 827, 832, 831, 972, - 805, 24, 208, 81, 1130, 81, 24, 1142, 956, 81, - 81, 208, 208, 777, 208, 208, 548, 848, 208, 81, - 970, 563, 889, 967, 965, 985, 828, 956, 603, 986, - 574, 492, 1238, 1325, 1294, 208, 1142, 208, 208, 805, - 208, 728, 1188, 971, 869, 870, 871, 872, 335, 54, - 335, 24, 335, 335, 54, 335, 1025, 335, 885, 886, - 880, 881, 882, 833, 969, 956, 335, 966, 964, 603, - 597, 606, 810, 813, 814, 815, 811, 333, 812, 816, - 715, 465, 740, 810, 813, 814, 815, 811, 465, 812, - 816, 683, 54, 1146, 1147, 252, 516, 1389, 1362, 54, - 1276, 1265, 868, 933, 465, 465, 934, 1250, 888, 465, - 465, 465, 465, 465, 465, 1171, 1146, 1147, 1080, 465, - 465, 
1028, 803, 884, 743, 879, 878, 891, 946, 1406, - 1402, 1173, 1149, 940, 1130, 830, 208, 208, 208, 208, - 208, 992, 1055, 54, 709, 687, 727, 1002, 208, 1000, - 1152, 208, 1003, 1151, 1001, 208, 999, 998, 1004, 208, - 814, 815, 1393, 335, 1386, 987, 256, 257, 1088, 615, - 929, 507, 1391, 939, 81, 976, 938, 1046, 618, 463, - 955, 495, 1038, 1026, 779, 1361, 505, 1360, 1292, 993, - 1017, 51, 996, 496, 1016, 1005, 994, 995, 974, 997, - 1036, 1030, 1236, 892, 1013, 1272, 564, 894, 708, 686, - 818, 507, 914, 915, 247, 918, 919, 1035, 1018, 920, - 253, 254, 81, 81, 1043, 1044, 1045, 937, 1047, 1048, - 1049, 1031, 1032, 729, 730, 936, 922, 1330, 248, 58, - 820, 928, 1329, 1278, 598, 1015, 486, 1408, 1407, 1408, - 704, 81, 509, 1342, 1052, 1264, 710, 60, 62, 602, - 55, 1, 1400, 1199, 1273, 900, 208, 739, 744, 1363, - 1306, 1165, 1071, 851, 335, 81, 842, 69, 433, 68, - 1353, 335, 1058, 850, 849, 1314, 563, 1262, 862, 782, - 783, 1040, 865, 1172, 1358, 1085, 1037, 335, 335, 626, - 624, 625, 335, 335, 335, 335, 335, 335, 623, 628, - 627, 622, 335, 335, 465, 221, 465, 328, 817, 81, - 81, 1125, 992, 1131, 1096, 1114, 1095, 614, 1101, 890, - 465, 510, 72, 1113, 1073, 839, 740, 1072, 896, 480, - 723, 481, 223, 81, 546, 597, 935, 1019, 334, 597, - 516, 1141, 1136, 335, 1137, 333, 81, 716, 81, 81, - 499, 1157, 1328, 1277, 1134, 1150, 975, 1164, 845, 573, - 792, 1156, 265, 273, 731, 286, 283, 1159, 285, 284, - 722, 945, 984, 1163, 520, 1168, 208, 271, 263, 596, - 589, 1169, 1170, 809, 771, 807, 1181, 1182, 806, 1148, - 1144, 595, 1091, 208, 786, 786, 1233, 1339, 726, 81, - 786, 26, 81, 81, 208, 59, 258, 19, 18, 17, - 81, 20, 16, 208, 536, 537, 529, 530, 531, 532, - 533, 534, 535, 528, 930, 931, 538, 501, 15, 14, - 451, 335, 1204, 988, 989, 1192, 30, 598, 598, 598, - 598, 598, 1205, 21, 335, 13, 12, 1193, 1206, 1195, - 11, 1213, 820, 739, 1012, 10, 9, 1090, 8, 7, - 598, 6, 5, 4, 249, 23, 2, 0, 0, 992, - 0, 0, 1237, 0, 0, 0, 0, 1247, 1246, 0, - 0, 0, 0, 744, 81, 0, 0, 0, 0, 957, - 0, 0, 81, 1026, 0, 0, 1256, 335, 0, 335, - 0, 0, 0, 912, 913, 0, 978, 0, 1210, 1211, - 0, 1212, 81, 335, 1214, 0, 1216, 0, 0, 81, - 0, 0, 0, 0, 465, 720, 0, 0, 0, 0, - 1266, 0, 1268, 0, 0, 0, 464, 0, 0, 335, - 0, 0, 597, 597, 597, 597, 597, 0, 0, 0, - 0, 0, 465, 0, 0, 0, 1279, 597, 0, 0, - 0, 0, 0, 81, 81, 597, 81, 0, 0, 0, - 0, 81, 1259, 81, 81, 81, 208, 1191, 1293, 81, - 0, 0, 845, 776, 778, 1300, 1301, 0, 1302, 1303, - 1304, 0, 1311, 1305, 1194, 1295, 0, 81, 0, 796, - 0, 0, 0, 0, 0, 1203, 1134, 0, 0, 1320, - 0, 1321, 1327, 0, 0, 0, 0, 0, 1135, 0, - 51, 0, 0, 0, 0, 0, 0, 1343, 0, 786, - 0, 0, 0, 0, 81, 0, 1350, 1087, 552, 553, - 554, 555, 556, 557, 558, 559, 81, 81, 1351, 0, - 1344, 0, 0, 0, 0, 0, 1367, 1366, 0, 1370, - 0, 0, 1134, 0, 0, 0, 81, 0, 335, 992, - 1375, 0, 0, 0, 0, 0, 0, 208, 0, 0, - 0, 0, 0, 1094, 0, 0, 81, 0, 0, 1127, - 0, 0, 0, 0, 1385, 1230, 493, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1119, 1390, 1392, - 81, 0, 0, 0, 0, 0, 1056, 335, 0, 0, - 0, 0, 0, 1405, 598, 1394, 0, 1158, 0, 0, - 1416, 0, 0, 527, 526, 536, 537, 529, 530, 531, - 532, 533, 534, 535, 528, 335, 0, 538, 0, 0, - 1097, 0, 473, 0, 474, 475, 1232, 476, 0, 479, - 0, 0, 0, 0, 845, 0, 845, 0, 489, 335, - 527, 526, 536, 537, 529, 530, 531, 532, 533, 534, - 535, 528, 0, 0, 538, 1252, 1253, 1254, 0, 0, - 953, 0, 0, 335, 954, 0, 0, 0, 0, 0, - 0, 958, 959, 960, 0, 0, 0, 0, 0, 0, - 786, 0, 973, 1138, 1140, 0, 0, 979, 0, 980, - 981, 982, 983, 465, 0, 0, 0, 0, 1094, 597, - 0, 0, 0, 0, 0, 0, 0, 1140, 0, 0, - 1235, 1007, 0, 0, 0, 0, 0, 563, 0, 0, - 335, 0, 335, 1167, 0, 
1248, 0, 0, 1249, 0, - 0, 1251, 0, 0, 0, 0, 0, 0, 0, 0, - 1135, 0, 0, 1296, 0, 0, 0, 0, 1379, 0, - 0, 0, 0, 741, 0, 0, 750, 751, 752, 753, - 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, - 764, 0, 845, 1196, 1324, 0, 1201, 1202, 0, 0, - 0, 0, 0, 0, 335, 0, 0, 498, 0, 0, - 0, 0, 522, 0, 525, 0, 1135, 0, 51, 0, - 539, 540, 541, 542, 543, 544, 545, 1275, 523, 524, - 521, 527, 526, 536, 537, 529, 530, 531, 532, 533, - 534, 535, 528, 206, 0, 538, 235, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 786, 0, 0, - 0, 0, 0, 0, 0, 0, 670, 1100, 0, 0, - 0, 262, 0, 677, 206, 0, 0, 0, 335, 206, - 0, 206, 0, 0, 0, 0, 1261, 0, 0, 694, - 695, 0, 0, 0, 696, 697, 698, 699, 700, 701, - 1227, 493, 0, 0, 702, 703, 335, 0, 0, 0, - 0, 0, 0, 335, 0, 0, 0, 0, 0, 0, - 1403, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1369, 563, 0, 0, 0, 1224, 493, 527, 526, - 536, 537, 529, 530, 531, 532, 533, 534, 535, 528, - 0, 0, 538, 0, 1275, 845, 0, 1297, 1298, 0, - 1299, 0, 0, 0, 0, 1261, 0, 1261, 1261, 1261, - 0, 0, 0, 1167, 527, 526, 536, 537, 529, 530, - 531, 532, 533, 534, 535, 528, 0, 0, 538, 0, - 0, 1261, 0, 0, 0, 0, 0, 0, 0, 0, - 947, 948, 949, 206, 0, 0, 206, 1207, 0, 0, - 0, 0, 206, 0, 1209, 0, 0, 0, 206, 0, - 0, 0, 0, 0, 0, 1218, 1219, 1220, 1357, 1223, - 0, 0, 1226, 0, 1229, 0, 0, 0, 0, 0, - 335, 335, 0, 0, 0, 0, 0, 1239, 1240, 1241, - 0, 1244, 0, 0, 0, 0, 0, 786, 0, 0, - 1377, 0, 0, 0, 0, 0, 0, 0, 1255, 0, - 24, 25, 52, 27, 28, 0, 0, 0, 0, 0, - 1384, 0, 0, 0, 0, 0, 0, 0, 0, 43, - 0, 0, 0, 0, 29, 48, 49, 0, 0, 893, - 0, 895, 0, 0, 1261, 0, 0, 0, 1221, 493, - 0, 0, 0, 0, 38, 921, 0, 0, 54, 0, - 0, 0, 0, 0, 493, 0, 0, 0, 0, 0, - 206, 206, 206, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1290, 527, 526, 536, 537, - 529, 530, 531, 532, 533, 534, 535, 528, 0, 0, - 538, 527, 526, 536, 537, 529, 530, 531, 532, 533, - 534, 535, 528, 0, 0, 538, 0, 0, 0, 31, - 32, 34, 33, 36, 0, 50, 0, 0, 0, 0, - 0, 0, 0, 1331, 1332, 1333, 1334, 1335, 1336, 1337, - 1338, 0, 0, 1231, 1098, 1099, 37, 44, 45, 0, - 0, 46, 47, 35, 1347, 1348, 1349, 1115, 1116, 1117, - 1118, 1228, 1120, 1121, 1122, 1123, 39, 40, 0, 41, - 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 206, 0, 0, 0, 0, 0, 0, 0, - 0, 1371, 206, 206, 1225, 0, 1376, 0, 206, 0, - 0, 206, 0, 0, 206, 0, 0, 0, 692, 0, - 0, 0, 0, 0, 0, 1382, 643, 527, 526, 536, - 537, 529, 530, 531, 532, 533, 534, 535, 528, 0, - 0, 538, 0, 0, 0, 527, 526, 536, 537, 529, - 530, 531, 532, 533, 534, 535, 528, 0, 0, 538, - 0, 0, 0, 0, 0, 53, 0, 206, 0, 1057, - 0, 1417, 1418, 0, 0, 0, 692, 0, 527, 526, - 536, 537, 529, 530, 531, 532, 533, 534, 535, 528, - 0, 0, 538, 0, 0, 0, 0, 1084, 0, 0, - 1208, 0, 0, 0, 631, 527, 526, 536, 537, 529, - 530, 531, 532, 533, 534, 535, 528, 0, 262, 538, - 0, 0, 0, 262, 262, 0, 0, 787, 787, 262, - 1222, 0, 0, 787, 0, 0, 0, 0, 0, 0, - 0, 644, 0, 262, 262, 262, 262, 0, 206, 0, - 0, 0, 0, 0, 0, 0, 206, 825, 0, 0, - 206, 206, 0, 657, 658, 659, 660, 661, 662, 663, - 0, 664, 665, 666, 667, 668, 645, 646, 647, 648, - 629, 630, 0, 0, 632, 0, 633, 634, 635, 636, - 637, 638, 639, 640, 641, 642, 649, 650, 651, 652, - 653, 654, 655, 656, 527, 526, 536, 537, 529, 530, - 531, 532, 533, 534, 535, 528, 0, 0, 538, 0, - 0, 0, 0, 0, 1281, 1282, 952, 1283, 1284, 206, - 1285, 1286, 0, 1287, 1288, 1289, 0, 0, 206, 206, - 0, 206, 206, 0, 0, 206, 527, 526, 536, 537, - 529, 530, 531, 532, 533, 534, 535, 528, 0, 0, - 538, 0, 206, 0, 926, 927, 0, 206, 0, 0, - 0, 0, 692, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 262, 0, 0, 0, 0, 0, + 269, 1432, 1386, 567, 1101, 1012, 1216, 273, 1422, 863, + 1329, 1294, 1156, 1030, 1190, 1013, 286, 247, 840, 1153, + 892, 
299, 1157, 906, 1281, 838, 942, 862, 872, 57, + 566, 3, 81, 1036, 1169, 1163, 209, 1124, 339, 209, + 791, 859, 238, 781, 1055, 788, 721, 969, 1072, 842, + 1081, 876, 809, 827, 624, 758, 607, 499, 505, 790, + 902, 623, 333, 820, 440, 511, 271, 209, 81, 951, + 519, 256, 209, 330, 209, 328, 56, 300, 51, 1425, + 1409, 1420, 246, 1396, 1417, 1217, 1408, 239, 240, 241, + 242, 1395, 1146, 245, 581, 1253, 445, 61, 1185, 1186, + 1184, 582, 608, 473, 260, 853, 925, 1361, 532, 531, + 541, 542, 534, 535, 536, 537, 538, 539, 540, 533, + 924, 493, 543, 63, 64, 65, 66, 67, 471, 51, + 204, 200, 201, 202, 886, 244, 311, 252, 317, 318, + 315, 316, 314, 313, 312, 854, 855, 625, 929, 626, + 243, 1125, 319, 320, 489, 1043, 1063, 923, 1042, 885, + 1284, 1044, 490, 487, 488, 196, 458, 198, 1300, 475, + 893, 477, 1236, 1234, 237, 482, 483, 1104, 1103, 492, + 696, 694, 1419, 1416, 1387, 1100, 821, 1379, 1127, 877, + 1330, 1440, 459, 1031, 1033, 447, 336, 209, 198, 1105, + 209, 474, 476, 1332, 700, 687, 209, 920, 917, 918, + 1088, 916, 209, 879, 695, 81, 1338, 81, 1179, 81, + 81, 1129, 81, 1133, 81, 1128, 1178, 1126, 1436, 275, + 1177, 443, 1131, 81, 1368, 879, 697, 450, 211, 1086, + 879, 1130, 927, 930, 199, 936, 555, 556, 935, 860, + 1264, 203, 1097, 1111, 1132, 1134, 987, 963, 1099, 730, + 197, 523, 465, 81, 1056, 1202, 543, 727, 944, 441, + 1032, 1331, 533, 518, 722, 543, 517, 516, 922, 1362, + 507, 1377, 1347, 508, 1167, 627, 495, 496, 810, 472, + 1148, 441, 470, 518, 470, 689, 470, 470, 893, 470, + 921, 470, 1382, 810, 1394, 996, 1087, 765, 1061, 878, + 470, 1092, 1089, 1082, 1090, 1085, 1203, 1339, 1337, 1083, + 1084, 763, 764, 762, 439, 513, 209, 209, 209, 1400, + 51, 878, 81, 1091, 1434, 1441, 878, 1435, 81, 1433, + 1290, 875, 873, 926, 874, 552, 943, 882, 554, 871, + 877, 509, 606, 883, 1098, 723, 1096, 516, 928, 455, + 536, 537, 538, 539, 540, 533, 1375, 1402, 543, 461, + 462, 463, 1289, 518, 1442, 1076, 565, 446, 569, 570, + 571, 572, 573, 574, 575, 576, 577, 70, 580, 583, + 583, 583, 589, 583, 583, 589, 583, 597, 598, 599, + 600, 601, 602, 615, 612, 621, 54, 584, 586, 588, + 590, 592, 594, 595, 585, 587, 761, 591, 593, 782, + 596, 783, 452, 71, 453, 1075, 1064, 454, 1378, 613, + 541, 542, 534, 535, 536, 537, 538, 539, 540, 533, + 209, 1045, 543, 1046, 981, 81, 980, 748, 750, 751, + 209, 209, 81, 749, 448, 449, 209, 22, 1311, 209, + 336, 195, 209, 517, 516, 206, 209, 1287, 81, 81, + 1108, 733, 734, 81, 81, 81, 81, 81, 81, 1073, + 518, 1219, 982, 81, 81, 534, 535, 536, 537, 538, + 539, 540, 533, 1056, 812, 543, 329, 498, 298, 1051, + 553, 442, 784, 444, 709, 706, 517, 516, 960, 961, + 962, 81, 705, 1150, 690, 209, 688, 251, 685, 517, + 516, 81, 470, 518, 467, 735, 325, 326, 460, 470, + 79, 1344, 517, 516, 701, 707, 518, 1335, 1418, 1404, + 498, 729, 1335, 1390, 1343, 470, 470, 1199, 759, 518, + 470, 470, 470, 470, 470, 470, 611, 1335, 498, 880, + 470, 470, 756, 1335, 1369, 81, 338, 531, 541, 542, + 534, 535, 536, 537, 538, 539, 540, 533, 728, 823, + 543, 737, 800, 804, 1335, 1334, 1279, 1278, 811, 1266, + 498, 752, 1166, 754, 1154, 517, 516, 1166, 81, 81, + 795, 1263, 498, 1102, 824, 209, 289, 288, 291, 292, + 293, 294, 518, 209, 209, 290, 295, 209, 209, 1209, + 1208, 81, 785, 786, 1205, 1206, 451, 1205, 1204, 457, + 796, 797, 51, 618, 81, 464, 806, 975, 498, 824, + 498, 466, 848, 807, 24, 24, 817, 569, 793, 498, + 634, 633, 755, 847, 793, 617, 1259, 1346, 1114, 816, + 1037, 818, 819, 1037, 24, 991, 989, 58, 1007, 894, + 895, 896, 1008, 1316, 824, 619, 1207, 617, 846, 1047, + 852, 986, 1001, 850, 851, 
984, 1000, 975, 209, 81, + 839, 81, 54, 54, 612, 81, 81, 209, 209, 867, + 209, 209, 975, 824, 209, 81, 1166, 975, 908, 990, + 988, 617, 54, 338, 620, 338, 731, 338, 338, 699, + 338, 209, 338, 209, 209, 985, 209, 253, 910, 983, + 54, 338, 829, 832, 833, 834, 830, 1410, 831, 835, + 1296, 497, 904, 905, 743, 887, 1271, 907, 1195, 336, + 1170, 1171, 948, 1050, 903, 605, 760, 614, 898, 897, + 1427, 521, 864, 1423, 1197, 1173, 470, 1154, 470, 756, + 1077, 725, 703, 1176, 1026, 54, 833, 834, 888, 889, + 890, 891, 470, 1024, 759, 1175, 1022, 1021, 1025, 1020, + 952, 1023, 1414, 953, 899, 900, 901, 257, 258, 1407, + 1110, 512, 959, 532, 531, 541, 542, 534, 535, 536, + 537, 538, 539, 540, 533, 1412, 510, 543, 965, 958, + 957, 1068, 500, 632, 468, 209, 209, 209, 209, 209, + 338, 1014, 1384, 964, 501, 1060, 629, 209, 1383, 1314, + 209, 1058, 611, 1052, 209, 1257, 611, 1292, 209, 974, + 913, 1009, 702, 837, 254, 255, 970, 512, 956, 755, + 248, 995, 1351, 81, 249, 58, 955, 993, 1350, 635, + 795, 1298, 1048, 1038, 1037, 491, 1429, 1428, 1429, 691, + 692, 1015, 1039, 720, 1018, 698, 266, 514, 329, 1027, + 1365, 704, 1285, 1035, 726, 60, 62, 616, 1010, 1011, + 55, 1, 612, 612, 612, 612, 612, 1040, 1421, 1065, + 1066, 81, 81, 1218, 1293, 919, 1385, 839, 1328, 1034, + 1189, 1057, 870, 861, 69, 612, 1053, 1054, 1016, 1017, + 438, 1019, 68, 1376, 1067, 869, 1069, 1070, 1071, 868, + 81, 1336, 1283, 338, 744, 881, 1062, 1074, 884, 1196, + 338, 1059, 1381, 640, 638, 209, 639, 637, 1080, 642, + 641, 636, 222, 331, 81, 836, 338, 338, 1093, 628, + 909, 338, 338, 338, 338, 338, 338, 515, 72, 1095, + 1094, 338, 338, 915, 485, 486, 224, 551, 1107, 470, + 954, 864, 760, 1041, 829, 832, 833, 834, 830, 337, + 831, 835, 1161, 736, 1170, 1171, 732, 504, 1349, 739, + 1117, 81, 81, 1155, 1118, 1014, 1123, 470, 1297, 521, + 1136, 994, 338, 1147, 578, 1135, 756, 808, 274, 747, + 1158, 287, 284, 285, 822, 81, 738, 1006, 525, 272, + 264, 1160, 610, 603, 828, 826, 1165, 849, 81, 825, + 81, 81, 1172, 1174, 611, 611, 611, 611, 611, 1188, + 1168, 792, 794, 787, 609, 1113, 1181, 1252, 1360, 611, + 1180, 742, 1183, 802, 802, 1187, 26, 611, 209, 802, + 1192, 59, 259, 19, 18, 17, 1159, 20, 51, 16, + 15, 14, 1116, 456, 30, 209, 814, 815, 1193, 1194, + 21, 81, 13, 12, 81, 81, 209, 1200, 1201, 11, + 10, 9, 81, 8, 7, 209, 1141, 911, 6, 338, + 5, 4, 250, 23, 1211, 2, 933, 934, 0, 937, + 938, 0, 338, 939, 1223, 0, 1212, 0, 1214, 0, + 1225, 0, 0, 0, 0, 0, 0, 1224, 0, 0, + 941, 0, 0, 1232, 0, 947, 0, 0, 0, 0, + 0, 0, 0, 557, 558, 559, 560, 561, 562, 563, + 564, 0, 0, 0, 1014, 1258, 864, 0, 864, 0, + 0, 0, 1268, 0, 0, 0, 0, 338, 81, 338, + 0, 0, 612, 931, 932, 1267, 81, 1048, 0, 1277, + 0, 0, 0, 338, 0, 0, 0, 262, 0, 0, + 0, 81, 0, 0, 0, 0, 0, 0, 81, 0, + 0, 0, 0, 0, 1251, 1286, 0, 1288, 0, 338, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1116, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1299, 0, 0, 0, 1273, 1274, 1275, 0, 0, + 1229, 1230, 0, 1231, 81, 81, 1233, 81, 1235, 0, + 0, 0, 81, 0, 81, 81, 81, 209, 0, 972, + 81, 1158, 1315, 973, 0, 1322, 0, 0, 470, 0, + 977, 978, 979, 1317, 1333, 1327, 1323, 81, 1324, 1325, + 1326, 992, 1340, 0, 0, 0, 998, 0, 999, 0, + 0, 1002, 1003, 1004, 1005, 0, 864, 0, 0, 0, + 0, 1348, 802, 0, 0, 0, 1280, 0, 1341, 1366, + 1342, 0, 0, 1029, 0, 0, 81, 1159, 1158, 1374, + 1318, 1373, 0, 0, 611, 0, 1295, 81, 81, 1367, + 0, 0, 0, 0, 0, 1392, 0, 1389, 1388, 0, + 0, 338, 0, 0, 0, 0, 0, 0, 0, 81, + 1345, 1397, 0, 1014, 0, 0, 0, 0, 0, 0, + 209, 0, 0, 0, 1112, 0, 0, 0, 81, 0, + 219, 0, 0, 0, 1159, 1406, 51, 0, 0, 0, + 0, 1249, 498, 0, 0, 0, 0, 1411, 
0, 1078, + 338, 81, 757, 1413, 232, 766, 767, 768, 769, 770, + 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, + 1426, 1437, 0, 0, 0, 1415, 0, 0, 338, 532, + 531, 541, 542, 534, 535, 536, 537, 538, 539, 540, + 533, 0, 0, 543, 0, 0, 502, 506, 0, 0, + 0, 0, 338, 0, 0, 212, 0, 0, 0, 1122, + 813, 0, 215, 524, 0, 1295, 864, 0, 0, 0, + 223, 218, 0, 0, 0, 0, 338, 0, 0, 0, + 503, 0, 0, 469, 0, 0, 0, 1424, 0, 0, + 0, 0, 0, 0, 0, 0, 802, 1210, 568, 1162, + 1164, 0, 221, 0, 0, 0, 0, 579, 231, 0, + 0, 0, 0, 0, 1213, 0, 207, 0, 0, 236, + 0, 0, 0, 1164, 0, 1222, 0, 0, 0, 0, + 0, 0, 0, 0, 213, 0, 338, 0, 338, 1191, + 0, 0, 0, 0, 263, 0, 0, 207, 0, 0, + 0, 0, 207, 0, 207, 0, 0, 0, 0, 0, + 0, 225, 216, 0, 226, 227, 228, 230, 0, 229, + 235, 0, 0, 0, 217, 220, 0, 214, 234, 233, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1215, + 0, 0, 1220, 1221, 0, 0, 0, 0, 0, 0, + 338, 0, 1226, 0, 0, 0, 0, 0, 0, 1228, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1237, 1238, 1239, 0, 1242, 0, 0, 1245, 0, 1248, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 966, 967, 968, 0, 1260, 1261, 1262, 0, 1265, 0, + 0, 0, 0, 0, 0, 802, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1276, 1250, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 338, 207, 0, 0, + 207, 1246, 498, 0, 1282, 0, 207, 0, 0, 724, + 0, 0, 207, 0, 0, 0, 0, 0, 0, 338, + 478, 0, 479, 480, 0, 481, 338, 484, 0, 0, + 0, 0, 0, 0, 745, 746, 494, 0, 0, 532, + 531, 541, 542, 534, 535, 536, 537, 538, 539, 540, + 533, 0, 0, 543, 0, 0, 0, 1310, 0, 0, + 532, 531, 541, 542, 534, 535, 536, 537, 538, 539, + 540, 533, 1319, 1320, 543, 1321, 0, 0, 0, 0, + 1282, 0, 1282, 1282, 1282, 0, 0, 568, 1191, 0, + 798, 799, 0, 0, 1243, 498, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1282, 1352, 1353, 1354, 1355, + 1356, 1357, 1358, 1359, 0, 0, 0, 1363, 1364, 1401, + 0, 0, 0, 0, 0, 0, 207, 207, 207, 1370, + 1371, 1372, 532, 531, 541, 542, 534, 535, 536, 537, + 538, 539, 540, 533, 1380, 858, 543, 0, 0, 0, + 0, 0, 0, 0, 0, 338, 338, 0, 0, 0, + 0, 0, 0, 0, 0, 1393, 0, 1120, 1121, 0, + 0, 0, 1398, 0, 802, 0, 0, 1399, 0, 0, + 1137, 1138, 1139, 1140, 0, 1142, 1143, 1144, 1145, 0, + 1403, 0, 0, 0, 0, 0, 1405, 1151, 1152, 0, + 0, 0, 0, 0, 527, 0, 530, 0, 0, 0, + 0, 0, 544, 545, 546, 547, 548, 549, 550, 1282, + 528, 529, 526, 532, 531, 541, 542, 534, 535, 536, + 537, 538, 539, 540, 533, 1438, 1439, 543, 0, 0, + 207, 0, 0, 0, 0, 0, 949, 950, 686, 506, + 207, 207, 1240, 498, 0, 693, 207, 1198, 0, 207, + 0, 0, 207, 0, 0, 0, 708, 0, 0, 0, + 0, 710, 711, 0, 0, 0, 712, 713, 714, 715, + 716, 717, 0, 0, 0, 0, 718, 719, 0, 0, + 532, 531, 541, 542, 534, 535, 536, 537, 538, 539, + 540, 533, 498, 0, 543, 0, 0, 0, 0, 1247, + 0, 976, 0, 0, 0, 207, 1227, 0, 0, 0, + 0, 0, 0, 0, 708, 0, 0, 0, 997, 0, + 0, 0, 0, 0, 1119, 0, 0, 0, 0, 532, + 531, 541, 542, 534, 535, 536, 537, 538, 539, 540, + 533, 0, 0, 543, 532, 531, 541, 542, 534, 535, + 536, 537, 538, 539, 540, 533, 263, 0, 543, 0, + 0, 263, 263, 0, 0, 803, 803, 263, 0, 0, + 0, 803, 1256, 532, 531, 541, 542, 534, 535, 536, + 537, 538, 539, 540, 533, 0, 0, 543, 0, 0, + 263, 263, 263, 263, 0, 207, 0, 0, 0, 0, + 0, 0, 0, 207, 844, 0, 0, 207, 207, 0, + 532, 531, 541, 542, 534, 535, 536, 537, 538, 539, + 540, 533, 1255, 0, 543, 0, 0, 1301, 1302, 0, + 1303, 1304, 0, 1305, 1306, 0, 1307, 1308, 1309, 0, + 0, 0, 1312, 1313, 0, 0, 0, 0, 0, 0, + 0, 0, 1109, 0, 0, 0, 0, 0, 0, 0, + 532, 531, 541, 542, 534, 535, 536, 537, 538, 539, + 540, 533, 0, 0, 543, 1244, 0, 0, 207, 0, + 0, 0, 912, 0, 914, 0, 0, 207, 207, 0, + 207, 207, 0, 1241, 207, 0, 0, 0, 940, 0, + 0, 0, 0, 0, 1149, 0, 0, 0, 0, 0, + 0, 207, 0, 945, 946, 0, 207, 0, 0, 0, + 0, 708, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 263, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1182, 0, 0, 657, 532, + 531, 541, 542, 534, 535, 536, 537, 538, 539, 540, + 533, 0, 971, 543, 0, 0, 0, 532, 531, 541, + 542, 534, 535, 536, 537, 538, 539, 540, 533, 0, + 263, 543, 532, 531, 541, 542, 534, 535, 536, 537, + 538, 539, 540, 533, 0, 0, 543, 0, 263, 532, + 531, 541, 542, 534, 535, 536, 537, 538, 539, 540, + 533, 1430, 0, 543, 803, 207, 207, 207, 207, 207, + 0, 0, 0, 0, 0, 0, 645, 1028, 0, 0, + 207, 0, 0, 0, 844, 0, 0, 0, 207, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1254, 0, + 0, 0, 0, 658, 0, 0, 0, 568, 0, 0, + 0, 0, 0, 0, 0, 1269, 0, 0, 1270, 0, + 0, 1272, 0, 0, 0, 671, 674, 675, 676, 677, + 678, 679, 0, 680, 681, 682, 683, 684, 659, 660, + 661, 662, 643, 644, 672, 1079, 646, 0, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 663, 664, + 665, 666, 667, 668, 669, 670, 0, 0, 0, 0, + 0, 0, 0, 1106, 0, 24, 25, 52, 27, 28, + 0, 0, 0, 0, 0, 207, 0, 0, 0, 0, + 0, 0, 0, 0, 43, 263, 0, 0, 0, 29, + 48, 49, 0, 0, 0, 0, 263, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 708, 38, + 673, 0, 0, 54, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 803, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 262, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 262, + 0, 0, 0, 0, 31, 32, 34, 33, 36, 0, + 50, 0, 0, 0, 0, 0, 0, 0, 1391, 568, + 0, 0, 0, 0, 0, 0, 0, 0, 207, 0, + 0, 37, 44, 45, 0, 0, 46, 47, 35, 0, + 0, 0, 0, 0, 0, 207, 0, 0, 0, 0, + 0, 39, 40, 0, 41, 42, 207, 0, 0, 0, + 0, 0, 0, 0, 0, 207, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 787, 206, 206, 206, 206, 206, 1271, 0, - 0, 0, 0, 0, 0, 1006, 0, 0, 206, 0, - 0, 0, 825, 0, 0, 0, 206, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1409, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 803, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1291, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 206, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 262, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 262, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 692, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 787, 138, 0, 0, 0, 0, 269, - 0, 0, 0, 102, 0, 266, 0, 0, 0, 119, - 309, 121, 0, 0, 158, 130, 0, 0, 0, 0, - 300, 301, 0, 0, 0, 0, 0, 0, 837, 0, - 54, 0, 0, 267, 288, 287, 290, 291, 292, 293, - 0, 0, 95, 289, 294, 295, 296, 838, 0, 0, - 264, 281, 0, 308, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 206, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 278, 279, 0, 0, 0, 0, 320, - 206, 280, 0, 0, 275, 276, 277, 282, 0, 0, - 0, 206, 0, 0, 0, 0, 0, 108, 0, 0, - 206, 209, 0, 0, 318, 0, 145, 0, 161, 110, - 118, 83, 89, 0, 109, 136, 150, 154, 0, 0, - 0, 98, 0, 152, 140, 174, 0, 141, 151, 122, - 166, 146, 173, 181, 182, 163, 180, 189, 84, 162, - 172, 96, 155, 86, 170, 160, 128, 114, 115, 85, - 787, 149, 101, 106, 100, 137, 167, 168, 99, 192, - 90, 179, 88, 91, 178, 135, 165, 171, 129, 126, - 87, 169, 127, 125, 117, 104, 111, 143, 124, 144, - 112, 132, 131, 133, 0, 0, 0, 159, 176, 193, - 93, 0, 164, 183, 184, 185, 186, 187, 188, 0, - 0, 94, 107, 103, 142, 134, 92, 113, 156, 116, - 123, 148, 191, 139, 153, 97, 175, 157, 310, 319, - 316, 317, 314, 315, 313, 312, 311, 321, 302, 303, - 304, 305, 307, 0, 306, 82, 0, 120, 
190, 147, - 105, 177, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 825, 0, 0, 0, 421, 412, 0, - 383, 424, 361, 375, 432, 376, 377, 405, 347, 391, - 138, 373, 0, 364, 342, 370, 343, 362, 385, 102, - 388, 360, 414, 394, 423, 119, 430, 121, 399, 0, - 158, 130, 0, 0, 387, 416, 389, 410, 382, 406, - 352, 398, 425, 374, 403, 426, 0, 0, 0, 80, - 0, 846, 847, 0, 0, 0, 0, 0, 95, 0, - 401, 420, 372, 402, 404, 341, 400, 0, 345, 348, - 431, 418, 367, 368, 1027, 0, 0, 0, 0, 0, - 787, 386, 390, 407, 380, 0, 0, 0, 0, 0, - 0, 0, 0, 365, 206, 397, 0, 0, 0, 349, - 346, 0, 0, 384, 0, 0, 0, 351, 0, 366, - 408, 0, 340, 108, 411, 417, 381, 209, 419, 379, - 378, 422, 145, 0, 161, 110, 118, 83, 89, 0, - 109, 136, 150, 154, 415, 363, 371, 98, 369, 152, - 140, 174, 396, 141, 151, 122, 166, 146, 173, 181, - 182, 163, 180, 189, 84, 162, 172, 96, 155, 86, - 170, 160, 128, 114, 115, 85, 0, 149, 101, 106, - 100, 137, 167, 168, 99, 192, 90, 179, 88, 91, - 178, 135, 165, 171, 129, 126, 87, 169, 127, 125, - 117, 104, 111, 143, 124, 144, 112, 132, 131, 133, - 0, 344, 0, 159, 176, 193, 93, 359, 164, 183, - 184, 185, 186, 187, 188, 0, 0, 94, 107, 103, - 142, 134, 92, 113, 156, 116, 123, 148, 191, 139, - 153, 97, 175, 157, 355, 358, 353, 354, 392, 393, - 427, 428, 429, 409, 350, 0, 356, 357, 0, 413, - 395, 82, 0, 120, 190, 147, 105, 177, 421, 412, - 0, 383, 424, 361, 375, 432, 376, 377, 405, 347, - 391, 138, 373, 0, 364, 342, 370, 343, 362, 385, - 102, 388, 360, 414, 394, 423, 119, 430, 121, 399, - 0, 158, 130, 0, 0, 387, 416, 389, 410, 382, - 406, 352, 398, 425, 374, 403, 426, 0, 0, 0, - 80, 0, 846, 847, 0, 0, 0, 0, 0, 95, - 0, 401, 420, 372, 402, 404, 341, 400, 0, 345, - 348, 431, 418, 367, 368, 0, 0, 0, 0, 0, - 0, 0, 386, 390, 407, 380, 0, 0, 0, 0, - 0, 0, 0, 0, 365, 0, 397, 0, 0, 0, - 349, 346, 0, 0, 384, 0, 0, 0, 351, 0, - 366, 408, 0, 340, 108, 411, 417, 381, 209, 419, - 379, 378, 422, 145, 0, 161, 110, 118, 83, 89, - 0, 109, 136, 150, 154, 415, 363, 371, 98, 369, - 152, 140, 174, 396, 141, 151, 122, 166, 146, 173, - 181, 182, 163, 180, 189, 84, 162, 172, 96, 155, - 86, 170, 160, 128, 114, 115, 85, 0, 149, 101, - 106, 100, 137, 167, 168, 99, 192, 90, 179, 88, - 91, 178, 135, 165, 171, 129, 126, 87, 169, 127, - 125, 117, 104, 111, 143, 124, 144, 112, 132, 131, - 133, 0, 344, 0, 159, 176, 193, 93, 359, 164, - 183, 184, 185, 186, 187, 188, 0, 0, 94, 107, - 103, 142, 134, 92, 113, 156, 116, 123, 148, 191, - 139, 153, 97, 175, 157, 355, 358, 353, 354, 392, - 393, 427, 428, 429, 409, 350, 0, 356, 357, 0, - 413, 395, 82, 0, 120, 190, 147, 105, 177, 421, - 412, 0, 383, 424, 361, 375, 432, 376, 377, 405, - 347, 391, 138, 373, 0, 364, 342, 370, 343, 362, - 385, 102, 388, 360, 414, 394, 423, 119, 430, 121, - 399, 0, 158, 130, 0, 0, 387, 416, 389, 410, - 382, 406, 352, 398, 425, 374, 403, 426, 54, 0, - 0, 80, 0, 0, 0, 0, 0, 0, 0, 0, - 95, 0, 401, 420, 372, 402, 404, 341, 400, 0, - 345, 348, 431, 418, 367, 368, 0, 0, 0, 0, - 0, 0, 0, 386, 390, 407, 380, 0, 0, 0, - 0, 0, 0, 0, 0, 365, 0, 397, 0, 0, - 0, 349, 346, 0, 0, 384, 0, 0, 0, 351, - 0, 366, 408, 0, 340, 108, 411, 417, 381, 209, - 419, 379, 378, 422, 145, 0, 161, 110, 118, 83, - 89, 0, 109, 136, 150, 154, 415, 363, 371, 98, - 369, 152, 140, 174, 396, 141, 151, 122, 166, 146, - 173, 181, 182, 163, 180, 189, 84, 162, 172, 96, - 155, 86, 170, 160, 128, 114, 115, 85, 0, 149, - 101, 106, 100, 137, 167, 168, 99, 192, 90, 179, - 88, 91, 178, 135, 165, 171, 129, 126, 87, 169, - 127, 125, 117, 104, 111, 143, 124, 144, 112, 132, - 131, 133, 0, 344, 0, 159, 176, 193, 
93, 359, - 164, 183, 184, 185, 186, 187, 188, 0, 0, 94, - 107, 103, 142, 134, 92, 113, 156, 116, 123, 148, - 191, 139, 153, 97, 175, 157, 355, 358, 353, 354, - 392, 393, 427, 428, 429, 409, 350, 0, 356, 357, - 0, 413, 395, 82, 0, 120, 190, 147, 105, 177, - 421, 412, 0, 383, 424, 361, 375, 432, 376, 377, - 405, 347, 391, 138, 373, 0, 364, 342, 370, 343, - 362, 385, 102, 388, 360, 414, 394, 423, 119, 430, - 121, 399, 0, 158, 130, 0, 0, 387, 416, 389, - 410, 382, 406, 352, 398, 425, 374, 403, 426, 0, + 0, 0, 0, 0, 0, 426, 415, 844, 386, 429, + 364, 378, 437, 379, 380, 408, 350, 394, 139, 376, + 0, 367, 345, 373, 346, 365, 388, 103, 391, 363, + 417, 397, 428, 120, 435, 122, 402, 0, 159, 131, + 0, 0, 390, 419, 392, 413, 385, 409, 355, 401, + 430, 377, 406, 431, 0, 0, 0, 80, 0, 865, + 866, 0, 0, 0, 0, 0, 96, 0, 404, 425, + 375, 405, 407, 344, 403, 0, 348, 351, 436, 421, + 370, 371, 1049, 0, 0, 0, 0, 0, 0, 389, + 393, 410, 383, 0, 0, 0, 803, 0, 0, 0, + 0, 368, 0, 400, 0, 0, 0, 352, 349, 0, + 207, 387, 0, 0, 0, 354, 0, 369, 411, 0, + 343, 109, 414, 420, 384, 210, 424, 382, 381, 427, + 146, 0, 162, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 418, 366, 374, 99, 372, 153, 141, 175, + 399, 142, 152, 123, 167, 147, 174, 182, 183, 164, + 181, 190, 84, 163, 173, 97, 156, 86, 171, 161, + 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, + 168, 169, 100, 193, 91, 180, 88, 92, 179, 136, + 166, 172, 130, 127, 87, 170, 128, 126, 118, 105, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 347, + 0, 160, 177, 194, 94, 362, 165, 184, 185, 186, + 187, 188, 189, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 157, 117, 124, 149, 192, 140, 154, 98, + 176, 158, 358, 361, 356, 357, 395, 396, 432, 433, + 434, 412, 353, 0, 359, 360, 0, 416, 422, 423, + 398, 82, 89, 121, 191, 148, 106, 178, 426, 415, + 0, 386, 429, 364, 378, 437, 379, 380, 408, 350, + 394, 139, 376, 0, 367, 345, 373, 346, 365, 388, + 103, 391, 363, 417, 397, 428, 120, 435, 122, 402, + 0, 159, 131, 0, 0, 390, 419, 392, 413, 385, + 409, 355, 401, 430, 377, 406, 431, 0, 0, 0, + 80, 0, 865, 866, 0, 0, 0, 0, 0, 96, + 0, 404, 425, 375, 405, 407, 344, 403, 0, 348, + 351, 436, 421, 370, 371, 0, 0, 0, 0, 0, + 0, 0, 389, 393, 410, 383, 0, 0, 0, 0, + 0, 0, 0, 0, 368, 0, 400, 0, 0, 0, + 352, 349, 0, 0, 387, 0, 0, 0, 354, 0, + 369, 411, 0, 343, 109, 414, 420, 384, 210, 424, + 382, 381, 427, 146, 0, 162, 111, 119, 83, 90, + 0, 110, 137, 151, 155, 418, 366, 374, 99, 372, + 153, 141, 175, 399, 142, 152, 123, 167, 147, 174, + 182, 183, 164, 181, 190, 84, 163, 173, 97, 156, + 86, 171, 161, 129, 115, 116, 85, 0, 150, 102, + 107, 101, 138, 168, 169, 100, 193, 91, 180, 88, + 92, 179, 136, 166, 172, 130, 127, 87, 170, 128, + 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, + 134, 0, 347, 0, 160, 177, 194, 94, 362, 165, + 184, 185, 186, 187, 188, 189, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 157, 117, 124, 149, 192, + 140, 154, 98, 176, 158, 358, 361, 356, 357, 395, + 396, 432, 433, 434, 412, 353, 0, 359, 360, 0, + 416, 422, 423, 398, 82, 89, 121, 191, 148, 106, + 178, 426, 415, 0, 386, 429, 364, 378, 437, 379, + 380, 408, 350, 394, 139, 376, 0, 367, 345, 373, + 346, 365, 388, 103, 391, 363, 417, 397, 428, 120, + 435, 122, 402, 0, 159, 131, 0, 0, 390, 419, + 392, 413, 385, 409, 355, 401, 430, 377, 406, 431, + 54, 0, 0, 80, 0, 0, 0, 0, 0, 0, + 0, 0, 96, 0, 404, 425, 375, 405, 407, 344, + 403, 0, 348, 351, 436, 421, 370, 371, 0, 0, + 0, 0, 0, 0, 0, 389, 393, 410, 383, 0, + 0, 0, 0, 0, 0, 0, 0, 368, 0, 400, + 0, 0, 0, 352, 349, 0, 0, 387, 0, 0, + 0, 354, 0, 369, 411, 0, 343, 109, 414, 420, + 
384, 210, 424, 382, 381, 427, 146, 0, 162, 111, + 119, 83, 90, 0, 110, 137, 151, 155, 418, 366, + 374, 99, 372, 153, 141, 175, 399, 142, 152, 123, + 167, 147, 174, 182, 183, 164, 181, 190, 84, 163, + 173, 97, 156, 86, 171, 161, 129, 115, 116, 85, + 0, 150, 102, 107, 101, 138, 168, 169, 100, 193, + 91, 180, 88, 92, 179, 136, 166, 172, 130, 127, + 87, 170, 128, 126, 118, 105, 112, 144, 125, 145, + 113, 133, 132, 134, 0, 347, 0, 160, 177, 194, + 94, 362, 165, 184, 185, 186, 187, 188, 189, 0, + 0, 95, 108, 104, 143, 135, 93, 114, 157, 117, + 124, 149, 192, 140, 154, 98, 176, 158, 358, 361, + 356, 357, 395, 396, 432, 433, 434, 412, 353, 0, + 359, 360, 0, 416, 422, 423, 398, 82, 89, 121, + 191, 148, 106, 178, 426, 415, 0, 386, 429, 364, + 378, 437, 379, 380, 408, 350, 394, 139, 376, 0, + 367, 345, 373, 346, 365, 388, 103, 391, 363, 417, + 397, 428, 120, 435, 122, 402, 0, 159, 131, 0, + 0, 390, 419, 392, 413, 385, 409, 355, 401, 430, + 377, 406, 431, 0, 0, 0, 80, 0, 0, 0, + 0, 0, 0, 0, 0, 96, 0, 404, 425, 375, + 405, 407, 344, 403, 0, 348, 351, 436, 421, 370, + 371, 0, 0, 0, 0, 0, 0, 0, 389, 393, + 410, 383, 0, 0, 0, 0, 0, 0, 1115, 0, + 368, 0, 400, 0, 0, 0, 352, 349, 0, 0, + 387, 0, 0, 0, 354, 0, 369, 411, 0, 343, + 109, 414, 420, 384, 210, 424, 382, 381, 427, 146, + 0, 162, 111, 119, 83, 90, 0, 110, 137, 151, + 155, 418, 366, 374, 99, 372, 153, 141, 175, 399, + 142, 152, 123, 167, 147, 174, 182, 183, 164, 181, + 190, 84, 163, 173, 97, 156, 86, 171, 161, 129, + 115, 116, 85, 0, 150, 102, 107, 101, 138, 168, + 169, 100, 193, 91, 180, 88, 92, 179, 136, 166, + 172, 130, 127, 87, 170, 128, 126, 118, 105, 112, + 144, 125, 145, 113, 133, 132, 134, 0, 347, 0, + 160, 177, 194, 94, 362, 165, 184, 185, 186, 187, + 188, 189, 0, 0, 95, 108, 104, 143, 135, 93, + 114, 157, 117, 124, 149, 192, 140, 154, 98, 176, + 158, 358, 361, 356, 357, 395, 396, 432, 433, 434, + 412, 353, 0, 359, 360, 0, 416, 422, 423, 398, + 82, 89, 121, 191, 148, 106, 178, 426, 415, 0, + 386, 429, 364, 378, 437, 379, 380, 408, 350, 394, + 139, 376, 0, 367, 345, 373, 346, 365, 388, 103, + 391, 363, 417, 397, 428, 120, 435, 122, 402, 0, + 159, 131, 0, 0, 390, 419, 392, 413, 385, 409, + 355, 401, 430, 377, 406, 431, 0, 0, 0, 268, + 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, + 404, 425, 375, 405, 407, 344, 403, 0, 348, 351, + 436, 421, 370, 371, 0, 0, 0, 0, 0, 0, + 0, 389, 393, 410, 383, 0, 0, 0, 0, 0, + 0, 753, 0, 368, 0, 400, 0, 0, 0, 352, + 349, 0, 0, 387, 0, 0, 0, 354, 0, 369, + 411, 0, 343, 109, 414, 420, 384, 210, 424, 382, + 381, 427, 146, 0, 162, 111, 119, 83, 90, 0, + 110, 137, 151, 155, 418, 366, 374, 99, 372, 153, + 141, 175, 399, 142, 152, 123, 167, 147, 174, 182, + 183, 164, 181, 190, 84, 163, 173, 97, 156, 86, + 171, 161, 129, 115, 116, 85, 0, 150, 102, 107, + 101, 138, 168, 169, 100, 193, 91, 180, 88, 92, + 179, 136, 166, 172, 130, 127, 87, 170, 128, 126, + 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, + 0, 347, 0, 160, 177, 194, 94, 362, 165, 184, + 185, 186, 187, 188, 189, 0, 0, 95, 108, 104, + 143, 135, 93, 114, 157, 117, 124, 149, 192, 140, + 154, 98, 176, 158, 358, 361, 356, 357, 395, 396, + 432, 433, 434, 412, 353, 0, 359, 360, 0, 416, + 422, 423, 398, 82, 89, 121, 191, 148, 106, 178, + 426, 415, 0, 386, 429, 364, 378, 437, 379, 380, + 408, 350, 394, 139, 376, 0, 367, 345, 373, 346, + 365, 388, 103, 391, 363, 417, 397, 428, 120, 435, + 122, 402, 0, 159, 131, 0, 0, 390, 419, 392, + 413, 385, 409, 355, 401, 430, 377, 406, 431, 0, 0, 0, 80, 0, 0, 0, 0, 0, 0, 0, - 0, 95, 0, 401, 420, 372, 402, 404, 341, 400, - 0, 345, 348, 431, 418, 
367, 368, 0, 0, 0, - 0, 0, 0, 0, 386, 390, 407, 380, 0, 0, - 0, 0, 0, 0, 1093, 0, 365, 0, 397, 0, - 0, 0, 349, 346, 0, 0, 384, 0, 0, 0, - 351, 0, 366, 408, 0, 340, 108, 411, 417, 381, - 209, 419, 379, 378, 422, 145, 0, 161, 110, 118, - 83, 89, 0, 109, 136, 150, 154, 415, 363, 371, - 98, 369, 152, 140, 174, 396, 141, 151, 122, 166, - 146, 173, 181, 182, 163, 180, 189, 84, 162, 172, - 96, 155, 86, 170, 160, 128, 114, 115, 85, 0, - 149, 101, 106, 100, 137, 167, 168, 99, 192, 90, - 179, 88, 91, 178, 135, 165, 171, 129, 126, 87, - 169, 127, 125, 117, 104, 111, 143, 124, 144, 112, - 132, 131, 133, 0, 344, 0, 159, 176, 193, 93, - 359, 164, 183, 184, 185, 186, 187, 188, 0, 0, - 94, 107, 103, 142, 134, 92, 113, 156, 116, 123, - 148, 191, 139, 153, 97, 175, 157, 355, 358, 353, - 354, 392, 393, 427, 428, 429, 409, 350, 0, 356, - 357, 0, 413, 395, 82, 0, 120, 190, 147, 105, - 177, 421, 412, 0, 383, 424, 361, 375, 432, 376, - 377, 405, 347, 391, 138, 373, 0, 364, 342, 370, - 343, 362, 385, 102, 388, 360, 414, 394, 423, 119, - 430, 121, 399, 0, 158, 130, 0, 0, 387, 416, - 389, 410, 382, 406, 352, 398, 425, 374, 403, 426, - 0, 0, 0, 267, 0, 0, 0, 0, 0, 0, - 0, 0, 95, 0, 401, 420, 372, 402, 404, 341, - 400, 0, 345, 348, 431, 418, 367, 368, 0, 0, - 0, 0, 0, 0, 0, 386, 390, 407, 380, 0, - 0, 0, 0, 0, 0, 737, 0, 365, 0, 397, - 0, 0, 0, 349, 346, 0, 0, 384, 0, 0, - 0, 351, 0, 366, 408, 0, 340, 108, 411, 417, - 381, 209, 419, 379, 378, 422, 145, 0, 161, 110, - 118, 83, 89, 0, 109, 136, 150, 154, 415, 363, - 371, 98, 369, 152, 140, 174, 396, 141, 151, 122, - 166, 146, 173, 181, 182, 163, 180, 189, 84, 162, - 172, 96, 155, 86, 170, 160, 128, 114, 115, 85, - 0, 149, 101, 106, 100, 137, 167, 168, 99, 192, - 90, 179, 88, 91, 178, 135, 165, 171, 129, 126, - 87, 169, 127, 125, 117, 104, 111, 143, 124, 144, - 112, 132, 131, 133, 0, 344, 0, 159, 176, 193, - 93, 359, 164, 183, 184, 185, 186, 187, 188, 0, - 0, 94, 107, 103, 142, 134, 92, 113, 156, 116, - 123, 148, 191, 139, 153, 97, 175, 157, 355, 358, - 353, 354, 392, 393, 427, 428, 429, 409, 350, 0, - 356, 357, 0, 413, 395, 82, 0, 120, 190, 147, - 105, 177, 421, 412, 0, 383, 424, 361, 375, 432, - 376, 377, 405, 347, 391, 138, 373, 0, 364, 342, - 370, 343, 362, 385, 102, 388, 360, 414, 394, 423, - 119, 430, 121, 399, 0, 158, 130, 0, 0, 387, - 416, 389, 410, 382, 406, 352, 398, 425, 374, 403, - 426, 0, 0, 0, 80, 0, 0, 0, 0, 0, - 0, 0, 0, 95, 0, 401, 420, 372, 402, 404, - 341, 400, 0, 345, 348, 431, 418, 367, 368, 0, - 0, 0, 0, 0, 0, 0, 386, 390, 407, 380, - 0, 0, 0, 0, 0, 0, 0, 0, 365, 0, - 397, 0, 0, 0, 349, 346, 0, 0, 384, 0, - 0, 0, 351, 0, 366, 408, 0, 340, 108, 411, - 417, 381, 209, 419, 379, 378, 422, 145, 0, 161, - 110, 118, 83, 89, 0, 109, 136, 150, 154, 415, - 363, 371, 98, 369, 152, 140, 174, 396, 141, 151, - 122, 166, 146, 173, 181, 182, 163, 180, 189, 84, - 162, 172, 96, 155, 86, 170, 160, 128, 114, 115, - 85, 0, 149, 101, 106, 100, 137, 167, 168, 99, - 192, 90, 179, 88, 91, 178, 135, 165, 171, 129, - 126, 87, 169, 127, 125, 117, 104, 111, 143, 124, - 144, 112, 132, 131, 133, 0, 344, 0, 159, 176, - 193, 93, 359, 164, 183, 184, 185, 186, 187, 188, - 0, 0, 94, 107, 103, 142, 134, 92, 113, 156, - 116, 123, 148, 191, 139, 153, 97, 175, 157, 355, - 358, 353, 354, 392, 393, 427, 428, 429, 409, 350, - 0, 356, 357, 0, 413, 395, 82, 0, 120, 190, - 147, 105, 177, 421, 412, 0, 383, 424, 361, 375, - 432, 376, 377, 405, 347, 391, 138, 373, 0, 364, - 342, 370, 343, 362, 385, 102, 388, 360, 414, 394, - 423, 119, 430, 121, 399, 0, 158, 130, 0, 0, - 387, 416, 389, 410, 382, 
406, 352, 398, 425, 374, - 403, 426, 0, 0, 0, 267, 0, 0, 0, 0, - 0, 0, 0, 0, 95, 0, 401, 420, 372, 402, - 404, 341, 400, 0, 345, 348, 431, 418, 367, 368, - 0, 0, 0, 0, 0, 0, 0, 386, 390, 407, - 380, 0, 0, 0, 0, 0, 0, 0, 0, 365, - 0, 397, 0, 0, 0, 349, 346, 0, 0, 384, - 0, 0, 0, 351, 0, 366, 408, 0, 340, 108, - 411, 417, 381, 209, 419, 379, 378, 422, 145, 0, - 161, 110, 118, 83, 89, 0, 109, 136, 150, 154, - 415, 363, 371, 98, 369, 152, 140, 174, 396, 141, - 151, 122, 166, 146, 173, 181, 182, 163, 180, 189, - 84, 162, 172, 96, 155, 86, 170, 160, 128, 114, - 115, 85, 0, 149, 101, 106, 100, 137, 167, 168, - 99, 192, 90, 179, 88, 91, 178, 135, 165, 171, - 129, 126, 87, 169, 127, 125, 117, 104, 111, 143, - 124, 144, 112, 132, 131, 133, 0, 344, 0, 159, - 176, 193, 93, 359, 164, 183, 184, 185, 186, 187, - 188, 0, 0, 94, 107, 103, 142, 134, 92, 113, - 156, 116, 123, 148, 191, 139, 153, 97, 175, 157, - 355, 358, 353, 354, 392, 393, 427, 428, 429, 409, - 350, 0, 356, 357, 0, 413, 395, 82, 0, 120, - 190, 147, 105, 177, 421, 412, 0, 383, 424, 361, - 375, 432, 376, 377, 405, 347, 391, 138, 373, 0, - 364, 342, 370, 343, 362, 385, 102, 388, 360, 414, - 394, 423, 119, 430, 121, 399, 0, 158, 130, 0, - 0, 387, 416, 389, 410, 382, 406, 352, 398, 425, - 374, 403, 426, 0, 0, 0, 80, 0, 0, 0, - 0, 0, 0, 0, 0, 95, 0, 401, 420, 372, - 402, 404, 341, 400, 0, 345, 348, 431, 418, 367, - 368, 0, 0, 0, 0, 0, 0, 0, 386, 390, - 407, 380, 0, 0, 0, 0, 0, 0, 0, 0, - 365, 0, 397, 0, 0, 0, 349, 346, 0, 0, - 384, 0, 0, 0, 351, 0, 366, 408, 0, 340, - 108, 411, 417, 381, 209, 419, 379, 378, 422, 145, - 0, 161, 110, 118, 83, 89, 0, 109, 136, 150, - 154, 415, 363, 371, 98, 369, 152, 140, 174, 396, - 141, 151, 122, 166, 146, 173, 181, 182, 163, 180, - 189, 84, 162, 172, 96, 155, 86, 170, 160, 128, - 114, 115, 85, 0, 149, 101, 106, 100, 137, 167, - 168, 99, 192, 90, 179, 88, 338, 178, 135, 165, - 171, 129, 126, 87, 169, 127, 125, 117, 104, 111, - 143, 124, 144, 112, 132, 131, 133, 0, 344, 0, - 159, 176, 193, 93, 359, 164, 183, 184, 185, 186, - 187, 188, 0, 0, 94, 107, 103, 142, 339, 337, - 113, 156, 116, 123, 148, 191, 139, 153, 97, 175, - 157, 355, 358, 353, 354, 392, 393, 427, 428, 429, - 409, 350, 0, 356, 357, 0, 413, 395, 82, 0, - 120, 190, 147, 105, 177, 421, 412, 0, 383, 424, - 361, 375, 432, 376, 377, 405, 347, 391, 138, 373, - 0, 364, 342, 370, 343, 362, 385, 102, 388, 360, - 414, 394, 423, 119, 430, 121, 399, 0, 158, 130, - 0, 0, 387, 416, 389, 410, 382, 406, 352, 398, - 425, 374, 403, 426, 0, 0, 0, 207, 0, 0, - 0, 0, 0, 0, 0, 0, 95, 0, 401, 420, - 372, 402, 404, 341, 400, 0, 345, 348, 431, 418, - 367, 368, 0, 0, 0, 0, 0, 0, 0, 386, - 390, 407, 380, 0, 0, 0, 0, 0, 0, 0, - 0, 365, 0, 397, 0, 0, 0, 349, 346, 0, - 0, 384, 0, 0, 0, 351, 0, 366, 408, 0, - 340, 108, 411, 417, 381, 209, 419, 379, 378, 422, - 145, 0, 161, 110, 118, 83, 89, 0, 109, 136, - 150, 154, 415, 363, 371, 98, 369, 152, 140, 174, - 396, 141, 151, 122, 166, 146, 173, 181, 182, 163, - 180, 189, 84, 162, 172, 96, 155, 86, 170, 160, - 128, 114, 115, 85, 0, 149, 101, 106, 100, 137, - 167, 168, 99, 192, 90, 179, 88, 91, 178, 135, - 165, 171, 129, 126, 87, 169, 127, 125, 117, 104, - 111, 143, 124, 144, 112, 132, 131, 133, 0, 344, - 0, 159, 176, 193, 93, 359, 164, 183, 184, 185, - 186, 187, 188, 0, 0, 94, 107, 103, 142, 134, - 92, 113, 156, 116, 123, 148, 191, 139, 153, 97, - 175, 157, 355, 358, 353, 354, 392, 393, 427, 428, - 429, 409, 350, 0, 356, 357, 0, 413, 395, 82, - 0, 120, 190, 147, 105, 177, 421, 412, 0, 383, - 424, 361, 375, 432, 376, 377, 405, 347, 391, 
138, - 373, 0, 364, 342, 370, 343, 362, 385, 102, 388, - 360, 414, 394, 423, 119, 430, 121, 399, 0, 158, - 130, 0, 0, 387, 416, 389, 410, 382, 406, 352, - 398, 425, 374, 403, 426, 0, 0, 0, 80, 0, - 0, 0, 0, 0, 0, 0, 0, 95, 0, 401, - 420, 372, 402, 404, 341, 400, 0, 345, 348, 431, - 418, 367, 368, 0, 0, 0, 0, 0, 0, 0, - 386, 390, 407, 380, 0, 0, 0, 0, 0, 0, - 0, 0, 365, 0, 397, 0, 0, 0, 349, 346, - 0, 0, 384, 0, 0, 0, 351, 0, 366, 408, - 0, 340, 108, 411, 417, 381, 209, 419, 379, 378, - 422, 145, 0, 161, 110, 118, 83, 89, 0, 109, - 136, 150, 154, 415, 363, 371, 98, 369, 152, 140, - 174, 396, 141, 151, 122, 166, 146, 173, 181, 182, - 163, 180, 189, 84, 162, 608, 96, 155, 86, 170, - 160, 128, 114, 115, 85, 0, 149, 101, 106, 100, - 137, 167, 168, 99, 192, 90, 179, 88, 338, 178, - 135, 165, 171, 129, 126, 87, 169, 127, 125, 117, - 104, 111, 143, 124, 144, 112, 132, 131, 133, 0, - 344, 0, 159, 176, 193, 93, 359, 164, 183, 184, - 185, 186, 187, 188, 0, 0, 94, 107, 103, 142, - 339, 337, 113, 156, 116, 123, 148, 191, 139, 153, - 97, 175, 157, 355, 358, 353, 354, 392, 393, 427, - 428, 429, 409, 350, 0, 356, 357, 0, 413, 395, - 82, 0, 120, 190, 147, 105, 177, 421, 412, 0, - 383, 424, 361, 375, 432, 376, 377, 405, 347, 391, - 138, 373, 0, 364, 342, 370, 343, 362, 385, 102, - 388, 360, 414, 394, 423, 119, 430, 121, 399, 0, - 158, 130, 0, 0, 387, 416, 389, 410, 382, 406, - 352, 398, 425, 374, 403, 426, 0, 0, 0, 80, - 0, 0, 0, 0, 0, 0, 0, 0, 95, 0, - 401, 420, 372, 402, 404, 341, 400, 0, 345, 348, - 431, 418, 367, 368, 0, 0, 0, 0, 0, 0, - 0, 386, 390, 407, 380, 0, 0, 0, 0, 0, - 0, 0, 0, 365, 0, 397, 0, 0, 0, 349, - 346, 0, 0, 384, 0, 0, 0, 351, 0, 366, - 408, 0, 340, 108, 411, 417, 381, 209, 419, 379, - 378, 422, 145, 0, 161, 110, 118, 83, 89, 0, - 109, 136, 150, 154, 415, 363, 371, 98, 369, 152, - 140, 174, 396, 141, 151, 122, 166, 146, 173, 181, - 182, 163, 180, 189, 84, 162, 329, 96, 155, 86, - 170, 160, 128, 114, 115, 85, 0, 149, 101, 106, - 100, 137, 167, 168, 99, 192, 90, 179, 88, 338, - 178, 135, 165, 171, 129, 126, 87, 169, 127, 125, - 117, 104, 111, 143, 124, 144, 112, 132, 131, 133, - 0, 344, 0, 159, 176, 193, 93, 359, 164, 183, - 184, 185, 186, 187, 188, 0, 0, 94, 107, 103, - 142, 339, 337, 332, 331, 116, 123, 148, 191, 139, - 153, 97, 175, 157, 355, 358, 353, 354, 392, 393, - 427, 428, 429, 409, 350, 0, 356, 357, 0, 413, - 395, 82, 0, 120, 190, 147, 105, 177, 138, 0, - 0, 773, 0, 269, 0, 0, 0, 102, 0, 266, - 0, 0, 0, 119, 309, 121, 0, 0, 158, 130, - 0, 0, 0, 0, 300, 301, 0, 0, 0, 0, - 0, 0, 0, 0, 54, 0, 0, 267, 288, 287, - 290, 291, 292, 293, 0, 0, 95, 289, 294, 295, - 296, 0, 0, 0, 264, 281, 0, 308, 0, 0, + 0, 96, 0, 404, 425, 375, 405, 407, 344, 403, + 0, 348, 351, 436, 421, 370, 371, 0, 0, 0, + 0, 0, 0, 0, 389, 393, 410, 383, 0, 0, + 0, 0, 0, 0, 0, 0, 368, 0, 400, 0, + 0, 0, 352, 349, 0, 0, 387, 0, 0, 0, + 354, 0, 369, 411, 0, 343, 109, 414, 420, 384, + 210, 424, 382, 381, 427, 146, 0, 162, 111, 119, + 83, 90, 0, 110, 137, 151, 155, 418, 366, 374, + 99, 372, 153, 141, 175, 399, 142, 152, 123, 167, + 147, 174, 182, 183, 164, 181, 190, 84, 163, 173, + 97, 156, 86, 171, 161, 129, 115, 116, 85, 0, + 150, 102, 107, 101, 138, 168, 169, 100, 193, 91, + 180, 88, 92, 179, 136, 166, 172, 130, 127, 87, + 170, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 347, 0, 160, 177, 194, 94, + 362, 165, 184, 185, 186, 187, 188, 189, 0, 0, + 95, 108, 104, 143, 135, 93, 114, 157, 117, 124, + 149, 192, 140, 154, 98, 176, 158, 358, 361, 356, + 357, 395, 396, 432, 433, 434, 412, 353, 0, 359, + 
360, 0, 416, 422, 423, 398, 82, 89, 121, 191, + 148, 106, 178, 426, 415, 0, 386, 429, 364, 378, + 437, 379, 380, 408, 350, 394, 139, 376, 0, 367, + 345, 373, 346, 365, 388, 103, 391, 363, 417, 397, + 428, 120, 435, 122, 402, 0, 159, 131, 0, 0, + 390, 419, 392, 413, 385, 409, 355, 401, 430, 377, + 406, 431, 0, 0, 0, 268, 0, 0, 0, 0, + 0, 0, 0, 0, 96, 0, 404, 425, 375, 405, + 407, 344, 403, 0, 348, 351, 436, 421, 370, 371, + 0, 0, 0, 0, 0, 0, 0, 389, 393, 410, + 383, 0, 0, 0, 0, 0, 0, 0, 0, 368, + 0, 400, 0, 0, 0, 352, 349, 0, 0, 387, + 0, 0, 0, 354, 0, 369, 411, 0, 343, 109, + 414, 420, 384, 210, 424, 382, 381, 427, 146, 0, + 162, 111, 119, 83, 90, 0, 110, 137, 151, 155, + 418, 366, 374, 99, 372, 153, 141, 175, 399, 142, + 152, 123, 167, 147, 174, 182, 183, 164, 181, 190, + 84, 163, 173, 97, 156, 86, 171, 161, 129, 115, + 116, 85, 0, 150, 102, 107, 101, 138, 168, 169, + 100, 193, 91, 180, 88, 92, 179, 136, 166, 172, + 130, 127, 87, 170, 128, 126, 118, 105, 112, 144, + 125, 145, 113, 133, 132, 134, 0, 347, 0, 160, + 177, 194, 94, 362, 165, 184, 185, 186, 187, 188, + 189, 0, 0, 95, 108, 104, 143, 135, 93, 114, + 157, 117, 124, 149, 192, 140, 154, 98, 176, 158, + 358, 361, 356, 357, 395, 396, 432, 433, 434, 412, + 353, 0, 359, 360, 0, 416, 422, 423, 398, 82, + 89, 121, 191, 148, 106, 178, 426, 415, 0, 386, + 429, 364, 378, 437, 379, 380, 408, 350, 394, 139, + 376, 0, 367, 345, 373, 346, 365, 388, 103, 391, + 363, 417, 397, 428, 120, 435, 122, 402, 0, 159, + 131, 0, 0, 390, 419, 392, 413, 385, 409, 355, + 401, 430, 377, 406, 431, 0, 0, 0, 80, 0, + 0, 0, 0, 0, 0, 0, 0, 96, 0, 404, + 425, 375, 405, 407, 344, 403, 0, 348, 351, 436, + 421, 370, 371, 0, 0, 0, 0, 0, 0, 0, + 389, 393, 410, 383, 0, 0, 0, 0, 0, 0, + 0, 0, 368, 0, 400, 0, 0, 0, 352, 349, + 0, 0, 387, 0, 0, 0, 354, 0, 369, 411, + 0, 343, 109, 414, 420, 384, 210, 424, 382, 381, + 427, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 418, 366, 374, 99, 372, 153, 141, + 175, 399, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 341, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 347, 0, 160, 177, 194, 94, 362, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 342, 340, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 358, 361, 356, 357, 395, 396, 432, + 433, 434, 412, 353, 0, 359, 360, 0, 416, 422, + 423, 398, 82, 89, 121, 191, 148, 106, 178, 426, + 415, 0, 386, 429, 364, 378, 437, 379, 380, 408, + 350, 394, 139, 376, 0, 367, 345, 373, 346, 365, + 388, 103, 391, 363, 417, 397, 428, 120, 435, 122, + 402, 0, 159, 131, 0, 0, 390, 419, 392, 413, + 385, 409, 355, 401, 430, 377, 406, 431, 0, 0, + 0, 208, 0, 0, 0, 0, 0, 0, 0, 0, + 96, 0, 404, 425, 375, 405, 407, 344, 403, 0, + 348, 351, 436, 421, 370, 371, 0, 0, 0, 0, + 0, 0, 0, 389, 393, 410, 383, 0, 0, 0, + 0, 0, 0, 0, 0, 368, 0, 400, 0, 0, + 0, 352, 349, 0, 0, 387, 0, 0, 0, 354, + 0, 369, 411, 0, 343, 109, 414, 420, 384, 210, + 424, 382, 381, 427, 146, 0, 162, 111, 119, 83, + 90, 0, 110, 137, 151, 155, 418, 366, 374, 99, + 372, 153, 141, 175, 399, 142, 152, 123, 167, 147, + 174, 182, 183, 164, 181, 190, 84, 163, 173, 97, + 156, 86, 171, 161, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 168, 169, 100, 193, 91, 180, + 88, 92, 179, 136, 166, 172, 130, 127, 87, 170, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 347, 0, 160, 177, 194, 94, 362, + 165, 184, 185, 
186, 187, 188, 189, 0, 0, 95, + 108, 104, 143, 135, 93, 114, 157, 117, 124, 149, + 192, 140, 154, 98, 176, 158, 358, 361, 356, 357, + 395, 396, 432, 433, 434, 412, 353, 0, 359, 360, + 0, 416, 422, 423, 398, 82, 89, 121, 191, 148, + 106, 178, 426, 415, 0, 386, 429, 364, 378, 437, + 379, 380, 408, 350, 394, 139, 376, 0, 367, 345, + 373, 346, 365, 388, 103, 391, 363, 417, 397, 428, + 120, 435, 122, 402, 0, 159, 131, 0, 0, 390, + 419, 392, 413, 385, 409, 355, 401, 430, 377, 406, + 431, 0, 0, 0, 80, 0, 0, 0, 0, 0, + 0, 0, 0, 96, 0, 404, 425, 375, 405, 407, + 344, 403, 0, 348, 351, 436, 421, 370, 371, 0, + 0, 0, 0, 0, 0, 0, 389, 393, 410, 383, + 0, 0, 0, 0, 0, 0, 0, 0, 368, 0, + 400, 0, 0, 0, 352, 349, 0, 0, 387, 0, + 0, 0, 354, 0, 369, 411, 0, 343, 109, 414, + 420, 384, 210, 424, 382, 381, 427, 146, 0, 162, + 111, 119, 83, 90, 0, 110, 137, 151, 155, 418, + 366, 374, 99, 372, 153, 141, 175, 399, 142, 152, + 123, 167, 147, 174, 182, 183, 164, 181, 190, 84, + 163, 622, 97, 156, 86, 171, 161, 129, 115, 116, + 85, 0, 150, 102, 107, 101, 138, 168, 169, 100, + 193, 91, 180, 88, 341, 179, 136, 166, 172, 130, + 127, 87, 170, 128, 126, 118, 105, 112, 144, 125, + 145, 113, 133, 132, 134, 0, 347, 0, 160, 177, + 194, 94, 362, 165, 184, 185, 186, 187, 188, 189, + 0, 0, 95, 108, 104, 143, 342, 340, 114, 157, + 117, 124, 149, 192, 140, 154, 98, 176, 158, 358, + 361, 356, 357, 395, 396, 432, 433, 434, 412, 353, + 0, 359, 360, 0, 416, 422, 423, 398, 82, 89, + 121, 191, 148, 106, 178, 426, 415, 0, 386, 429, + 364, 378, 437, 379, 380, 408, 350, 394, 139, 376, + 0, 367, 345, 373, 346, 365, 388, 103, 391, 363, + 417, 397, 428, 120, 435, 122, 402, 0, 159, 131, + 0, 0, 390, 419, 392, 413, 385, 409, 355, 401, + 430, 377, 406, 431, 0, 0, 0, 80, 0, 0, + 0, 0, 0, 0, 0, 0, 96, 0, 404, 425, + 375, 405, 407, 344, 403, 0, 348, 351, 436, 421, + 370, 371, 0, 0, 0, 0, 0, 0, 0, 389, + 393, 410, 383, 0, 0, 0, 0, 0, 0, 0, + 0, 368, 0, 400, 0, 0, 0, 352, 349, 0, + 0, 387, 0, 0, 0, 354, 0, 369, 411, 0, + 343, 109, 414, 420, 384, 210, 424, 382, 381, 427, + 146, 0, 162, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 418, 366, 374, 99, 372, 153, 141, 175, + 399, 142, 152, 123, 167, 147, 174, 182, 183, 164, + 181, 190, 84, 163, 332, 97, 156, 86, 171, 161, + 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, + 168, 169, 100, 193, 91, 180, 88, 341, 179, 136, + 166, 172, 130, 127, 87, 170, 128, 126, 118, 105, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 347, + 0, 160, 177, 194, 94, 362, 165, 184, 185, 186, + 187, 188, 189, 0, 0, 95, 108, 104, 143, 342, + 340, 335, 334, 117, 124, 149, 192, 140, 154, 98, + 176, 158, 358, 361, 356, 357, 395, 396, 432, 433, + 434, 412, 353, 0, 359, 360, 0, 416, 422, 423, + 398, 82, 89, 121, 191, 148, 106, 178, 139, 0, + 0, 0, 0, 270, 0, 0, 0, 103, 0, 267, + 0, 0, 0, 120, 310, 122, 0, 0, 159, 131, + 0, 0, 0, 0, 301, 302, 0, 0, 0, 0, + 0, 0, 856, 0, 54, 0, 0, 268, 289, 288, + 291, 292, 293, 294, 0, 0, 96, 290, 295, 296, + 297, 857, 0, 0, 265, 282, 0, 309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 278, 279, 260, - 0, 0, 0, 320, 0, 280, 0, 0, 275, 276, - 277, 282, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 108, 0, 0, 0, 209, 0, 0, 318, 0, - 145, 0, 161, 110, 118, 83, 89, 0, 109, 136, - 150, 154, 0, 0, 0, 98, 0, 152, 140, 174, - 0, 141, 151, 122, 166, 146, 173, 181, 182, 163, - 180, 189, 84, 162, 172, 96, 155, 86, 170, 160, - 128, 114, 115, 85, 0, 149, 101, 106, 100, 137, - 167, 168, 99, 192, 90, 179, 88, 91, 178, 135, - 165, 171, 129, 126, 87, 169, 127, 125, 117, 104, - 111, 143, 124, 144, 112, 132, 131, 133, 0, 0, - 0, 
159, 176, 193, 93, 0, 164, 183, 184, 185, - 186, 187, 188, 0, 0, 94, 107, 103, 142, 134, - 92, 113, 156, 116, 123, 148, 191, 139, 153, 97, - 175, 157, 310, 319, 316, 317, 314, 315, 313, 312, - 311, 321, 302, 303, 304, 305, 307, 0, 306, 82, - 0, 120, 190, 147, 105, 177, 138, 0, 0, 0, - 0, 269, 0, 0, 0, 102, 0, 266, 0, 0, - 0, 119, 309, 121, 0, 0, 158, 130, 0, 0, - 0, 0, 300, 301, 0, 0, 0, 0, 0, 0, - 0, 0, 54, 0, 493, 267, 288, 287, 290, 291, - 292, 293, 0, 0, 95, 289, 294, 295, 296, 0, - 0, 0, 264, 281, 0, 308, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 279, 280, 0, + 0, 0, 0, 323, 0, 281, 0, 0, 276, 277, + 278, 283, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 109, 0, 0, 0, 210, 0, 0, 321, 0, + 146, 0, 162, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 0, 0, 0, 99, 0, 153, 141, 175, + 0, 142, 152, 123, 167, 147, 174, 182, 183, 164, + 181, 190, 84, 163, 173, 97, 156, 86, 171, 161, + 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, + 168, 169, 100, 193, 91, 180, 88, 92, 179, 136, + 166, 172, 130, 127, 87, 170, 128, 126, 118, 105, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, + 0, 160, 177, 194, 94, 0, 165, 184, 185, 186, + 187, 188, 189, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 157, 117, 124, 149, 192, 140, 154, 98, + 176, 158, 311, 322, 317, 318, 315, 316, 314, 313, + 312, 324, 303, 304, 305, 306, 308, 0, 319, 320, + 307, 82, 89, 121, 191, 148, 106, 178, 139, 0, + 0, 789, 0, 270, 0, 0, 0, 103, 0, 267, + 0, 0, 0, 120, 310, 122, 0, 0, 159, 131, + 0, 0, 0, 0, 301, 302, 0, 0, 0, 0, + 0, 0, 0, 0, 54, 0, 0, 268, 289, 288, + 291, 292, 293, 294, 0, 0, 96, 290, 295, 296, + 297, 0, 0, 0, 265, 282, 0, 309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 278, 279, 0, 0, 0, - 0, 320, 0, 280, 0, 0, 275, 276, 277, 282, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 108, - 0, 0, 0, 209, 0, 0, 318, 0, 145, 0, - 161, 110, 118, 83, 89, 0, 109, 136, 150, 154, - 0, 0, 0, 98, 0, 152, 140, 174, 0, 141, - 151, 122, 166, 146, 173, 181, 182, 163, 180, 189, - 84, 162, 172, 96, 155, 86, 170, 160, 128, 114, - 115, 85, 0, 149, 101, 106, 100, 137, 167, 168, - 99, 192, 90, 179, 88, 91, 178, 135, 165, 171, - 129, 126, 87, 169, 127, 125, 117, 104, 111, 143, - 124, 144, 112, 132, 131, 133, 0, 0, 0, 159, - 176, 193, 93, 0, 164, 183, 184, 185, 186, 187, - 188, 0, 0, 94, 107, 103, 142, 134, 92, 113, - 156, 116, 123, 148, 191, 139, 153, 97, 175, 157, - 310, 319, 316, 317, 314, 315, 313, 312, 311, 321, - 302, 303, 304, 305, 307, 0, 306, 82, 0, 120, - 190, 147, 105, 177, 138, 0, 0, 0, 0, 269, - 0, 0, 0, 102, 0, 266, 0, 0, 0, 119, - 309, 121, 0, 0, 158, 130, 0, 0, 0, 0, - 300, 301, 0, 0, 0, 0, 0, 0, 0, 0, - 54, 0, 0, 267, 288, 287, 290, 291, 292, 293, - 0, 0, 95, 289, 294, 295, 296, 0, 0, 0, - 264, 281, 0, 308, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 279, 280, 261, + 0, 0, 0, 323, 0, 281, 0, 0, 276, 277, + 278, 283, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 109, 0, 0, 0, 210, 0, 0, 321, 0, + 146, 0, 162, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 0, 0, 0, 99, 0, 153, 141, 175, + 0, 142, 152, 123, 167, 147, 174, 182, 183, 164, + 181, 190, 84, 163, 173, 97, 156, 86, 171, 161, + 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, + 168, 169, 100, 193, 91, 180, 88, 92, 179, 136, + 166, 172, 130, 127, 87, 170, 128, 126, 118, 105, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, + 0, 160, 177, 194, 94, 0, 165, 184, 185, 186, + 187, 188, 189, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 157, 117, 124, 149, 192, 140, 154, 98, + 176, 158, 311, 322, 317, 318, 315, 316, 314, 313, + 312, 324, 303, 304, 305, 306, 308, 0, 319, 320, + 307, 82, 89, 121, 191, 148, 106, 178, 139, 0, + 0, 0, 0, 270, 0, 0, 0, 
103, 0, 267, + 0, 0, 0, 120, 310, 122, 0, 0, 159, 131, + 0, 0, 0, 0, 301, 302, 0, 0, 0, 0, + 0, 0, 0, 0, 54, 0, 498, 268, 289, 288, + 291, 292, 293, 294, 0, 0, 96, 290, 295, 296, + 297, 0, 0, 0, 265, 282, 0, 309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 278, 279, 260, 0, 0, 0, 320, - 0, 280, 0, 0, 275, 276, 277, 282, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 108, 0, 0, - 0, 209, 0, 0, 318, 0, 145, 0, 161, 110, - 118, 83, 89, 0, 109, 136, 150, 154, 0, 0, - 0, 98, 0, 152, 140, 174, 0, 141, 151, 122, - 166, 146, 173, 181, 182, 163, 180, 189, 84, 162, - 172, 96, 155, 86, 170, 160, 128, 114, 115, 85, - 0, 149, 101, 106, 100, 137, 167, 168, 99, 192, - 90, 179, 88, 91, 178, 135, 165, 171, 129, 126, - 87, 169, 127, 125, 117, 104, 111, 143, 124, 144, - 112, 132, 131, 133, 0, 0, 0, 159, 176, 193, - 93, 0, 164, 183, 184, 185, 186, 187, 188, 0, - 0, 94, 107, 103, 142, 134, 92, 113, 156, 116, - 123, 148, 191, 139, 153, 97, 175, 157, 310, 319, - 316, 317, 314, 315, 313, 312, 311, 321, 302, 303, - 304, 305, 307, 24, 306, 82, 0, 120, 190, 147, - 105, 177, 0, 0, 0, 138, 0, 0, 0, 0, - 269, 0, 0, 0, 102, 0, 266, 0, 0, 0, - 119, 309, 121, 0, 0, 158, 130, 0, 0, 0, - 0, 300, 301, 0, 0, 0, 0, 0, 0, 0, - 0, 54, 0, 0, 267, 288, 287, 290, 291, 292, - 293, 0, 0, 95, 289, 294, 295, 296, 0, 0, - 0, 264, 281, 0, 308, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 279, 280, 0, + 0, 0, 0, 323, 0, 281, 0, 0, 276, 277, + 278, 283, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 109, 0, 0, 0, 210, 0, 0, 321, 0, + 146, 0, 162, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 0, 0, 0, 99, 0, 153, 141, 175, + 0, 142, 152, 123, 167, 147, 174, 182, 183, 164, + 181, 190, 84, 163, 173, 97, 156, 86, 171, 161, + 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, + 168, 169, 100, 193, 91, 180, 88, 92, 179, 136, + 166, 172, 130, 127, 87, 170, 128, 126, 118, 105, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, + 0, 160, 177, 194, 94, 0, 165, 184, 185, 186, + 187, 188, 189, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 157, 117, 124, 149, 192, 140, 154, 98, + 176, 158, 311, 322, 317, 318, 315, 316, 314, 313, + 312, 324, 303, 304, 305, 306, 308, 0, 319, 320, + 307, 82, 89, 121, 191, 148, 106, 178, 139, 0, + 0, 0, 0, 270, 0, 0, 0, 103, 0, 267, + 0, 0, 0, 120, 310, 122, 0, 0, 159, 131, + 0, 0, 0, 0, 301, 302, 0, 0, 0, 0, + 0, 0, 0, 0, 54, 0, 0, 268, 289, 288, + 291, 292, 293, 294, 0, 0, 96, 290, 295, 296, + 297, 0, 0, 0, 265, 282, 0, 309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 278, 279, 0, 0, 0, 0, - 320, 0, 280, 0, 0, 275, 276, 277, 282, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 108, 0, - 0, 0, 209, 0, 0, 318, 0, 145, 0, 161, - 110, 118, 83, 89, 0, 109, 136, 150, 154, 0, - 0, 0, 98, 0, 152, 140, 174, 0, 141, 151, - 122, 166, 146, 173, 181, 182, 163, 180, 189, 84, - 162, 172, 96, 155, 86, 170, 160, 128, 114, 115, - 85, 0, 149, 101, 106, 100, 137, 167, 168, 99, - 192, 90, 179, 88, 91, 178, 135, 165, 171, 129, - 126, 87, 169, 127, 125, 117, 104, 111, 143, 124, - 144, 112, 132, 131, 133, 0, 0, 0, 159, 176, - 193, 93, 0, 164, 183, 184, 185, 186, 187, 188, - 0, 0, 94, 107, 103, 142, 134, 92, 113, 156, - 116, 123, 148, 191, 139, 153, 97, 175, 157, 310, - 319, 316, 317, 314, 315, 313, 312, 311, 321, 302, - 303, 304, 305, 307, 0, 306, 82, 0, 120, 190, - 147, 105, 177, 138, 0, 0, 0, 0, 269, 0, - 0, 0, 102, 0, 266, 0, 0, 0, 119, 309, - 121, 0, 0, 158, 130, 0, 0, 0, 0, 300, - 301, 0, 0, 0, 0, 0, 0, 0, 0, 54, - 0, 0, 267, 288, 287, 290, 291, 292, 293, 0, - 0, 95, 289, 294, 295, 296, 0, 0, 0, 264, - 281, 0, 308, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 279, 280, 261, + 0, 0, 0, 323, 0, 281, 0, 0, 
276, 277, + 278, 283, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 109, 0, 0, 0, 210, 0, 0, 321, 0, + 146, 0, 162, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 0, 0, 0, 99, 0, 153, 141, 175, + 0, 142, 152, 123, 167, 147, 174, 182, 183, 164, + 181, 190, 84, 163, 173, 97, 156, 86, 171, 161, + 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, + 168, 169, 100, 193, 91, 180, 88, 92, 179, 136, + 166, 172, 130, 127, 87, 170, 128, 126, 118, 105, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, + 0, 160, 177, 194, 94, 0, 165, 184, 185, 186, + 187, 188, 189, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 157, 117, 124, 149, 192, 140, 154, 98, + 176, 158, 311, 322, 317, 318, 315, 316, 314, 313, + 312, 324, 303, 304, 305, 306, 308, 24, 319, 320, + 307, 82, 89, 121, 191, 148, 106, 178, 0, 139, + 0, 0, 0, 0, 270, 0, 0, 0, 103, 0, + 267, 0, 0, 0, 120, 310, 122, 0, 0, 159, + 131, 0, 0, 0, 0, 301, 302, 0, 0, 0, + 0, 0, 0, 0, 0, 54, 0, 0, 268, 289, + 288, 291, 292, 293, 294, 0, 0, 96, 290, 295, + 296, 297, 0, 0, 0, 265, 282, 0, 309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 278, 279, 0, 0, 0, 0, 320, 0, - 280, 0, 0, 275, 276, 277, 282, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 108, 0, 0, 0, - 209, 0, 0, 318, 0, 145, 0, 161, 110, 118, - 83, 89, 0, 109, 136, 150, 154, 0, 0, 0, - 98, 0, 152, 140, 174, 0, 141, 151, 122, 166, - 146, 173, 181, 182, 163, 180, 189, 84, 162, 172, - 96, 155, 86, 170, 160, 128, 114, 115, 85, 0, - 149, 101, 106, 100, 137, 167, 168, 99, 192, 90, - 179, 88, 91, 178, 135, 165, 171, 129, 126, 87, - 169, 127, 125, 117, 104, 111, 143, 124, 144, 112, - 132, 131, 133, 0, 0, 0, 159, 176, 193, 93, - 0, 164, 183, 184, 185, 186, 187, 188, 0, 0, - 94, 107, 103, 142, 134, 92, 113, 156, 116, 123, - 148, 191, 139, 153, 97, 175, 157, 310, 319, 316, - 317, 314, 315, 313, 312, 311, 321, 302, 303, 304, - 305, 307, 138, 306, 82, 0, 120, 190, 147, 105, - 177, 102, 0, 0, 0, 0, 0, 119, 309, 121, - 0, 0, 158, 130, 0, 0, 0, 0, 300, 301, - 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, - 0, 267, 288, 287, 290, 291, 292, 293, 0, 0, - 95, 289, 294, 295, 296, 0, 0, 0, 0, 281, - 0, 308, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 279, 280, + 0, 0, 0, 0, 323, 0, 281, 0, 0, 276, + 277, 278, 283, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 210, 0, 0, 321, + 0, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 175, 0, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 92, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 160, 177, 194, 94, 0, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 311, 322, 317, 318, 315, 316, 314, + 313, 312, 324, 303, 304, 305, 306, 308, 0, 319, + 320, 307, 82, 89, 121, 191, 148, 106, 178, 139, + 0, 0, 0, 0, 270, 0, 0, 0, 103, 0, + 267, 0, 0, 0, 120, 310, 122, 0, 0, 159, + 131, 0, 0, 0, 0, 301, 302, 0, 0, 0, + 0, 0, 0, 0, 0, 54, 0, 0, 268, 289, + 288, 291, 292, 293, 294, 0, 0, 96, 290, 295, + 296, 297, 0, 0, 0, 265, 282, 0, 309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 278, 279, 0, 0, 0, 0, 320, 0, 280, - 0, 0, 275, 276, 277, 282, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 108, 0, 0, 0, 209, - 0, 0, 318, 0, 145, 0, 161, 110, 118, 83, - 89, 0, 109, 136, 150, 154, 0, 0, 0, 98, - 0, 152, 140, 174, 1410, 141, 151, 122, 166, 146, - 173, 181, 182, 163, 180, 189, 84, 162, 172, 96, - 155, 86, 170, 160, 128, 114, 115, 85, 0, 149, - 101, 106, 100, 137, 167, 168, 99, 192, 
90, 179, - 88, 91, 178, 135, 165, 171, 129, 126, 87, 169, - 127, 125, 117, 104, 111, 143, 124, 144, 112, 132, - 131, 133, 0, 0, 0, 159, 176, 193, 93, 0, - 164, 183, 184, 185, 186, 187, 188, 0, 0, 94, - 107, 103, 142, 134, 92, 113, 156, 116, 123, 148, - 191, 139, 153, 97, 175, 157, 310, 319, 316, 317, - 314, 315, 313, 312, 311, 321, 302, 303, 304, 305, - 307, 138, 306, 82, 0, 120, 190, 147, 105, 177, - 102, 0, 0, 0, 0, 0, 119, 309, 121, 0, - 0, 158, 130, 0, 0, 0, 0, 300, 301, 0, - 0, 0, 0, 0, 0, 0, 0, 54, 0, 0, - 267, 288, 287, 290, 291, 292, 293, 0, 0, 95, - 289, 294, 295, 296, 0, 0, 0, 0, 281, 0, - 308, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 279, 280, + 0, 0, 0, 0, 323, 0, 281, 0, 0, 276, + 277, 278, 283, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 210, 0, 0, 321, + 0, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 175, 0, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 92, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 160, 177, 194, 94, 0, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 311, 322, 317, 318, 315, 316, 314, + 313, 312, 324, 303, 304, 305, 306, 308, 0, 319, + 320, 307, 82, 89, 121, 191, 148, 106, 178, 139, + 0, 0, 0, 0, 0, 0, 0, 0, 103, 0, + 0, 0, 0, 0, 120, 310, 122, 0, 0, 159, + 131, 0, 0, 0, 0, 301, 302, 0, 0, 0, + 0, 0, 0, 0, 0, 54, 0, 0, 268, 289, + 288, 291, 292, 293, 294, 0, 0, 96, 290, 295, + 296, 297, 0, 0, 0, 0, 282, 0, 309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 278, 279, 0, 0, 0, 0, 320, 0, 280, 0, - 0, 275, 276, 277, 282, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 108, 0, 0, 0, 209, 0, - 0, 318, 0, 145, 0, 161, 110, 118, 83, 89, - 0, 109, 136, 150, 154, 0, 0, 0, 98, 0, - 152, 140, 174, 0, 141, 151, 122, 166, 146, 173, - 181, 182, 163, 180, 189, 84, 162, 172, 96, 155, - 86, 170, 160, 128, 114, 115, 85, 0, 149, 101, - 106, 100, 137, 167, 168, 99, 192, 90, 179, 88, - 91, 178, 135, 165, 171, 129, 126, 87, 169, 127, - 125, 117, 104, 111, 143, 124, 144, 112, 132, 131, - 133, 0, 0, 0, 159, 176, 193, 93, 0, 164, - 183, 184, 185, 186, 187, 188, 0, 0, 94, 107, - 103, 142, 134, 92, 113, 156, 116, 123, 148, 191, - 139, 153, 97, 175, 157, 310, 319, 316, 317, 314, - 315, 313, 312, 311, 321, 302, 303, 304, 305, 307, - 138, 306, 82, 0, 120, 190, 147, 105, 177, 102, - 0, 0, 0, 0, 0, 119, 0, 121, 0, 0, - 158, 130, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 279, 280, + 0, 0, 0, 0, 323, 0, 281, 0, 0, 276, + 277, 278, 283, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 210, 0, 0, 321, + 0, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 175, 1431, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 92, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 160, 177, 194, 94, 0, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 311, 322, 317, 318, 315, 316, 314, + 313, 312, 324, 303, 304, 305, 306, 308, 0, 319, + 320, 307, 82, 89, 121, 191, 148, 106, 178, 139, + 0, 0, 0, 0, 0, 0, 0, 0, 103, 0, + 0, 0, 0, 0, 120, 310, 122, 0, 0, 159, + 131, 0, 0, 0, 0, 301, 302, 0, 0, 0, + 0, 0, 0, 
0, 0, 54, 0, 498, 268, 289, + 288, 291, 292, 293, 294, 0, 0, 96, 290, 295, + 296, 297, 0, 0, 0, 0, 282, 0, 309, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 279, 280, + 0, 0, 0, 0, 323, 0, 281, 0, 0, 276, + 277, 278, 283, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 210, 0, 0, 321, + 0, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 175, 0, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 92, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 160, 177, 194, 94, 0, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 311, 322, 317, 318, 315, 316, 314, + 313, 312, 324, 303, 304, 305, 306, 308, 0, 319, + 320, 307, 82, 89, 121, 191, 148, 106, 178, 139, + 0, 0, 0, 0, 0, 0, 0, 0, 103, 0, + 0, 0, 0, 0, 120, 310, 122, 0, 0, 159, + 131, 0, 0, 0, 0, 301, 302, 0, 0, 0, + 0, 0, 0, 0, 0, 54, 0, 0, 268, 289, + 288, 291, 292, 293, 294, 0, 0, 96, 290, 295, + 296, 297, 0, 0, 0, 0, 282, 0, 309, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 279, 280, + 0, 0, 0, 0, 323, 0, 281, 0, 0, 276, + 277, 278, 283, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 210, 0, 0, 321, + 0, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 175, 0, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 92, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 160, 177, 194, 94, 0, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 311, 322, 317, 318, 315, 316, 314, + 313, 312, 324, 303, 304, 305, 306, 308, 0, 319, + 320, 307, 82, 89, 121, 191, 148, 106, 178, 139, + 0, 0, 0, 0, 0, 0, 0, 0, 103, 0, + 0, 0, 0, 0, 120, 0, 122, 0, 0, 159, + 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 80, 0, + 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 532, 531, 541, 542, 534, 535, + 536, 537, 538, 539, 540, 533, 0, 0, 543, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 210, 0, 0, 0, + 0, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 175, 0, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 92, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 160, 177, 194, 94, 0, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 82, 89, 121, 191, 148, 106, 178, 139, + 0, 0, 0, 520, 0, 0, 0, 0, 103, 0, + 0, 0, 0, 0, 120, 0, 122, 0, 0, 159, + 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 80, 0, + 522, 0, 0, 0, 0, 0, 0, 96, 0, 0, + 0, 0, 0, 517, 516, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 518, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 210, 0, 0, 0, + 0, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 
0, 99, 0, 153, 141, + 175, 0, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 92, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 160, 177, 194, 94, 0, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 139, 0, 82, 89, 121, 191, 148, 106, 178, 103, + 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, + 159, 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 80, - 0, 0, 0, 0, 0, 0, 0, 0, 95, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 527, 526, 536, 537, 529, - 530, 531, 532, 533, 534, 535, 528, 0, 0, 538, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 108, 0, 0, 0, 209, 0, 0, - 0, 0, 145, 0, 161, 110, 118, 83, 89, 0, - 109, 136, 150, 154, 0, 0, 0, 98, 0, 152, - 140, 174, 0, 141, 151, 122, 166, 146, 173, 181, - 182, 163, 180, 189, 84, 162, 172, 96, 155, 86, - 170, 160, 128, 114, 115, 85, 0, 149, 101, 106, - 100, 137, 167, 168, 99, 192, 90, 179, 88, 91, - 178, 135, 165, 171, 129, 126, 87, 169, 127, 125, - 117, 104, 111, 143, 124, 144, 112, 132, 131, 133, - 0, 0, 0, 159, 176, 193, 93, 0, 164, 183, - 184, 185, 186, 187, 188, 0, 0, 94, 107, 103, - 142, 134, 92, 113, 156, 116, 123, 148, 191, 139, - 153, 97, 175, 157, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 138, 0, 0, 0, 515, 0, 0, - 0, 82, 102, 120, 190, 147, 105, 177, 119, 0, - 121, 0, 0, 158, 130, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 80, 0, 517, 0, 0, 0, 0, 0, - 0, 95, 0, 0, 0, 0, 0, 512, 511, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 513, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 108, 0, 0, 0, - 209, 0, 0, 0, 0, 145, 0, 161, 110, 118, - 83, 89, 0, 109, 136, 150, 154, 0, 0, 0, - 98, 0, 152, 140, 174, 0, 141, 151, 122, 166, - 146, 173, 181, 182, 163, 180, 189, 84, 162, 172, - 96, 155, 86, 170, 160, 128, 114, 115, 85, 0, - 149, 101, 106, 100, 137, 167, 168, 99, 192, 90, - 179, 88, 91, 178, 135, 165, 171, 129, 126, 87, - 169, 127, 125, 117, 104, 111, 143, 124, 144, 112, - 132, 131, 133, 0, 0, 0, 159, 176, 193, 93, - 0, 164, 183, 184, 185, 186, 187, 188, 0, 0, - 94, 107, 103, 142, 134, 92, 113, 156, 116, 123, - 148, 191, 139, 153, 97, 175, 157, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 138, 0, 0, 0, - 0, 0, 0, 0, 82, 102, 120, 190, 147, 105, - 177, 119, 0, 121, 0, 0, 158, 130, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 80, 0, 0, 0, 0, - 0, 0, 0, 0, 95, 0, 0, 0, 0, 0, - 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 108, - 76, 77, 0, 73, 0, 0, 0, 78, 145, 0, - 161, 110, 118, 83, 89, 0, 109, 136, 150, 154, - 0, 0, 0, 98, 0, 152, 140, 174, 0, 141, - 151, 122, 166, 146, 173, 181, 182, 163, 180, 189, - 84, 162, 172, 96, 155, 86, 170, 160, 128, 114, - 115, 85, 0, 149, 101, 106, 100, 137, 167, 168, - 99, 192, 90, 179, 88, 91, 178, 135, 165, 171, - 129, 126, 87, 169, 127, 125, 117, 104, 111, 143, - 124, 144, 112, 132, 131, 133, 0, 0, 0, 159, - 176, 193, 93, 0, 164, 183, 184, 185, 186, 187, - 188, 0, 0, 94, 107, 103, 142, 134, 92, 113, - 156, 116, 123, 148, 191, 139, 153, 97, 175, 157, - 0, 75, 0, 0, 0, 0, 0, 0, 0, 138, - 0, 0, 0, 824, 0, 0, 0, 82, 102, 120, - 190, 147, 105, 177, 119, 0, 121, 0, 0, 158, - 130, 0, 0, 0, 0, 0, 0, 0, 0, 
0, - 0, 0, 0, 0, 0, 0, 0, 0, 207, 0, - 826, 0, 0, 0, 0, 0, 0, 95, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 108, 0, 0, 0, 209, 0, 0, 0, - 0, 145, 0, 161, 110, 118, 83, 89, 0, 109, - 136, 150, 154, 0, 0, 0, 98, 0, 152, 140, - 174, 0, 141, 151, 122, 166, 146, 173, 181, 182, - 163, 180, 189, 84, 162, 172, 96, 155, 86, 170, - 160, 128, 114, 115, 85, 0, 149, 101, 106, 100, - 137, 167, 168, 99, 192, 90, 179, 88, 91, 178, - 135, 165, 171, 129, 126, 87, 169, 127, 125, 117, - 104, 111, 143, 124, 144, 112, 132, 131, 133, 0, - 0, 0, 159, 176, 193, 93, 0, 164, 183, 184, - 185, 186, 187, 188, 0, 0, 94, 107, 103, 142, - 134, 92, 113, 156, 116, 123, 148, 191, 139, 153, - 97, 175, 157, 0, 0, 0, 24, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 138, 0, - 82, 0, 120, 190, 147, 105, 177, 102, 0, 0, - 0, 0, 0, 119, 0, 121, 0, 0, 158, 130, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 54, 0, 0, 80, 0, 0, - 0, 0, 0, 0, 0, 0, 95, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 108, 0, 0, 0, 209, 0, 0, 0, 0, - 145, 0, 161, 110, 118, 83, 89, 0, 109, 136, - 150, 154, 0, 0, 0, 98, 0, 152, 140, 174, - 0, 141, 151, 122, 166, 146, 173, 181, 182, 163, - 180, 189, 84, 162, 172, 96, 155, 86, 170, 160, - 128, 114, 115, 85, 0, 149, 101, 106, 100, 137, - 167, 168, 99, 192, 90, 179, 88, 91, 178, 135, - 165, 171, 129, 126, 87, 169, 127, 125, 117, 104, - 111, 143, 124, 144, 112, 132, 131, 133, 0, 0, - 0, 159, 176, 193, 93, 0, 164, 183, 184, 185, - 186, 187, 188, 0, 0, 94, 107, 103, 142, 134, - 92, 113, 156, 116, 123, 148, 191, 139, 153, 97, - 175, 157, 0, 0, 0, 24, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 138, 0, 82, - 0, 120, 190, 147, 105, 177, 102, 0, 0, 0, - 0, 0, 119, 0, 121, 0, 0, 158, 130, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 54, 0, 0, 207, 0, 0, 0, - 0, 0, 0, 0, 0, 95, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 108, 0, 0, 0, 209, 0, 0, 0, 0, 145, - 0, 161, 110, 118, 83, 89, 0, 109, 136, 150, - 154, 0, 0, 0, 98, 0, 152, 140, 174, 0, - 141, 151, 122, 166, 146, 173, 181, 182, 163, 180, - 189, 84, 162, 172, 96, 155, 86, 170, 160, 128, - 114, 115, 85, 0, 149, 101, 106, 100, 137, 167, - 168, 99, 192, 90, 179, 88, 91, 178, 135, 165, - 171, 129, 126, 87, 169, 127, 125, 117, 104, 111, - 143, 124, 144, 112, 132, 131, 133, 0, 0, 0, - 159, 176, 193, 93, 0, 164, 183, 184, 185, 186, - 187, 188, 0, 0, 94, 107, 103, 142, 134, 92, - 113, 156, 116, 123, 148, 191, 139, 153, 97, 175, - 157, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 138, 0, 0, 0, 824, 0, 0, 0, 82, 102, - 120, 190, 147, 105, 177, 119, 0, 121, 0, 0, - 158, 130, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 207, - 0, 826, 0, 0, 0, 0, 0, 0, 95, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 108, 0, 0, 0, 209, 0, 0, - 0, 0, 145, 0, 161, 110, 118, 83, 89, 0, - 109, 136, 150, 154, 0, 0, 0, 98, 0, 152, - 140, 174, 0, 822, 151, 122, 166, 146, 173, 181, - 182, 163, 180, 189, 84, 162, 172, 96, 155, 86, - 170, 160, 128, 114, 115, 85, 0, 149, 101, 106, - 100, 137, 167, 168, 99, 192, 90, 179, 88, 91, - 178, 135, 165, 171, 129, 126, 87, 169, 127, 125, - 117, 104, 111, 143, 124, 144, 112, 132, 
131, 133, - 0, 0, 0, 159, 176, 193, 93, 0, 164, 183, - 184, 185, 186, 187, 188, 0, 0, 94, 107, 103, - 142, 134, 92, 113, 156, 116, 123, 148, 191, 139, - 153, 97, 175, 157, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 138, 0, 0, 0, 0, 0, 0, - 0, 82, 102, 120, 190, 147, 105, 177, 119, 0, - 121, 0, 0, 158, 130, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, + 0, 0, 0, 0, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 80, 0, 0, 724, 0, 0, 725, 0, - 0, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 109, 76, 77, 0, 73, 0, 0, + 0, 78, 146, 0, 162, 111, 119, 83, 90, 0, + 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, + 141, 175, 0, 142, 152, 123, 167, 147, 174, 182, + 183, 164, 181, 190, 84, 163, 173, 97, 156, 86, + 171, 161, 129, 115, 116, 85, 0, 150, 102, 107, + 101, 138, 168, 169, 100, 193, 91, 180, 88, 92, + 179, 136, 166, 172, 130, 127, 87, 170, 128, 126, + 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, + 0, 0, 0, 160, 177, 194, 94, 0, 165, 184, + 185, 186, 187, 188, 189, 0, 0, 95, 108, 104, + 143, 135, 93, 114, 157, 117, 124, 149, 192, 140, + 154, 98, 176, 158, 0, 75, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 82, 89, 121, 191, 148, 106, 178, + 139, 0, 0, 0, 843, 0, 0, 0, 0, 103, + 0, 0, 0, 0, 0, 120, 0, 122, 0, 0, + 159, 131, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 208, + 0, 845, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 108, 0, 0, 0, - 209, 0, 0, 0, 0, 145, 0, 161, 110, 118, - 83, 89, 0, 109, 136, 150, 154, 0, 0, 0, - 98, 0, 152, 140, 174, 0, 141, 151, 122, 166, - 146, 173, 181, 182, 163, 180, 189, 84, 162, 172, - 96, 155, 86, 170, 160, 128, 114, 115, 85, 0, - 149, 101, 106, 100, 137, 167, 168, 99, 192, 90, - 179, 88, 91, 178, 135, 165, 171, 129, 126, 87, - 169, 127, 125, 117, 104, 111, 143, 124, 144, 112, - 132, 131, 133, 0, 0, 0, 159, 176, 193, 93, - 0, 164, 183, 184, 185, 186, 187, 188, 0, 0, - 94, 107, 103, 142, 134, 92, 113, 156, 116, 123, - 148, 191, 139, 153, 97, 175, 157, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 138, 0, 82, 0, 120, 190, 147, 105, - 177, 102, 0, 617, 0, 0, 0, 119, 0, 121, - 0, 0, 158, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 80, 0, 616, 0, 0, 0, 0, 0, 0, - 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 109, 0, 0, 0, 210, 0, 0, + 0, 0, 146, 0, 162, 111, 119, 83, 90, 0, + 110, 137, 151, 155, 0, 0, 0, 99, 0, 153, + 141, 175, 0, 142, 152, 123, 167, 147, 174, 182, + 183, 164, 181, 190, 84, 163, 173, 97, 156, 86, + 171, 161, 129, 115, 116, 85, 0, 150, 102, 107, + 101, 138, 168, 169, 100, 193, 91, 180, 88, 92, + 179, 136, 166, 172, 130, 127, 87, 170, 128, 126, + 118, 105, 112, 144, 125, 145, 113, 133, 132, 134, + 0, 0, 0, 160, 177, 194, 94, 0, 165, 184, + 185, 186, 187, 188, 189, 0, 0, 95, 108, 104, + 143, 135, 93, 114, 157, 117, 124, 149, 192, 140, + 154, 98, 176, 158, 0, 0, 0, 0, 0, 24, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 139, 0, 82, 89, 121, 191, 148, 106, 178, + 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, + 0, 159, 131, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 54, 0, 0, + 80, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 108, 0, 0, 0, 209, - 0, 0, 0, 0, 145, 0, 161, 110, 118, 83, - 89, 0, 109, 136, 150, 154, 0, 0, 0, 98, - 0, 152, 140, 174, 0, 141, 151, 122, 166, 146, - 173, 181, 182, 163, 180, 189, 84, 162, 172, 96, - 155, 86, 170, 160, 128, 114, 115, 85, 0, 149, - 101, 106, 100, 137, 167, 168, 99, 192, 
90, 179, - 88, 91, 178, 135, 165, 171, 129, 126, 87, 169, - 127, 125, 117, 104, 111, 143, 124, 144, 112, 132, - 131, 133, 0, 0, 0, 159, 176, 193, 93, 0, - 164, 183, 184, 185, 186, 187, 188, 0, 0, 94, - 107, 103, 142, 134, 92, 113, 156, 116, 123, 148, - 191, 139, 153, 97, 175, 157, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 138, 0, 0, 0, 0, - 0, 0, 0, 82, 102, 120, 190, 147, 105, 177, - 119, 0, 121, 0, 0, 158, 130, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 54, 0, 0, 207, 0, 0, 0, 0, 0, - 0, 0, 0, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 108, 0, - 0, 0, 209, 0, 0, 0, 0, 145, 0, 161, - 110, 118, 83, 89, 0, 109, 136, 150, 154, 0, - 0, 0, 98, 0, 152, 140, 174, 0, 141, 151, - 122, 166, 146, 173, 181, 182, 163, 180, 189, 84, - 162, 172, 96, 155, 86, 170, 160, 128, 114, 115, - 85, 0, 149, 101, 106, 100, 137, 167, 168, 99, - 192, 90, 179, 88, 91, 178, 135, 165, 171, 129, - 126, 87, 169, 127, 125, 117, 104, 111, 143, 124, - 144, 112, 132, 131, 133, 0, 0, 0, 159, 176, - 193, 93, 0, 164, 183, 184, 185, 186, 187, 188, - 0, 0, 94, 107, 103, 142, 134, 92, 113, 156, - 116, 123, 148, 191, 139, 153, 97, 175, 157, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 138, 0, - 0, 0, 0, 0, 0, 0, 82, 102, 120, 190, - 147, 105, 177, 119, 0, 121, 0, 0, 158, 130, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 207, 0, 826, - 0, 0, 0, 0, 0, 0, 95, 0, 0, 0, + 0, 0, 0, 0, 109, 0, 0, 0, 210, 0, + 0, 0, 0, 146, 0, 162, 111, 119, 83, 90, + 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, + 153, 141, 175, 0, 142, 152, 123, 167, 147, 174, + 182, 183, 164, 181, 190, 84, 163, 173, 97, 156, + 86, 171, 161, 129, 115, 116, 85, 0, 150, 102, + 107, 101, 138, 168, 169, 100, 193, 91, 180, 88, + 92, 179, 136, 166, 172, 130, 127, 87, 170, 128, + 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, + 134, 0, 0, 0, 160, 177, 194, 94, 0, 165, + 184, 185, 186, 187, 188, 189, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 157, 117, 124, 149, 192, + 140, 154, 98, 176, 158, 0, 0, 0, 0, 0, + 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 139, 0, 82, 89, 121, 191, 148, 106, + 178, 103, 0, 0, 0, 0, 0, 120, 0, 122, + 0, 0, 159, 131, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, + 0, 208, 0, 0, 0, 0, 0, 0, 0, 0, + 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 109, 0, 0, 0, 210, + 0, 0, 0, 0, 146, 0, 162, 111, 119, 83, + 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, + 0, 153, 141, 175, 0, 142, 152, 123, 167, 147, + 174, 182, 183, 164, 181, 190, 84, 163, 173, 97, + 156, 86, 171, 161, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 168, 169, 100, 193, 91, 180, + 88, 92, 179, 136, 166, 172, 130, 127, 87, 170, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 0, 0, 160, 177, 194, 94, 0, + 165, 184, 185, 186, 187, 188, 189, 0, 0, 95, + 108, 104, 143, 135, 93, 114, 157, 117, 124, 149, + 192, 140, 154, 98, 176, 158, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 82, 89, 121, 191, 148, + 106, 178, 139, 0, 0, 0, 843, 0, 0, 0, + 0, 103, 0, 0, 0, 0, 0, 120, 0, 122, + 0, 0, 159, 131, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 208, 0, 845, 0, 0, 0, 0, 0, 0, + 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 108, 0, 0, 0, 209, 0, 0, 0, 0, - 145, 0, 161, 110, 118, 83, 89, 0, 109, 136, - 150, 154, 0, 0, 0, 98, 0, 152, 140, 174, - 0, 141, 151, 122, 166, 146, 173, 181, 182, 163, - 180, 189, 84, 162, 172, 96, 155, 
86, 170, 160, - 128, 114, 115, 85, 0, 149, 101, 106, 100, 137, - 167, 168, 99, 192, 90, 179, 88, 91, 178, 135, - 165, 171, 129, 126, 87, 169, 127, 125, 117, 104, - 111, 143, 124, 144, 112, 132, 131, 133, 0, 0, - 0, 159, 176, 193, 93, 0, 164, 183, 184, 185, - 186, 187, 188, 0, 0, 94, 107, 103, 142, 134, - 92, 113, 156, 116, 123, 148, 191, 139, 153, 97, - 175, 157, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 138, 0, 0, 0, 0, 0, 0, 0, 82, - 102, 120, 190, 147, 105, 177, 119, 0, 121, 0, - 0, 158, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 267, 0, 789, 0, 0, 0, 0, 0, 0, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 109, 0, 0, 0, 210, + 0, 0, 0, 0, 146, 0, 162, 111, 119, 83, + 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, + 0, 153, 141, 175, 0, 841, 152, 123, 167, 147, + 174, 182, 183, 164, 181, 190, 84, 163, 173, 97, + 156, 86, 171, 161, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 168, 169, 100, 193, 91, 180, + 88, 92, 179, 136, 166, 172, 130, 127, 87, 170, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 0, 0, 160, 177, 194, 94, 0, + 165, 184, 185, 186, 187, 188, 189, 0, 0, 95, + 108, 104, 143, 135, 93, 114, 157, 117, 124, 149, + 192, 140, 154, 98, 176, 158, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 139, 0, 82, 89, 121, 191, 148, + 106, 178, 103, 0, 0, 0, 0, 0, 120, 0, + 122, 0, 0, 159, 131, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 80, 0, 0, 740, 0, 0, 741, 0, + 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 108, 0, 0, 0, 209, 0, - 0, 0, 0, 145, 0, 161, 110, 118, 83, 89, - 0, 109, 136, 150, 154, 0, 0, 0, 98, 0, - 152, 140, 174, 0, 141, 151, 122, 166, 146, 173, - 181, 182, 163, 180, 189, 84, 162, 172, 96, 155, - 86, 170, 160, 128, 114, 115, 85, 0, 149, 101, - 106, 100, 137, 167, 168, 99, 192, 90, 179, 88, - 91, 178, 135, 165, 171, 129, 126, 87, 169, 127, - 125, 117, 104, 111, 143, 124, 144, 112, 132, 131, - 133, 0, 0, 0, 159, 176, 193, 93, 0, 164, - 183, 184, 185, 186, 187, 188, 0, 0, 94, 107, - 103, 142, 134, 92, 113, 156, 116, 123, 148, 191, - 139, 153, 97, 175, 157, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 138, 0, 0, 0, 0, 0, - 0, 0, 82, 102, 120, 190, 147, 105, 177, 119, - 0, 121, 0, 0, 158, 130, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 109, 0, 0, 0, + 210, 0, 0, 0, 0, 146, 0, 162, 111, 119, + 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, + 99, 0, 153, 141, 175, 0, 142, 152, 123, 167, + 147, 174, 182, 183, 164, 181, 190, 84, 163, 173, + 97, 156, 86, 171, 161, 129, 115, 116, 85, 0, + 150, 102, 107, 101, 138, 168, 169, 100, 193, 91, + 180, 88, 92, 179, 136, 166, 172, 130, 127, 87, + 170, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 0, 0, 160, 177, 194, 94, + 0, 165, 184, 185, 186, 187, 188, 189, 0, 0, + 95, 108, 104, 143, 135, 93, 114, 157, 117, 124, + 149, 192, 140, 154, 98, 176, 158, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 139, 0, 82, 89, 121, 191, + 148, 106, 178, 103, 0, 631, 0, 0, 0, 120, + 0, 122, 0, 0, 159, 131, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 80, 0, 630, 0, 0, 0, 0, + 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 267, 0, 785, 0, 0, 0, 0, - 0, 0, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 108, 0, 0, - 0, 209, 0, 0, 0, 0, 145, 0, 161, 110, - 118, 83, 89, 0, 109, 136, 150, 154, 0, 0, - 0, 98, 0, 152, 140, 
174, 0, 141, 151, 122, - 166, 146, 173, 181, 182, 163, 180, 189, 84, 162, - 172, 96, 155, 86, 170, 160, 128, 114, 115, 85, - 0, 149, 101, 106, 100, 137, 167, 168, 99, 192, - 90, 179, 88, 91, 178, 135, 165, 171, 129, 126, - 87, 169, 127, 125, 117, 104, 111, 143, 124, 144, - 112, 132, 131, 133, 0, 0, 0, 159, 176, 193, - 93, 0, 164, 183, 184, 185, 186, 187, 188, 0, - 0, 94, 107, 103, 142, 134, 92, 113, 156, 116, - 123, 148, 191, 139, 153, 97, 175, 157, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 138, 0, 0, - 0, 0, 0, 0, 0, 82, 102, 120, 190, 147, - 105, 177, 119, 0, 121, 0, 0, 158, 130, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 80, 0, 517, 0, - 0, 0, 0, 0, 0, 95, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 108, 0, 0, 0, 209, 0, 0, 0, 0, 145, - 0, 161, 110, 118, 83, 89, 0, 109, 136, 150, - 154, 0, 0, 0, 98, 0, 152, 140, 174, 0, - 141, 151, 122, 166, 146, 173, 181, 182, 163, 180, - 189, 84, 162, 172, 96, 155, 86, 170, 160, 128, - 114, 115, 85, 0, 149, 101, 106, 100, 137, 167, - 168, 99, 192, 90, 179, 88, 91, 178, 135, 165, - 171, 129, 126, 87, 169, 127, 125, 117, 104, 111, - 143, 124, 144, 112, 132, 131, 133, 0, 0, 0, - 159, 176, 193, 93, 0, 164, 183, 184, 185, 186, - 187, 188, 0, 0, 94, 107, 103, 142, 134, 92, - 113, 156, 116, 123, 148, 191, 139, 153, 97, 175, - 157, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 138, 82, 0, - 120, 190, 147, 105, 177, 590, 102, 0, 0, 0, - 0, 0, 119, 0, 121, 0, 0, 158, 130, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 207, 0, 0, 0, - 0, 0, 0, 0, 0, 95, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 108, 0, 0, 0, 209, 0, 0, 0, 0, 145, - 0, 161, 110, 118, 83, 89, 0, 109, 136, 150, - 154, 0, 0, 0, 98, 0, 152, 140, 174, 0, - 141, 151, 122, 166, 146, 173, 181, 182, 163, 180, - 189, 84, 162, 172, 96, 155, 86, 170, 160, 128, - 114, 115, 85, 0, 149, 101, 106, 100, 137, 167, - 168, 99, 192, 90, 179, 88, 91, 178, 135, 165, - 171, 129, 126, 87, 169, 127, 125, 117, 104, 111, - 143, 124, 144, 112, 132, 131, 133, 0, 0, 0, - 159, 176, 193, 93, 0, 164, 183, 184, 185, 186, - 187, 188, 0, 0, 94, 107, 103, 142, 134, 92, - 113, 156, 116, 123, 148, 191, 139, 153, 97, 175, - 157, 0, 0, 324, 0, 0, 0, 0, 0, 0, - 138, 0, 0, 0, 0, 0, 0, 0, 82, 102, - 120, 190, 147, 105, 177, 119, 0, 121, 0, 0, - 158, 130, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 207, - 0, 0, 0, 0, 0, 0, 0, 0, 95, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 108, 0, 0, 0, 209, 0, 0, - 0, 0, 145, 0, 161, 110, 118, 83, 89, 0, - 109, 136, 150, 154, 0, 0, 0, 98, 0, 152, - 140, 174, 0, 141, 151, 122, 166, 146, 173, 181, - 182, 163, 180, 189, 84, 162, 172, 96, 155, 86, - 170, 160, 128, 114, 115, 85, 0, 149, 101, 106, - 100, 137, 167, 168, 99, 192, 90, 179, 88, 91, - 178, 135, 165, 171, 129, 126, 87, 169, 127, 125, - 117, 104, 111, 143, 124, 144, 112, 132, 131, 133, - 0, 0, 0, 159, 176, 193, 93, 0, 164, 183, - 184, 185, 186, 187, 188, 0, 0, 94, 107, 103, - 142, 134, 92, 113, 156, 116, 123, 148, 191, 139, - 153, 97, 175, 157, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 138, 0, 0, 0, 0, 0, 0, - 0, 82, 102, 120, 190, 147, 105, 177, 119, 0, - 121, 0, 0, 158, 130, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 207, 0, 0, 0, 0, 0, 0, 0, - 
0, 95, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 108, 0, 204, 0, - 209, 0, 0, 0, 0, 145, 0, 161, 110, 118, - 83, 89, 0, 109, 136, 150, 154, 0, 0, 0, - 98, 0, 152, 140, 174, 0, 141, 151, 122, 166, - 146, 173, 181, 182, 163, 180, 189, 84, 162, 172, - 96, 155, 86, 170, 160, 128, 114, 115, 85, 0, - 149, 101, 106, 100, 137, 167, 168, 99, 192, 90, - 179, 88, 91, 178, 135, 165, 171, 129, 126, 87, - 169, 127, 125, 117, 104, 111, 143, 124, 144, 112, - 132, 131, 133, 0, 0, 0, 159, 176, 193, 93, - 0, 164, 183, 184, 185, 186, 187, 188, 0, 0, - 94, 107, 103, 142, 134, 92, 113, 156, 116, 123, - 148, 191, 139, 153, 97, 175, 157, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 138, 0, 0, 0, - 0, 0, 0, 0, 82, 102, 120, 190, 147, 105, - 177, 119, 0, 121, 0, 0, 158, 130, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 80, 0, 0, 0, 0, - 0, 0, 0, 0, 95, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 108, - 0, 0, 0, 209, 0, 0, 0, 0, 145, 0, - 161, 110, 118, 83, 89, 0, 109, 136, 150, 154, - 0, 0, 0, 98, 0, 152, 140, 174, 0, 141, - 151, 122, 166, 146, 173, 181, 182, 163, 180, 189, - 84, 162, 172, 96, 155, 86, 170, 160, 128, 114, - 115, 85, 0, 149, 101, 106, 100, 137, 167, 168, - 99, 192, 90, 179, 88, 91, 178, 135, 165, 171, - 129, 126, 87, 169, 127, 125, 117, 104, 111, 143, - 124, 144, 112, 132, 131, 133, 0, 0, 0, 159, - 176, 193, 93, 0, 164, 183, 184, 185, 186, 187, - 188, 0, 0, 94, 107, 103, 142, 134, 92, 113, - 156, 116, 123, 148, 191, 139, 153, 97, 175, 157, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, - 0, 0, 0, 0, 0, 0, 0, 82, 102, 120, - 190, 147, 105, 177, 119, 0, 121, 0, 0, 158, - 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 207, 0, - 0, 0, 0, 0, 0, 0, 0, 95, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 108, 0, 0, 0, 209, 0, 0, 0, - 0, 145, 0, 161, 110, 118, 83, 89, 0, 109, - 136, 150, 154, 0, 0, 0, 98, 0, 152, 140, - 174, 0, 141, 151, 122, 166, 146, 173, 181, 182, - 163, 180, 189, 84, 162, 172, 96, 155, 86, 170, - 160, 128, 114, 115, 85, 0, 149, 101, 106, 100, - 137, 167, 168, 99, 192, 90, 179, 88, 91, 178, - 135, 165, 171, 129, 126, 87, 169, 127, 125, 117, - 104, 111, 143, 124, 144, 112, 132, 131, 133, 0, - 0, 0, 159, 176, 193, 93, 0, 164, 183, 184, - 185, 186, 187, 188, 0, 0, 94, 107, 103, 142, - 134, 92, 113, 156, 116, 123, 148, 191, 139, 153, - 97, 175, 157, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 138, 0, 0, 0, 0, 0, 0, 0, - 82, 102, 120, 190, 147, 105, 177, 119, 0, 121, - 0, 0, 158, 130, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 267, 0, 0, 0, 0, 0, 0, 0, 0, - 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 108, 0, 0, 0, 209, - 0, 0, 0, 0, 145, 0, 161, 110, 118, 83, - 89, 0, 109, 136, 150, 154, 0, 0, 0, 98, - 0, 152, 140, 174, 0, 141, 151, 122, 166, 146, - 173, 181, 182, 163, 180, 189, 84, 162, 172, 96, - 155, 86, 170, 160, 128, 114, 115, 85, 0, 149, - 101, 106, 100, 137, 167, 168, 99, 192, 90, 179, - 88, 91, 178, 135, 165, 171, 129, 126, 87, 169, - 127, 125, 117, 104, 111, 143, 124, 144, 112, 132, - 131, 133, 0, 0, 0, 159, 176, 193, 93, 0, - 164, 183, 184, 185, 186, 187, 188, 0, 0, 94, - 107, 103, 142, 
134, 92, 113, 156, 116, 123, 148, - 191, 139, 153, 97, 175, 157, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 82, 0, 120, 190, 147, 105, 177, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 109, 0, 0, + 0, 210, 0, 0, 0, 0, 146, 0, 162, 111, + 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, + 0, 99, 0, 153, 141, 175, 0, 142, 152, 123, + 167, 147, 174, 182, 183, 164, 181, 190, 84, 163, + 173, 97, 156, 86, 171, 161, 129, 115, 116, 85, + 0, 150, 102, 107, 101, 138, 168, 169, 100, 193, + 91, 180, 88, 92, 179, 136, 166, 172, 130, 127, + 87, 170, 128, 126, 118, 105, 112, 144, 125, 145, + 113, 133, 132, 134, 0, 0, 0, 160, 177, 194, + 94, 0, 165, 184, 185, 186, 187, 188, 189, 0, + 0, 95, 108, 104, 143, 135, 93, 114, 157, 117, + 124, 149, 192, 140, 154, 98, 176, 158, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 139, 0, 82, 89, 121, + 191, 148, 106, 178, 103, 0, 0, 0, 0, 0, + 120, 0, 122, 0, 0, 159, 131, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 54, 0, 0, 208, 0, 0, 0, 0, 0, + 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, + 0, 0, 210, 0, 0, 0, 0, 146, 0, 162, + 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, + 0, 0, 99, 0, 153, 141, 175, 0, 142, 152, + 123, 167, 147, 174, 182, 183, 164, 181, 190, 84, + 163, 173, 97, 156, 86, 171, 161, 129, 115, 116, + 85, 0, 150, 102, 107, 101, 138, 168, 169, 100, + 193, 91, 180, 88, 92, 179, 136, 166, 172, 130, + 127, 87, 170, 128, 126, 118, 105, 112, 144, 125, + 145, 113, 133, 132, 134, 0, 0, 0, 160, 177, + 194, 94, 0, 165, 184, 185, 186, 187, 188, 189, + 0, 0, 95, 108, 104, 143, 135, 93, 114, 157, + 117, 124, 149, 192, 140, 154, 98, 176, 158, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 139, 0, 82, 89, + 121, 191, 148, 106, 178, 103, 0, 0, 0, 0, + 0, 120, 0, 122, 0, 0, 159, 131, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 208, 0, 845, 0, 0, + 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, + 0, 0, 0, 210, 0, 0, 0, 0, 146, 0, + 162, 111, 119, 83, 90, 0, 110, 137, 151, 155, + 0, 0, 0, 99, 0, 153, 141, 175, 0, 142, + 152, 123, 167, 147, 174, 182, 183, 164, 181, 190, + 84, 163, 173, 97, 156, 86, 171, 161, 129, 115, + 116, 85, 0, 150, 102, 107, 101, 138, 168, 169, + 100, 193, 91, 180, 88, 92, 179, 136, 166, 172, + 130, 127, 87, 170, 128, 126, 118, 105, 112, 144, + 125, 145, 113, 133, 132, 134, 0, 0, 0, 160, + 177, 194, 94, 0, 165, 184, 185, 186, 187, 188, + 189, 0, 0, 95, 108, 104, 143, 135, 93, 114, + 157, 117, 124, 149, 192, 140, 154, 98, 176, 158, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 139, 0, 82, + 89, 121, 191, 148, 106, 178, 103, 0, 0, 0, + 0, 0, 120, 0, 122, 0, 0, 159, 131, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 268, 0, 805, 0, + 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 109, 0, 0, 0, 210, 0, 0, 0, 0, 146, + 0, 162, 111, 119, 83, 90, 0, 110, 137, 151, + 155, 0, 0, 0, 99, 0, 153, 141, 175, 0, + 142, 152, 123, 167, 147, 174, 182, 183, 164, 181, + 190, 84, 163, 173, 97, 156, 86, 171, 161, 129, + 115, 116, 85, 0, 150, 102, 107, 101, 138, 168, + 169, 100, 193, 91, 180, 88, 92, 179, 136, 166, + 172, 130, 127, 87, 170, 128, 126, 
118, 105, 112, + 144, 125, 145, 113, 133, 132, 134, 0, 0, 0, + 160, 177, 194, 94, 0, 165, 184, 185, 186, 187, + 188, 189, 0, 0, 95, 108, 104, 143, 135, 93, + 114, 157, 117, 124, 149, 192, 140, 154, 98, 176, + 158, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 139, 0, + 82, 89, 121, 191, 148, 106, 178, 103, 0, 0, + 0, 0, 0, 120, 0, 122, 0, 0, 159, 131, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 268, 0, 801, + 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 109, 0, 0, 0, 210, 0, 0, 0, 0, + 146, 0, 162, 111, 119, 83, 90, 0, 110, 137, + 151, 155, 0, 0, 0, 99, 0, 153, 141, 175, + 0, 142, 152, 123, 167, 147, 174, 182, 183, 164, + 181, 190, 84, 163, 173, 97, 156, 86, 171, 161, + 129, 115, 116, 85, 0, 150, 102, 107, 101, 138, + 168, 169, 100, 193, 91, 180, 88, 92, 179, 136, + 166, 172, 130, 127, 87, 170, 128, 126, 118, 105, + 112, 144, 125, 145, 113, 133, 132, 134, 0, 0, + 0, 160, 177, 194, 94, 0, 165, 184, 185, 186, + 187, 188, 189, 0, 0, 95, 108, 104, 143, 135, + 93, 114, 157, 117, 124, 149, 192, 140, 154, 98, + 176, 158, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 139, + 0, 82, 89, 121, 191, 148, 106, 178, 103, 0, + 0, 0, 0, 0, 120, 0, 122, 0, 0, 159, + 131, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 80, 0, + 522, 0, 0, 0, 0, 0, 0, 96, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 109, 0, 0, 0, 210, 0, 0, 0, + 0, 146, 0, 162, 111, 119, 83, 90, 0, 110, + 137, 151, 155, 0, 0, 0, 99, 0, 153, 141, + 175, 0, 142, 152, 123, 167, 147, 174, 182, 183, + 164, 181, 190, 84, 163, 173, 97, 156, 86, 171, + 161, 129, 115, 116, 85, 0, 150, 102, 107, 101, + 138, 168, 169, 100, 193, 91, 180, 88, 92, 179, + 136, 166, 172, 130, 127, 87, 170, 128, 126, 118, + 105, 112, 144, 125, 145, 113, 133, 132, 134, 0, + 0, 0, 160, 177, 194, 94, 0, 165, 184, 185, + 186, 187, 188, 189, 0, 0, 95, 108, 104, 143, + 135, 93, 114, 157, 117, 124, 149, 192, 140, 154, + 98, 176, 158, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 139, 82, 89, 121, 191, 148, 106, 178, 604, + 103, 0, 0, 0, 0, 0, 120, 0, 122, 0, + 0, 159, 131, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 208, 0, 0, 0, 0, 0, 0, 0, 0, 96, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 109, 0, 0, 0, 210, 0, + 0, 0, 0, 146, 0, 162, 111, 119, 83, 90, + 0, 110, 137, 151, 155, 0, 0, 0, 99, 0, + 153, 141, 175, 0, 142, 152, 123, 167, 147, 174, + 182, 183, 164, 181, 190, 84, 163, 173, 97, 156, + 86, 171, 161, 129, 115, 116, 85, 0, 150, 102, + 107, 101, 138, 168, 169, 100, 193, 91, 180, 88, + 92, 179, 136, 166, 172, 130, 127, 87, 170, 128, + 126, 118, 105, 112, 144, 125, 145, 113, 133, 132, + 134, 0, 0, 0, 160, 177, 194, 94, 0, 165, + 184, 185, 186, 187, 188, 189, 0, 0, 95, 108, + 104, 143, 135, 93, 114, 157, 117, 124, 149, 192, + 140, 154, 98, 176, 158, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 327, 0, 0, 0, 0, + 0, 0, 139, 0, 82, 89, 121, 191, 148, 106, + 178, 103, 0, 0, 0, 0, 0, 120, 0, 122, + 0, 0, 159, 131, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 208, 0, 0, 0, 0, 0, 0, 0, 0, + 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 109, 0, 0, 0, 210, + 0, 0, 0, 0, 146, 0, 162, 111, 119, 83, + 90, 0, 110, 137, 151, 155, 0, 0, 0, 99, + 0, 153, 141, 175, 0, 142, 152, 123, 167, 147, + 174, 182, 183, 164, 181, 190, 84, 163, 173, 97, + 156, 86, 171, 161, 129, 115, 116, 85, 0, 150, + 102, 107, 101, 138, 168, 169, 100, 193, 91, 180, + 88, 92, 179, 136, 166, 172, 130, 127, 87, 170, + 128, 126, 118, 105, 112, 144, 125, 145, 113, 133, + 132, 134, 0, 0, 0, 160, 177, 194, 94, 0, + 165, 184, 185, 186, 187, 188, 189, 0, 0, 95, + 108, 104, 143, 135, 93, 114, 157, 117, 124, 149, + 192, 140, 154, 98, 176, 158, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 139, 0, 82, 89, 121, 191, 148, + 106, 178, 103, 0, 0, 0, 0, 0, 120, 0, + 122, 0, 0, 159, 131, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 208, 0, 0, 0, 0, 0, 0, 0, + 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 109, 0, 205, 0, + 210, 0, 0, 0, 0, 146, 0, 162, 111, 119, + 83, 90, 0, 110, 137, 151, 155, 0, 0, 0, + 99, 0, 153, 141, 175, 0, 142, 152, 123, 167, + 147, 174, 182, 183, 164, 181, 190, 84, 163, 173, + 97, 156, 86, 171, 161, 129, 115, 116, 85, 0, + 150, 102, 107, 101, 138, 168, 169, 100, 193, 91, + 180, 88, 92, 179, 136, 166, 172, 130, 127, 87, + 170, 128, 126, 118, 105, 112, 144, 125, 145, 113, + 133, 132, 134, 0, 0, 0, 160, 177, 194, 94, + 0, 165, 184, 185, 186, 187, 188, 189, 0, 0, + 95, 108, 104, 143, 135, 93, 114, 157, 117, 124, + 149, 192, 140, 154, 98, 176, 158, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 139, 0, 82, 89, 121, 191, + 148, 106, 178, 103, 0, 0, 0, 0, 0, 120, + 0, 122, 0, 0, 159, 131, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 80, 0, 0, 0, 0, 0, 0, + 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 109, 0, 0, + 0, 210, 0, 0, 0, 0, 146, 0, 162, 111, + 119, 83, 90, 0, 110, 137, 151, 155, 0, 0, + 0, 99, 0, 153, 141, 175, 0, 142, 152, 123, + 167, 147, 174, 182, 183, 164, 181, 190, 84, 163, + 173, 97, 156, 86, 171, 161, 129, 115, 116, 85, + 0, 150, 102, 107, 101, 138, 168, 169, 100, 193, + 91, 180, 88, 92, 179, 136, 166, 172, 130, 127, + 87, 170, 128, 126, 118, 105, 112, 144, 125, 145, + 113, 133, 132, 134, 0, 0, 0, 160, 177, 194, + 94, 0, 165, 184, 185, 186, 187, 188, 189, 0, + 0, 95, 108, 104, 143, 135, 93, 114, 157, 117, + 124, 149, 192, 140, 154, 98, 176, 158, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 139, 0, 82, 89, 121, + 191, 148, 106, 178, 103, 0, 0, 0, 0, 0, + 120, 0, 122, 0, 0, 159, 131, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 208, 0, 0, 0, 0, 0, + 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 109, 0, + 0, 0, 210, 0, 0, 0, 0, 146, 0, 162, + 111, 119, 83, 90, 0, 110, 137, 151, 155, 0, + 0, 0, 99, 0, 153, 141, 175, 0, 142, 152, + 123, 167, 147, 174, 182, 183, 164, 181, 190, 84, + 163, 173, 97, 156, 86, 171, 161, 129, 115, 116, + 85, 0, 150, 102, 107, 101, 138, 168, 169, 100, + 193, 91, 180, 88, 92, 179, 136, 166, 172, 130, + 127, 87, 170, 128, 126, 118, 105, 112, 144, 125, + 145, 113, 133, 132, 134, 0, 0, 0, 160, 177, + 194, 94, 0, 165, 184, 185, 186, 187, 188, 189, + 0, 0, 95, 108, 104, 143, 135, 93, 114, 157, + 117, 124, 149, 192, 140, 154, 98, 176, 158, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 139, 0, 82, 89, + 121, 191, 148, 106, 178, 103, 0, 0, 0, 0, + 0, 120, 0, 122, 0, 0, 159, 131, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, + 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, + 0, 0, 0, 210, 0, 0, 0, 0, 146, 0, + 162, 111, 119, 83, 90, 0, 110, 137, 151, 155, + 0, 0, 0, 99, 0, 153, 141, 175, 0, 142, + 152, 123, 167, 147, 174, 182, 183, 164, 181, 190, + 84, 163, 173, 97, 156, 86, 171, 161, 129, 115, + 116, 85, 0, 150, 102, 107, 101, 138, 168, 169, + 100, 193, 91, 180, 88, 92, 179, 136, 166, 172, + 130, 127, 87, 170, 128, 126, 118, 105, 112, 144, + 125, 145, 113, 133, 132, 134, 0, 0, 0, 160, + 177, 194, 94, 0, 165, 184, 185, 186, 187, 188, + 189, 0, 0, 95, 108, 104, 143, 135, 93, 114, + 157, 117, 124, 149, 192, 140, 154, 98, 176, 158, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 82, + 89, 121, 191, 148, 106, 178, } var yyPact = [...]int{ - 1854, -1000, -193, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 2429, -1000, -192, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 894, 922, -1000, -1000, -1000, -1000, -1000, -1000, - 218, 7888, 20, 87, -31, 11175, 86, 119, 11641, -1000, - -3, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -81, -94, - -1000, 715, -1000, -1000, -1000, -1000, -1000, 867, 892, 759, - 870, 796, -1000, 6206, 58, 58, 10942, 5462, -1000, -1000, - 207, 11641, 82, 11641, -163, 55, 55, 55, -1000, -1000, + -1000, -1000, 850, 890, -1000, -1000, -1000, -1000, -1000, -1000, + 333, 8382, 40, 121, 8, 11775, 115, 1357, 12257, -1000, + 10, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -66, -81, + -1000, 658, -1000, -1000, -1000, -1000, -1000, 843, 848, 721, + 834, 757, -1000, 6390, 71, 71, 11534, 5390, -1000, -1000, + 234, 12257, 107, 12257, -155, 67, 67, 67, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -1946,21 +2014,22 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 85, 11641, 201, -1000, 11641, 53, - 478, 53, 53, 53, 11641, -1000, 137, -1000, -1000, -1000, - 11641, 476, 819, 3374, 33, 3374, -1000, 3374, 3374, -1000, - 3374, 3, 3374, -75, 904, -1000, -1000, -1000, -1000, -50, - -1000, 3374, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 508, 832, 6705, 6705, 894, - -1000, 715, -1000, -1000, -1000, 820, -1000, -1000, 275, 911, - -1000, 7655, 134, -1000, 6705, 1528, 708, -1000, -1000, 708, - -1000, -1000, 122, -1000, -1000, 7183, 7183, 7183, 7183, 7183, - 7183, 7183, 7183, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 708, -1000, 6457, - 708, 708, 708, 708, 708, 708, 708, 708, 6705, 708, - 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, - 708, 708, 10709, 9537, 11641, 504, -1000, 686, 5201, -91, - -1000, -1000, -1000, 204, 9304, -1000, -1000, -1000, 818, -1000, + -1000, -1000, -1000, -1000, -1000, 114, 12257, 307, -1000, 12257, + 64, 471, 64, 64, 64, 
12257, -1000, 150, -1000, -1000, + -1000, 12257, 467, 794, 3286, 46, 3286, -1000, 3286, 3286, + -1000, 3286, 16, 3286, -62, 863, -1000, -1000, -1000, -1000, + -37, -1000, 3286, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 441, 803, 6891, 6891, + 850, -1000, 658, -1000, -1000, -1000, 780, -1000, -1000, 260, + 876, -1000, 8141, 149, -1000, 6891, 1820, 676, -1000, -1000, + 676, -1000, -1000, 133, -1000, -1000, 7641, 7641, 7641, 7641, + 7641, 7641, 7641, 7641, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 676, -1000, + 6641, 676, 676, 676, 676, 676, 676, 676, 676, 6891, + 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, + 676, 676, 676, 676, 676, 11293, 10087, 12257, 622, -1000, + 659, 5127, -86, -1000, -1000, -1000, 203, 9846, -1000, -1000, + -1000, 793, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -1969,207 +2038,199 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 595, 12257, + -1000, 2218, -1000, 461, 3286, 80, 459, 221, 457, 12257, + 12257, 3286, 24, 57, 113, 12257, 664, 78, 12257, 829, + 720, 12257, 455, 448, -1000, 4864, -1000, 3286, 3286, -1000, + -1000, -1000, 3286, 3286, 3286, 3286, 3286, 3286, -1000, -1000, + -1000, -1000, 3286, 3286, -1000, 872, 263, -1000, -1000, -1000, + -1000, 6891, -1000, 719, -1000, -1000, -1000, -1000, -1000, -1000, + 885, 175, 523, 147, 661, -1000, 447, 843, 441, 757, + 9605, 701, -1000, -1000, 12257, -1000, 6891, 6891, 379, -1000, + 11051, -1000, -1000, 3812, 184, 7641, 352, 231, 7641, 7641, + 7641, 7641, 7641, 7641, 7641, 7641, 7641, 7641, 7641, 7641, + 7641, 7641, 7641, 362, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 445, -1000, 658, 548, 548, 159, 159, 159, + 159, 159, 159, 159, 7891, 5890, 441, 593, 204, 6641, + 6390, 6390, 6891, 6891, 10810, 10569, 6390, 836, 210, 204, + 12498, -1000, -1000, 7391, -1000, -1000, -1000, -1000, -1000, 441, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 12016, 12016, 6390, + 6390, 6390, 6390, 35, 12257, -1000, 549, 690, -1000, -1000, + -1000, 831, 9114, 9364, 35, 600, 10087, 12257, -1000, -1000, + 4601, 659, -86, 625, -1000, -129, -91, 5640, 142, -1000, + -1000, -1000, -1000, 3023, 212, 503, 279, -53, -1000, -1000, + -1000, 691, -1000, 691, 691, 691, 691, -17, -17, -17, + -17, -1000, -1000, -1000, -1000, -1000, 705, 704, -1000, 691, + 691, 691, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 598, 11641, -1000, 2036, -1000, 471, 3374, - 67, 463, 257, 460, 11641, 11641, 3374, 12, 41, 84, - 11641, 706, 65, 11641, 856, 763, 11641, 448, 444, -1000, - 4940, -1000, 3374, 3374, -1000, -1000, -1000, 3374, 3374, 3374, - 3374, 3374, 3374, -1000, -1000, -1000, -1000, 3374, 3374, -1000, - 909, 256, -1000, -1000, -1000, -1000, 6705, -1000, 762, -1000, - -1000, -1000, -1000, -1000, -1000, 917, 177, 430, 133, 695, - -1000, 371, 867, 508, 796, 9065, 773, -1000, -1000, 11641, - -1000, 6705, 6705, 339, -1000, 10469, -1000, -1000, 3896, 168, - 7183, 351, 285, 
7183, 7183, 7183, 7183, 7183, 7183, 7183, - 7183, 7183, 7183, 7183, 7183, 7183, 7183, 7183, 396, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 442, -1000, 715, - 525, 525, 148, 148, 148, 148, 148, 148, 148, 7422, - 5710, 508, 550, 281, 6457, 6206, 6206, 6705, 6705, 10236, - 10003, 6206, 860, 227, 281, 11874, -1000, 508, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 6206, 6206, 6206, 6206, 24, - 11641, -1000, 615, 700, -1000, -1000, -1000, 858, 8599, 8832, - 24, 643, 9537, 11641, -1000, -1000, 4679, 686, -91, 678, - -1000, -130, -103, 2566, 124, -1000, -1000, -1000, -1000, 3113, - 172, 554, 316, -64, -1000, -1000, -1000, 718, -1000, 718, - 718, 718, 718, -32, -32, -32, -32, -1000, -1000, -1000, - -1000, -1000, 742, 741, -1000, 718, 718, 718, -1000, -1000, + 700, 700, 700, 693, 693, 675, -1000, 12257, 3286, 827, + 3286, -1000, 91, -1000, 12016, 12016, 12257, 12257, 128, 12257, + 12257, 656, -1000, 12257, 3286, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 739, 739, 739, 724, 724, 744, - -1000, 11641, 3374, 854, 3374, -1000, 108, -1000, 11408, 11408, - 11641, 11641, 113, 11641, 11641, 684, -1000, 11641, 3374, -1000, + 12257, 256, 12257, 12257, 204, 12257, -1000, 714, 6891, 6891, + 4338, 6891, -1000, -1000, -1000, 803, -1000, 836, 847, -1000, + 786, 785, 6390, -1000, -1000, 184, 284, -1000, -1000, 440, + -1000, -1000, -1000, -1000, 145, 676, -1000, 2206, -1000, -1000, + -1000, -1000, 352, 7641, 7641, 7641, 710, 2206, 2189, 335, + 473, 159, 261, 261, 168, 168, 168, 168, 168, 388, + 388, -1000, -1000, -1000, 441, -1000, -1000, -1000, 441, 6390, + 632, -1000, -1000, 6891, -1000, 441, 582, 582, 391, 460, + 674, 670, -1000, 144, 655, 654, 582, 6390, 225, -1000, + 6891, 441, -1000, 1936, 631, 627, 582, 441, 582, 582, + 638, 676, -1000, 12498, 10087, 10087, 10087, 10087, 10087, -1000, + 747, 745, -1000, 744, 741, 732, 12257, -1000, 584, 9114, + 143, 676, -1000, 10328, -1000, -1000, 862, 10087, 648, -1000, + -1000, 625, -86, -80, -1000, -1000, -1000, -1000, 204, -1000, + 384, 624, 2760, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 699, 442, -1000, 815, 185, 207, 436, 813, -1000, -1000, + -1000, 806, -1000, 240, -57, -1000, -1000, 366, -17, -17, + -1000, -1000, 142, 791, 142, 142, 142, 420, 420, -1000, + -1000, -1000, -1000, 365, -1000, -1000, -1000, 315, -1000, 718, + 12016, 3286, -1000, -1000, -1000, -1000, 182, 182, 230, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 11641, 288, 11641, 11641, 281, 11641, - -1000, 802, 6705, 6705, 4418, 6705, -1000, -1000, -1000, 832, - -1000, 860, 886, -1000, 812, 809, 6206, -1000, -1000, 168, - 230, -1000, -1000, 397, -1000, -1000, -1000, -1000, 132, 708, - -1000, 2052, -1000, -1000, -1000, -1000, 351, 7183, 7183, 7183, - 451, 2052, 2193, 999, 393, 148, 265, 265, 144, 144, - 144, 144, 144, 329, 329, -1000, -1000, -1000, 508, -1000, - -1000, -1000, 508, 6206, 680, -1000, -1000, 6705, -1000, 508, - 539, 539, 386, 255, 683, 682, -1000, 127, 679, 658, - 539, 6206, 242, -1000, 6705, 508, -1000, 539, 508, 539, - 539, 665, 708, -1000, 11874, 9537, 9537, 9537, 9537, 9537, - -1000, 785, 784, -1000, 777, 775, 786, 11641, -1000, 546, - 8599, 136, 708, -1000, 9770, -1000, -1000, 903, 9537, 596, - -1000, -1000, 678, -91, -95, -1000, -1000, -1000, -1000, 281, - -1000, 423, 671, 2852, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 737, 424, -1000, 843, 166, 165, 417, 842, -1000, - -1000, -1000, 823, -1000, 
262, -77, -1000, -1000, 374, -32, - -32, -1000, -1000, 124, 817, 124, 124, 124, 403, 403, - -1000, -1000, -1000, -1000, 357, -1000, -1000, -1000, 354, -1000, - 760, 11408, 3374, -1000, -1000, -1000, -1000, 250, 250, 219, + 34, 550, -1000, -1000, -1000, 21, 20, 73, -1000, 3286, + -1000, 263, -1000, 411, 6891, -1000, -1000, -1000, 761, 204, + 204, 141, -1000, -1000, 12257, -1000, -1000, -1000, -1000, 647, + -1000, -1000, -1000, 3549, 6390, -1000, 710, 2206, 1951, -1000, + 7641, 7641, -1000, -1000, 582, 6390, 204, -1000, -1000, -1000, + 43, 362, 43, 7641, 7641, 7641, 7641, 4338, 7641, 7641, + 7641, 7641, -169, 652, 209, -1000, 6891, 434, -1000, -1000, + 7641, 7641, -1000, -1000, -1000, -1000, 715, 12498, 676, -1000, + 8873, 12016, 651, -1000, 202, 690, 698, 713, 952, -1000, + -1000, -1000, -1000, 743, -1000, 731, -1000, -1000, -1000, -1000, + -1000, 106, 102, 94, 12016, -1000, 850, 6891, 648, -1000, + -1000, -1000, -135, -141, -1000, -1000, -1000, 3023, -1000, 3023, + 12016, 51, -1000, 436, 436, -1000, -1000, -1000, 694, 712, + 7641, -1000, -1000, -1000, 491, 142, 142, -1000, 208, -1000, + -1000, -1000, 572, -1000, 569, 621, 564, 12257, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 22, 735, -1000, -1000, -1000, 11, 10, 64, -1000, - 3374, -1000, 256, -1000, 391, 6705, -1000, -1000, -1000, 799, - 281, 281, 126, -1000, -1000, 11641, -1000, -1000, -1000, -1000, - 623, -1000, -1000, -1000, 3635, 6206, -1000, 451, 2052, 1357, - -1000, 7183, 7183, -1000, -1000, 539, 6206, 281, -1000, -1000, - -1000, 72, 396, 72, 7183, 7183, 7183, 7183, 4418, 7183, - 7183, 7183, 7183, -173, 642, 211, -1000, 6705, 313, -1000, - -1000, -1000, -1000, -1000, 752, 11874, 708, -1000, 8360, 11408, - 651, -1000, 200, 700, 734, 750, 711, -1000, -1000, -1000, - -1000, 781, -1000, 778, -1000, -1000, -1000, -1000, -1000, 80, - 79, 78, 11408, -1000, 894, 6705, 596, -1000, -1000, -1000, - -138, -144, -1000, -1000, -1000, 3113, -1000, 3113, 11408, 40, - -1000, 417, 417, -1000, -1000, -1000, 731, 749, 71, -1000, - -1000, -1000, 540, 124, 124, -1000, 163, -1000, -1000, -1000, - 518, -1000, 515, 657, 511, 11641, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 11641, -1000, -1000, -1000, -1000, -1000, 11408, -181, - 399, 11408, 11408, 11641, -1000, 288, -1000, 281, -1000, 4157, - -1000, 903, 9537, -1000, -1000, 508, -1000, 7183, 2052, 2052, - -1000, -1000, 508, 718, 718, -1000, 718, 724, -1000, 718, - -12, 718, -13, 508, 508, 1843, 2151, 1671, 2025, -1000, - 1635, 1992, 1320, 1974, 708, -170, -1000, 281, 6705, -1000, - 845, 622, 647, -1000, -1000, 5958, 508, 506, 103, 486, - -1000, 894, 11874, 6705, -1000, -1000, 6705, 723, -1000, 6705, - -1000, -1000, -1000, 708, 708, 708, 486, 867, 281, -1000, - -1000, -1000, -1000, 2852, -1000, 484, -1000, 718, -1000, -1000, - -1000, 11408, -56, 916, -1000, -1000, -1000, -1000, 717, -1000, - -1000, -1000, -1000, -1000, -1000, -32, 383, -32, 350, -1000, - 333, 3374, -1000, -1000, -1000, -1000, 849, -1000, 4157, -1000, - -1000, 716, -1000, -1000, -1000, 900, 654, -1000, 2052, -1000, - -1000, 100, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 7183, 7183, -1000, 7183, 7183, -1000, 7183, 7183, -1000, - 7183, 7183, 7183, 508, 379, 281, 830, -1000, 708, -1000, - -1000, 670, 11408, 11408, -1000, 11408, 867, -1000, 281, 281, - 11408, 281, 11408, 11408, 11408, 8121, -1000, 131, 11408, -1000, - 482, -1000, 185, -1000, -153, 534, 124, -1000, 124, 524, - 519, -1000, 708, 648, 
-1000, 199, 11408, 898, 891, -1000, - -1000, 1858, 1858, 1858, 1858, 1858, 1858, 1858, 1858, 17, - -1000, -1000, 914, -1000, 708, -1000, 715, 83, -1000, -1000, - -1000, 467, 458, 458, 458, 136, 131, -1000, 395, 176, - 378, -1000, 37, 11408, 266, 829, -1000, 827, 714, -1000, - -1000, -1000, -1000, -1000, 21, 4157, 3113, 456, -1000, 6705, - 6705, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 508, - 34, -184, 11874, 647, 508, 11408, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 302, -1000, -1000, 11641, -1000, -1000, 340, - -1000, -1000, 512, 452, -1000, 11408, -1000, -1000, 735, 281, - 628, -1000, 795, -179, -187, 586, -1000, -1000, -1000, 713, - -1000, -1000, -1000, 21, 808, -181, -1000, 793, -1000, 11408, - -1000, 18, -1000, -182, 428, 16, -185, 748, 708, -189, - 747, -1000, 908, 6944, -1000, -1000, 910, 152, 152, 1858, - 508, -1000, -1000, -1000, 44, 326, -1000, -1000, -1000, -1000, - -1000, -1000, + -1000, -1000, -1000, -1000, 12257, -1000, -1000, -1000, -1000, -1000, + 12016, -179, 424, 12016, 12016, 12257, -1000, 256, -1000, 204, + -1000, 4075, -1000, 862, 10087, -1000, -1000, 441, -1000, 7641, + 2206, 2206, -1000, -1000, 441, 691, 691, -1000, 691, 693, + -1000, 691, 1, 691, 0, 441, 441, 1887, 2174, 1729, + 2156, -1000, 1636, 1980, 1346, 1657, 676, -162, -1000, 204, + 6891, 2067, 2017, -1000, 818, 542, 601, -1000, -1000, 6140, + 441, 546, 138, 534, -1000, 850, 12498, 6891, -1000, -1000, + 6891, 692, -1000, 6891, -1000, -1000, -1000, 676, 676, 676, + 534, 843, 204, -1000, -1000, -1000, -1000, 2760, -1000, 531, + -1000, 691, -1000, -1000, -1000, 12016, -49, 883, 2206, -1000, + -1000, -1000, -1000, -1000, -17, 408, -17, 312, -1000, 280, + 3286, -1000, -1000, -1000, -1000, 821, -1000, 4075, -1000, -1000, + 686, -1000, -1000, -1000, 858, 619, -1000, 2206, -1000, -1000, + 111, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 7641, 7641, -1000, 7641, 7641, -1000, 7641, 7641, -1000, 7641, + 7641, 7641, 441, 399, 204, 7641, 7641, 811, -1000, 676, + -1000, -1000, 639, 12016, 12016, -1000, 12016, 843, -1000, 204, + 204, 12016, 204, 12016, 12016, 12016, 8632, -1000, 137, 12016, + -1000, 529, -1000, 188, -1000, -106, 142, -1000, 142, 488, + 475, -1000, 676, 602, -1000, 200, 12016, 854, 846, -1000, + -1000, 1936, 1936, 1936, 1936, 1936, 1936, 1936, 1936, 15, + -1000, -1000, 1936, 1936, 881, -1000, 676, -1000, 658, 122, + -1000, -1000, -1000, 508, 502, 502, 502, 143, 137, -1000, + 309, 199, 369, -1000, 47, 12016, 236, 810, -1000, 804, + -1000, -1000, -1000, -1000, -1000, 33, 4075, 3023, 487, -1000, + 6891, 6891, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 441, 42, -182, -1000, -1000, 12498, 601, 441, 12016, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 269, -1000, -1000, 12257, + -1000, -1000, 308, -1000, -1000, 484, -1000, 12016, -1000, -1000, + 550, 204, 599, -1000, 760, -177, -186, 537, -1000, -1000, + -1000, 683, -1000, -1000, 33, 781, -179, -1000, 753, -1000, + 12016, -1000, 30, -1000, -180, 482, 28, -184, 711, 676, + -187, 708, -1000, 867, 7141, -1000, -1000, 869, 198, 198, + 1936, 441, -1000, -1000, -1000, 56, 306, -1000, -1000, -1000, + -1000, -1000, -1000, } var yyPgo = [...]int{ - 0, 1146, 33, 449, 1145, 1144, 1143, 1142, 1141, 1139, - 1138, 1136, 1135, 1130, 1126, 1125, 1123, 1116, 1110, 1109, - 1108, 1092, 1091, 1089, 1088, 1087, 99, 1086, 1085, 1081, - 71, 1078, 60, 1077, 1076, 46, 76, 50, 41, 391, - 1072, 35, 53, 59, 1071, 38, 1070, 1069, 64, 1068, - 48, 1065, 1063, 201, 1060, 1059, 10, 30, 1058, 1057, - 1054, 1052, 98, 1042, 1050, 1049, 
1048, 1046, 1045, 1044, - 51, 8, 12, 47, 19, 1043, 416, 6, 1040, 52, - 1039, 1036, 1033, 1032, 16, 1030, 57, 1027, 18, 56, - 1024, 15, 65, 28, 20, 7, 69, 58, 1018, 14, - 61, 54, 1017, 1016, 411, 1014, 1012, 42, 1011, 1009, - 26, 154, 326, 1008, 1007, 1004, 1002, 37, 0, 504, - 418, 63, 1001, 999, 997, 1597, 70, 49, 17, 988, - 55, 1216, 40, 987, 985, 31, 981, 980, 979, 978, - 971, 970, 969, 84, 966, 964, 963, 24, 22, 962, - 961, 62, 23, 958, 957, 955, 44, 66, 954, 953, - 45, 29, 950, 949, 948, 947, 946, 27, 9, 943, - 13, 941, 21, 940, 25, 939, 4, 935, 11, 934, - 3, 933, 5, 43, 1, 932, 2, 931, 930, 311, - 701, 929, 928, 107, + 0, 1125, 30, 457, 1123, 1122, 1121, 1120, 1118, 1114, + 1113, 1111, 1110, 1109, 1103, 1102, 1100, 1094, 1093, 1091, + 1090, 1089, 1087, 1085, 1084, 1083, 97, 1082, 1081, 1076, + 65, 1071, 71, 1068, 1067, 47, 59, 45, 40, 1207, + 1065, 25, 56, 102, 1064, 34, 1060, 1052, 75, 1049, + 53, 1045, 1044, 429, 1043, 1042, 13, 33, 1040, 1039, + 1038, 1037, 66, 886, 1036, 1033, 16, 1032, 1031, 101, + 1029, 55, 3, 12, 21, 22, 1028, 229, 7, 1027, + 52, 1024, 1021, 1018, 1008, 29, 1007, 58, 1006, 17, + 57, 1002, 24, 63, 35, 19, 5, 73, 61, 999, + 15, 62, 54, 993, 990, 461, 987, 986, 46, 985, + 984, 26, 166, 377, 983, 980, 979, 978, 38, 0, + 498, 128, 70, 977, 970, 969, 1490, 69, 49, 18, + 965, 42, 1493, 43, 963, 962, 37, 961, 960, 959, + 957, 956, 954, 953, 134, 952, 951, 949, 20, 41, + 948, 946, 60, 23, 945, 942, 941, 48, 64, 939, + 935, 51, 44, 933, 932, 930, 924, 923, 27, 9, + 922, 14, 920, 10, 918, 28, 916, 2, 915, 11, + 914, 6, 913, 4, 50, 1, 908, 8, 901, 900, + 77, 494, 897, 896, 94, } var yyR1 = [...]int{ - 0, 187, 188, 188, 1, 1, 1, 1, 1, 1, + 0, 188, 189, 189, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 6, 3, 4, 4, 5, 5, 7, 7, 29, 29, 8, 9, 9, - 9, 191, 191, 48, 48, 92, 92, 10, 10, 10, - 10, 97, 97, 101, 101, 101, 102, 102, 102, 102, - 133, 133, 11, 11, 11, 11, 11, 11, 11, 182, - 182, 181, 180, 180, 179, 179, 178, 17, 163, 165, - 165, 164, 164, 164, 164, 157, 136, 136, 136, 136, - 139, 139, 137, 137, 137, 137, 137, 137, 137, 138, - 138, 138, 138, 138, 140, 140, 140, 140, 140, 141, - 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, - 141, 141, 141, 141, 142, 142, 142, 142, 142, 142, - 142, 142, 156, 156, 143, 143, 151, 151, 152, 152, - 152, 149, 149, 150, 150, 153, 153, 153, 144, 144, - 144, 144, 144, 144, 144, 144, 146, 146, 146, 154, - 154, 147, 147, 147, 148, 148, 148, 155, 155, 155, - 155, 155, 145, 145, 158, 158, 173, 173, 172, 172, - 172, 162, 162, 169, 169, 169, 169, 169, 160, 160, - 161, 161, 171, 171, 170, 159, 159, 174, 174, 174, - 174, 185, 186, 184, 184, 184, 184, 184, 166, 166, - 166, 167, 167, 167, 168, 168, 168, 12, 12, 12, - 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, - 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, - 183, 177, 175, 175, 176, 176, 13, 18, 18, 14, - 14, 14, 14, 14, 15, 15, 19, 20, 20, 20, + 9, 192, 192, 48, 48, 93, 93, 10, 10, 10, + 10, 98, 98, 102, 102, 102, 103, 103, 103, 103, + 134, 134, 11, 11, 11, 11, 11, 11, 11, 183, + 183, 182, 181, 181, 180, 180, 179, 17, 164, 166, + 166, 165, 165, 165, 165, 158, 137, 137, 137, 137, + 140, 140, 138, 138, 138, 138, 138, 138, 138, 138, + 138, 139, 139, 139, 139, 139, 141, 141, 141, 141, + 141, 142, 142, 142, 142, 142, 142, 142, 142, 142, + 142, 142, 142, 142, 142, 142, 143, 143, 143, 143, + 143, 143, 143, 143, 157, 157, 144, 144, 152, 152, + 153, 153, 153, 150, 150, 151, 151, 154, 154, 154, + 146, 146, 147, 147, 155, 155, 148, 148, 148, 149, + 149, 149, 156, 156, 156, 156, 156, 145, 
145, 159, + 159, 174, 174, 173, 173, 173, 163, 163, 170, 170, + 170, 170, 170, 161, 161, 162, 162, 172, 172, 171, + 160, 160, 175, 175, 175, 175, 186, 187, 185, 185, + 185, 185, 185, 167, 167, 167, 168, 168, 168, 169, + 169, 169, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 184, 184, 184, 184, 184, + 184, 184, 184, 184, 184, 184, 178, 176, 176, 177, + 177, 13, 18, 18, 14, 14, 14, 14, 14, 15, + 15, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 20, 108, 108, - 106, 106, 109, 109, 107, 107, 107, 110, 110, 110, - 134, 134, 134, 21, 21, 23, 23, 24, 25, 22, - 22, 22, 22, 22, 22, 22, 16, 192, 26, 27, - 27, 28, 28, 28, 32, 32, 32, 30, 30, 31, - 31, 37, 37, 36, 36, 38, 38, 38, 38, 122, - 122, 122, 121, 121, 40, 40, 41, 41, 42, 42, - 43, 43, 43, 43, 55, 55, 91, 91, 93, 93, - 44, 44, 44, 44, 45, 45, 46, 46, 47, 47, - 129, 129, 128, 128, 128, 127, 127, 49, 49, 49, - 51, 50, 50, 50, 50, 52, 52, 54, 54, 53, - 53, 56, 56, 56, 56, 57, 57, 39, 39, 39, - 39, 39, 39, 39, 105, 105, 59, 59, 58, 58, - 58, 58, 58, 58, 58, 58, 58, 58, 69, 69, - 69, 69, 69, 69, 60, 60, 60, 60, 60, 60, - 60, 35, 35, 70, 70, 70, 76, 71, 71, 63, + 20, 20, 20, 109, 109, 107, 107, 110, 110, 108, + 108, 108, 111, 111, 111, 135, 135, 135, 21, 21, + 23, 23, 24, 25, 22, 22, 22, 22, 22, 22, + 22, 16, 193, 26, 27, 27, 28, 28, 28, 32, + 32, 32, 30, 30, 31, 31, 37, 37, 36, 36, + 38, 38, 38, 38, 123, 123, 123, 122, 122, 40, + 40, 41, 41, 42, 42, 43, 43, 43, 43, 55, + 55, 92, 92, 94, 94, 44, 44, 44, 44, 45, + 45, 46, 46, 47, 47, 130, 130, 129, 129, 129, + 128, 128, 49, 49, 49, 51, 50, 50, 50, 50, + 52, 52, 54, 54, 53, 53, 56, 56, 56, 56, + 57, 57, 39, 39, 39, 39, 39, 39, 39, 106, + 106, 59, 59, 58, 58, 58, 58, 58, 58, 58, + 58, 58, 58, 70, 70, 70, 70, 70, 70, 60, + 60, 60, 60, 60, 60, 60, 35, 35, 71, 71, + 71, 77, 72, 72, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, - 63, 67, 67, 67, 65, 65, 65, 65, 65, 65, + 63, 63, 63, 63, 63, 63, 67, 67, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, - 65, 65, 65, 65, 65, 66, 66, 66, 66, 66, - 66, 66, 66, 193, 193, 68, 68, 68, 68, 33, - 33, 33, 33, 33, 132, 132, 135, 135, 135, 135, - 135, 135, 135, 135, 135, 135, 135, 135, 135, 80, - 80, 34, 34, 78, 78, 79, 81, 81, 77, 77, - 77, 62, 62, 62, 62, 62, 62, 62, 62, 64, - 64, 64, 82, 82, 83, 83, 84, 84, 85, 85, - 86, 87, 87, 87, 88, 88, 88, 88, 89, 89, - 89, 61, 61, 61, 61, 61, 61, 90, 90, 90, - 90, 94, 94, 72, 72, 74, 74, 73, 75, 95, - 95, 99, 96, 96, 100, 100, 100, 100, 98, 98, - 98, 124, 124, 124, 103, 103, 111, 111, 112, 112, - 104, 104, 113, 113, 113, 113, 113, 113, 113, 113, - 113, 113, 114, 114, 114, 115, 115, 116, 116, 116, - 123, 123, 119, 119, 120, 120, 125, 125, 126, 126, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 118, 118, 118, 118, 118, 118, 118, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 194, 194, 69, 68, + 68, 68, 68, 33, 33, 33, 33, 33, 133, 
133, + 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, + 136, 136, 136, 81, 81, 34, 34, 79, 79, 80, + 82, 82, 78, 78, 78, 62, 62, 62, 62, 62, + 62, 62, 62, 64, 64, 64, 83, 83, 84, 84, + 85, 85, 86, 86, 87, 88, 88, 88, 89, 89, + 89, 89, 90, 90, 90, 61, 61, 61, 61, 61, + 61, 91, 91, 91, 91, 95, 95, 73, 73, 75, + 75, 74, 76, 96, 96, 100, 97, 97, 101, 101, + 101, 101, 99, 99, 99, 125, 125, 125, 104, 104, + 112, 112, 113, 113, 105, 105, 114, 114, 114, 114, + 114, 114, 114, 114, 114, 114, 115, 115, 115, 116, + 116, 117, 117, 117, 124, 124, 120, 120, 121, 121, + 126, 126, 127, 127, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, @@ -2178,10 +2239,19 @@ var yyR1 = [...]int{ 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, - 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, - 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, - 118, 118, 118, 118, 118, 118, 189, 190, 130, 131, - 131, 131, + 118, 118, 118, 118, 118, 118, 118, 118, 118, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 190, 191, 131, 132, 132, 132, } var yyR2 = [...]int{ @@ -2194,61 +2264,61 @@ var yyR2 = [...]int{ 1, 1, 2, 2, 8, 4, 6, 5, 5, 0, 2, 1, 0, 2, 1, 3, 3, 4, 4, 2, 4, 1, 3, 3, 3, 8, 3, 1, 1, 1, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, - 2, 2, 2, 2, 1, 2, 2, 2, 1, 4, - 4, 2, 2, 3, 3, 3, 3, 1, 1, 1, - 1, 1, 6, 6, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 3, 0, 3, 0, 5, 0, 3, - 5, 0, 1, 0, 1, 0, 1, 2, 0, 2, - 2, 2, 2, 2, 4, 2, 0, 3, 5, 0, - 1, 0, 3, 3, 0, 2, 2, 0, 2, 1, - 2, 1, 0, 2, 5, 4, 1, 2, 2, 3, - 2, 0, 1, 2, 3, 3, 2, 2, 1, 1, - 0, 1, 1, 3, 2, 3, 1, 10, 11, 11, - 12, 3, 3, 1, 1, 2, 2, 2, 0, 1, - 3, 1, 2, 3, 1, 1, 1, 6, 7, 7, - 7, 7, 4, 5, 7, 5, 5, 5, 12, 7, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 7, 1, 3, 8, 8, 3, 3, 5, 4, - 6, 5, 4, 4, 3, 2, 3, 4, 4, 3, - 4, 4, 4, 4, 4, 4, 3, 2, 3, 3, - 2, 3, 4, 3, 7, 5, 4, 2, 4, 2, - 2, 2, 2, 3, 3, 5, 2, 3, 1, 1, - 0, 1, 1, 1, 0, 2, 2, 0, 2, 2, - 0, 1, 1, 2, 1, 1, 2, 1, 1, 2, - 2, 2, 2, 2, 3, 3, 2, 0, 2, 0, - 2, 1, 2, 2, 0, 1, 1, 0, 1, 0, - 1, 0, 1, 1, 3, 1, 2, 3, 5, 0, - 1, 2, 1, 1, 0, 2, 1, 3, 1, 1, - 1, 3, 1, 3, 3, 7, 1, 3, 1, 3, - 4, 4, 4, 3, 2, 4, 0, 1, 0, 2, - 0, 1, 0, 1, 2, 1, 1, 1, 2, 2, - 1, 2, 3, 2, 3, 2, 2, 2, 1, 1, - 3, 0, 5, 5, 5, 0, 2, 1, 3, 3, - 2, 3, 1, 2, 0, 3, 1, 1, 3, 3, - 4, 4, 5, 3, 4, 5, 6, 2, 1, 2, - 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, - 1, 0, 2, 1, 1, 1, 3, 1, 3, 1, - 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, - 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, - 1, 4, 5, 6, 4, 4, 6, 6, 6, 6, - 8, 8, 6, 8, 8, 6, 8, 8, 6, 8, - 8, 9, 7, 5, 4, 2, 2, 2, 2, 2, - 2, 2, 2, 0, 2, 4, 4, 4, 4, 0, - 3, 4, 7, 3, 1, 1, 2, 3, 3, 1, - 2, 2, 1, 2, 1, 2, 2, 1, 2, 0, - 1, 0, 2, 1, 2, 4, 0, 2, 1, 3, - 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 0, 3, 0, 2, 0, 3, 1, 3, - 2, 0, 1, 1, 0, 2, 4, 4, 0, 2, - 4, 2, 1, 3, 5, 4, 6, 1, 3, 3, - 5, 0, 5, 1, 3, 1, 2, 3, 
1, 1, - 3, 3, 1, 3, 3, 3, 3, 3, 1, 2, - 1, 1, 1, 1, 1, 1, 0, 2, 0, 3, - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, - 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, + 1, 4, 4, 2, 2, 3, 3, 3, 3, 1, + 1, 1, 1, 1, 6, 6, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 3, 0, 3, 0, 5, + 0, 3, 5, 0, 1, 0, 1, 0, 1, 2, + 0, 2, 0, 3, 0, 1, 0, 3, 3, 0, + 2, 2, 0, 2, 1, 2, 1, 0, 2, 5, + 4, 1, 2, 2, 3, 2, 0, 1, 2, 3, + 3, 2, 2, 1, 1, 0, 1, 1, 3, 2, + 3, 1, 10, 11, 11, 12, 3, 3, 1, 1, + 2, 2, 2, 0, 1, 3, 1, 2, 3, 1, + 1, 1, 6, 7, 7, 7, 7, 4, 5, 7, + 5, 5, 5, 12, 7, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 7, 1, 3, 8, + 8, 3, 3, 5, 4, 6, 5, 4, 4, 3, + 2, 3, 4, 4, 3, 4, 4, 4, 4, 4, + 4, 3, 2, 3, 3, 2, 3, 4, 3, 7, + 5, 4, 2, 4, 2, 2, 2, 2, 3, 3, + 5, 2, 3, 1, 1, 0, 1, 1, 1, 0, + 2, 2, 0, 2, 2, 0, 1, 1, 2, 1, + 1, 2, 1, 1, 2, 2, 2, 2, 2, 3, + 3, 2, 0, 2, 0, 2, 1, 2, 2, 0, + 1, 1, 0, 1, 0, 1, 0, 1, 1, 3, + 1, 2, 3, 5, 0, 1, 2, 1, 1, 0, + 2, 1, 3, 1, 1, 1, 3, 1, 3, 3, + 7, 1, 3, 1, 3, 4, 4, 4, 3, 2, + 4, 0, 1, 0, 2, 0, 1, 0, 1, 2, + 1, 1, 1, 2, 2, 1, 2, 3, 2, 3, + 2, 2, 2, 1, 1, 3, 0, 5, 5, 5, + 0, 2, 1, 3, 3, 2, 3, 1, 2, 0, + 3, 1, 1, 3, 3, 4, 4, 5, 3, 4, + 5, 6, 2, 1, 2, 1, 2, 1, 2, 1, + 1, 1, 1, 1, 1, 1, 0, 2, 1, 1, + 1, 3, 1, 3, 1, 1, 1, 1, 1, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, + 2, 3, 1, 1, 1, 1, 4, 5, 6, 4, + 4, 6, 6, 6, 6, 8, 8, 6, 8, 8, + 6, 8, 8, 6, 8, 8, 9, 7, 5, 4, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 8, 8, 0, 2, 3, 4, + 4, 4, 4, 0, 3, 4, 7, 3, 1, 1, + 2, 3, 3, 1, 2, 2, 1, 2, 1, 2, + 2, 1, 2, 0, 1, 0, 2, 1, 2, 4, + 0, 2, 1, 3, 5, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 2, 2, 0, 3, 0, 2, + 0, 3, 1, 3, 2, 0, 1, 1, 0, 2, + 4, 4, 0, 2, 4, 2, 1, 3, 5, 4, + 6, 1, 3, 3, 5, 0, 5, 1, 3, 1, + 2, 3, 1, 1, 3, 3, 1, 3, 3, 3, + 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, + 0, 2, 0, 3, 0, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, + 1, 0, 1, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -2269,300 +2339,304 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, - 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 0, 0, 1, 1, } var yyChk = [...]int{ - -1000, -187, -1, -2, -6, -7, -8, -9, -10, -11, + -1000, -188, -1, -2, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -19, -20, -21, -23, -24, -25, -22, -16, -3, -4, 6, 7, -29, 9, 10, 30, -17, 115, 116, 118, 117, 149, 119, 142, 50, 162, 163, 165, 166, 25, 143, 144, 147, 148, 31, 32, - 121, -189, 8, 251, 54, -188, 266, -84, 15, -28, - 5, -26, -192, -26, -26, -26, -26, -26, -163, -165, - 54, 90, -116, 125, 72, 243, 122, 123, 129, -119, - 57, -118, 259, 135, 162, 173, 167, 194, 186, 136, - 184, 187, 230, 214, 225, 66, 165, 239, 145, 182, - 178, 176, 27, 227, 199, 264, 177, 226, 121, 138, - 133, 200, 204, 231, 171, 172, 233, 198, 134, 33, - 261, 35, 153, 234, 202, 197, 193, 196, 170, 192, - 39, 206, 205, 207, 229, 189, 139, 179, 18, 237, - 148, 151, 228, 201, 203, 130, 155, 263, 235, 175, - 140, 152, 147, 238, 141, 166, 232, 241, 38, 211, - 169, 132, 163, 159, 216, 190, 154, 180, 181, 195, - 168, 191, 164, 156, 149, 240, 212, 265, 188, 185, - 160, 157, 158, 217, 218, 219, 220, 221, 222, 161, - 262, 236, 183, 213, -104, 125, 220, 127, 123, 123, - 124, 125, 243, 122, 123, -53, -125, 57, -118, 125, - 123, 108, 187, 230, 115, 215, 227, 124, 33, 228, - 155, -134, 123, -106, 214, 217, 218, 219, 222, 220, - 161, 57, 232, 231, 223, -125, 164, -130, -130, -130, - 
-130, -130, 216, 216, -130, -2, -88, 17, 16, -5, - -3, -189, 6, 20, 21, -32, 40, 41, -27, -38, - 99, -39, -125, -58, 74, -63, 29, 57, -118, 23, - -62, -59, -77, -75, -76, 108, 109, 110, 97, 98, - 105, 75, 111, -67, -65, -66, -68, 59, 58, 67, - 60, 61, 62, 63, 68, 69, 70, -119, -73, -189, - 44, 45, 252, 253, 254, 255, 258, 256, 77, 34, - 242, 250, 249, 248, 246, 247, 244, 245, 128, 243, - 103, 251, -104, -104, 11, -48, -53, -96, -133, 164, - -100, 232, 231, -120, -98, -119, -117, 230, 187, 229, - 120, 73, 22, 24, 209, 76, 108, 16, 77, 107, - 252, 115, 48, 244, 245, 242, 254, 255, 243, 215, - 29, 10, 25, 143, 21, 101, 117, 80, 81, 146, - 23, 144, 70, 19, 51, 11, 13, 14, 128, 127, - 92, 124, 46, 8, 111, 26, 89, 42, 28, 44, - 90, 17, 246, 247, 31, 258, 150, 103, 49, 36, - 74, 68, 71, 52, 72, 15, 47, 91, 118, 251, - 45, 122, 6, 257, 30, 142, 43, 123, 79, 126, - 69, 5, 129, 32, 9, 50, 53, 248, 249, 250, - 34, 78, 12, -164, 90, -157, 57, -53, 124, -53, - 251, -112, 128, -112, -112, 123, -53, 115, 117, 120, - 52, -18, -53, -111, 128, 57, -111, -111, -111, -53, - 112, -53, 57, 30, -131, -189, -120, 243, 57, 155, - 123, 156, 125, -131, -131, -131, -131, 159, 160, -131, - -109, -108, 225, 226, 216, 224, 12, 216, 158, -131, - -130, -130, -190, 56, -89, 19, 31, -39, -125, -85, - -86, -39, -84, -2, -26, 36, -30, 21, 65, 11, - -122, 73, 72, 89, -121, 22, -119, 59, 112, -39, - -60, 92, 74, 90, 91, 76, 94, 93, 104, 97, - 98, 99, 100, 101, 102, 103, 95, 96, 107, 82, - 83, 84, 85, 86, 87, 88, -105, -189, -76, -189, - 113, 114, -63, -63, -63, -63, -63, -63, -63, -63, - -189, -2, -71, -39, -189, -189, -189, -189, -189, -189, - -189, -189, -189, -80, -39, -189, -193, -189, -193, -193, - -193, -193, -193, -193, -193, -189, -189, -189, -189, -54, - 26, -53, -41, -42, -43, -44, -55, -76, -189, -53, - -53, -48, -191, 55, 11, 53, 55, -96, 164, -97, - -101, 233, 235, 82, -124, -119, 59, 29, 30, 56, - 55, -53, -136, -139, -141, -140, -142, -137, -138, 184, - 185, 108, 188, 190, 191, 192, 193, 194, 195, 196, - 197, 198, 199, 30, 145, 180, 181, 182, 183, 200, - 201, 202, 203, 204, 205, 206, 207, 167, 168, 169, - 170, 171, 172, 173, 175, 176, 177, 178, 179, 57, - -131, 125, 57, 74, 57, -53, -53, -131, 157, 157, - 123, 123, -53, 55, 126, -48, 23, 52, -53, 57, - 57, -126, -125, -117, -131, -131, -131, -131, -131, -131, - -131, -131, -131, -131, 11, -107, 11, 92, -39, 52, - 9, 92, 55, 18, 112, 55, -87, 24, 25, -88, - -190, -32, -64, -119, 60, 63, -31, 43, -53, -39, - -39, -69, 68, 74, 69, 70, -121, 99, -126, -120, - -117, -63, -70, -73, -76, 64, 92, 90, 91, 76, + 121, -190, 8, 251, 54, -189, 268, -85, 15, -28, + 5, -26, -193, -26, -26, -26, -26, -26, -164, -166, + 54, 90, -117, 125, 72, 243, 122, 123, 129, -120, + 57, -119, 261, 135, 162, 173, 167, 194, 186, 262, + 136, 184, 187, 230, 214, 225, 66, 165, 239, 145, + 182, 178, 176, 27, 227, 199, 266, 177, 226, 121, + 138, 133, 200, 204, 231, 171, 172, 233, 198, 134, + 33, 263, 35, 153, 234, 202, 197, 193, 196, 170, + 192, 39, 206, 205, 207, 229, 189, 139, 179, 18, + 237, 148, 151, 228, 201, 203, 130, 155, 265, 235, + 175, 140, 152, 147, 238, 141, 166, 232, 241, 38, + 211, 169, 132, 163, 159, 216, 190, 154, 180, 181, + 195, 168, 191, 164, 156, 149, 240, 212, 267, 188, + 185, 160, 157, 158, 217, 218, 219, 220, 221, 222, + 161, 264, 236, 183, 213, -105, 125, 220, 127, 123, + 123, 124, 125, 243, 122, 123, -53, -126, 57, -119, + 125, 123, 108, 187, 230, 115, 215, 227, 124, 33, + 228, 155, -135, 123, -107, 214, 217, 218, 219, 222, + 220, 161, 57, 232, 231, 223, -126, 164, -131, 
-131, + -131, -131, -131, 216, 216, -131, -2, -89, 17, 16, + -5, -3, -190, 6, 20, 21, -32, 40, 41, -27, + -38, 99, -39, -126, -58, 74, -63, 29, 57, -119, + 23, -62, -59, -78, -76, -77, 108, 109, 110, 97, + 98, 105, 75, 111, -67, -65, -66, -68, 59, 58, + 67, 60, 61, 62, 63, 68, 69, 70, -120, -74, + -190, 44, 45, 252, 253, 254, 255, 260, 256, 77, + 34, 242, 250, 249, 248, 246, 247, 244, 245, 258, + 259, 128, 243, 103, 251, -105, -105, 11, -48, -53, + -97, -134, 164, -101, 232, 231, -121, -99, -120, -118, + 230, 187, 229, 120, 73, 22, 24, 209, 76, 108, + 16, 77, 107, 252, 115, 48, 244, 245, 242, 254, + 255, 243, 215, 29, 10, 25, 143, 21, 101, 117, + 80, 81, 146, 23, 144, 70, 19, 51, 11, 13, + 14, 128, 127, 92, 124, 46, 8, 111, 26, 89, + 42, 28, 44, 90, 17, 246, 247, 31, 260, 150, + 103, 49, 36, 74, 68, 71, 52, 72, 15, 47, + 91, 118, 251, 45, 122, 6, 257, 30, 142, 43, + 123, 79, 258, 259, 126, 69, 5, 129, 32, 9, + 50, 53, 248, 249, 250, 34, 78, 12, -165, 90, + -158, 57, -53, 124, -53, 251, -113, 128, -113, -113, + 123, -53, 115, 117, 120, 52, -18, -53, -112, 128, + 57, -112, -112, -112, -53, 112, -53, 57, 30, -132, + -190, -121, 243, 57, 155, 123, 156, 125, -132, -132, + -132, -132, 159, 160, -132, -110, -109, 225, 226, 216, + 224, 12, 216, 158, -132, -131, -131, -191, 56, -90, + 19, 31, -39, -126, -86, -87, -39, -85, -2, -26, + 36, -30, 21, 65, 11, -123, 73, 72, 89, -122, + 22, -120, 59, 112, -39, -60, 92, 74, 90, 91, + 76, 94, 93, 104, 97, 98, 99, 100, 101, 102, + 103, 95, 96, 107, 82, 83, 84, 85, 86, 87, + 88, -106, -190, -77, -190, 113, 114, -63, -63, -63, + -63, -63, -63, -63, -63, -190, -2, -72, -39, -190, + -190, -190, -190, -190, -190, -190, -190, -190, -81, -39, + -190, -194, -69, -190, -194, -69, -194, -69, -194, -190, + -194, -69, -194, -69, -194, -194, -69, -190, -190, -190, + -190, -190, -190, -54, 26, -53, -41, -42, -43, -44, + -55, -77, -190, -53, -53, -48, -192, 55, 11, 53, + 55, -97, 164, -98, -102, 233, 235, 82, -125, -120, + 59, 29, 30, 56, 55, -53, -137, -140, -142, -141, + -143, -138, -139, 184, 185, 108, 188, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 30, 145, 180, + 181, 182, 183, 200, 201, 202, 203, 204, 205, 206, + 207, 167, 186, 262, 168, 169, 170, 171, 172, 173, + 175, 176, 177, 178, 179, 57, -132, 125, 57, 74, + 57, -53, -53, -132, 157, 157, 123, 123, -53, 55, + 126, -48, 23, 52, -53, 57, 57, -127, -126, -118, + -132, -132, -132, -132, -132, -132, -132, -132, -132, -132, + 11, -108, 11, 92, -39, 52, 9, 92, 55, 18, + 112, 55, -88, 24, 25, -89, -191, -32, -64, -120, + 60, 63, -31, 43, -53, -39, -39, -70, 68, 74, + 69, 70, -122, 99, -127, -121, -118, -63, -71, -74, + -77, 64, 92, 90, 91, 76, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, - -63, -63, -63, -63, -63, -132, 57, 59, 57, -62, - -62, -119, -37, 21, -36, -38, -190, 55, -190, -2, - -36, -36, -39, -39, -77, 59, -119, -125, -77, 59, - -36, -30, -78, -79, 78, -77, -190, -36, -37, -36, - -36, -92, 151, -53, 30, 55, -49, -51, -50, -52, - 42, 46, 48, 43, 44, 45, 49, -129, 22, -41, - -189, -128, 151, -127, 22, -125, 59, -92, 53, -41, - -53, -100, -97, 55, 234, 236, 237, 52, 71, -39, - -148, 107, -166, -167, -168, -120, 59, 60, -157, -158, - -159, -169, 137, -174, 130, 132, 129, -160, 138, 124, - 28, 56, -153, 68, 74, -149, 212, -143, 54, -143, - -143, -143, -143, -147, 187, -147, -147, -147, 54, 54, - -143, -143, -143, -151, 54, -151, -151, -152, 54, -152, - -123, 53, -53, -131, 23, -131, -113, 120, 117, 118, - -177, 116, 209, 187, 66, 29, 15, 252, 151, 265, - 57, 152, -119, -119, -53, 
-53, 120, 117, -53, -53, - -53, -131, -53, -110, 90, 12, -125, -125, -53, 38, - -39, -39, -126, -86, -89, -103, 19, 11, 34, 34, - -36, 68, 69, 70, 112, -189, -70, -63, -63, -63, - -35, 146, 73, -190, -190, -36, 55, -39, -190, -190, - -190, 55, 53, 22, 55, 11, 55, 11, 112, 55, - 11, 55, 11, -190, -36, -81, -79, 80, -39, -190, - -190, -190, -190, -190, -61, 30, 34, -2, -189, -189, - -95, -99, -77, -42, -43, -43, -42, -43, 42, 42, - 42, 47, 42, 47, 42, -50, -125, -190, -56, 50, - 127, 51, -189, -127, -57, 12, -41, -57, -101, -102, - 238, 235, 241, 57, 59, 55, -168, 82, 54, 57, - 28, -160, -160, -161, 57, -161, 28, -144, 29, 68, - -150, 213, 60, -147, -147, -148, 30, -148, -148, -148, - -156, 59, -156, 60, 60, 52, -119, -131, -130, -183, - 131, 137, 138, 133, 57, 124, 28, 130, 132, 151, - 129, -183, -114, -115, 126, 22, 124, 28, 151, -182, - 53, 157, 157, 126, -131, -107, 59, -39, 39, 112, - -53, -40, 11, 99, -120, -37, -35, 73, -63, -63, - -190, -38, -135, 108, 184, 145, 182, 178, 198, 189, - 211, 180, 212, -132, -135, -63, -63, -63, -63, -120, - -63, -63, -63, -63, 259, -84, 81, -39, 79, -94, - 52, -95, -72, -74, -73, -189, -2, -90, -119, -93, - -119, -57, 55, 82, -46, -45, 52, 53, -47, 52, - -45, 42, 42, 124, 124, 124, -93, -84, -39, -57, - 235, 239, 240, -167, -168, -171, -170, -119, -174, -161, - -161, 54, -146, 52, 59, 60, 61, 68, 242, 67, - 56, -148, -148, 57, 108, 56, 55, 56, 55, 56, - 55, -53, -130, -130, -53, -130, -119, -180, 262, -181, - 57, -119, -119, -53, -110, -57, -41, -190, -63, -190, - -143, -143, -143, -152, -143, 172, -143, 172, -190, -190, - -190, 55, 19, -190, 55, 19, -190, 55, 19, -190, - 55, 19, -189, -34, 257, -39, 27, -94, 55, -190, - -190, -190, 55, 112, -190, 55, -84, -99, -39, -39, - 54, -39, -189, -189, -189, -190, -88, 56, 55, -143, - -91, -119, -154, 209, 9, 54, -147, 59, -147, 60, - 60, -131, 26, -179, -178, -120, 54, -82, 13, -147, + -63, -133, 57, 59, 57, -62, -62, -120, -37, 21, + -36, -38, -191, 55, -191, -2, -36, -36, -39, -39, + -78, 59, -120, -126, -78, 59, -36, -30, -79, -80, + 78, -78, -191, -63, -120, -120, -36, -37, -36, -36, + -93, 151, -53, 30, 55, -49, -51, -50, -52, 42, + 46, 48, 43, 44, 45, 49, -130, 22, -41, -190, + -129, 151, -128, 22, -126, 59, -93, 53, -41, -53, + -101, -98, 55, 234, 236, 237, 52, 71, -39, -149, + 107, -167, -168, -169, -121, 59, 60, -158, -159, -160, + -170, 137, -175, 130, 132, 129, -161, 138, 124, 28, + 56, -154, 68, 74, -150, 212, -144, 54, -144, -144, + -144, -144, -148, 187, -148, -148, -148, 54, 54, -144, + -144, -144, -152, 54, -152, -152, -153, 54, -153, -124, + 53, -53, -132, 23, -132, -114, 120, 117, 118, -178, + 116, 209, 187, 66, 29, 15, 252, 151, 267, 57, + 152, -120, -120, -53, -53, 120, 117, -53, -53, -53, + -132, -53, -111, 90, 12, -126, -126, -53, 38, -39, + -39, -127, -87, -90, -104, 19, 11, 34, 34, -36, + 68, 69, 70, 112, -190, -71, -63, -63, -63, -35, + 146, 73, -191, -191, -36, 55, -39, -191, -191, -191, + 55, 53, 22, 55, 11, 55, 11, 112, 55, 11, + 55, 11, -191, -36, -82, -80, 80, -39, -191, -191, + 55, 55, -191, -191, -191, -191, -61, 30, 34, -2, + -190, -190, -96, -100, -78, -42, -43, -43, -42, -43, + 42, 42, 42, 47, 42, 47, 42, -50, -126, -191, + -56, 50, 127, 51, -190, -128, -57, 12, -41, -57, + -102, -103, 238, 235, 241, 57, 59, 55, -169, 82, + 54, 57, 28, -161, -161, -162, 57, -162, 28, -146, + 29, 68, -151, 213, 60, -148, -148, -149, 30, -149, + -149, -149, -157, 59, -157, 60, 60, 52, -120, -132, + -131, -184, 131, 137, 138, 133, 57, 124, 28, 130, + 132, 151, 129, -184, -115, -116, 126, 22, 124, 
28, + 151, -183, 53, 157, 157, 126, -132, -108, 59, -39, + 39, 112, -53, -40, 11, 99, -121, -37, -35, 73, + -63, -63, -191, -38, -136, 108, 184, 145, 182, 178, + 198, 189, 211, 180, 212, -133, -136, -63, -63, -63, + -63, -121, -63, -63, -63, -63, 261, -85, 81, -39, + 79, -63, -63, -95, 52, -96, -73, -75, -74, -190, + -2, -91, -120, -94, -120, -57, 55, 82, -46, -45, + 52, 53, -47, 52, -45, 42, 42, 124, 124, 124, + -94, -85, -39, -57, 235, 239, 240, -168, -169, -172, + -171, -120, -175, -162, -162, 54, -147, 52, -63, 56, + -149, -149, 57, 108, 56, 55, 56, 55, 56, 55, + -53, -131, -131, -53, -131, -120, -181, 264, -182, 57, + -120, -120, -53, -111, -57, -41, -191, -63, -191, -144, + -144, -144, -153, -144, 172, -144, 172, -191, -191, -191, + 55, 19, -191, 55, 19, -191, 55, 19, -191, 55, + 19, -190, -34, 257, -39, 55, 55, 27, -95, 55, + -191, -191, -191, 55, 112, -191, 55, -85, -100, -39, + -39, 54, -39, -190, -190, -190, -191, -89, 56, 55, + -144, -92, -120, -155, 209, 9, -148, 59, -148, 60, + 60, -132, 26, -180, -179, -121, 54, -83, 13, -148, 57, -63, -63, -63, -63, -63, -63, -63, -63, -63, - -190, 59, 28, -74, 34, -2, -189, -119, -119, -119, - -88, -91, -91, -91, -91, -128, -173, -172, 53, 134, - 66, -170, 56, 55, -155, 130, 28, 129, 242, 56, - -148, -148, 56, 56, -189, 55, 82, -91, -83, 14, - 16, -190, -190, -190, -190, -190, -190, -190, -190, -33, - 92, 262, 9, -72, -2, 112, 56, -190, -190, -190, - -56, -172, 57, -162, 82, 59, 140, -119, -145, 66, - 28, 28, 54, -175, -176, 151, -178, -168, 56, -39, - -71, -190, 260, 49, 263, -95, -190, -119, 60, -53, - 59, 56, -190, 55, -119, -182, 39, 261, 264, 54, - -176, 34, -180, 39, -91, 153, 262, 56, 154, 263, - -185, -186, 52, -189, 264, -186, 52, 10, 9, -63, - 150, -184, 141, 136, 139, 30, -184, -190, -190, 135, - 29, 68, + -191, 59, -63, -63, 28, -75, 34, -2, -190, -120, + -120, -120, -89, -92, -92, -92, -92, -129, -174, -173, + 53, 134, 66, -171, 56, 55, -156, 130, 28, 129, + -66, -149, -149, 56, 56, -190, 55, 82, -92, -84, + 14, 16, -191, -191, -191, -191, -191, -191, -191, -191, + -33, 92, 264, -191, -191, 9, -73, -2, 112, 56, + -191, -191, -191, -56, -173, 57, -163, 82, 59, 140, + -120, -145, 66, 28, 28, -176, -177, 151, -179, -169, + 56, -39, -72, -191, 262, 49, 265, -96, -191, -120, + 60, -53, 59, -191, 55, -120, -183, 39, 263, 266, + 54, -177, 34, -181, 39, -92, 153, 264, 56, 154, + 265, -186, -187, 52, -190, 266, -187, 52, 10, 9, + -63, 150, -185, 141, 136, 139, 30, -185, -191, -191, + 135, 29, 68, } var yyDef = [...]int{ 23, -2, 2, -2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 556, 0, 317, 317, 317, 317, 317, 317, - 0, 627, 610, 0, 0, 0, 0, -2, 304, 305, - 0, 307, 308, 848, 848, 848, 848, 848, 0, 0, - 848, 0, 35, 36, 846, 1, 3, 564, 0, 0, - 321, 324, 319, 0, 610, 610, 0, 0, 62, 63, - 0, 0, 0, 832, 0, 608, 608, 608, 628, 629, - 632, 633, 733, 734, 735, 736, 737, 738, 739, 740, - 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, - 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, - 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, - 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, - 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, - 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, - 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, - 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, - 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, - 831, 833, 834, 835, 836, 837, 838, 839, 840, 841, - 842, 843, 844, 845, 0, 0, 0, 611, 0, 606, - 0, 606, 606, 606, 0, 255, 389, 636, 637, 832, - 0, 0, 0, 849, 0, 849, 267, 849, 
849, 270, - 849, 0, 849, 0, 277, 279, 280, 281, 282, 0, - 286, 849, 301, 302, 291, 303, 306, 309, 310, 311, - 312, 313, 848, 848, 316, 29, 568, 0, 0, 556, - 31, 0, 317, 322, 323, 327, 325, 326, 318, 0, - 335, 339, 0, 397, 0, 402, 404, -2, -2, 0, - 439, 440, 441, 442, 443, 0, 0, 0, 0, 0, - 0, 0, 0, 467, 468, 469, 470, 541, 542, 543, - 544, 545, 546, 547, 548, 406, 407, 538, 588, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 529, 0, - 503, 503, 503, 503, 503, 503, 503, 503, 0, 0, - 0, 0, 0, 0, 0, 0, 43, 47, 0, 823, - 592, -2, -2, 0, 0, 634, 635, -2, 742, -2, - 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, - 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, - 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, - 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, - 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, - 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, - 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, - 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, - 730, 731, 732, 0, 0, 81, 0, 79, 0, 849, - 0, 0, 0, 0, 0, 0, 849, 0, 0, 0, - 0, 246, 0, 0, 0, 0, 0, 0, 0, 254, - 0, 256, 849, 849, 259, 850, 851, 849, 849, 849, - 849, 849, 849, 266, 268, 269, 271, 849, 849, 273, - 0, 294, 292, 293, 288, 289, 0, 283, 284, 287, - 314, 315, 30, 847, 24, 0, 0, 565, 0, 557, - 558, 561, 564, 29, 324, 0, 329, 328, 320, 0, - 336, 0, 0, 0, 340, 0, 342, 343, 0, 400, + 21, 22, 560, 0, 312, 312, 312, 312, 312, 312, + 0, 631, 614, 0, 0, 0, 0, -2, 299, 300, + 0, 302, 303, 855, 855, 855, 855, 855, 0, 0, + 855, 0, 35, 36, 853, 1, 3, 568, 0, 0, + 316, 319, 314, 0, 614, 614, 0, 0, 62, 63, + 0, 0, 0, 839, 0, 612, 612, 612, 632, 633, + 636, 637, 739, 740, 741, 742, 743, 744, 745, 746, + 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, + 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, + 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, + 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, + 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, + 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, + 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, + 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, + 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, + 837, 838, 840, 841, 842, 843, 844, 845, 846, 847, + 848, 849, 850, 851, 852, 0, 0, 0, 615, 0, + 610, 0, 610, 610, 610, 0, 250, 384, 640, 641, + 839, 0, 0, 0, 856, 0, 856, 262, 856, 856, + 265, 856, 0, 856, 0, 272, 274, 275, 276, 277, + 0, 281, 856, 296, 297, 286, 298, 301, 304, 305, + 306, 307, 308, 855, 855, 311, 29, 572, 0, 0, + 560, 31, 0, 312, 317, 318, 322, 320, 321, 313, + 0, 330, 334, 0, 392, 0, 397, 399, -2, -2, + 0, 434, 435, 436, 437, 438, 0, 0, 0, 0, + 0, 0, 0, 0, 462, 463, 464, 465, 545, 546, + 547, 548, 549, 550, 551, 552, 401, 402, 542, 592, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 533, + 0, 506, 506, 506, 506, 506, 506, 506, 506, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, + 47, 0, 830, 596, -2, -2, 0, 0, 638, 639, + -2, 749, -2, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, + 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, + 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, + 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, + 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, + 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, + 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 733, 734, 735, 736, 737, 738, 0, 0, + 81, 0, 79, 0, 856, 0, 0, 0, 0, 0, + 0, 856, 0, 0, 0, 0, 241, 0, 0, 0, + 0, 0, 0, 0, 249, 0, 251, 856, 856, 254, + 857, 858, 
856, 856, 856, 856, 856, 856, 261, 263, + 264, 266, 856, 856, 268, 0, 289, 287, 288, 283, + 284, 0, 278, 279, 282, 309, 310, 30, 854, 24, + 0, 0, 569, 0, 561, 562, 565, 568, 29, 319, + 0, 324, 323, 315, 0, 331, 0, 0, 0, 335, + 0, 337, 338, 0, 395, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 424, - 425, 426, 427, 428, 429, 430, 403, 0, 417, 0, - 0, 0, 459, 460, 461, 462, 463, 464, 465, 0, - 331, 29, 0, 437, 0, 0, 0, 0, 0, 0, - 0, 0, 327, 0, 530, 0, 495, 0, 496, 497, - 498, 499, 500, 501, 502, 0, 331, 0, 0, 45, - 0, 388, 0, 346, 348, 349, 350, -2, 0, 372, - -2, 0, 0, 0, 41, 42, 0, 48, 823, 50, - 51, 0, 0, 0, 164, 601, 602, 603, 599, 208, - 0, 0, 145, 141, 87, 88, 89, 134, 91, 134, - 134, 134, 134, 161, 161, 161, 161, 117, 118, 119, - 120, 121, 0, 0, 104, 134, 134, 134, 108, 124, - 125, 126, 127, 128, 129, 130, 131, 92, 93, 94, - 95, 96, 97, 98, 136, 136, 136, 138, 138, 630, - 65, 0, 849, 0, 849, 77, 0, 222, 0, 0, - 0, 0, 0, 0, 0, 249, 607, 0, 849, 252, - 253, 390, 638, 639, 257, 258, 260, 261, 262, 263, - 264, 265, 272, 276, 0, 297, 0, 0, 278, 0, - 569, 0, 0, 0, 0, 0, 560, 562, 563, 568, - 32, 327, 0, 549, 0, 0, 0, 330, 27, 398, - 399, 401, 418, 0, 420, 422, 341, 337, 0, 539, - -2, 408, 409, 433, 434, 435, 0, 0, 0, 0, - 431, 413, 0, 444, 445, 446, 447, 448, 449, 450, - 451, 452, 453, 454, 455, 458, 514, 515, 0, 456, - 457, 466, 0, 0, 332, 333, 436, 0, 587, 29, - 0, 0, 0, 0, 0, 0, 538, 0, 0, 0, - 0, 0, 536, 533, 0, 0, 504, 0, 0, 0, - 0, 0, 0, 387, 0, 0, 0, 0, 0, 0, - 377, 0, 0, 380, 0, 0, 0, 0, 371, 0, - 0, 391, 792, 373, 0, 375, 376, 395, 0, 395, - 44, 593, 49, 0, 0, 54, 55, 594, 595, 596, - 597, 0, 78, 209, 211, 214, 215, 216, 82, 83, - 84, 0, 0, 196, 0, 0, 190, 190, 0, 188, - 189, 80, 148, 146, 0, 143, 142, 90, 0, 161, - 161, 111, 112, 164, 0, 164, 164, 164, 0, 0, - 105, 106, 107, 99, 0, 100, 101, 102, 0, 103, - 0, 0, 849, 67, 609, 68, 848, 0, 0, 622, - 223, 612, 613, 614, 615, 616, 617, 618, 619, 620, - 621, 0, 69, 225, 227, 226, 0, 0, 0, 247, - 849, 251, 294, 275, 0, 0, 295, 296, 285, 0, - 566, 567, 0, 559, 25, 0, 604, 605, 550, 551, - 344, 419, 421, 423, 0, 331, 410, 431, 414, 0, - 411, 0, 0, 405, 471, 0, 0, 438, -2, 474, - 475, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 556, 0, 534, 0, 0, 494, - 505, 506, 507, 508, 581, 0, 0, -2, 0, 0, - 395, 589, 0, 347, 366, 368, 0, 363, 378, 379, - 381, 0, 383, 0, 385, 386, 351, 353, 354, 0, - 0, 0, 0, 374, 556, 0, 395, 40, 52, 53, - 0, 0, 59, 165, 166, 0, 212, 0, 0, 0, - 183, 190, 190, 186, 191, 187, 0, 156, 0, 147, - 86, 144, 0, 164, 164, 113, 0, 114, 115, 116, - 0, 132, 0, 0, 0, 0, 631, 66, 217, 848, - 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, - 240, 848, 0, 848, 623, 624, 625, 626, 0, 72, - 0, 0, 0, 0, 250, 297, 298, 299, 570, 0, - 26, 395, 0, 338, 540, 0, 412, 0, 432, 415, - 472, 334, 0, 134, 134, 519, 134, 138, 522, 134, - 524, 134, 527, 0, 0, 0, 0, 0, 0, 539, - 0, 0, 0, 0, 0, 531, 493, 537, 0, 33, - 0, 581, 571, 583, 585, 0, 29, 0, 577, 0, - 358, 556, 0, 0, 360, 367, 0, 0, 361, 0, - 362, 382, 384, 0, 0, 0, 0, 564, 396, 39, - 56, 57, 58, 210, 213, 0, 192, 134, 195, 184, - 185, 0, 159, 0, 149, 150, 151, 152, 153, 155, - 135, 109, 110, 162, 163, 161, 0, 161, 0, 139, - 0, 849, 218, 219, 220, 221, 0, 224, 0, 70, - 71, 0, 229, 248, 274, 552, 345, 473, 416, 476, - 516, 161, 520, 521, 523, 525, 526, 528, 478, 477, - 479, 0, 0, 485, 0, 0, 482, 0, 0, 488, - 0, 0, 0, 0, 0, 535, 0, 34, 0, 586, - -2, 0, 0, 0, 46, 0, 564, 590, 591, 364, - 0, 369, 0, 0, 0, 372, 38, 175, 0, 194, - 0, 356, 167, 160, 0, 0, 164, 133, 164, 0, 
- 0, 64, 0, 73, 74, 0, 0, 554, 0, 517, - 518, 0, 0, 0, 0, 0, 0, 0, 0, 509, - 492, 532, 0, 584, 0, -2, 0, 579, 578, 359, - 37, 0, 0, 0, 0, 391, 174, 176, 0, 181, - 0, 193, 0, 0, 172, 0, 169, 171, 157, 154, - 122, 123, 137, 140, 0, 0, 0, 0, 28, 0, - 0, 480, 481, 486, 487, 483, 484, 489, 490, 0, - 0, 0, 0, 574, 29, 0, 365, 392, 393, 394, - 355, 177, 178, 0, 182, 180, 0, 357, 85, 0, - 168, 170, 0, 0, 242, 0, 75, 76, 69, 555, - 553, 491, 0, 0, 0, 582, -2, 580, 179, 0, - 173, 158, 241, 0, 0, 72, 510, 0, 513, 0, - 243, 0, 228, 511, 0, 0, 0, 197, 0, 0, - 198, 199, 0, 0, 512, 200, 0, 0, 0, 0, - 0, 201, 203, 204, 0, 0, 202, 244, 245, 205, - 206, 207, + 0, 0, 0, 0, 419, 420, 421, 422, 423, 424, + 425, 398, 0, 412, 0, 0, 0, 454, 455, 456, + 457, 458, 459, 460, 0, 326, 29, 0, 432, 0, + 0, 0, 0, 0, 0, 0, 0, 322, 0, 534, + 0, 490, 498, 0, 491, 499, 492, 500, 493, 0, + 494, 501, 495, 502, 496, 497, 503, 0, 0, 0, + 326, 0, 0, 45, 0, 383, 0, 341, 343, 344, + 345, -2, 0, 367, -2, 0, 0, 0, 41, 42, + 0, 48, 830, 50, 51, 0, 0, 0, 159, 605, + 606, 607, 603, 203, 0, 0, 147, 143, 87, 88, + 89, 136, 91, 136, 136, 136, 136, 156, 156, 156, + 156, 119, 120, 121, 122, 123, 0, 0, 106, 136, + 136, 136, 110, 126, 127, 128, 129, 130, 131, 132, + 133, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 138, 138, 138, 140, 140, 634, 65, 0, 856, 0, + 856, 77, 0, 217, 0, 0, 0, 0, 0, 0, + 0, 244, 611, 0, 856, 247, 248, 385, 642, 643, + 252, 253, 255, 256, 257, 258, 259, 260, 267, 271, + 0, 292, 0, 0, 273, 0, 573, 0, 0, 0, + 0, 0, 564, 566, 567, 572, 32, 322, 0, 553, + 0, 0, 0, 325, 27, 393, 394, 396, 413, 0, + 415, 417, 336, 332, 0, 543, -2, 403, 404, 428, + 429, 430, 0, 0, 0, 0, 426, 408, 0, 439, + 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, + 450, 453, 518, 519, 0, 451, 452, 461, 0, 0, + 327, 328, 431, 0, 591, 29, 0, 0, 0, 0, + 0, 0, 542, 0, 0, 0, 0, 0, 540, 537, + 0, 0, 507, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 382, 0, 0, 0, 0, 0, 0, 372, + 0, 0, 375, 0, 0, 0, 0, 366, 0, 0, + 386, 799, 368, 0, 370, 371, 390, 0, 390, 44, + 597, 49, 0, 0, 54, 55, 598, 599, 600, 601, + 0, 78, 204, 206, 209, 210, 211, 82, 83, 84, + 0, 0, 191, 0, 0, 185, 185, 0, 183, 184, + 80, 150, 148, 0, 145, 144, 90, 0, 156, 156, + 113, 114, 159, 0, 159, 159, 159, 0, 0, 107, + 108, 109, 101, 0, 102, 103, 104, 0, 105, 0, + 0, 856, 67, 613, 68, 855, 0, 0, 626, 218, + 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, + 0, 69, 220, 222, 221, 0, 0, 0, 242, 856, + 246, 289, 270, 0, 0, 290, 291, 280, 0, 570, + 571, 0, 563, 25, 0, 608, 609, 554, 555, 339, + 414, 416, 418, 0, 326, 405, 426, 409, 0, 406, + 0, 0, 400, 466, 0, 0, 433, -2, 469, 470, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 560, 0, 538, 0, 0, 489, 508, + 0, 0, 509, 510, 511, 512, 585, 0, 0, -2, + 0, 0, 390, 593, 0, 342, 361, 363, 0, 358, + 373, 374, 376, 0, 378, 0, 380, 381, 346, 348, + 349, 0, 0, 0, 0, 369, 560, 0, 390, 40, + 52, 53, 0, 0, 59, 160, 161, 0, 207, 0, + 0, 0, 178, 185, 185, 181, 186, 182, 0, 152, + 0, 149, 86, 146, 0, 159, 159, 115, 0, 116, + 117, 118, 0, 134, 0, 0, 0, 0, 635, 66, + 212, 855, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 855, 0, 855, 627, 628, 629, 630, + 0, 72, 0, 0, 0, 0, 245, 292, 293, 294, + 574, 0, 26, 390, 0, 333, 544, 0, 407, 0, + 427, 410, 467, 329, 0, 136, 136, 523, 136, 140, + 526, 136, 528, 136, 531, 0, 0, 0, 0, 0, + 0, 543, 0, 0, 0, 0, 0, 535, 488, 541, + 0, 0, 0, 33, 0, 585, 575, 587, 589, 0, + 29, 0, 581, 0, 353, 560, 0, 0, 355, 362, + 0, 0, 356, 0, 357, 377, 379, 0, 0, 0, + 0, 568, 391, 39, 56, 57, 58, 205, 208, 0, + 187, 136, 190, 179, 180, 0, 154, 0, 151, 137, 
+ 111, 112, 157, 158, 156, 0, 156, 0, 141, 0, + 856, 213, 214, 215, 216, 0, 219, 0, 70, 71, + 0, 224, 243, 269, 556, 340, 468, 411, 471, 520, + 156, 524, 525, 527, 529, 530, 532, 473, 472, 474, + 0, 0, 480, 0, 0, 477, 0, 0, 483, 0, + 0, 0, 0, 0, 539, 0, 0, 0, 34, 0, + 590, -2, 0, 0, 0, 46, 0, 568, 594, 595, + 359, 0, 364, 0, 0, 0, 367, 38, 170, 0, + 189, 0, 351, 162, 155, 0, 159, 135, 159, 0, + 0, 64, 0, 73, 74, 0, 0, 558, 0, 521, + 522, 0, 0, 0, 0, 0, 0, 0, 0, 513, + 487, 536, 0, 0, 0, 588, 0, -2, 0, 583, + 582, 354, 37, 0, 0, 0, 0, 386, 169, 171, + 0, 176, 0, 188, 0, 0, 167, 0, 164, 166, + 153, 124, 125, 139, 142, 0, 0, 0, 0, 28, + 0, 0, 475, 476, 481, 482, 478, 479, 484, 485, + 0, 0, 0, 504, 505, 0, 578, 29, 0, 360, + 387, 388, 389, 350, 172, 173, 0, 177, 175, 0, + 352, 85, 0, 163, 165, 0, 237, 0, 75, 76, + 69, 559, 557, 486, 0, 0, 0, 586, -2, 584, + 174, 0, 168, 236, 0, 0, 72, 514, 0, 517, + 0, 238, 0, 223, 515, 0, 0, 0, 192, 0, + 0, 193, 194, 0, 0, 516, 195, 0, 0, 0, + 0, 0, 196, 198, 199, 0, 0, 197, 239, 240, + 200, 201, 202, } var yyTok1 = [...]int{ @@ -2571,7 +2645,7 @@ var yyTok1 = [...]int{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 75, 3, 3, 3, 102, 94, 3, 54, 56, 99, 97, 55, 98, 112, 100, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 266, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 268, 83, 82, 84, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, @@ -2606,7 +2680,7 @@ var yyTok2 = [...]int{ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, - 259, 260, 261, 262, 263, 264, 265, + 259, 260, 261, 262, 263, 264, 265, 266, 267, } var yyTok3 = [...]int{ 0, @@ -2951,35 +3025,35 @@ yydefault: case 1: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:313 +//line sql.y:316 { setParseTree(yylex, yyDollar[1].statement) } case 2: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:318 +//line sql.y:321 { } case 3: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:319 +//line sql.y:322 { } case 4: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:323 +//line sql.y:326 { yyVAL.statement = yyDollar[1].selStmt } case 23: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:345 +//line sql.y:348 { setParseTree(yylex, nil) } case 24: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:351 +//line sql.y:354 { sel := yyDollar[1].selStmt.(*Select) sel.OrderBy = yyDollar[2].orderBy @@ -2989,55 +3063,55 @@ yydefault: } case 25: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:359 +//line sql.y:362 { yyVAL.selStmt = &Union{Type: yyDollar[2].str, Left: yyDollar[1].selStmt, Right: yyDollar[3].selStmt, OrderBy: yyDollar[4].orderBy, Limit: yyDollar[5].limit, Lock: yyDollar[6].str} } case 26: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:363 +//line sql.y:366 { yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, SelectExprs: SelectExprs{Nextval{Expr: yyDollar[5].expr}}, From: TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}} } case 27: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:369 +//line sql.y:372 { yyVAL.statement = &Stream{Comments: Comments(yyDollar[2].bytes2), SelectExpr: yyDollar[3].selectExpr, Table: yyDollar[5].tableName} } case 28: yyDollar = yyS[yypt-10 : yypt+1] -//line sql.y:376 +//line sql.y:379 { yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, Distinct: yyDollar[4].str, Hints: yyDollar[5].str, SelectExprs: yyDollar[6].selectExprs, From: yyDollar[7].tableExprs, Where: NewWhere(WhereStr, yyDollar[8].expr), GroupBy: 
GroupBy(yyDollar[9].exprs), Having: NewWhere(HavingStr, yyDollar[10].expr)} } case 29: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:382 +//line sql.y:385 { yyVAL.selStmt = yyDollar[1].selStmt } case 30: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:386 +//line sql.y:389 { yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} } case 31: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:392 +//line sql.y:395 { yyVAL.selStmt = yyDollar[1].selStmt } case 32: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:396 +//line sql.y:399 { yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} } case 33: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:403 +//line sql.y:406 { // insert_data returns a *Insert pre-filled with Columns & Values ins := yyDollar[6].ins @@ -3051,7 +3125,7 @@ yydefault: } case 34: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:415 +//line sql.y:418 { cols := make(Columns, 0, len(yyDollar[7].updateExprs)) vals := make(ValTuple, 0, len(yyDollar[8].updateExprs)) @@ -3063,174 +3137,174 @@ yydefault: } case 35: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:427 +//line sql.y:430 { yyVAL.str = InsertStr } case 36: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:431 +//line sql.y:434 { yyVAL.str = ReplaceStr } case 37: yyDollar = yyS[yypt-9 : yypt+1] -//line sql.y:437 +//line sql.y:440 { yyVAL.statement = &Update{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, TableExprs: yyDollar[4].tableExprs, Exprs: yyDollar[6].updateExprs, Where: NewWhere(WhereStr, yyDollar[7].expr), OrderBy: yyDollar[8].orderBy, Limit: yyDollar[9].limit} } case 38: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:443 +//line sql.y:446 { yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[4].tableName}}, Partitions: yyDollar[5].partitions, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit} } case 39: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:447 +//line sql.y:450 { yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[4].tableNames, TableExprs: yyDollar[6].tableExprs, Where: NewWhere(WhereStr, yyDollar[7].expr)} } case 40: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:451 +//line sql.y:454 { yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[3].tableNames, TableExprs: yyDollar[5].tableExprs, Where: NewWhere(WhereStr, yyDollar[6].expr)} } case 41: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:456 +//line sql.y:459 { } case 42: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:457 +//line sql.y:460 { } case 43: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:461 +//line sql.y:464 { yyVAL.tableNames = TableNames{yyDollar[1].tableName} } case 44: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:465 +//line sql.y:468 { yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) } case 45: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:470 +//line sql.y:473 { yyVAL.partitions = nil } case 46: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:474 +//line sql.y:477 { yyVAL.partitions = yyDollar[3].partitions } case 47: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:480 +//line sql.y:483 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[3].setExprs} } case 48: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:484 +//line sql.y:487 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].str, Exprs: yyDollar[4].setExprs} } case 49: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:488 
+//line sql.y:491 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].str, Exprs: yyDollar[5].setExprs} } case 50: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:492 +//line sql.y:495 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[4].setExprs} } case 51: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:498 +//line sql.y:501 { yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} } case 52: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:502 +//line sql.y:505 { yyVAL.setExprs = append(yyVAL.setExprs, yyDollar[3].setExpr) } case 53: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:508 +//line sql.y:511 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(TransactionStr), Expr: NewStrVal([]byte(yyDollar[3].str))} } case 54: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:512 +//line sql.y:515 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(TransactionStr), Expr: NewStrVal([]byte(TxReadWrite))} } case 55: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:516 +//line sql.y:519 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(TransactionStr), Expr: NewStrVal([]byte(TxReadOnly))} } case 56: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:522 +//line sql.y:525 { yyVAL.str = IsolationLevelRepeatableRead } case 57: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:526 +//line sql.y:529 { yyVAL.str = IsolationLevelReadCommitted } case 58: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:530 +//line sql.y:533 { yyVAL.str = IsolationLevelReadUncommitted } case 59: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:534 +//line sql.y:537 { yyVAL.str = IsolationLevelSerializable } case 60: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:540 +//line sql.y:543 { yyVAL.str = SessionStr } case 61: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:544 +//line sql.y:547 { yyVAL.str = GlobalStr } case 62: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:550 +//line sql.y:553 { yyDollar[1].ddl.TableSpec = yyDollar[2].TableSpec yyVAL.statement = yyDollar[1].ddl } case 63: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:555 +//line sql.y:558 { // Create table [name] like [name] yyDollar[1].ddl.OptLike = yyDollar[2].optLike @@ -3238,151 +3312,151 @@ yydefault: } case 64: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:561 +//line sql.y:564 { // Change this to an alter statement yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[7].tableName} } case 65: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:566 +//line sql.y:569 { yyVAL.statement = &DDL{Action: CreateStr, Table: yyDollar[3].tableName.ToViewName()} } case 66: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:570 +//line sql.y:573 { yyVAL.statement = &DDL{Action: CreateStr, Table: yyDollar[5].tableName.ToViewName()} } case 67: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:574 +//line sql.y:577 { yyVAL.statement = &DBDDL{Action: CreateStr, DBName: string(yyDollar[4].bytes)} } case 68: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:578 +//line sql.y:581 { yyVAL.statement = &DBDDL{Action: CreateStr, DBName: string(yyDollar[4].bytes)} } case 69: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:583 +//line sql.y:586 { yyVAL.colIdent = NewColIdent("") } case 70: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:587 +//line sql.y:590 { yyVAL.colIdent = yyDollar[2].colIdent } case 71: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:593 +//line sql.y:596 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } case 72: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:598 +//line sql.y:601 { var v []VindexParam yyVAL.vindexParams = v } case 73: yyDollar = yyS[yypt-2 : yypt+1] 
-//line sql.y:603 +//line sql.y:606 { yyVAL.vindexParams = yyDollar[2].vindexParams } case 74: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:609 +//line sql.y:612 { yyVAL.vindexParams = make([]VindexParam, 0, 4) yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[1].vindexParam) } case 75: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:614 +//line sql.y:617 { yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[3].vindexParam) } case 76: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:620 +//line sql.y:623 { yyVAL.vindexParam = VindexParam{Key: yyDollar[1].colIdent, Val: yyDollar[3].str} } case 77: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:626 +//line sql.y:629 { yyVAL.ddl = &DDL{Action: CreateStr, Table: yyDollar[4].tableName} setDDL(yylex, yyVAL.ddl) } case 78: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:633 +//line sql.y:636 { yyVAL.TableSpec = yyDollar[2].TableSpec yyVAL.TableSpec.Options = yyDollar[4].str } case 79: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:640 +//line sql.y:643 { yyVAL.optLike = &OptLike{LikeTable: yyDollar[2].tableName} } case 80: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:644 +//line sql.y:647 { yyVAL.optLike = &OptLike{LikeTable: yyDollar[3].tableName} } case 81: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:650 +//line sql.y:653 { yyVAL.TableSpec = &TableSpec{} yyVAL.TableSpec.AddColumn(yyDollar[1].columnDefinition) } case 82: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:655 +//line sql.y:658 { yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition) } case 83: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:659 +//line sql.y:662 { yyVAL.TableSpec.AddIndex(yyDollar[3].indexDefinition) } case 84: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:663 +//line sql.y:666 { yyVAL.TableSpec.AddConstraint(yyDollar[3].constraintDefinition) } case 85: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:669 +//line sql.y:672 { yyDollar[2].columnType.NotNull = yyDollar[3].boolVal yyDollar[2].columnType.Default = yyDollar[4].optVal yyDollar[2].columnType.OnUpdate = yyDollar[5].optVal yyDollar[2].columnType.Autoincrement = yyDollar[6].boolVal yyDollar[2].columnType.KeyOpt = yyDollar[7].colKeyOpt - yyDollar[2].columnType.Comment = yyDollar[8].optVal + yyDollar[2].columnType.Comment = yyDollar[8].sqlVal yyVAL.columnDefinition = &ColumnDefinition{Name: NewColIdent(string(yyDollar[1].bytes)), Type: yyDollar[2].columnType} } case 86: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:680 +//line sql.y:683 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Unsigned = yyDollar[2].boolVal @@ -3390,78 +3464,74 @@ yydefault: } case 90: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:691 +//line sql.y:694 { yyVAL.columnType = yyDollar[1].columnType - yyVAL.columnType.Length = yyDollar[2].optVal + yyVAL.columnType.Length = yyDollar[2].sqlVal } case 91: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:696 +//line sql.y:699 { yyVAL.columnType = yyDollar[1].columnType } case 92: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:702 +//line sql.y:705 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 93: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:706 +//line sql.y:709 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 94: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:710 +//line sql.y:713 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 95: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:714 +//line sql.y:717 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 96: yyDollar = yyS[yypt-1 : yypt+1] -//line 
sql.y:718 +//line sql.y:721 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 97: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:722 +//line sql.y:725 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 98: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:726 +//line sql.y:729 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 99: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:732 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:733 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} - yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 100: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:738 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:737 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} - yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 101: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:744 +//line sql.y:743 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -3469,7 +3539,7 @@ yydefault: } case 102: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:750 +//line sql.y:749 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -3477,747 +3547,721 @@ yydefault: } case 103: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:756 +//line sql.y:755 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 104: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:764 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:761 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 105: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:768 +//line sql.y:767 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 106: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:772 - { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} - } - case 107: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:776 - { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} - } - case 108: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:780 +//line sql.y:775 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 109: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:786 + case 107: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:779 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 108: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:783 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 109: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:787 + { + yyVAL.columnType = ColumnType{Type: 
string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 110: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:790 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:791 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 111: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:794 + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:797 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} } case 112: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:798 + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:801 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} } case 113: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:802 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:805 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 114: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:806 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:809 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 115: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:810 +//line sql.y:813 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 116: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:814 +//line sql.y:817 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 117: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:818 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:821 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 118: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:822 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:825 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 119: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:826 +//line sql.y:829 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 120: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:830 +//line sql.y:833 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 121: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:834 +//line sql.y:837 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 122: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:838 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:841 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 123: + yyDollar = yyS[yypt-1 : 
yypt+1] +//line sql.y:845 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 124: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:843 +//line sql.y:849 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} } - case 124: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:849 - { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} - } case 125: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:853 + yyDollar = yyS[yypt-6 : yypt+1] +//line sql.y:854 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} } case 126: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:857 +//line sql.y:860 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 127: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:861 +//line sql.y:864 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 128: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:865 +//line sql.y:868 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 129: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:869 +//line sql.y:872 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 130: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:873 +//line sql.y:876 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 131: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:877 +//line sql.y:880 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } case 132: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:883 +//line sql.y:884 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 133: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:888 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 134: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:894 { yyVAL.strs = make([]string, 0, 4) yyVAL.strs = append(yyVAL.strs, "'"+string(yyDollar[1].bytes)+"'") } - case 133: + case 135: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:888 +//line sql.y:899 { yyVAL.strs = append(yyDollar[1].strs, "'"+string(yyDollar[3].bytes)+"'") } - case 134: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:893 - { - yyVAL.optVal = nil - } - case 135: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:897 - { - yyVAL.optVal = NewIntVal(yyDollar[2].bytes) - } case 136: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:902 +//line sql.y:904 { - yyVAL.LengthScaleOption = LengthScaleOption{} + yyVAL.sqlVal = nil } case 137: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:906 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:908 { - yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntVal(yyDollar[2].bytes), - Scale: NewIntVal(yyDollar[4].bytes), - } + yyVAL.sqlVal = NewIntVal(yyDollar[2].bytes) } case 138: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:914 +//line sql.y:913 { yyVAL.LengthScaleOption = LengthScaleOption{} } case 139: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:918 - { - yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntVal(yyDollar[2].bytes), - } - } - case 140: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:924 +//line sql.y:917 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntVal(yyDollar[2].bytes), Scale: NewIntVal(yyDollar[4].bytes), } } - case 141: + case 140: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:932 +//line sql.y:925 { - yyVAL.boolVal = 
BoolVal(false) + yyVAL.LengthScaleOption = LengthScaleOption{} + } + case 141: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:929 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + } } case 142: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:936 + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:935 { - yyVAL.boolVal = BoolVal(true) + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + Scale: NewIntVal(yyDollar[4].bytes), + } } case 143: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:941 +//line sql.y:943 { yyVAL.boolVal = BoolVal(false) } case 144: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:945 +//line sql.y:947 { yyVAL.boolVal = BoolVal(true) } case 145: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:951 +//line sql.y:952 { yyVAL.boolVal = BoolVal(false) } case 146: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:955 - { - yyVAL.boolVal = BoolVal(false) - } - case 147: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:959 +//line sql.y:956 { yyVAL.boolVal = BoolVal(true) } - case 148: + case 147: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:964 +//line sql.y:962 { - yyVAL.optVal = nil + yyVAL.boolVal = BoolVal(false) + } + case 148: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:966 + { + yyVAL.boolVal = BoolVal(false) } case 149: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:968 - { - yyVAL.optVal = NewStrVal(yyDollar[2].bytes) - } - case 150: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:972 - { - yyVAL.optVal = NewIntVal(yyDollar[2].bytes) - } - case 151: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:976 - { - yyVAL.optVal = NewFloatVal(yyDollar[2].bytes) - } - case 152: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:980 - { - yyVAL.optVal = NewValArg(yyDollar[2].bytes) - } - case 153: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:984 - { - yyVAL.optVal = NewValArg(yyDollar[2].bytes) - } - case 154: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:988 - { - yyVAL.optVal = NewValArg(yyDollar[2].bytes) - } - case 155: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:992 - { - yyVAL.optVal = NewBitVal(yyDollar[2].bytes) - } - case 156: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:997 - { - yyVAL.optVal = nil - } - case 157: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1001 - { - yyVAL.optVal = NewValArg(yyDollar[3].bytes) - } - case 158: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1005 - { - yyVAL.optVal = NewValArg(yyDollar[3].bytes) - } - case 159: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1010 - { - yyVAL.boolVal = BoolVal(false) - } - case 160: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1014 +//line sql.y:970 { yyVAL.boolVal = BoolVal(true) } - case 161: + case 150: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1019 - { - yyVAL.str = "" - } - case 162: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1023 - { - yyVAL.str = string(yyDollar[3].bytes) - } - case 163: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1027 - { - yyVAL.str = string(yyDollar[3].bytes) - } - case 164: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1032 - { - yyVAL.str = "" - } - case 165: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1036 - { - yyVAL.str = string(yyDollar[2].bytes) - } - case 166: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1040 - { - yyVAL.str = string(yyDollar[2].bytes) - } - case 167: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1045 - { - yyVAL.colKeyOpt = colKeyNone - } - case 168: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1049 - { - yyVAL.colKeyOpt = colKeyPrimary - } - 
case 169: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1053 - { - yyVAL.colKeyOpt = colKey - } - case 170: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1057 - { - yyVAL.colKeyOpt = colKeyUniqueKey - } - case 171: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1061 - { - yyVAL.colKeyOpt = colKeyUnique - } - case 172: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1066 +//line sql.y:975 { yyVAL.optVal = nil } - case 173: + case 151: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1070 +//line sql.y:979 { - yyVAL.optVal = NewStrVal(yyDollar[2].bytes) + yyVAL.optVal = yyDollar[2].expr } - case 174: + case 152: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:984 + { + yyVAL.optVal = nil + } + case 153: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:988 + { + yyVAL.optVal = yyDollar[3].expr + } + case 154: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:993 + { + yyVAL.boolVal = BoolVal(false) + } + case 155: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:997 + { + yyVAL.boolVal = BoolVal(true) + } + case 156: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1002 + { + yyVAL.str = "" + } + case 157: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1006 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 158: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1010 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 159: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1015 + { + yyVAL.str = "" + } + case 160: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1019 + { + yyVAL.str = string(yyDollar[2].bytes) + } + case 161: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1023 + { + yyVAL.str = string(yyDollar[2].bytes) + } + case 162: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1028 + { + yyVAL.colKeyOpt = colKeyNone + } + case 163: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1032 + { + yyVAL.colKeyOpt = colKeyPrimary + } + case 164: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1036 + { + yyVAL.colKeyOpt = colKey + } + case 165: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1040 + { + yyVAL.colKeyOpt = colKeyUniqueKey + } + case 166: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1044 + { + yyVAL.colKeyOpt = colKeyUnique + } + case 167: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1049 + { + yyVAL.sqlVal = nil + } + case 168: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1053 + { + yyVAL.sqlVal = NewStrVal(yyDollar[2].bytes) + } + case 169: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1076 +//line sql.y:1059 { yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns, Options: yyDollar[5].indexOptions} } - case 175: + case 170: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1080 +//line sql.y:1063 { yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns} } - case 176: + case 171: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1086 +//line sql.y:1069 { yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption} } - case 177: + case 172: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1090 +//line sql.y:1073 { yyVAL.indexOptions = append(yyVAL.indexOptions, yyDollar[2].indexOption) } - case 178: + case 173: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1096 +//line sql.y:1079 { yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Using: string(yyDollar[2].bytes)} } - case 179: + case 174: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1100 +//line sql.y:1083 { // should not be string yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: 
NewIntVal(yyDollar[3].bytes)} } - case 180: + case 175: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1105 +//line sql.y:1088 { yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewStrVal(yyDollar[2].bytes)} } - case 181: + case 176: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1111 +//line sql.y:1094 { yyVAL.str = "" } - case 182: + case 177: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1115 +//line sql.y:1098 { yyVAL.str = string(yyDollar[1].bytes) } - case 183: + case 178: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1121 +//line sql.y:1104 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} } - case 184: + case 179: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1125 +//line sql.y:1108 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Spatial: true, Unique: false} } - case 185: + case 180: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1129 +//line sql.y:1112 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Unique: true} } - case 186: + case 181: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1133 +//line sql.y:1116 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes), Name: NewColIdent(yyDollar[2].str), Unique: true} } - case 187: + case 182: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1137 +//line sql.y:1120 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(yyDollar[2].str), Unique: false} } - case 188: + case 183: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1143 +//line sql.y:1126 { yyVAL.str = string(yyDollar[1].bytes) } - case 189: + case 184: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1147 +//line sql.y:1130 { yyVAL.str = string(yyDollar[1].bytes) } - case 190: + case 185: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1152 +//line sql.y:1135 { yyVAL.str = "" } - case 191: + case 186: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1156 +//line sql.y:1139 { yyVAL.str = string(yyDollar[1].bytes) } - case 192: + case 187: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1162 +//line sql.y:1145 { yyVAL.indexColumns = []*IndexColumn{yyDollar[1].indexColumn} } - case 193: + case 188: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1166 +//line sql.y:1149 { yyVAL.indexColumns = append(yyVAL.indexColumns, yyDollar[3].indexColumn) } - case 194: + case 189: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1172 +//line sql.y:1155 { - yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].optVal} + yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].sqlVal} } - case 195: + case 190: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1178 +//line sql.y:1161 { yyVAL.constraintDefinition = &ConstraintDefinition{Name: string(yyDollar[2].bytes), Details: yyDollar[3].constraintInfo} } - case 196: + case 191: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1182 +//line sql.y:1165 { yyVAL.constraintDefinition = &ConstraintDefinition{Details: yyDollar[1].constraintInfo} } - case 197: + case 192: yyDollar = yyS[yypt-10 : yypt+1] -//line sql.y:1189 +//line sql.y:1172 { yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns} } - case 198: + case 193: yyDollar = yyS[yypt-11 : yypt+1] -//line sql.y:1193 +//line sql.y:1176 { 
yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnDelete: yyDollar[11].ReferenceAction} } - case 199: + case 194: yyDollar = yyS[yypt-11 : yypt+1] -//line sql.y:1197 +//line sql.y:1180 { yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnUpdate: yyDollar[11].ReferenceAction} } - case 200: + case 195: yyDollar = yyS[yypt-12 : yypt+1] -//line sql.y:1201 +//line sql.y:1184 { yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnDelete: yyDollar[11].ReferenceAction, OnUpdate: yyDollar[12].ReferenceAction} } - case 201: + case 196: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1207 +//line sql.y:1190 { yyVAL.ReferenceAction = yyDollar[3].ReferenceAction } - case 202: + case 197: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1213 +//line sql.y:1196 { yyVAL.ReferenceAction = yyDollar[3].ReferenceAction } - case 203: + case 198: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1219 +//line sql.y:1202 { yyVAL.ReferenceAction = Restrict } - case 204: + case 199: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1223 +//line sql.y:1206 { yyVAL.ReferenceAction = Cascade } - case 205: + case 200: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1227 +//line sql.y:1210 { yyVAL.ReferenceAction = NoAction } - case 206: + case 201: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1231 +//line sql.y:1214 { yyVAL.ReferenceAction = SetDefault } - case 207: + case 202: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1235 +//line sql.y:1218 { yyVAL.ReferenceAction = SetNull } - case 208: + case 203: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1240 +//line sql.y:1223 { yyVAL.str = "" } - case 209: + case 204: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1244 +//line sql.y:1227 { yyVAL.str = " " + string(yyDollar[1].str) } - case 210: + case 205: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1248 +//line sql.y:1231 { yyVAL.str = string(yyDollar[1].str) + ", " + string(yyDollar[3].str) } - case 211: + case 206: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1256 +//line sql.y:1239 { yyVAL.str = yyDollar[1].str } - case 212: + case 207: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1260 +//line sql.y:1243 { yyVAL.str = yyDollar[1].str + " " + yyDollar[2].str } - case 213: + case 208: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1264 +//line sql.y:1247 { yyVAL.str = yyDollar[1].str + "=" + yyDollar[3].str } - case 214: + case 209: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1270 +//line sql.y:1253 { yyVAL.str = yyDollar[1].colIdent.String() } - case 215: + case 210: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1274 +//line sql.y:1257 { yyVAL.str = "'" + string(yyDollar[1].bytes) + "'" } - case 216: + case 211: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1278 +//line sql.y:1261 { yyVAL.str = string(yyDollar[1].bytes) } - case 217: + case 212: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:1284 +//line sql.y:1267 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} } - case 218: + case 213: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1288 +//line sql.y:1271 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} } - case 219: + case 214: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1292 +//line sql.y:1275 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} } 
- case 220: + case 215: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1296 +//line sql.y:1279 { // Change this to a rename statement yyVAL.statement = &DDL{Action: RenameStr, FromTables: TableNames{yyDollar[4].tableName}, ToTables: TableNames{yyDollar[7].tableName}} } - case 221: + case 216: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1301 +//line sql.y:1284 { // Rename an index can just be an alter yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} } - case 222: + case 217: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1306 +//line sql.y:1289 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName.ToViewName()} } - case 223: + case 218: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1310 +//line sql.y:1293 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, PartitionSpec: yyDollar[5].partSpec} } - case 224: + case 219: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1314 +//line sql.y:1297 { yyVAL.statement = &DDL{Action: CreateVindexStr, VindexSpec: &VindexSpec{ Name: yyDollar[5].colIdent, @@ -4225,29 +4269,29 @@ yydefault: Params: yyDollar[7].vindexParams, }} } - case 225: + case 220: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1322 +//line sql.y:1305 { yyVAL.statement = &DDL{Action: DropVindexStr, VindexSpec: &VindexSpec{ Name: yyDollar[5].colIdent, }} } - case 226: + case 221: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1328 +//line sql.y:1311 { yyVAL.statement = &DDL{Action: AddVschemaTableStr, Table: yyDollar[5].tableName} } - case 227: + case 222: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1332 +//line sql.y:1315 { yyVAL.statement = &DDL{Action: DropVschemaTableStr, Table: yyDollar[5].tableName} } - case 228: + case 223: yyDollar = yyS[yypt-12 : yypt+1] -//line sql.y:1336 +//line sql.y:1319 { yyVAL.statement = &DDL{ Action: AddColVindexStr, @@ -4260,9 +4304,9 @@ yydefault: VindexCols: yyDollar[9].columns, } } - case 229: + case 224: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1349 +//line sql.y:1332 { yyVAL.statement = &DDL{ Action: DropColVindexStr, @@ -4272,59 +4316,59 @@ yydefault: }, } } - case 241: + case 236: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1374 +//line sql.y:1357 { yyVAL.partSpec = &PartitionSpec{Action: ReorganizeStr, Name: yyDollar[3].colIdent, Definitions: yyDollar[6].partDefs} } - case 242: + case 237: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1380 +//line sql.y:1363 { yyVAL.partDefs = []*PartitionDefinition{yyDollar[1].partDef} } - case 243: + case 238: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1384 +//line sql.y:1367 { yyVAL.partDefs = append(yyDollar[1].partDefs, yyDollar[3].partDef) } - case 244: + case 239: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:1390 +//line sql.y:1373 { yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Limit: yyDollar[7].expr} } - case 245: + case 240: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:1394 +//line sql.y:1377 { yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Maxvalue: true} } - case 246: + case 241: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1400 +//line sql.y:1383 { yyVAL.statement = yyDollar[3].ddl } - case 247: + case 242: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1406 +//line sql.y:1389 { yyVAL.ddl = &DDL{Action: RenameStr, FromTables: TableNames{yyDollar[1].tableName}, ToTables: TableNames{yyDollar[3].tableName}} } - case 248: + case 243: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1410 +//line sql.y:1393 { yyVAL.ddl = yyDollar[1].ddl yyVAL.ddl.FromTables = append(yyVAL.ddl.FromTables, 
yyDollar[3].tableName) yyVAL.ddl.ToTables = append(yyVAL.ddl.ToTables, yyDollar[5].tableName) } - case 249: + case 244: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1418 +//line sql.y:1401 { var exists bool if yyDollar[3].byt != 0 { @@ -4332,16 +4376,16 @@ yydefault: } yyVAL.statement = &DDL{Action: DropStr, FromTables: yyDollar[4].tableNames, IfExists: exists} } - case 250: + case 245: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:1426 +//line sql.y:1409 { // Change this to an alter statement yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[5].tableName} } - case 251: + case 246: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1431 +//line sql.y:1414 { var exists bool if yyDollar[3].byt != 0 { @@ -4349,148 +4393,148 @@ yydefault: } yyVAL.statement = &DDL{Action: DropStr, FromTables: TableNames{yyDollar[4].tableName.ToViewName()}, IfExists: exists} } - case 252: + case 247: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1439 +//line sql.y:1422 { yyVAL.statement = &DBDDL{Action: DropStr, DBName: string(yyDollar[4].bytes)} } - case 253: + case 248: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1443 +//line sql.y:1426 { yyVAL.statement = &DBDDL{Action: DropStr, DBName: string(yyDollar[4].bytes)} } - case 254: + case 249: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1449 +//line sql.y:1432 { yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[3].tableName} } - case 255: + case 250: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1453 +//line sql.y:1436 { yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[2].tableName} } - case 256: + case 251: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1458 +//line sql.y:1441 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName} } + case 252: + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:1447 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 253: + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:1452 + { + yyVAL.statement = &Show{Type: CharsetStr} + } + case 254: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1456 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 255: + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:1460 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 256: + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:1465 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } case 257: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1464 +//line sql.y:1469 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } case 258: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1469 - { - yyVAL.statement = &Show{Type: CharsetStr} - } - case 259: - yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1473 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 260: + case 259: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:1477 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 261: + case 260: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1482 +//line sql.y:1481 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } + case 261: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1485 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } case 262: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1486 + yyDollar = 
yyS[yypt-2 : yypt+1] +//line sql.y:1489 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 263: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1490 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1493 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 264: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1494 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1497 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 265: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1498 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1501 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 266: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1502 +//line sql.y:1505 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 267: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1506 + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:1509 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} } case 268: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1510 +//line sql.y:1513 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 269: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1514 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } - case 270: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1518 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } - case 271: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1522 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } - case 272: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1526 - { - yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} - } - case 273: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1530 - { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} - } - case 274: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1534 +//line sql.y:1517 { showTablesOpt := &ShowTablesOpt{Full: yyDollar[2].str, DbName: yyDollar[6].str, Filter: yyDollar[7].showFilter} yyVAL.statement = &Show{Type: string(yyDollar[3].str), ShowTablesOpt: showTablesOpt, OnTable: yyDollar[5].tableName} } - case 275: + case 270: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1539 +//line sql.y:1522 { // this is ugly, but I couldn't find a better way for now if yyDollar[3].str == "processlist" { @@ -4500,650 +4544,650 @@ yydefault: yyVAL.statement = &Show{Type: yyDollar[3].str, ShowTablesOpt: showTablesOpt} } } - case 276: + case 271: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1549 +//line sql.y:1532 { yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} } - case 277: + case 272: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1553 +//line sql.y:1536 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 278: + case 273: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1557 +//line sql.y:1540 { // Cannot dereference $4 directly, or else the parser stackcannot be pooled. 
See yyParsePooled showCollationFilterOpt := yyDollar[4].expr yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), ShowCollationFilterOpt: &showCollationFilterOpt} } - case 279: + case 274: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1563 +//line sql.y:1546 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 280: + case 275: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1567 +//line sql.y:1550 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } + case 276: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1554 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 277: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1558 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 278: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1562 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 279: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1566 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 280: + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:1570 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), OnTable: yyDollar[5].tableName} + } case 281: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1571 +//line sql.y:1574 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 282: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1575 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1584 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 283: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1579 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1590 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + yyVAL.str = string(yyDollar[1].bytes) } case 284: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1583 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1594 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + yyVAL.str = string(yyDollar[1].bytes) } case 285: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1587 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1600 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), OnTable: yyDollar[5].tableName} + yyVAL.str = "" } case 286: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1591 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1604 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + yyVAL.str = "full " } case 287: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1601 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1610 { - yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + yyVAL.str = string(yyDollar[1].bytes) } case 288: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1607 +//line sql.y:1614 { yyVAL.str = string(yyDollar[1].bytes) } case 289: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1611 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1620 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = "" } case 290: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1617 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1624 { - yyVAL.str = "" + yyVAL.str = yyDollar[2].tableIdent.v } case 291: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1621 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1628 { - yyVAL.str = "full " + yyVAL.str = yyDollar[2].tableIdent.v } case 292: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1627 - { - yyVAL.str = string(yyDollar[1].bytes) - } - case 
293: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1631 - { - yyVAL.str = string(yyDollar[1].bytes) - } - case 294: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1637 - { - yyVAL.str = "" - } - case 295: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1641 - { - yyVAL.str = yyDollar[2].tableIdent.v - } - case 296: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1645 - { - yyVAL.str = yyDollar[2].tableIdent.v - } - case 297: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1651 +//line sql.y:1634 { yyVAL.showFilter = nil } - case 298: + case 293: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1655 +//line sql.y:1638 { yyVAL.showFilter = &ShowFilter{Like: string(yyDollar[2].bytes)} } - case 299: + case 294: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1659 +//line sql.y:1642 { yyVAL.showFilter = &ShowFilter{Filter: yyDollar[2].expr} } - case 300: + case 295: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1665 +//line sql.y:1648 { yyVAL.str = "" } - case 301: + case 296: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1669 +//line sql.y:1652 { yyVAL.str = SessionStr } - case 302: + case 297: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1673 +//line sql.y:1656 { yyVAL.str = GlobalStr } - case 303: + case 298: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1679 +//line sql.y:1662 { yyVAL.statement = &Use{DBName: yyDollar[2].tableIdent} } - case 304: + case 299: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1683 +//line sql.y:1666 { yyVAL.statement = &Use{DBName: TableIdent{v: ""}} } - case 305: + case 300: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1689 +//line sql.y:1672 { yyVAL.statement = &Begin{} } - case 306: + case 301: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1693 +//line sql.y:1676 { yyVAL.statement = &Begin{} } - case 307: + case 302: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1699 +//line sql.y:1682 { yyVAL.statement = &Commit{} } - case 308: + case 303: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1705 +//line sql.y:1688 { yyVAL.statement = &Rollback{} } - case 309: + case 304: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1711 +//line sql.y:1694 { yyVAL.statement = &OtherRead{} } - case 310: + case 305: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1715 +//line sql.y:1698 { yyVAL.statement = &OtherRead{} } + case 306: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1702 + { + yyVAL.statement = &OtherRead{} + } + case 307: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1706 + { + yyVAL.statement = &OtherAdmin{} + } + case 308: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1710 + { + yyVAL.statement = &OtherAdmin{} + } + case 309: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1714 + { + yyVAL.statement = &OtherAdmin{} + } + case 310: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1718 + { + yyVAL.statement = &OtherAdmin{} + } case 311: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1719 - { - yyVAL.statement = &OtherRead{} - } - case 312: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1723 - { - yyVAL.statement = &OtherAdmin{} - } - case 313: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1727 - { - yyVAL.statement = &OtherAdmin{} - } - case 314: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1731 - { - yyVAL.statement = &OtherAdmin{} - } - case 315: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1735 - { - yyVAL.statement = &OtherAdmin{} - } - case 316: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1741 +//line sql.y:1724 { yyVAL.statement = &DDL{Action: FlushStr} } - case 317: + case 312: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1745 +//line sql.y:1728 { 
setAllowComments(yylex, true) } - case 318: + case 313: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1749 +//line sql.y:1732 { yyVAL.bytes2 = yyDollar[2].bytes2 setAllowComments(yylex, false) } - case 319: + case 314: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1755 +//line sql.y:1738 { yyVAL.bytes2 = nil } - case 320: + case 315: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1759 +//line sql.y:1742 { yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes) } - case 321: + case 316: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1765 +//line sql.y:1748 { yyVAL.str = UnionStr } - case 322: + case 317: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1769 +//line sql.y:1752 { yyVAL.str = UnionAllStr } - case 323: + case 318: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1773 +//line sql.y:1756 { yyVAL.str = UnionDistinctStr } + case 319: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1761 + { + yyVAL.str = "" + } + case 320: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1765 + { + yyVAL.str = SQLNoCacheStr + } + case 321: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1769 + { + yyVAL.str = SQLCacheStr + } + case 322: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1774 + { + yyVAL.str = "" + } + case 323: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1778 + { + yyVAL.str = DistinctStr + } case 324: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1778 +//line sql.y:1783 { yyVAL.str = "" } case 325: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1782 - { - yyVAL.str = SQLNoCacheStr - } - case 326: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1786 - { - yyVAL.str = SQLCacheStr - } - case 327: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1791 - { - yyVAL.str = "" - } - case 328: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1795 - { - yyVAL.str = DistinctStr - } - case 329: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1800 - { - yyVAL.str = "" - } - case 330: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1804 +//line sql.y:1787 { yyVAL.str = StraightJoinHint } - case 331: + case 326: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1809 +//line sql.y:1792 { yyVAL.selectExprs = nil } - case 332: + case 327: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1813 +//line sql.y:1796 { yyVAL.selectExprs = yyDollar[1].selectExprs } - case 333: + case 328: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1819 +//line sql.y:1802 { yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr} } - case 334: + case 329: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1823 +//line sql.y:1806 { yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr) } - case 335: + case 330: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1829 +//line sql.y:1812 { yyVAL.selectExpr = &StarExpr{} } - case 336: + case 331: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1833 +//line sql.y:1816 { yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent} } - case 337: + case 332: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1837 +//line sql.y:1820 { yyVAL.selectExpr = &StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}} } - case 338: + case 333: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1841 +//line sql.y:1824 { yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} } - case 339: + case 334: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1846 +//line sql.y:1829 { yyVAL.colIdent = ColIdent{} } - case 340: + case 335: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1850 +//line sql.y:1833 { yyVAL.colIdent = 
yyDollar[1].colIdent } - case 341: + case 336: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1854 +//line sql.y:1837 { yyVAL.colIdent = yyDollar[2].colIdent } - case 343: + case 338: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1861 +//line sql.y:1844 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 344: + case 339: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1866 +//line sql.y:1849 { yyVAL.tableExprs = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} } - case 345: + case 340: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1870 +//line sql.y:1853 { yyVAL.tableExprs = yyDollar[2].tableExprs } - case 346: + case 341: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1876 +//line sql.y:1859 { yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr} } - case 347: + case 342: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1880 +//line sql.y:1863 { yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr) } - case 350: + case 345: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1890 +//line sql.y:1873 { yyVAL.tableExpr = yyDollar[1].aliasedTableName } - case 351: + case 346: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1894 +//line sql.y:1877 { yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: yyDollar[3].tableIdent} } - case 352: + case 347: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1898 +//line sql.y:1881 { // missed alias for subquery yylex.Error("Every derived table must have its own alias") return 1 } - case 353: + case 348: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1904 +//line sql.y:1887 { yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs} } - case 354: + case 349: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1910 +//line sql.y:1893 { yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHints} } - case 355: + case 350: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1914 +//line sql.y:1897 { yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitions, As: yyDollar[6].tableIdent, Hints: yyDollar[7].indexHints} } - case 356: + case 351: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1920 +//line sql.y:1903 { yyVAL.columns = Columns{yyDollar[1].colIdent} } - case 357: + case 352: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1924 +//line sql.y:1907 { yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) } - case 358: + case 353: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1930 +//line sql.y:1913 { yyVAL.partitions = Partitions{yyDollar[1].colIdent} } - case 359: + case 354: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1934 +//line sql.y:1917 { yyVAL.partitions = append(yyVAL.partitions, yyDollar[3].colIdent) } - case 360: + case 355: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1947 +//line sql.y:1930 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 361: + case 356: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1951 +//line sql.y:1934 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 362: + case 357: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1955 +//line sql.y:1938 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 363: 
+ case 358: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1959 +//line sql.y:1942 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr} } + case 359: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1948 + { + yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} + } + case 360: + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:1950 + { + yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns} + } + case 361: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1954 + { + yyVAL.joinCondition = JoinCondition{} + } + case 362: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1956 + { + yyVAL.joinCondition = yyDollar[1].joinCondition + } + case 363: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1960 + { + yyVAL.joinCondition = JoinCondition{} + } case 364: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1965 +//line sql.y:1962 { yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} } case 365: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1967 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1965 { - yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns} + yyVAL.empty = struct{}{} } case 366: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1971 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1967 { - yyVAL.joinCondition = JoinCondition{} + yyVAL.empty = struct{}{} } case 367: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1973 - { - yyVAL.joinCondition = yyDollar[1].joinCondition - } - case 368: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1977 - { - yyVAL.joinCondition = JoinCondition{} - } - case 369: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1979 - { - yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} - } - case 370: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1982 - { - yyVAL.empty = struct{}{} - } - case 371: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1984 - { - yyVAL.empty = struct{}{} - } - case 372: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1987 +//line sql.y:1970 { yyVAL.tableIdent = NewTableIdent("") } - case 373: + case 368: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1991 +//line sql.y:1974 { yyVAL.tableIdent = yyDollar[1].tableIdent } - case 374: + case 369: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1995 +//line sql.y:1978 { yyVAL.tableIdent = yyDollar[2].tableIdent } - case 376: + case 371: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2002 +//line sql.y:1985 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } - case 377: + case 372: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2008 +//line sql.y:1991 { yyVAL.str = JoinStr } - case 378: + case 373: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2012 +//line sql.y:1995 { yyVAL.str = JoinStr } - case 379: + case 374: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2016 +//line sql.y:1999 { yyVAL.str = JoinStr } - case 380: + case 375: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2022 +//line sql.y:2005 { yyVAL.str = StraightJoinStr } - case 381: + case 376: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2028 +//line sql.y:2011 { yyVAL.str = LeftJoinStr } - case 382: + case 377: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2032 +//line sql.y:2015 { yyVAL.str = LeftJoinStr } - case 383: + case 378: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2036 +//line sql.y:2019 { yyVAL.str = RightJoinStr } - case 384: + case 379: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2040 +//line sql.y:2023 { yyVAL.str = RightJoinStr } - case 385: + case 380: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2046 +//line 
sql.y:2029 { yyVAL.str = NaturalJoinStr } - case 386: + case 381: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2050 +//line sql.y:2033 { if yyDollar[2].str == LeftJoinStr { yyVAL.str = NaturalLeftJoinStr @@ -5151,459 +5195,459 @@ yydefault: yyVAL.str = NaturalRightJoinStr } } - case 387: + case 382: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2060 +//line sql.y:2043 { yyVAL.tableName = yyDollar[2].tableName } - case 388: + case 383: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2064 +//line sql.y:2047 { yyVAL.tableName = yyDollar[1].tableName } - case 389: + case 384: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2070 +//line sql.y:2053 { yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} } - case 390: + case 385: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2074 +//line sql.y:2057 { yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent} } - case 391: + case 386: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2079 +//line sql.y:2062 { yyVAL.indexHints = nil } - case 392: + case 387: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2083 +//line sql.y:2066 { yyVAL.indexHints = &IndexHints{Type: UseStr, Indexes: yyDollar[4].columns} } - case 393: + case 388: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2087 +//line sql.y:2070 { yyVAL.indexHints = &IndexHints{Type: IgnoreStr, Indexes: yyDollar[4].columns} } - case 394: + case 389: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2091 +//line sql.y:2074 { yyVAL.indexHints = &IndexHints{Type: ForceStr, Indexes: yyDollar[4].columns} } - case 395: + case 390: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2096 +//line sql.y:2079 { yyVAL.expr = nil } - case 396: + case 391: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2100 +//line sql.y:2083 { yyVAL.expr = yyDollar[2].expr } + case 392: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2089 + { + yyVAL.expr = yyDollar[1].expr + } + case 393: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2093 + { + yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 394: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2097 + { + yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 395: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2101 + { + yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} + } + case 396: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2105 + { + yyVAL.expr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].expr} + } case 397: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2106 +//line sql.y:2109 { yyVAL.expr = yyDollar[1].expr } case 398: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2110 - { - yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} - } - case 399: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2114 - { - yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} - } - case 400: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2118 - { - yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} - } - case 401: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2122 - { - yyVAL.expr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].expr} - } - case 402: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2126 - { - yyVAL.expr = yyDollar[1].expr - } - case 403: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2130 +//line sql.y:2113 { yyVAL.expr = &Default{ColName: yyDollar[2].str} } - case 404: + case 399: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2136 +//line sql.y:2119 { yyVAL.str = "" } - case 405: + case 400: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2140 
+//line sql.y:2123 { yyVAL.str = string(yyDollar[2].bytes) } - case 406: + case 401: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2146 +//line sql.y:2129 { yyVAL.boolVal = BoolVal(true) } - case 407: + case 402: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2150 +//line sql.y:2133 { yyVAL.boolVal = BoolVal(false) } - case 408: + case 403: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2156 +//line sql.y:2139 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: yyDollar[2].str, Right: yyDollar[3].expr} } - case 409: + case 404: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2160 +//line sql.y:2143 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InStr, Right: yyDollar[3].colTuple} } - case 410: + case 405: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2164 +//line sql.y:2147 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInStr, Right: yyDollar[4].colTuple} } - case 411: + case 406: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2168 +//line sql.y:2151 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeStr, Right: yyDollar[3].expr, Escape: yyDollar[4].expr} } - case 412: + case 407: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2172 +//line sql.y:2155 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotLikeStr, Right: yyDollar[4].expr, Escape: yyDollar[5].expr} } - case 413: + case 408: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2176 +//line sql.y:2159 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpStr, Right: yyDollar[3].expr} } - case 414: + case 409: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2180 +//line sql.y:2163 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpStr, Right: yyDollar[4].expr} } - case 415: + case 410: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2184 +//line sql.y:2167 { yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenStr, From: yyDollar[3].expr, To: yyDollar[5].expr} } - case 416: + case 411: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2188 +//line sql.y:2171 { yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenStr, From: yyDollar[4].expr, To: yyDollar[6].expr} } - case 417: + case 412: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2192 +//line sql.y:2175 { yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery} } - case 418: + case 413: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2198 +//line sql.y:2181 { yyVAL.str = IsNullStr } - case 419: + case 414: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2202 +//line sql.y:2185 { yyVAL.str = IsNotNullStr } - case 420: + case 415: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2206 +//line sql.y:2189 { yyVAL.str = IsTrueStr } - case 421: + case 416: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2210 +//line sql.y:2193 { yyVAL.str = IsNotTrueStr } - case 422: + case 417: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2214 +//line sql.y:2197 { yyVAL.str = IsFalseStr } - case 423: + case 418: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2218 +//line sql.y:2201 { yyVAL.str = IsNotFalseStr } - case 424: + case 419: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2224 +//line sql.y:2207 { yyVAL.str = EqualStr } - case 425: + case 420: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2228 +//line sql.y:2211 { yyVAL.str = LessThanStr } - case 426: + case 421: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2232 +//line sql.y:2215 { yyVAL.str = GreaterThanStr } - case 427: + case 422: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2236 +//line sql.y:2219 { yyVAL.str = 
LessEqualStr } - case 428: + case 423: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2240 +//line sql.y:2223 { yyVAL.str = GreaterEqualStr } - case 429: + case 424: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2244 +//line sql.y:2227 { yyVAL.str = NotEqualStr } - case 430: + case 425: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2248 +//line sql.y:2231 { yyVAL.str = NullSafeEqualStr } - case 431: + case 426: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2253 +//line sql.y:2236 { yyVAL.expr = nil } - case 432: + case 427: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2257 +//line sql.y:2240 { yyVAL.expr = yyDollar[2].expr } - case 433: + case 428: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2263 +//line sql.y:2246 { yyVAL.colTuple = yyDollar[1].valTuple } - case 434: + case 429: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2267 +//line sql.y:2250 { yyVAL.colTuple = yyDollar[1].subquery } - case 435: + case 430: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2271 +//line sql.y:2254 { yyVAL.colTuple = ListArg(yyDollar[1].bytes) } - case 436: + case 431: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2277 +//line sql.y:2260 { yyVAL.subquery = &Subquery{yyDollar[2].selStmt} } - case 437: + case 432: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2283 +//line sql.y:2266 { yyVAL.exprs = Exprs{yyDollar[1].expr} } - case 438: + case 433: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2287 +//line sql.y:2270 { yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr) } - case 439: + case 434: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2293 +//line sql.y:2276 { yyVAL.expr = yyDollar[1].expr } - case 440: + case 435: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2297 +//line sql.y:2280 { yyVAL.expr = yyDollar[1].boolVal } - case 441: + case 436: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2301 +//line sql.y:2284 { yyVAL.expr = yyDollar[1].colName } - case 442: + case 437: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2305 +//line sql.y:2288 { yyVAL.expr = yyDollar[1].expr } - case 443: + case 438: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2309 +//line sql.y:2292 { yyVAL.expr = yyDollar[1].subquery } - case 444: + case 439: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2313 +//line sql.y:2296 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndStr, Right: yyDollar[3].expr} } - case 445: + case 440: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2317 +//line sql.y:2300 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrStr, Right: yyDollar[3].expr} } - case 446: + case 441: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2321 +//line sql.y:2304 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorStr, Right: yyDollar[3].expr} } - case 447: + case 442: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2325 +//line sql.y:2308 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusStr, Right: yyDollar[3].expr} } - case 448: + case 443: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2329 +//line sql.y:2312 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusStr, Right: yyDollar[3].expr} } - case 449: + case 444: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2333 +//line sql.y:2316 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultStr, Right: yyDollar[3].expr} } - case 450: + case 445: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2337 +//line sql.y:2320 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivStr, Right: yyDollar[3].expr} } - case 451: + case 446: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2341 
+//line sql.y:2324 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivStr, Right: yyDollar[3].expr} } - case 452: + case 447: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2345 +//line sql.y:2328 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} } - case 453: + case 448: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2349 +//line sql.y:2332 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} } - case 454: + case 449: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2353 +//line sql.y:2336 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftStr, Right: yyDollar[3].expr} } - case 455: + case 450: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2357 +//line sql.y:2340 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightStr, Right: yyDollar[3].expr} } - case 456: + case 451: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2361 +//line sql.y:2344 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONExtractOp, Right: yyDollar[3].expr} } - case 457: + case 452: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2365 +//line sql.y:2348 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONUnquoteExtractOp, Right: yyDollar[3].expr} } - case 458: + case 453: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2369 +//line sql.y:2352 { yyVAL.expr = &CollateExpr{Expr: yyDollar[1].expr, Charset: yyDollar[3].str} } - case 459: + case 454: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2373 +//line sql.y:2356 { yyVAL.expr = &UnaryExpr{Operator: BinaryStr, Expr: yyDollar[2].expr} } - case 460: + case 455: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2377 +//line sql.y:2360 { yyVAL.expr = &UnaryExpr{Operator: UBinaryStr, Expr: yyDollar[2].expr} } - case 461: + case 456: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2381 +//line sql.y:2364 { yyVAL.expr = &UnaryExpr{Operator: Utf8mb4Str, Expr: yyDollar[2].expr} } - case 462: + case 457: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2385 +//line sql.y:2368 { if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { yyVAL.expr = num @@ -5611,9 +5655,9 @@ yydefault: yyVAL.expr = &UnaryExpr{Operator: UPlusStr, Expr: yyDollar[2].expr} } } - case 463: + case 458: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2393 +//line sql.y:2376 { if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { // Handle double negative @@ -5627,21 +5671,21 @@ yydefault: yyVAL.expr = &UnaryExpr{Operator: UMinusStr, Expr: yyDollar[2].expr} } } - case 464: + case 459: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2407 +//line sql.y:2390 { yyVAL.expr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].expr} } - case 465: + case 460: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2411 +//line sql.y:2394 { yyVAL.expr = &UnaryExpr{Operator: BangStr, Expr: yyDollar[2].expr} } - case 466: + case 461: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2415 +//line sql.y:2398 { // This rule prevents the usage of INTERVAL // as a function. If support is needed for that, @@ -5649,467 +5693,521 @@ yydefault: // will be non-trivial because of grammar conflicts. 
yyVAL.expr = &IntervalExpr{Expr: yyDollar[2].expr, Unit: yyDollar[3].colIdent.String()} } - case 471: + case 466: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2433 +//line sql.y:2416 { yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs} } - case 472: + case 467: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2437 +//line sql.y:2420 { yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs} } - case 473: + case 468: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2441 +//line sql.y:2424 { yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs} } - case 474: + case 469: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2451 +//line sql.y:2434 { yyVAL.expr = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprs} } - case 475: + case 470: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2455 +//line sql.y:2438 { yyVAL.expr = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprs} } - case 476: + case 471: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2459 +//line sql.y:2442 { yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} } - case 477: + case 472: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2463 +//line sql.y:2446 { yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} } - case 478: + case 473: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2467 +//line sql.y:2450 { yyVAL.expr = &ConvertUsingExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].str} } - case 479: + case 474: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2471 +//line sql.y:2454 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: nil} } - case 480: + case 475: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2475 +//line sql.y:2458 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} } + case 476: + yyDollar = yyS[yypt-8 : yypt+1] +//line sql.y:2462 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 477: + yyDollar = yyS[yypt-6 : yypt+1] +//line sql.y:2466 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: nil} + } + case 478: + yyDollar = yyS[yypt-8 : yypt+1] +//line sql.y:2470 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 479: + yyDollar = yyS[yypt-8 : yypt+1] +//line sql.y:2474 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 480: + yyDollar = yyS[yypt-6 : yypt+1] +//line sql.y:2478 + { + yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: nil} + } case 481: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2479 +//line sql.y:2482 { - yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} } case 482: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2483 + yyDollar = yyS[yypt-8 : yypt+1] +//line sql.y:2486 { - yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: nil} + yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} } case 483: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2487 + yyDollar = yyS[yypt-6 : yypt+1] +//line sql.y:2490 { - yyVAL.expr = 
&SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: nil} } case 484: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2491 +//line sql.y:2494 { - yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} } case 485: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2495 + yyDollar = yyS[yypt-8 : yypt+1] +//line sql.y:2498 { - yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: nil} + yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} } case 486: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2499 - { - yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} - } - case 487: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2503 - { - yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} - } - case 488: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2507 - { - yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: nil} - } - case 489: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2511 - { - yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} - } - case 490: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2515 - { - yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} - } - case 491: yyDollar = yyS[yypt-9 : yypt+1] -//line sql.y:2519 +//line sql.y:2502 { yyVAL.expr = &MatchExpr{Columns: yyDollar[3].selectExprs, Expr: yyDollar[7].expr, Option: yyDollar[8].str} } - case 492: + case 487: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2523 +//line sql.y:2506 { yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].str, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str} } - case 493: + case 488: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2527 +//line sql.y:2510 { yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr} } - case 494: + case 489: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2531 +//line sql.y:2514 { yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colName} } - case 495: + case 490: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2541 +//line sql.y:2524 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_timestamp")} } - case 496: + case 491: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2545 +//line sql.y:2528 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_timestamp")} } - case 497: + case 492: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2549 +//line sql.y:2532 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_time")} } - case 498: + case 493: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2553 +//line sql.y:2537 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_date")} } - case 499: + case 494: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2558 +//line sql.y:2542 { yyVAL.expr = &FuncExpr{Name: NewColIdent("localtime")} } - case 500: + case 495: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2563 +//line sql.y:2547 { yyVAL.expr = &FuncExpr{Name: NewColIdent("localtimestamp")} } - case 501: + case 496: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2568 +//line sql.y:2553 { yyVAL.expr = 
&FuncExpr{Name: NewColIdent("current_date")} } - case 502: + case 497: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2573 +//line sql.y:2558 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_time")} } + case 498: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2563 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("current_timestamp"), Fsp: yyDollar[2].expr} + } + case 499: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2567 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("utc_timestamp"), Fsp: yyDollar[2].expr} + } + case 500: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2571 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("utc_time"), Fsp: yyDollar[2].expr} + } + case 501: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2576 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("localtime"), Fsp: yyDollar[2].expr} + } + case 502: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2581 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("localtimestamp"), Fsp: yyDollar[2].expr} + } + case 503: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2586 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("current_time"), Fsp: yyDollar[2].expr} + } + case 504: + yyDollar = yyS[yypt-8 : yypt+1] +//line sql.y:2590 + { + yyVAL.expr = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].expr, Expr2: yyDollar[7].expr} + } case 505: + yyDollar = yyS[yypt-8 : yypt+1] +//line sql.y:2594 + { + yyVAL.expr = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].expr, Expr2: yyDollar[7].expr} + } + case 508: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2604 + { + yyVAL.expr = yyDollar[2].expr + } + case 509: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2587 +//line sql.y:2614 { yyVAL.expr = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs} } - case 506: + case 510: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2591 +//line sql.y:2618 { yyVAL.expr = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprs} } - case 507: + case 511: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2595 +//line sql.y:2622 { yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprs} } - case 508: + case 512: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2599 +//line sql.y:2626 { yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs} } - case 509: + case 513: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2605 +//line sql.y:2632 { yyVAL.str = "" } - case 510: + case 514: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2609 +//line sql.y:2636 { yyVAL.str = BooleanModeStr } - case 511: + case 515: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2613 +//line sql.y:2640 { yyVAL.str = NaturalLanguageModeStr } - case 512: + case 516: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2617 +//line sql.y:2644 { yyVAL.str = NaturalLanguageModeWithQueryExpansionStr } - case 513: + case 517: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2621 +//line sql.y:2648 { yyVAL.str = QueryExpansionStr } - case 514: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2627 - { - yyVAL.str = string(yyDollar[1].bytes) - } - case 515: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2631 - { - yyVAL.str = string(yyDollar[1].bytes) - } - case 516: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2637 - { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} - } - case 517: - yyDollar = yyS[yypt-3 : yypt+1] -//line 
sql.y:2641 - { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Operator: CharacterSetStr} - } case 518: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2645 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2654 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: string(yyDollar[3].bytes)} + yyVAL.str = string(yyDollar[1].bytes) } case 519: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2649 +//line sql.y:2658 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyVAL.str = string(yyDollar[1].bytes) } case 520: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2653 +//line sql.y:2664 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 521: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2668 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Operator: CharacterSetStr} + } + case 522: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2672 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: string(yyDollar[3].bytes)} + } + case 523: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2676 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 524: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2657 +//line sql.y:2680 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 525: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2684 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.convertType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 522: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2663 - { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} - } - case 523: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2667 - { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} - } - case 524: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2671 - { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} - } - case 525: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2675 - { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} - } case 526: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2679 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2690 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 527: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2683 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2694 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 528: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2687 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2698 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 529: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2692 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2702 { - yyVAL.expr = nil + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 530: - yyDollar = yyS[yypt-1 : yypt+1] -//line 
sql.y:2696 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2706 { - yyVAL.expr = yyDollar[1].expr + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} } case 531: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2701 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2710 { - yyVAL.str = string("") + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 532: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2705 +//line sql.y:2714 { - yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 533: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2711 - { - yyVAL.whens = []*When{yyDollar[1].when} - } - case 534: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2715 - { - yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) - } - case 535: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2721 - { - yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} - } - case 536: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2726 +//line sql.y:2719 { yyVAL.expr = nil } - case 537: + case 534: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2723 + { + yyVAL.expr = yyDollar[1].expr + } + case 535: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2728 + { + yyVAL.str = string("") + } + case 536: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2730 +//line sql.y:2732 + { + yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" + } + case 537: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2738 + { + yyVAL.whens = []*When{yyDollar[1].when} + } + case 538: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2742 + { + yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) + } + case 539: + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:2748 + { + yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} + } + case 540: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2753 + { + yyVAL.expr = nil + } + case 541: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2757 { yyVAL.expr = yyDollar[2].expr } - case 538: + case 542: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2736 +//line sql.y:2763 { yyVAL.colName = &ColName{Name: yyDollar[1].colIdent} } - case 539: + case 543: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2740 +//line sql.y:2767 { yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} } - case 540: + case 544: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2744 +//line sql.y:2771 { yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} } - case 541: + case 545: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2750 +//line sql.y:2777 { yyVAL.expr = NewStrVal(yyDollar[1].bytes) } - case 542: + case 546: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2754 +//line sql.y:2781 { yyVAL.expr = NewHexVal(yyDollar[1].bytes) } - case 543: + case 547: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2758 +//line sql.y:2785 { yyVAL.expr = NewBitVal(yyDollar[1].bytes) } - case 544: + case 548: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2762 +//line sql.y:2789 { yyVAL.expr = NewIntVal(yyDollar[1].bytes) } - case 545: + case 549: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2766 +//line sql.y:2793 { yyVAL.expr = NewFloatVal(yyDollar[1].bytes) } - case 546: + case 550: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2770 +//line sql.y:2797 { yyVAL.expr = NewHexNum(yyDollar[1].bytes) } - case 547: + case 551: yyDollar = yyS[yypt-1 : yypt+1] 
-//line sql.y:2774 +//line sql.y:2801 { yyVAL.expr = NewValArg(yyDollar[1].bytes) } - case 548: + case 552: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2778 +//line sql.y:2805 { yyVAL.expr = &NullVal{} } - case 549: + case 553: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2784 +//line sql.y:2811 { // TODO(sougou): Deprecate this construct. if yyDollar[1].colIdent.Lowered() != "value" { @@ -6118,239 +6216,239 @@ yydefault: } yyVAL.expr = NewIntVal([]byte("1")) } - case 550: + case 554: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2793 +//line sql.y:2820 { yyVAL.expr = NewIntVal(yyDollar[1].bytes) } - case 551: + case 555: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2797 +//line sql.y:2824 { yyVAL.expr = NewValArg(yyDollar[1].bytes) } - case 552: + case 556: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2802 +//line sql.y:2829 { yyVAL.exprs = nil } - case 553: + case 557: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2806 +//line sql.y:2833 { yyVAL.exprs = yyDollar[3].exprs } - case 554: + case 558: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2811 +//line sql.y:2838 { yyVAL.expr = nil } - case 555: + case 559: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2815 +//line sql.y:2842 { yyVAL.expr = yyDollar[2].expr } - case 556: + case 560: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2820 +//line sql.y:2847 { yyVAL.orderBy = nil } - case 557: + case 561: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2824 +//line sql.y:2851 { yyVAL.orderBy = yyDollar[3].orderBy } - case 558: + case 562: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2830 +//line sql.y:2857 { yyVAL.orderBy = OrderBy{yyDollar[1].order} } - case 559: + case 563: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2834 +//line sql.y:2861 { yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order) } - case 560: + case 564: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2840 +//line sql.y:2867 { yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].str} } - case 561: + case 565: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2845 +//line sql.y:2872 { yyVAL.str = AscScr } - case 562: + case 566: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2849 +//line sql.y:2876 { yyVAL.str = AscScr } - case 563: + case 567: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2853 +//line sql.y:2880 { yyVAL.str = DescScr } - case 564: + case 568: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2858 +//line sql.y:2885 { yyVAL.limit = nil } - case 565: + case 569: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2862 +//line sql.y:2889 { yyVAL.limit = &Limit{Rowcount: yyDollar[2].expr} } - case 566: + case 570: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2866 +//line sql.y:2893 { yyVAL.limit = &Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr} } - case 567: + case 571: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2870 +//line sql.y:2897 { yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr} } - case 568: + case 572: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2875 +//line sql.y:2902 { yyVAL.str = "" } - case 569: + case 573: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2879 +//line sql.y:2906 { yyVAL.str = ForUpdateStr } - case 570: + case 574: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2883 +//line sql.y:2910 { yyVAL.str = ShareModeStr } - case 571: + case 575: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2896 +//line sql.y:2923 { yyVAL.ins = &Insert{Rows: yyDollar[2].values} } - case 572: + case 576: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2900 +//line sql.y:2927 { yyVAL.ins = &Insert{Rows: 
yyDollar[1].selStmt} } - case 573: + case 577: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2904 +//line sql.y:2931 { // Drop the redundant parenthesis. yyVAL.ins = &Insert{Rows: yyDollar[2].selStmt} } - case 574: + case 578: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2909 +//line sql.y:2936 { yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values} } - case 575: + case 579: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2913 +//line sql.y:2940 { yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt} } - case 576: + case 580: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2917 +//line sql.y:2944 { // Drop the redundant parenthesis. yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].selStmt} } - case 577: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2924 - { - yyVAL.columns = Columns{yyDollar[1].colIdent} - } - case 578: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2928 - { - yyVAL.columns = Columns{yyDollar[3].colIdent} - } - case 579: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2932 - { - yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) - } - case 580: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2936 - { - yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) - } case 581: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2941 - { - yyVAL.updateExprs = nil - } - case 582: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2945 - { - yyVAL.updateExprs = yyDollar[5].updateExprs - } - case 583: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:2951 { - yyVAL.values = Values{yyDollar[1].valTuple} + yyVAL.columns = Columns{yyDollar[1].colIdent} } - case 584: + case 582: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:2955 { - yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) + yyVAL.columns = Columns{yyDollar[3].colIdent} + } + case 583: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2959 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + } + case 584: + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:2963 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) } case 585: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2968 + { + yyVAL.updateExprs = nil + } + case 586: + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:2972 + { + yyVAL.updateExprs = yyDollar[5].updateExprs + } + case 587: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2961 +//line sql.y:2978 + { + yyVAL.values = Values{yyDollar[1].valTuple} + } + case 588: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2982 + { + yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) + } + case 589: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2988 { yyVAL.valTuple = yyDollar[1].valTuple } - case 586: + case 590: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2965 +//line sql.y:2992 { yyVAL.valTuple = ValTuple{} } - case 587: + case 591: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2971 +//line sql.y:2998 { yyVAL.valTuple = ValTuple(yyDollar[2].exprs) } - case 588: + case 592: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2977 +//line sql.y:3004 { if len(yyDollar[1].valTuple) == 1 { yyVAL.expr = &ParenExpr{yyDollar[1].valTuple[0]} @@ -6358,312 +6456,312 @@ yydefault: yyVAL.expr = yyDollar[1].valTuple } } - case 589: + case 593: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2987 +//line sql.y:3014 { yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr} } - case 590: + case 594: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2991 +//line sql.y:3018 { yyVAL.updateExprs = append(yyDollar[1].updateExprs, 
yyDollar[3].updateExpr) } - case 591: + case 595: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2997 +//line sql.y:3024 { yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].expr} } - case 592: + case 596: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3003 +//line sql.y:3030 { yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} } - case 593: + case 597: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3007 +//line sql.y:3034 { yyVAL.setExprs = append(yyDollar[1].setExprs, yyDollar[3].setExpr) } - case 594: + case 598: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3013 +//line sql.y:3040 { yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("on"))} } - case 595: + case 599: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3017 +//line sql.y:3044 { yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("off"))} } - case 596: + case 600: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3021 +//line sql.y:3048 { yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: yyDollar[3].expr} } - case 597: + case 601: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3025 +//line sql.y:3052 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(string(yyDollar[1].bytes)), Expr: yyDollar[2].expr} } - case 599: + case 603: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3032 +//line sql.y:3059 { yyVAL.bytes = []byte("charset") } - case 601: + case 605: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3039 +//line sql.y:3066 { yyVAL.expr = NewStrVal([]byte(yyDollar[1].colIdent.String())) } - case 602: + case 606: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3043 +//line sql.y:3070 { yyVAL.expr = NewStrVal(yyDollar[1].bytes) } - case 603: + case 607: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3047 +//line sql.y:3074 { yyVAL.expr = &Default{} } - case 606: + case 610: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3056 +//line sql.y:3083 { yyVAL.byt = 0 } - case 607: + case 611: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3058 +//line sql.y:3085 { yyVAL.byt = 1 } - case 608: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3061 - { - yyVAL.empty = struct{}{} - } - case 609: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3063 - { - yyVAL.empty = struct{}{} - } - case 610: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3066 - { - yyVAL.str = "" - } - case 611: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3068 - { - yyVAL.str = IgnoreStr - } case 612: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3072 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:3088 { yyVAL.empty = struct{}{} } case 613: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3074 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:3090 { yyVAL.empty = struct{}{} } case 614: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3076 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:3093 { - yyVAL.empty = struct{}{} + yyVAL.str = "" } case 615: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3078 +//line sql.y:3095 { - yyVAL.empty = struct{}{} + yyVAL.str = IgnoreStr } case 616: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3080 +//line sql.y:3099 { yyVAL.empty = struct{}{} } case 617: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3082 +//line sql.y:3101 { yyVAL.empty = struct{}{} } case 618: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3084 +//line sql.y:3103 { yyVAL.empty = struct{}{} } case 619: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3086 +//line sql.y:3105 { yyVAL.empty = struct{}{} } case 620: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3088 +//line sql.y:3107 { yyVAL.empty = struct{}{} } case 
621: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3090 +//line sql.y:3109 { yyVAL.empty = struct{}{} } case 622: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3093 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:3111 { yyVAL.empty = struct{}{} } case 623: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3095 +//line sql.y:3113 { yyVAL.empty = struct{}{} } case 624: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3097 +//line sql.y:3115 { yyVAL.empty = struct{}{} } case 625: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3101 +//line sql.y:3117 { yyVAL.empty = struct{}{} } case 626: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3103 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:3120 { yyVAL.empty = struct{}{} } case 627: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3106 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:3122 { yyVAL.empty = struct{}{} } case 628: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3108 +//line sql.y:3124 { yyVAL.empty = struct{}{} } case 629: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3110 +//line sql.y:3128 { yyVAL.empty = struct{}{} } case 630: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3113 - { - yyVAL.colIdent = ColIdent{} - } - case 631: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3115 - { - yyVAL.colIdent = yyDollar[2].colIdent - } - case 632: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3119 - { - yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) - } - case 633: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3123 - { - yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) - } - case 635: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:3130 { - yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + yyVAL.empty = struct{}{} + } + case 631: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:3133 + { + yyVAL.empty = struct{}{} + } + case 632: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:3135 + { + yyVAL.empty = struct{}{} + } + case 633: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:3137 + { + yyVAL.empty = struct{}{} + } + case 634: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:3140 + { + yyVAL.colIdent = ColIdent{} + } + case 635: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:3142 + { + yyVAL.colIdent = yyDollar[2].colIdent } case 636: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3136 +//line sql.y:3146 { - yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } case 637: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3140 +//line sql.y:3150 { - yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } case 639: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3147 +//line sql.y:3157 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 640: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:3163 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } - case 846: + case 641: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3379 +//line sql.y:3167 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 643: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:3174 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 853: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:3409 { if incNesting(yylex) { yylex.Error("max nesting level reached") return 1 } } - case 847: + case 854: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3388 +//line sql.y:3418 { decNesting(yylex) } - case 848: + case 855: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3393 +//line 
sql.y:3423 { skipToEnd(yylex) } - case 849: + case 856: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3398 +//line sql.y:3428 { skipToEnd(yylex) } - case 850: + case 857: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3402 +//line sql.y:3432 { skipToEnd(yylex) } - case 851: + case 858: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3406 +//line sql.y:3436 { skipToEnd(yylex) } diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index 07dc55ca4c..149781ad6f 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -75,6 +75,7 @@ func skipToEnd(yylex interface{}) { expr Expr exprs Exprs boolVal BoolVal + sqlVal *SQLVal colTuple ColTuple values Values valTuple ValTuple @@ -95,7 +96,7 @@ func skipToEnd(yylex interface{}) { TableSpec *TableSpec columnType ColumnType colKeyOpt ColumnKeyOption - optVal *SQLVal + optVal Expr LengthScaleOption LengthScaleOption columnDefinition *ColumnDefinition indexDefinition *IndexDefinition @@ -193,6 +194,7 @@ func skipToEnd(yylex interface{}) { %token <bytes> CONVERT CAST %token <bytes> SUBSTR SUBSTRING %token <bytes> GROUP_CONCAT SEPARATOR +%token <bytes> TIMESTAMPADD TIMESTAMPDIFF // Match %token <bytes> MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION @@ -228,7 +230,7 @@ func skipToEnd(yylex interface{}) { %type <str> compare %type <ins> insert_data %type <expr> value value_expression num_val -%type <expr> function_call_keyword function_call_nonkeyword function_call_generic function_call_conflict +%type <expr> function_call_keyword function_call_nonkeyword function_call_generic function_call_conflict func_datetime_precision %type <str> is_suffix %type <colTuple> col_tuple %type <exprs> expression_list @@ -273,7 +275,8 @@ func skipToEnd(yylex interface{}) { %type <convertType> convert_type %type <columnType> column_type %type <columnType> int_type decimal_type numeric_type time_type char_type spatial_type -%type <optVal> length_opt column_default_opt column_comment_opt on_update_opt +%type <sqlVal> length_opt column_comment_opt +%type <optVal> column_default_opt on_update_opt %type <str> charset_opt collate_opt %type <boolVal> unsigned_opt zero_fill_opt %type <LengthScaleOption> float_length_opt decimal_length_opt @@ -702,6 +705,14 @@ int_type: { $$ = ColumnType{Type: string($1)} } +| BOOL + { + $$ = ColumnType{Type: string($1)} + } +| BOOLEAN + { + $$ = ColumnType{Type: string($1)} + } | TINYINT { $$ = ColumnType{Type: string($1)} @@ -964,46 +975,18 @@ column_default_opt: { $$ = nil } -| DEFAULT STRING +| DEFAULT value_expression { - $$ = NewStrVal($2) - } -| DEFAULT INTEGRAL - { - $$ = NewIntVal($2) - } -| DEFAULT FLOAT - { - $$ = NewFloatVal($2) - } -| DEFAULT NULL - { - $$ = NewValArg($2) - } -| DEFAULT CURRENT_TIMESTAMP - { - $$ = NewValArg($2) - } -| DEFAULT CURRENT_TIMESTAMP '(' ')' - { - $$ = NewValArg($2) - } -| DEFAULT BIT_LITERAL - { - $$ = NewBitVal($2) + $$ = $2 } on_update_opt: { $$ = nil } -| ON UPDATE CURRENT_TIMESTAMP +| ON UPDATE function_call_nonkeyword { - $$ = NewValArg($3) -} -| ON UPDATE CURRENT_TIMESTAMP '(' ')' -{ - $$ = NewValArg($3) + $$ = $3 } auto_increment_opt: @@ -2537,47 +2520,91 @@ function_call_keyword: Dedicated grammar rules are needed because of the special syntax */ function_call_nonkeyword: - CURRENT_TIMESTAMP func_datetime_precision_opt + CURRENT_TIMESTAMP func_datetime_opt { $$ = &FuncExpr{Name:NewColIdent("current_timestamp")} } -| UTC_TIMESTAMP func_datetime_precision_opt +| UTC_TIMESTAMP func_datetime_opt { $$ = &FuncExpr{Name:NewColIdent("utc_timestamp")} } -| UTC_TIME func_datetime_precision_opt +| UTC_TIME func_datetime_opt { $$ = &FuncExpr{Name:NewColIdent("utc_time")} } -| UTC_DATE func_datetime_precision_opt +/* doesn't support fsp */ +| UTC_DATE func_datetime_opt { $$ =
&FuncExpr{Name:NewColIdent("utc_date")} } // now -| LOCALTIME func_datetime_precision_opt +| LOCALTIME func_datetime_opt { $$ = &FuncExpr{Name:NewColIdent("localtime")} } // now -| LOCALTIMESTAMP func_datetime_precision_opt +| LOCALTIMESTAMP func_datetime_opt { $$ = &FuncExpr{Name:NewColIdent("localtimestamp")} } // curdate -| CURRENT_DATE func_datetime_precision_opt +/* doesn't support fsp */ +| CURRENT_DATE func_datetime_opt { $$ = &FuncExpr{Name:NewColIdent("current_date")} } // curtime -| CURRENT_TIME func_datetime_precision_opt +| CURRENT_TIME func_datetime_opt { $$ = &FuncExpr{Name:NewColIdent("current_time")} } +// these functions can also be called with an optional argument +| CURRENT_TIMESTAMP func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("current_timestamp"), Fsp:$2} + } +| UTC_TIMESTAMP func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("utc_timestamp"), Fsp:$2} + } +| UTC_TIME func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("utc_time"), Fsp:$2} + } + // now +| LOCALTIME func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("localtime"), Fsp:$2} + } + // now +| LOCALTIMESTAMP func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("localtimestamp"), Fsp:$2} + } + // curtime +| CURRENT_TIME func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("current_time"), Fsp:$2} + } +| TIMESTAMPADD openb sql_id ',' value_expression ',' value_expression closeb + { + $$ = &TimestampFuncExpr{Name:string("timestampadd"), Unit:$3.String(), Expr1:$5, Expr2:$7} + } +| TIMESTAMPDIFF openb sql_id ',' value_expression ',' value_expression closeb + { + $$ = &TimestampFuncExpr{Name:string("timestampdiff"), Unit:$3.String(), Expr1:$5, Expr2:$7} + } -func_datetime_precision_opt: +func_datetime_opt: /* empty */ | openb closeb +func_datetime_precision: + openb value_expression closeb + { + $$ = $2 + } + /* Function calls using non reserved keywords with *normal* syntax forms. Because the names are non-reserved, they need a dedicated rule so as not to conflict @@ -3237,6 +3264,8 @@ reserved_keyword: | STRAIGHT_JOIN | TABLE | THEN +| TIMESTAMPADD +| TIMESTAMPDIFF | TO | TRUE | UNION @@ -3267,6 +3296,7 @@ non_reserved_keyword: | BIT | BLOB | BOOL +| BOOLEAN | CASCADE | CHAR | CHARACTER diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index 31b3bc33d7..dee429af00 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -357,6 +357,8 @@ var keywords = map[string]int{ "then": THEN, "time": TIME, "timestamp": TIMESTAMP, + "timestampadd": TIMESTAMPADD, + "timestampdiff": TIMESTAMPDIFF, "tinyblob": TINYBLOB, "tinyint": TINYINT, "tinytext": TINYTEXT, diff --git a/go/vt/srvtopo/target_stats.go b/go/vt/srvtopo/target_stats.go index ed49e41f02..6af44eedb8 100644 --- a/go/vt/srvtopo/target_stats.go +++ b/go/vt/srvtopo/target_stats.go @@ -27,9 +27,6 @@ import ( // routing of queries. // - discovery.TabletStatsCache will implement the discovery part of the // interface, and discoverygateway will have the QueryService. -// - hybridgateway will also implement this interface: for each l2vtgate pool, -// it will establish a StreamHealth connection, and store the returned -// health stats. type TargetStats interface { // GetAggregateStats returns the aggregate stats for the given Target. 
// The srvtopo module will use that information to route queries @@ -45,23 +42,6 @@ type TargetStats interface { GetMasterCell(keyspace, shard string) (cell string, qs queryservice.QueryService, err error) } -// TargetStatsListener is an interface used to propagate TargetStats changes. -// - discovery.TabletStatsCache will implement this interface. -// - the StreamHealth method in l2vtgate will use this interface to surface -// the health of its targets. -type TargetStatsListener interface { - // Subscribe will return the current full state of the TargetStats, - // and a channel that will receive subsequent updates. The int returned - // is the channel id, and can be sent to unsubscribe to stop - // notifications. - Subscribe() (int, []TargetStatsEntry, <-chan (*TargetStatsEntry), error) - - // Unsubscribe stops sending updates to the channel returned - // by Subscribe. The channel still needs to be drained to - // avoid deadlocks. - Unsubscribe(int) error -} - // TargetStatsEntry has the updated information for a Target. type TargetStatsEntry struct { // Target is what this entry applies to. diff --git a/go/vt/throttler/max_replication_lag_module_test.go b/go/vt/throttler/max_replication_lag_module_test.go index 0d57a68d3f..e5537e64f2 100644 --- a/go/vt/throttler/max_replication_lag_module_test.go +++ b/go/vt/throttler/max_replication_lag_module_test.go @@ -283,7 +283,7 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_Timeout(t *testing.T) { } // r2 as "replica under test" did not report its lag for too long. - // We'll ignore it from now and and let other replicas trigger rate changes. + // We'll ignore it from now and let other replicas trigger rate changes. // r1 @ 173s, 0s lag // time for r1 must be > 172s (70s + 40s + 62s) which is // (last rate change + test duration + max duration between increases). diff --git a/go/vt/tlstest/tlstest_test.go b/go/vt/tlstest/tlstest_test.go index 686a2dcbd7..a88ab1c585 100644 --- a/go/vt/tlstest/tlstest_test.go +++ b/go/vt/tlstest/tlstest_test.go @@ -140,11 +140,21 @@ func TestClientServer(t *testing.T) { serverConn.Close() }() - if _, err = tls.Dial("tcp", addr, badClientConfig); err == nil { - t.Fatalf("Dial was expected to fail") + // When using TLS 1.2, the Dial will fail. + // With TLS 1.3, the Dial will succeed and the first Read will fail. + clientConn, err := tls.Dial("tcp", addr, badClientConfig) + if err != nil { + if !strings.Contains(err.Error(), "bad certificate") { + t.Errorf("Wrong error returned: %v", err) + } + return + } + data := make([]byte, 1) + _, err = clientConn.Read(data) + if err == nil { + t.Fatalf("Dial or first Read was expected to fail") } if !strings.Contains(err.Error(), "bad certificate") { t.Errorf("Wrong error returned: %v", err) } - t.Logf("Dial returned: %v", err) } diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 0e6c79d254..6e3abbddec 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -47,6 +47,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -315,9 +316,9 @@ func (itc *internalTabletConn) ExecuteBatch(ctx context.Context, target *querypb // StreamExecute is part of queryservice.QueryService // We need to copy the bind variables as tablet server will change them. 
-func (itc *internalTabletConn) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { +func (itc *internalTabletConn) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { bindVars = sqltypes.CopyBindVariables(bindVars) - err := itc.tablet.qsc.QueryService().StreamExecute(ctx, target, query, bindVars, options, callback) + err := itc.tablet.qsc.QueryService().StreamExecute(ctx, target, query, bindVars, transactionID, options, callback) return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } @@ -472,6 +473,12 @@ func (itc *internalTabletConn) UpdateStream(ctx context.Context, target *querypb return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } +// VStream is part of queryservice.QueryService. +func (itc *internalTabletConn) VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { + err := itc.tablet.qsc.QueryService().VStream(ctx, target, startPos, filter, send) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) +} + // // TabletManagerClient implementation // @@ -479,6 +486,14 @@ func (itc *internalTabletConn) UpdateStream(ctx context.Context, target *querypb // internalTabletManagerClient implements tmclient.TabletManagerClient type internalTabletManagerClient struct{} +func (itmc *internalTabletManagerClient) LockTables(ctx context.Context, tablet *topodatapb.Tablet) error { + return fmt.Errorf("not implemented in vtcombo") +} + +func (itmc *internalTabletManagerClient) UnlockTables(ctx context.Context, tablet *topodatapb.Tablet) error { + return fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) Ping(ctx context.Context, tablet *topodatapb.Tablet) error { t, ok := tabletMap[tablet.Alias.Uid] if !ok { @@ -616,6 +631,10 @@ func (itmc *internalTabletManagerClient) StartSlave(ctx context.Context, tablet return fmt.Errorf("not implemented in vtcombo") } +func (itmc *internalTabletManagerClient) StartSlaveUntilAfter(ctx context.Context, tablet *topodatapb.Tablet, position string, duration time.Duration) error { + return fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) TabletExternallyReparented(ctx context.Context, tablet *topodatapb.Tablet, externalID string) error { return fmt.Errorf("not implemented in vtcombo") } diff --git a/go/vt/vtctl/backup.go b/go/vt/vtctl/backup.go index 0309f41b8e..487bc90918 100644 --- a/go/vt/vtctl/backup.go +++ b/go/vt/vtctl/backup.go @@ -103,8 +103,10 @@ func commandBackupShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl var secondsBehind uint32 for i := range tablets { - // don't run a backup on a non-slave type - if !tablets[i].IsSlaveType() { + // only run a backup on a replica, rdonly or spare tablet type + switch tablets[i].Type { + case topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, topodatapb.TabletType_SPARE: + default: continue } diff --git a/data/test/vtexplain/comments-queries.sql b/go/vt/vtexplain/testdata/comments-queries.sql similarity index 100% rename from data/test/vtexplain/comments-queries.sql rename to go/vt/vtexplain/testdata/comments-queries.sql diff --git a/data/test/vtexplain/deletesharded-queries.sql 
b/go/vt/vtexplain/testdata/deletesharded-queries.sql similarity index 100% rename from data/test/vtexplain/deletesharded-queries.sql rename to go/vt/vtexplain/testdata/deletesharded-queries.sql diff --git a/data/test/vtexplain/insertsharded-queries.sql b/go/vt/vtexplain/testdata/insertsharded-queries.sql similarity index 100% rename from data/test/vtexplain/insertsharded-queries.sql rename to go/vt/vtexplain/testdata/insertsharded-queries.sql diff --git a/data/test/vtexplain/multi-output/comments-output.txt b/go/vt/vtexplain/testdata/multi-output/comments-output.txt similarity index 100% rename from data/test/vtexplain/multi-output/comments-output.txt rename to go/vt/vtexplain/testdata/multi-output/comments-output.txt diff --git a/data/test/vtexplain/multi-output/deletesharded-output.txt b/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt similarity index 100% rename from data/test/vtexplain/multi-output/deletesharded-output.txt rename to go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt diff --git a/data/test/vtexplain/multi-output/insertsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt similarity index 100% rename from data/test/vtexplain/multi-output/insertsharded-output.txt rename to go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt diff --git a/data/test/vtexplain/multi-output/options-output.txt b/go/vt/vtexplain/testdata/multi-output/options-output.txt similarity index 100% rename from data/test/vtexplain/multi-output/options-output.txt rename to go/vt/vtexplain/testdata/multi-output/options-output.txt diff --git a/data/test/vtexplain/multi-output/selectsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt similarity index 100% rename from data/test/vtexplain/multi-output/selectsharded-output.txt rename to go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt diff --git a/data/test/vtexplain/multi-output/target-output.txt b/go/vt/vtexplain/testdata/multi-output/target-output.txt similarity index 100% rename from data/test/vtexplain/multi-output/target-output.txt rename to go/vt/vtexplain/testdata/multi-output/target-output.txt diff --git a/data/test/vtexplain/multi-output/unsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt similarity index 100% rename from data/test/vtexplain/multi-output/unsharded-output.txt rename to go/vt/vtexplain/testdata/multi-output/unsharded-output.txt diff --git a/data/test/vtexplain/multi-output/updatesharded-output.txt b/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt similarity index 100% rename from data/test/vtexplain/multi-output/updatesharded-output.txt rename to go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt diff --git a/data/test/vtexplain/options-queries.sql b/go/vt/vtexplain/testdata/options-queries.sql similarity index 100% rename from data/test/vtexplain/options-queries.sql rename to go/vt/vtexplain/testdata/options-queries.sql diff --git a/data/test/vtexplain/selectsharded-queries.sql b/go/vt/vtexplain/testdata/selectsharded-queries.sql similarity index 100% rename from data/test/vtexplain/selectsharded-queries.sql rename to go/vt/vtexplain/testdata/selectsharded-queries.sql diff --git a/data/test/vtexplain/target-queries.sql b/go/vt/vtexplain/testdata/target-queries.sql similarity index 100% rename from data/test/vtexplain/target-queries.sql rename to go/vt/vtexplain/testdata/target-queries.sql diff --git a/data/test/vtexplain/test-schema.sql 
b/go/vt/vtexplain/testdata/test-schema.sql similarity index 100% rename from data/test/vtexplain/test-schema.sql rename to go/vt/vtexplain/testdata/test-schema.sql diff --git a/data/test/vtexplain/test-vschema.json b/go/vt/vtexplain/testdata/test-vschema.json similarity index 100% rename from data/test/vtexplain/test-vschema.json rename to go/vt/vtexplain/testdata/test-vschema.json diff --git a/data/test/vtexplain/twopc-output/comments-output.txt b/go/vt/vtexplain/testdata/twopc-output/comments-output.txt similarity index 100% rename from data/test/vtexplain/twopc-output/comments-output.txt rename to go/vt/vtexplain/testdata/twopc-output/comments-output.txt diff --git a/data/test/vtexplain/twopc-output/deletesharded-output.txt b/go/vt/vtexplain/testdata/twopc-output/deletesharded-output.txt similarity index 100% rename from data/test/vtexplain/twopc-output/deletesharded-output.txt rename to go/vt/vtexplain/testdata/twopc-output/deletesharded-output.txt diff --git a/data/test/vtexplain/twopc-output/insertsharded-output.txt b/go/vt/vtexplain/testdata/twopc-output/insertsharded-output.txt similarity index 100% rename from data/test/vtexplain/twopc-output/insertsharded-output.txt rename to go/vt/vtexplain/testdata/twopc-output/insertsharded-output.txt diff --git a/data/test/vtexplain/twopc-output/options-output.txt b/go/vt/vtexplain/testdata/twopc-output/options-output.txt similarity index 100% rename from data/test/vtexplain/twopc-output/options-output.txt rename to go/vt/vtexplain/testdata/twopc-output/options-output.txt diff --git a/data/test/vtexplain/twopc-output/selectsharded-output.txt b/go/vt/vtexplain/testdata/twopc-output/selectsharded-output.txt similarity index 100% rename from data/test/vtexplain/twopc-output/selectsharded-output.txt rename to go/vt/vtexplain/testdata/twopc-output/selectsharded-output.txt diff --git a/data/test/vtexplain/twopc-output/unsharded-output.txt b/go/vt/vtexplain/testdata/twopc-output/unsharded-output.txt similarity index 100% rename from data/test/vtexplain/twopc-output/unsharded-output.txt rename to go/vt/vtexplain/testdata/twopc-output/unsharded-output.txt diff --git a/data/test/vtexplain/twopc-output/updatesharded-output.txt b/go/vt/vtexplain/testdata/twopc-output/updatesharded-output.txt similarity index 100% rename from data/test/vtexplain/twopc-output/updatesharded-output.txt rename to go/vt/vtexplain/testdata/twopc-output/updatesharded-output.txt diff --git a/data/test/vtexplain/unsharded-queries.sql b/go/vt/vtexplain/testdata/unsharded-queries.sql similarity index 100% rename from data/test/vtexplain/unsharded-queries.sql rename to go/vt/vtexplain/testdata/unsharded-queries.sql diff --git a/data/test/vtexplain/updatesharded-queries.sql b/go/vt/vtexplain/testdata/updatesharded-queries.sql similarity index 100% rename from data/test/vtexplain/updatesharded-queries.sql rename to go/vt/vtexplain/testdata/updatesharded-queries.sql diff --git a/go/vt/vtexplain/vtexplain_flaky_test.go b/go/vt/vtexplain/vtexplain_flaky_test.go index 190bd94989..370f59fccc 100644 --- a/go/vt/vtexplain/vtexplain_flaky_test.go +++ b/go/vt/vtexplain/vtexplain_flaky_test.go @@ -24,8 +24,6 @@ import ( "path" "strings" "testing" - - "vitess.io/vitess/go/testfiles" ) var testOutputTempDir string @@ -40,12 +38,12 @@ func defaultTestOpts() *Options { } func initTest(mode string, opts *Options, t *testing.T) { - schema, err := ioutil.ReadFile(testfiles.Locate("vtexplain/test-schema.sql")) + schema, err := ioutil.ReadFile("testdata/test-schema.sql") if err != nil { t.Fatalf("error: 
%v", err) } - vSchema, err := ioutil.ReadFile(testfiles.Locate("vtexplain/test-vschema.json")) + vSchema, err := ioutil.ReadFile("testdata/test-vschema.json") if err != nil { t.Fatalf("error: %v", err) } @@ -75,13 +73,13 @@ func runTestCase(testcase, mode string, opts *Options, t *testing.T) { t.Logf("vtexplain test: %s mode: %s", testcase, mode) initTest(mode, opts, t) - sqlFile := testfiles.Locate(fmt.Sprintf("vtexplain/%s-queries.sql", testcase)) + sqlFile := fmt.Sprintf("testdata/%s-queries.sql", testcase) sql, err := ioutil.ReadFile(sqlFile) if err != nil { t.Fatalf("vtexplain error: %v", err) } - textOutFile := testfiles.Locate(fmt.Sprintf("vtexplain/%s-output/%s-output.txt", mode, testcase)) + textOutFile := fmt.Sprintf("testdata/%s-output/%s-output.txt", mode, testcase) textOut, _ := ioutil.ReadFile(textOutFile) explains, err := Run(string(sql)) diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index 72d5e20da5..a1ead40673 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -78,6 +78,37 @@ type Insert struct { MultiShardAutocommit bool } +// NewQueryInsert creates an Insert with a query string. +func NewQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query string) *Insert { + return &Insert{ + Opcode: opcode, + Keyspace: keyspace, + Query: query, + } +} + +// NewSimpleInsert creates an Insert for a Table. +func NewSimpleInsert(opcode InsertOpcode, table *vindexes.Table, keyspace *vindexes.Keyspace) *Insert { + return &Insert{ + Opcode: opcode, + Table: table, + Keyspace: keyspace, + } +} + +// NewInsert creates a new Insert. +func NewInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, vindexValues []sqltypes.PlanValue, table *vindexes.Table, prefix string, mid []string, suffix string) *Insert { + return &Insert{ + Opcode: opcode, + Keyspace: keyspace, + VindexValues: vindexValues, + Table: table, + Prefix: prefix, + Mid: mid, + Suffix: suffix, + } +} + // MarshalJSON serializes the Insert into a JSON representation. // It's used for testing and diagnostics. 
func (ins *Insert) MarshalJSON() ([]byte, error) { diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index 5353588c73..70722ad2c1 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -28,14 +28,14 @@ import ( ) func TestInsertUnsharded(t *testing.T) { - ins := &Insert{ - Opcode: InsertUnsharded, - Keyspace: &vindexes.Keyspace{ + ins := NewQueryInsert( + InsertUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_insert", - } + "dummy_insert", + ) vc := &loggingVCursor{ shards: []string{"0"}, @@ -64,27 +64,27 @@ func TestInsertUnsharded(t *testing.T) { } func TestInsertUnshardedGenerate(t *testing.T) { - ins := &Insert{ - Opcode: InsertUnsharded, - Keyspace: &vindexes.Keyspace{ + ins := NewQueryInsert( + InsertUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_insert", - Generate: &Generate{ - Keyspace: &vindexes.Keyspace{ - Name: "ks2", - Sharded: false, - }, - Query: "dummy_generate", - Values: sqltypes.PlanValue{ - Values: []sqltypes.PlanValue{ - {Value: sqltypes.NewInt64(1)}, - {Value: sqltypes.NULL}, - {Value: sqltypes.NewInt64(2)}, - {Value: sqltypes.NULL}, - {Value: sqltypes.NewInt64(3)}, - }, + "dummy_insert", + ) + ins.Generate = &Generate{ + Keyspace: &vindexes.Keyspace{ + Name: "ks2", + Sharded: false, + }, + Query: "dummy_generate", + Values: sqltypes.PlanValue{ + Values: []sqltypes.PlanValue{ + {Value: sqltypes.NewInt64(1)}, + {Value: sqltypes.NULL}, + {Value: sqltypes.NewInt64(2)}, + {Value: sqltypes.NULL}, + {Value: sqltypes.NewInt64(3)}, }, }, } @@ -147,10 +147,10 @@ func TestInsertShardedSimple(t *testing.T) { ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // 3 rows. @@ -159,11 +159,11 @@ func TestInsertShardedSimple(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1"}, + " suffix", + ) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, shardForKsid: []string{"20-", "-20", "20-"}, @@ -182,10 +182,10 @@ func TestInsertShardedSimple(t *testing.T) { }) // Multiple rows are not autocommitted by default - ins = &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins = NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // 3 rows. @@ -198,11 +198,11 @@ func TestInsertShardedSimple(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) vc = &loggingVCursor{ shards: []string{"-20", "20-"}, shardForKsid: []string{"20-", "-20", "20-"}, @@ -222,10 +222,10 @@ func TestInsertShardedSimple(t *testing.T) { }) // Optional flag overrides autocommit - ins = &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins = NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // 3 rows. 
@@ -238,12 +238,13 @@ func TestInsertShardedSimple(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - MultiShardAutocommit: true, - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) + ins.MultiShardAutocommit = true + vc = &loggingVCursor{ shards: []string{"-20", "20-"}, shardForKsid: []string{"20-", "-20", "20-"}, @@ -295,10 +296,10 @@ func TestInsertShardedFail(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // 1 row @@ -307,11 +308,11 @@ func TestInsertShardedFail(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) vc := &loggingVCursor{} @@ -347,10 +348,10 @@ func TestInsertShardedGenerate(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // 3 rows. @@ -363,24 +364,25 @@ func TestInsertShardedGenerate(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Generate: &Generate{ - Keyspace: &vindexes.Keyspace{ - Name: "ks2", - Sharded: false, - }, - Query: "dummy_generate", - Values: sqltypes.PlanValue{ - Values: []sqltypes.PlanValue{ - {Value: sqltypes.NewInt64(1)}, - {Value: sqltypes.NULL}, - {Value: sqltypes.NewInt64(2)}, - }, + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) + + ins.Generate = &Generate{ + Keyspace: &vindexes.Keyspace{ + Name: "ks2", + Sharded: false, + }, + Query: "dummy_generate", + Values: sqltypes.PlanValue{ + Values: []sqltypes.PlanValue{ + {Value: sqltypes.NewInt64(1)}, + {Value: sqltypes.NULL}, + {Value: sqltypes.NewInt64(2)}, }, }, - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", } vc := &loggingVCursor{ @@ -472,10 +474,10 @@ func TestInsertShardedOwned(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -521,11 +523,11 @@ func TestInsertShardedOwned(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -599,10 +601,10 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -619,11 +621,11 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " 
suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -698,10 +700,10 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertShardedIgnore, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertShardedIgnore, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -755,11 +757,11 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3", " mid4"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3", " mid4"}, + " suffix", + ) ksid0 := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -872,10 +874,10 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertShardedIgnore, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertShardedIgnore, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -892,11 +894,11 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3", " mid4"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3", " mid4"}, + " suffix", + ) ksid0 := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -978,10 +980,10 @@ func TestInsertShardedUnownedVerify(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -1027,11 +1029,11 @@ func TestInsertShardedUnownedVerify(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) // nonemptyResult will cause the lookup verify queries to succeed. nonemptyResult := sqltypes.MakeTestResult( @@ -1122,10 +1124,10 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertShardedIgnore, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertShardedIgnore, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -1150,11 +1152,11 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) // nonemptyResult will cause the lookup verify queries to succeed. 
nonemptyResult := sqltypes.MakeTestResult( @@ -1236,10 +1238,10 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -1256,11 +1258,11 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -1318,10 +1320,10 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -1367,11 +1369,11 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) // nonemptyResult will cause the lookup verify queries to succeed. nonemptyResult := sqltypes.MakeTestResult( @@ -1448,10 +1450,10 @@ func TestInsertShardedUnownedReverseMapFail(t *testing.T) { } ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSharded, - Keyspace: ks.Keyspace, - VindexValues: []sqltypes.PlanValue{{ + ins := NewInsert( + InsertSharded, + ks.Keyspace, + []sqltypes.PlanValue{{ // colVindex columns: id Values: []sqltypes.PlanValue{{ // rows for id @@ -1468,11 +1470,11 @@ func TestInsertShardedUnownedReverseMapFail(t *testing.T) { }}, }}, }}, - Table: ks.Tables["t1"], - Prefix: "prefix", - Mid: []string{" mid1", " mid2", " mid3"}, - Suffix: " suffix", - } + ks.Tables["t1"], + "prefix", + []string{" mid1", " mid2", " mid3"}, + " suffix", + ) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index 470cc8f93c..fb6678bd23 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -83,6 +83,24 @@ type Route struct { ScatterErrorsAsWarnings bool } +// NewSimpleRoute creates a Route with the bare minimum of parameters. +func NewSimpleRoute(opcode RouteOpcode, keyspace *vindexes.Keyspace) *Route { + return &Route{ + Opcode: opcode, + Keyspace: keyspace, + } +} + +// NewRoute creates a Route. +func NewRoute(opcode RouteOpcode, keyspace *vindexes.Keyspace, query, fieldQuery string) *Route { + return &Route{ + Opcode: opcode, + Keyspace: keyspace, + Query: query, + FieldQuery: fieldQuery, + } +} + // OrderbyParams specifies the parameters for ordering. // This is used for merge-sorting scatter queries. 
type OrderbyParams struct { diff --git a/go/vt/vtgate/engine/route_test.go b/go/vt/vtgate/engine/route_test.go index 7aaf3fb3ce..682e6f0d78 100644 --- a/go/vt/vtgate/engine/route_test.go +++ b/go/vt/vtgate/engine/route_test.go @@ -35,15 +35,15 @@ var defaultSelectResult = sqltypes.MakeTestResult( ) func TestSelectUnsharded(t *testing.T) { - sel := &Route{ - Opcode: SelectUnsharded, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - } + "dummy_select", + "dummy_select_field", + ) vc := &loggingVCursor{ shards: []string{"0"}, @@ -72,15 +72,15 @@ func TestSelectUnsharded(t *testing.T) { } func TestSelectScatter(t *testing.T) { - sel := &Route{ - Opcode: SelectScatter, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectScatter, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - } + "dummy_select", + "dummy_select_field", + ) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -110,17 +110,17 @@ func TestSelectScatter(t *testing.T) { func TestSelectEqualUnique(t *testing.T) { vindex, _ := vindexes.NewHash("", nil) - sel := &Route{ - Opcode: SelectEqualUnique, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectEqualUnique, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - Vindex: vindex, - Values: []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}}, - } + "dummy_select", + "dummy_select_field", + ) + sel.Vindex = vindex + sel.Values = []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}} vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -155,17 +155,17 @@ func TestSelectEqualUniqueScatter(t *testing.T) { "to": "toc", "write_only": "true", }) - sel := &Route{ - Opcode: SelectEqualUnique, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectEqualUnique, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - Vindex: vindex, - Values: []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}}, - } + "dummy_select", + "dummy_select_field", + ) + sel.Vindex = vindex + sel.Values = []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}} vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -199,17 +199,17 @@ func TestSelectEqual(t *testing.T) { "from": "from", "to": "toc", }) - sel := &Route{ - Opcode: SelectEqual, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectEqual, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - Vindex: vindex, - Values: []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}}, - } + "dummy_select", + "dummy_select_field", + ) + sel.Vindex = vindex + sel.Values = []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}} vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -255,17 +255,17 @@ func TestSelectEqualNoRoute(t *testing.T) { "from": "from", "to": "toc", }) - sel := &Route{ - Opcode: SelectEqual, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectEqual, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - Vindex: vindex, - Values: []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}}, - } + "dummy_select", + "dummy_select_field", + ) + sel.Vindex = vindex + sel.Values = []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}} vc := &loggingVCursor{shards: []string{"-20", "20-"}} result, err := 
sel.Execute(vc, map[string]*querypb.BindVariable{}, false) @@ -292,25 +292,25 @@ func TestSelectEqualNoRoute(t *testing.T) { func TestSelectINUnique(t *testing.T) { vindex, _ := vindexes.NewHash("", nil) - sel := &Route{ - Opcode: SelectIN, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectIN, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - Vindex: vindex, + "dummy_select", + "dummy_select_field", + ) + sel.Vindex = vindex + sel.Values = []sqltypes.PlanValue{{ Values: []sqltypes.PlanValue{{ - Values: []sqltypes.PlanValue{{ - Value: sqltypes.NewInt64(1), - }, { - Value: sqltypes.NewInt64(2), - }, { - Value: sqltypes.NewInt64(4), - }}, + Value: sqltypes.NewInt64(1), + }, { + Value: sqltypes.NewInt64(2), + }, { + Value: sqltypes.NewInt64(4), }}, - } + }} vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -348,25 +348,25 @@ func TestSelectINNonUnique(t *testing.T) { "from": "from", "to": "toc", }) - sel := &Route{ - Opcode: SelectIN, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectIN, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - Vindex: vindex, + "dummy_select", + "dummy_select_field", + ) + sel.Vindex = vindex + sel.Values = []sqltypes.PlanValue{{ Values: []sqltypes.PlanValue{{ - Values: []sqltypes.PlanValue{{ - Value: sqltypes.NewInt64(1), - }, { - Value: sqltypes.NewInt64(2), - }, { - Value: sqltypes.NewInt64(4), - }}, + Value: sqltypes.NewInt64(1), + }, { + Value: sqltypes.NewInt64(2), + }, { + Value: sqltypes.NewInt64(4), }}, - } + }} fields := sqltypes.MakeTestFields( "toc", @@ -426,15 +426,15 @@ func TestSelectINNonUnique(t *testing.T) { } func TestSelectNext(t *testing.T) { - sel := &Route{ - Opcode: SelectNext, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectNext, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - } + "dummy_select", + "dummy_select_field", + ) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -456,15 +456,15 @@ func TestSelectNext(t *testing.T) { } func TestSelectDBA(t *testing.T) { - sel := &Route{ - Opcode: SelectDBA, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectDBA, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - } + "dummy_select", + "dummy_select_field", + ) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -491,17 +491,17 @@ func TestRouteGetFields(t *testing.T) { "from": "from", "to": "toc", }) - sel := &Route{ - Opcode: SelectEqual, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectEqual, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - Vindex: vindex, - Values: []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}}, - } + "dummy_select", + "dummy_select_field", + ) + sel.Vindex = vindex + sel.Values = []sqltypes.PlanValue{{Value: sqltypes.NewInt64(1)}} vc := &loggingVCursor{shards: []string{"-20", "20-"}} result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, true) @@ -531,18 +531,18 @@ func TestRouteGetFields(t *testing.T) { } func TestRouteSort(t *testing.T) { - sel := &Route{ - Opcode: SelectUnsharded, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - OrderBy: []OrderbyParams{{ - Col: 0, - }}, - } + 
"dummy_select", + "dummy_select_field", + ) + sel.OrderBy = []OrderbyParams{{ + Col: 0, + }} vc := &loggingVCursor{ shards: []string{"0"}, @@ -616,19 +616,19 @@ func TestRouteSort(t *testing.T) { } func TestRouteSortTruncate(t *testing.T) { - sel := &Route{ - Opcode: SelectUnsharded, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - OrderBy: []OrderbyParams{{ - Col: 0, - }}, - TruncateColumnCount: 1, - } + "dummy_select", + "dummy_select_field", + ) + sel.OrderBy = []OrderbyParams{{ + Col: 0, + }} + sel.TruncateColumnCount = 1 vc := &loggingVCursor{ shards: []string{"0"}, @@ -667,16 +667,16 @@ func TestRouteSortTruncate(t *testing.T) { } func TestRouteStreamTruncate(t *testing.T) { - sel := &Route{ - Opcode: SelectUnsharded, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - TruncateColumnCount: 1, - } + "dummy_select", + "dummy_select_field", + ) + sel.TruncateColumnCount = 1 vc := &loggingVCursor{ shards: []string{"0"}, @@ -711,19 +711,19 @@ func TestRouteStreamTruncate(t *testing.T) { } func TestRouteStreamSortTruncate(t *testing.T) { - sel := &Route{ - Opcode: SelectUnsharded, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - OrderBy: []OrderbyParams{{ - Col: 0, - }}, - TruncateColumnCount: 1, - } + "dummy_select", + "dummy_select_field", + ) + sel.OrderBy = []OrderbyParams{{ + Col: 0, + }} + sel.TruncateColumnCount = 1 vc := &loggingVCursor{ shards: []string{"0"}, @@ -762,15 +762,15 @@ func TestRouteStreamSortTruncate(t *testing.T) { } func TestParamsFail(t *testing.T) { - sel := &Route{ - Opcode: SelectUnsharded, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - } + "dummy_select", + "dummy_select_field", + ) vc := &loggingVCursor{shardErr: errors.New("shard error")} _, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) @@ -783,15 +783,15 @@ func TestParamsFail(t *testing.T) { func TestExecFail(t *testing.T) { // Unsharded error - sel := &Route{ - Opcode: SelectUnsharded, - Keyspace: &vindexes.Keyspace{ + sel := NewRoute( + SelectUnsharded, + &vindexes.Keyspace{ Name: "ks", Sharded: false, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - } + "dummy_select", + "dummy_select_field", + ) vc := &loggingVCursor{shards: []string{"0"}, resultErr: mysql.NewSQLError(mysql.ERQueryInterrupted, "", "query timeout")} _, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) @@ -803,15 +803,15 @@ func TestExecFail(t *testing.T) { expectError(t, "sel.StreamExecute err", err, "query timeout (errno 1317) (sqlstate HY000)") // Scatter fails if one of N fails without ScatterErrorsAsWarnings - sel = &Route{ - Opcode: SelectScatter, - Keyspace: &vindexes.Keyspace{ + sel = NewRoute( + SelectScatter, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - } + "dummy_select", + "dummy_select_field", + ) vc = &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -831,16 +831,16 @@ func TestExecFail(t *testing.T) { vc.Rewind() // Scatter succeeds if all shards fail with 
ScatterErrorsAsWarnings - sel = &Route{ - Opcode: SelectScatter, - Keyspace: &vindexes.Keyspace{ + sel = NewRoute( + SelectScatter, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - ScatterErrorsAsWarnings: true, - } + "dummy_select", + "dummy_select_field", + ) + sel.ScatterErrorsAsWarnings = true vc = &loggingVCursor{ shards: []string{"-20", "20-"}, @@ -869,16 +869,16 @@ func TestExecFail(t *testing.T) { vc.Rewind() // Scatter succeeds if one of N fails with ScatterErrorsAsWarnings - sel = &Route{ - Opcode: SelectScatter, - Keyspace: &vindexes.Keyspace{ + sel = NewRoute( + SelectScatter, + &vindexes.Keyspace{ Name: "ks", Sharded: true, }, - Query: "dummy_select", - FieldQuery: "dummy_select_field", - ScatterErrorsAsWarnings: true, - } + "dummy_select", + "dummy_select_field", + ) + sel.ScatterErrorsAsWarnings = true vc = &loggingVCursor{ shards: []string{"-20", "20-"}, diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 38440dc3af..ef319ac7d0 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -749,6 +749,10 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql }, nil case sqlparser.KeywordString(sqlparser.TABLES): if show.ShowTablesOpt != nil && show.ShowTablesOpt.DbName != "" { + if destKeyspace == "" { + // Change "show tables from " to "show tables" directed to that keyspace. + destKeyspace = show.ShowTablesOpt.DbName + } show.ShowTablesOpt.DbName = "" } sql = sqlparser.String(show) diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index 36f4909ee0..26f86cbae6 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -267,6 +267,12 @@ var unshardedVSchema = ` } ` +type DestinationAnyShardPickerFirstShard struct{} + +func (dp DestinationAnyShardPickerFirstShard) PickShard(shardCount int) int { + return 0 +} + // keyRangeLookuper is for testing a lookup that returns a keyrange. type keyRangeLookuper struct { } @@ -353,6 +359,7 @@ func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn getSandbox(KsTestUnsharded).VSchema = unshardedVSchema executor = NewExecutor(context.Background(), serv, cell, "", resolver, false, testBufferSize, testCacheSize, false) + key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} return executor, sbc1, sbc2, sbclookup } diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index 0499b0d60b..997757330d 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -596,7 +596,7 @@ func TestSelectKeyRangeUnique(t *testing.T) { func TestSelectIN(t *testing.T) { executor, sbc1, sbc2, sbclookup := createExecutorEnv() - // Constant in IN is just a number, not a bind variable. + // Constant in IN clause is just a number, not a bind variable. _, err := executorExec(executor, "select id from user where id in (1)", nil) if err != nil { t.Error(err) @@ -614,7 +614,7 @@ func TestSelectIN(t *testing.T) { t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries) } - // Constant in IN is just a couple numbers, not bind variables. + // Constants in IN clause are just numbers, not bind variables. // They result in two different queries on two shards. 
sbc1.Queries = nil sbc2.Queries = nil diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 008e0976fe..8af068afee 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -18,6 +18,7 @@ package vtgate import ( "bytes" + "fmt" "html/template" "reflect" "sort" @@ -589,7 +590,7 @@ func TestExecutorLegacyAutocommit(t *testing.T) { } func TestExecutorShow(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, sbclookup := createExecutorEnv() session := NewSafeSession(&vtgatepb.Session{TargetString: "@master"}) for _, query := range []string{"show databases", "show vitess_keyspaces"} { @@ -624,6 +625,49 @@ func TestExecutorShow(t *testing.T) { if err != nil { t.Error(err) } + + _, err = executor.Execute(context.Background(), "TestExecute", session, "show tables", nil) + if err != errNoKeyspace { + t.Errorf("'show tables' should fail without a keyspace") + } + + if len(sbclookup.Queries) != 0 { + t.Errorf("sbclookup unexpectedly has queries already") + } + + showResults := &sqltypes.Result{ + Fields: []*querypb.Field{ + {Name: "Tables_in_keyspace", Type: sqltypes.VarChar}, + }, + RowsAffected: 1, + InsertID: 0, + Rows: [][]sqltypes.Value{{ + sqltypes.NewVarChar("some_table"), + }}, + } + sbclookup.SetResults([]*sqltypes.Result{showResults}) + + query := fmt.Sprintf("show tables from %v", KsTestUnsharded) + qr, err := executor.Execute(context.Background(), "TestExecute", session, query, nil) + if err != nil { + t.Error(err) + } + + if len(sbclookup.Queries) != 1 { + t.Errorf("Tablet should have received one 'show' query. Instead received: %v", sbclookup.Queries) + } else { + lastQuery := sbclookup.Queries[len(sbclookup.Queries)-1].Sql + want := "show tables" + if lastQuery != want { + t.Errorf("Got: %v, want %v", lastQuery, want) + } + } + + wantqr := showResults + if !reflect.DeepEqual(qr, wantqr) { + t.Errorf("%v:\n%+v, want\n%+v", query, qr, wantqr) + } + for _, query := range []string{"show charset", "show charset like '%foo'", "show character set", "show character set like '%foo'"} { qr, err := executor.Execute(context.Background(), "TestExecute", session, query, nil) if err != nil { @@ -648,11 +692,11 @@ func TestExecutorShow(t *testing.T) { t.Errorf("%v:\n%+v, want\n%+v", query, qr, wantqr) } } - qr, err := executor.Execute(context.Background(), "TestExecute", session, "show engines", nil) + qr, err = executor.Execute(context.Background(), "TestExecute", session, "show engines", nil) if err != nil { t.Error(err) } - wantqr := &sqltypes.Result{ + wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Engine", "Support", "Comment", "Transactions", "XA", "Savepoints"), Rows: [][]sqltypes.Value{ buildVarCharRow( @@ -1797,7 +1841,7 @@ func TestExecutorUnrecognized(t *testing.T) { func TestExecutorMessageAckSharded(t *testing.T) { executor, sbc1, sbc2, _ := createExecutorEnv() - // Constant in IN is just a number, not a bind variable. + // Constant in IN clause is just a number, not a bind variable. ids := []*querypb.Value{{ Type: sqltypes.VarChar, Value: []byte("1"), @@ -1816,7 +1860,7 @@ func TestExecutorMessageAckSharded(t *testing.T) { t.Errorf("sbc2.MessageIDs: %+v, want nil\n", sbc2.MessageIDs) } - // Constant in IN is just a couple numbers, not bind variables. + // Constants in IN clause are just numbers, not bind variables. // They result in two different MessageIDs on two shards.
sbc1.MessageIDs = nil sbc2.MessageIDs = nil diff --git a/go/vt/vtgate/gateway/discoverygateway.go b/go/vt/vtgate/gateway/discoverygateway.go index 9b075d2f59..26731f72e6 100644 --- a/go/vt/vtgate/gateway/discoverygateway.go +++ b/go/vt/vtgate/gateway/discoverygateway.go @@ -210,12 +210,6 @@ func (dg *discoveryGateway) GetMasterCell(keyspace, shard string) (string, query return cell, dg, err } -// StreamHealth is not forwarded to any other tablet, -// but we handle it directly here. -func (dg *discoveryGateway) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { - return StreamHealthFromTargetStatsListener(ctx, dg.tsc, callback) -} - // Close shuts down underlying connections. // This function hides the inner implementation. func (dg *discoveryGateway) Close(ctx context.Context) error { diff --git a/go/vt/vtgate/gateway/discoverygateway_test.go b/go/vt/vtgate/gateway/discoverygateway_test.go index 4f0680f576..f3e06462a6 100644 --- a/go/vt/vtgate/gateway/discoverygateway_test.go +++ b/go/vt/vtgate/gateway/discoverygateway_test.go @@ -59,7 +59,7 @@ func TestDiscoveryGatewayExecuteBatch(t *testing.T) { func TestDiscoveryGatewayExecuteStream(t *testing.T) { testDiscoveryGatewayGeneric(t, true, func(dg Gateway, target *querypb.Target) error { - err := dg.StreamExecute(context.Background(), target, "query", nil, nil, func(qr *sqltypes.Result) error { + err := dg.StreamExecute(context.Background(), target, "query", nil, 0, nil, func(qr *sqltypes.Result) error { return nil }) return err @@ -206,6 +206,107 @@ func TestShuffleTablets(t *testing.T) { } } +func TestDiscoveryGatewayGetAggregateStats(t *testing.T) { + keyspace := "ks" + shard := "0" + hc := discovery.NewFakeHealthCheck() + dg := createDiscoveryGateway(hc, nil, "cell1", 2).(*discoveryGateway) + + // replica should only use local ones + hc.Reset() + dg.tsc.ResetForTesting() + hc.AddTestTablet("cell1", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil) + hc.AddTestTablet("cell1", "2.2.2.2", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil) + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: topodatapb.TabletType_REPLICA, + Cell: "cell1", + } + tsl, err := dg.tsc.GetAggregateStats(target) + if err != nil { + t.Error(err) + } + if tsl.HealthyTabletCount != 2 { + t.Errorf("Expected 2 healthy replica tablets, got: %v", tsl.HealthyTabletCount) + } +} + +func TestDiscoveryGatewayGetAggregateStatsRegion(t *testing.T) { + keyspace := "ks" + shard := "0" + hc := discovery.NewFakeHealthCheck() + dg := createDiscoveryGateway(hc, nil, "local-east", 2).(*discoveryGateway) + + topo.UpdateCellsToRegionsForTests(map[string]string{ + "local-west": "local", + "local-east": "local", + "remote": "remote", + }) + + hc.Reset() + dg.tsc.ResetForTesting() + hc.AddTestTablet("remote", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil) + hc.AddTestTablet("local-west", "2.2.2.2", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil) + hc.AddTestTablet("local-east", "3.3.3.3", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil) + + // Non master targets in the same region as the gateway should be discoverable + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: topodatapb.TabletType_REPLICA, + Cell: "local-west", + } + tsl, err := dg.tsc.GetAggregateStats(target) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if tsl.HealthyTabletCount != 2 { 
+ t.Errorf("Expected 2 healthy replica tablets, got: %v", tsl.HealthyTabletCount) + } +} + +func TestDiscoveryGatewayGetAggregateStatsMaster(t *testing.T) { + keyspace := "ks" + shard := "0" + hc := discovery.NewFakeHealthCheck() + dg := createDiscoveryGateway(hc, nil, "cell1", 2).(*discoveryGateway) + + // a single master in the local cell should be discoverable + hc.Reset() + dg.tsc.ResetForTesting() + hc.AddTestTablet("cell1", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_MASTER, true, 10, nil) + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: topodatapb.TabletType_MASTER, + Cell: "cell1", + } + tsl, err := dg.tsc.GetAggregateStats(target) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if tsl.HealthyTabletCount != 1 { + t.Errorf("Expected one healthy master, got: %v", tsl.HealthyTabletCount) + } + + // Aggregate stats for a master are available regardless of the requested cell + target = &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: topodatapb.TabletType_MASTER, + Cell: "cell2", + } + + tsl, err = dg.tsc.GetAggregateStats(target) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if tsl.HealthyTabletCount != 1 { + t.Errorf("Expected one healthy master, got: %v", tsl.HealthyTabletCount) + } +} + func TestDiscoveryGatewayGetTabletsWithRegion(t *testing.T) { keyspace := "ks" shard := "0" @@ -230,6 +331,50 @@ func TestDiscoveryGatewayGetTabletsWithRegion(t *testing.T) { } } +func BenchmarkOneCellGetAggregateStats(b *testing.B) { benchmarkCellsGetAggregateStats(1, b) } + +func BenchmarkTenCellGetAggregateStats(b *testing.B) { benchmarkCellsGetAggregateStats(10, b) } + +func Benchmark100CellGetAggregateStats(b *testing.B) { benchmarkCellsGetAggregateStats(100, b) } + +func Benchmark1000CellGetAggregateStats(b *testing.B) { benchmarkCellsGetAggregateStats(1000, b) } + +func benchmarkCellsGetAggregateStats(i int, b *testing.B) { + keyspace := "ks" + shard := "0" + hc := discovery.NewFakeHealthCheck() + dg := createDiscoveryGateway(hc, nil, "cell0", 2).(*discoveryGateway) + cellsToRegions := make(map[string]string) + for j := 0; j < i; j++ { + cell := fmt.Sprintf("cell%v", j) + cellsToRegions[cell] = "local" + } + + topo.UpdateCellsToRegionsForTests(cellsToRegions) + hc.Reset() + dg.tsc.ResetForTesting() + + for j := 0; j < i; j++ { + cell := fmt.Sprintf("cell%v", j) + ip := fmt.Sprintf("%v.%v.%v.%v", j, j, j, j) + hc.AddTestTablet(cell, ip, 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil) + } + + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: topodatapb.TabletType_REPLICA, + Cell: "cell0", + } + + for n := 0; n < b.N; n++ { + _, err := dg.tsc.GetAggregateStats(target) + if err != nil { + b.Fatalf("Expected no error, got %v", err) + } + } +} + func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway, target *querypb.Target) error) { keyspace := "ks" shard := "0" diff --git a/go/vt/vtgate/gateway/gateway.go b/go/vt/vtgate/gateway/gateway.go index 6dc71db18c..d9f5166ea4 100644 --- a/go/vt/vtgate/gateway/gateway.go +++ b/go/vt/vtgate/gateway/gateway.go @@ -20,7 +20,6 @@ package gateway import ( "flag" - "fmt" "time" "golang.org/x/net/context" @@ -31,7 +30,6 @@ import ( "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/vttablet/queryservice" - querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -128,53 +126,3 @@ func WaitForTablets(gw Gateway, tabletTypesToWait []topodatapb.TabletType) error }
return err } - -// StreamHealthFromTargetStatsListener responds to a StreamHealth -// streaming RPC using a srvtopo.TargetStatsListener implementation. -func StreamHealthFromTargetStatsListener(ctx context.Context, l srvtopo.TargetStatsListener, callback func(*querypb.StreamHealthResponse) error) error { - // Subscribe to the TargetStatsListener aggregate stats. - id, entries, c, err := l.Subscribe() - if err != nil { - return err - } - defer func() { - // Unsubscribe so we don't receive more updates, and - // drain the channel. - l.Unsubscribe(id) - for range c { - } - }() - - // Send all current entries. - for _, e := range entries { - shr := &querypb.StreamHealthResponse{ - Target: e.Target, - TabletExternallyReparentedTimestamp: e.TabletExternallyReparentedTimestamp, - AggregateStats: e.Stats, - } - if err := callback(shr); err != nil { - return err - } - } - - // Now listen for updates, or the end of the connection. - for { - select { - case <-ctx.Done(): - return ctx.Err() - case e, ok := <-c: - if !ok { - // Channel is closed, should never happen. - return fmt.Errorf("channel closed") - } - shr := &querypb.StreamHealthResponse{ - Target: e.Target, - TabletExternallyReparentedTimestamp: e.TabletExternallyReparentedTimestamp, - AggregateStats: e.Stats, - } - if err := callback(shr); err != nil { - return err - } - } - } -} diff --git a/go/vt/vtgate/gateway/hybridgateway.go b/go/vt/vtgate/gateway/hybridgateway.go deleted file mode 100644 index a2a1b0f3f7..0000000000 --- a/go/vt/vtgate/gateway/hybridgateway.go +++ /dev/null @@ -1,202 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gateway - -import ( - "fmt" - - "golang.org/x/net/context" - - "vitess.io/vitess/go/stats" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/srvtopo" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vttablet/queryservice" -) - -// HybridGateway implements the gateway.Gateway interface by forwarding -// the queries to the right underlying implementation: -// - it has one gateway that watches for tablets. Usually a DiscoveryGateway. -// Useful for local tablets, or remote tablets that can be accessed. -// - it has a list of remote vtgate connections to talk to l2 vtgate processes. -// Useful for remote tablets that are far away, or if the number of local -// tablets grows too big. -// -// Note the WaitForTablets method for now only waits on the local gateway. -type HybridGateway struct { - queryservice.QueryService - - // gw is the local gateway that has the local connections. - gw Gateway - - // l2vtgates is the list of remote connections to other vtgate pools. - l2vtgates []*L2VTGateConn -} - -// NewHybridGateway returns a new HybridGateway based on the provided -// parameters. gw can be nil, in which case it is assumed there is no -// local tablets. 
-func NewHybridGateway(gw Gateway, addrs []string, retryCount int) (*HybridGateway, error) { - h := &HybridGateway{ - gw: gw, - } - - for i, addr := range addrs { - conn, err := NewL2VTGateConn(fmt.Sprintf("%v", i), addr, retryCount) - if err != nil { - h.Close(context.Background()) - return nil, fmt.Errorf("dialing %v failed: %v", addr, err) - } - h.l2vtgates = append(h.l2vtgates, conn) - } - - h.QueryService = queryservice.Wrap(nil, h.route) - return h, nil -} - -// Close is part of the queryservice.QueryService interface. -func (h *HybridGateway) Close(ctx context.Context) error { - for _, l := range h.l2vtgates { - l.Close(ctx) - } - return nil -} - -// WaitForTablets is part of the Gateway interface. -// We just forward to the local Gateway, if any. -func (h *HybridGateway) WaitForTablets(ctx context.Context, tabletTypesToWait []topodatapb.TabletType) error { - if h.gw != nil { - return h.gw.WaitForTablets(ctx, tabletTypesToWait) - } - - // No local tablets, we don't wait for anything here. - return nil -} - -// RegisterStats registers the l2vtgate connection counts stats. -func (h *HybridGateway) RegisterStats() { - stats.NewCountersFuncWithMultiLabels( - "L2VtgateConnections", - "number of l2vtgate connection", - []string{"Keyspace", "ShardName", "TabletType"}, - h.servingConnStats) -} - -func (h *HybridGateway) servingConnStats() map[string]int64 { - res := make(map[string]int64) - for _, l := range h.l2vtgates { - l.servingConnStats(res) - } - return res -} - -// CacheStatus is part of the Gateway interface. It just concatenates -// all statuses from all underlying parts. -func (h *HybridGateway) CacheStatus() TabletCacheStatusList { - var result TabletCacheStatusList - - // Start with the local Gateway part. - if h.gw != nil { - result = h.gw.CacheStatus() - } - - // Then add each gateway one at a time. - for _, l := range h.l2vtgates { - partial := l.CacheStatus() - result = append(result, partial...) - } - - return result -} - -// route sends the action to the right underlying implementation. -// This doesn't retry, and doesn't collect stats, as these two are -// done by the underlying gw or l2VTGateConn. -// -// FIXME(alainjobart) now we only use gw, or the one l2vtgates we have. -// Need to deprecate this code in favor of using GetAggregateStats. -func (h *HybridGateway) route(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) error { - if h.gw != nil { - err, _ := inner(ctx, target, h.gw) - return NewShardError(err, target, nil, inTransaction) - } - if len(h.l2vtgates) == 1 { - err, _ := inner(ctx, target, h.l2vtgates[0]) - return NewShardError(err, target, nil, inTransaction) - } - return NewShardError(topo.NewError(topo.NoNode, ""), target, nil, inTransaction) -} - -// GetAggregateStats is part of the srvtopo.TargetStats interface, included -// in the gateway.Gateway interface. -func (h *HybridGateway) GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, queryservice.QueryService, error) { - // Start with the local Gateway part. - if h.gw != nil { - stats, qs, err := h.gw.GetAggregateStats(target) - if !topo.IsErrType(err, topo.NoNode) { - // The local gateway either worked, or returned an - // error. But it knows about this target. - return stats, qs, err - } - } - - // The local gateway doesn't know about this target, - // try the remote ones. 
- for _, l := range h.l2vtgates { - stats, err := l.GetAggregateStats(target) - if !topo.IsErrType(err, topo.NoNode) { - // This remote gateway either worked, or returned an - // error. But it knows about this target. - return stats, l, err - } - } - - // We couldn't find a way to resolve this. - return nil, nil, topo.NewError(topo.NoNode, target.String()) -} - -// GetMasterCell is part of the srvtopo.TargetStats interface, included -// in the gateway.Gateway interface. -func (h *HybridGateway) GetMasterCell(keyspace, shard string) (cell string, qs queryservice.QueryService, err error) { - // Start with the local Gateway part. - if h.gw != nil { - cell, qs, err := h.gw.GetMasterCell(keyspace, shard) - if !topo.IsErrType(err, topo.NoNode) { - // The local gateway either worked, or returned an - // error. But it knows about this target. - return cell, qs, err - } - // The local gateway doesn't know about this target, - // try the remote ones. - } - - for _, l := range h.l2vtgates { - cell, err := l.GetMasterCell(keyspace, shard) - if !topo.IsErrType(err, topo.NoNode) { - // This remote gateway either worked, or returned an - // error. But it knows about this target. - return cell, l, err - } - } - - // We couldn't find a way to resolve this. - return "", nil, topo.NewError(topo.NoNode, keyspace+"/"+shard) -} - -var _ Gateway = (*HybridGateway)(nil) -var _ srvtopo.TargetStats = (*HybridGateway)(nil) diff --git a/go/vt/vtgate/gateway/l2vtgateconn.go b/go/vt/vtgate/gateway/l2vtgateconn.go deleted file mode 100644 index d4264eea82..0000000000 --- a/go/vt/vtgate/gateway/l2vtgateconn.go +++ /dev/null @@ -1,271 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gateway - -import ( - "fmt" - "sort" - "sync" - "time" - - "golang.org/x/net/context" - "vitess.io/vitess/go/vt/grpcclient" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vttablet/queryservice" - "vitess.io/vitess/go/vt/vttablet/tabletconn" - - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -// L2VTGateConn keeps a single connection to a vtgate backend. The -// underlying vtgate backend must have been started with the -// '-enable_forwarding' flag. -// -// It will keep a healthcheck connection going to the target, to get -// the list of available Targets. It remembers them, and exposes a -// srvtopo.TargetStats interface to query them. -type L2VTGateConn struct { - queryservice.QueryService - - // addr is the destination address. Immutable. - addr string - - // name is the name to display for stats. Immutable. - name string - - // retryCount is the number of times to retry an action. Immutable. - retryCount int - - // cancel is associated with the life cycle of this L2VTGateConn. - // It is called when Close is called. - cancel context.CancelFunc - - // mu protects the following fields. - mu sync.RWMutex - // stats has all the stats we received from the other side. 
- stats map[l2VTGateConnKey]*l2VTGateConnValue - // statusAggregators is a map indexed by the key - // name:keyspace/shard/tablet type - statusAggregators map[string]*TabletStatusAggregator -} - -type l2VTGateConnKey struct { - keyspace string - shard string - tabletType topodatapb.TabletType -} - -type l2VTGateConnValue struct { - tabletExternallyReparentedTimestamp int64 - - // aggregates has the per-cell aggregates. - aggregates map[string]*querypb.AggregateStats -} - -// NewL2VTGateConn creates a new L2VTGateConn object. It also starts -// the background go routine to monitor its health. -func NewL2VTGateConn(name, addr string, retryCount int) (*L2VTGateConn, error) { - conn, err := tabletconn.GetDialer()(&topodatapb.Tablet{ - Hostname: addr, - }, grpcclient.FailFast(true)) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithCancel(context.Background()) - c := &L2VTGateConn{ - addr: addr, - name: name, - cancel: cancel, - stats: make(map[l2VTGateConnKey]*l2VTGateConnValue), - statusAggregators: make(map[string]*TabletStatusAggregator), - } - c.QueryService = queryservice.Wrap(conn, c.withRetry) - go c.checkConn(ctx) - return c, nil -} - -// Close is part of the queryservice.QueryService interface. -func (c *L2VTGateConn) Close(ctx context.Context) error { - c.cancel() - return nil -} - -func (c *L2VTGateConn) servingConnStats(res map[string]int64) { - c.mu.Lock() - defer c.mu.Unlock() - for k, s := range c.stats { - key := fmt.Sprintf("%s.%s.%s", k.keyspace, k.shard, topoproto.TabletTypeLString(k.tabletType)) - var htc int32 - for _, stats := range s.aggregates { - htc += stats.HealthyTabletCount - } - res[key] += int64(htc) - } -} - -func (c *L2VTGateConn) checkConn(ctx context.Context) { - for { - err := c.StreamHealth(ctx, c.streamHealthCallback) - log.Warningf("StreamHealth to %v failed, will retry after 30s: %v", c.addr, err) - time.Sleep(30 * time.Second) - } -} - -func (c *L2VTGateConn) streamHealthCallback(shr *querypb.StreamHealthResponse) error { - key := l2VTGateConnKey{ - keyspace: shr.Target.Keyspace, - shard: shr.Target.Shard, - tabletType: shr.Target.TabletType, - } - c.mu.Lock() - defer c.mu.Unlock() - e, ok := c.stats[key] - if !ok { - // No current value for this keyspace/shard/tablet type. - // Check if we received a delete, drop it. - if shr.AggregateStats == nil || (shr.AggregateStats.HealthyTabletCount == 0 && shr.AggregateStats.UnhealthyTabletCount == 0) { - return nil - } - - // It's a record for a keyspace/shard/tablet type we - // don't know yet, just create our new record with one - // entry in the map for the cell. - c.stats[key] = &l2VTGateConnValue{ - tabletExternallyReparentedTimestamp: shr.TabletExternallyReparentedTimestamp, - aggregates: map[string]*querypb.AggregateStats{ - shr.Target.Cell: shr.AggregateStats, - }, - } - return nil - } - - // Save our new value. - e.tabletExternallyReparentedTimestamp = shr.TabletExternallyReparentedTimestamp - e.aggregates[shr.Target.Cell] = shr.AggregateStats - return nil -} - -// GetAggregateStats is the discovery part of srvtopo.TargetStats interface. 
-func (c *L2VTGateConn) GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, error) { - key := l2VTGateConnKey{ - keyspace: target.Keyspace, - shard: target.Shard, - tabletType: target.TabletType, - } - c.mu.RLock() - defer c.mu.RUnlock() - e, ok := c.stats[key] - if !ok { - return nil, topo.NewError(topo.NoNode, target.String()) - } - - a, ok := e.aggregates[target.Cell] - if !ok { - return nil, topo.NewError(topo.NoNode, target.String()) - } - return a, nil -} - -// GetMasterCell is the discovery part of the srvtopo.TargetStats interface. -func (c *L2VTGateConn) GetMasterCell(keyspace, shard string) (cell string, err error) { - key := l2VTGateConnKey{ - keyspace: keyspace, - shard: shard, - tabletType: topodatapb.TabletType_MASTER, - } - c.mu.RLock() - defer c.mu.RUnlock() - e, ok := c.stats[key] - if !ok { - return "", topo.NewError(topo.NoNode, keyspace+"/"+shard) - } - - for cell := range e.aggregates { - return cell, nil - } - return "", topo.NewError(topo.NoNode, keyspace+"/"+shard) -} - -// CacheStatus returns a list of TabletCacheStatus per -// name:keyspace/shard/tablet type. -func (c *L2VTGateConn) CacheStatus() TabletCacheStatusList { - c.mu.RLock() - res := make(TabletCacheStatusList, 0, len(c.statusAggregators)) - for _, aggr := range c.statusAggregators { - res = append(res, aggr.GetCacheStatus()) - } - c.mu.RUnlock() - sort.Sort(res) - return res -} - -func (c *L2VTGateConn) updateStats(target *querypb.Target, startTime time.Time, err error) { - elapsed := time.Now().Sub(startTime) - aggr := c.getStatsAggregator(target) - aggr.UpdateQueryInfo("", target.TabletType, elapsed, err != nil) -} - -func (c *L2VTGateConn) getStatsAggregator(target *querypb.Target) *TabletStatusAggregator { - key := fmt.Sprintf("%v:%v/%v/%v", c.name, target.Keyspace, target.Shard, target.TabletType.String()) - - // get existing aggregator - c.mu.RLock() - aggr, ok := c.statusAggregators[key] - c.mu.RUnlock() - if ok { - return aggr - } - - // create a new one, but check again before the creation - c.mu.Lock() - defer c.mu.Unlock() - aggr, ok = c.statusAggregators[key] - if ok { - return aggr - } - aggr = NewTabletStatusAggregator(target.Keyspace, target.Shard, target.TabletType, key) - c.statusAggregators[key] = aggr - return aggr -} - -// withRetry uses the connection to execute the action. If there are -// retryable errors, it retries retryCount times before failing. It -// does not retry if the connection is in the middle of a -// transaction. While returning the error check if it maybe a result -// of a resharding event, and set the re-resolve bit and let the upper -// layers re-resolve and retry. -func (c *L2VTGateConn) withRetry(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) error { - var err error - for i := 0; i < c.retryCount+1; i++ { - startTime := time.Now() - var canRetry bool - err, canRetry = inner(ctx, target, conn) - if target != nil { - // target can be nil for StreamHealth calls. 
- c.updateStats(target, startTime, err) - } - if canRetry { - continue - } - break - } - return NewShardError(err, target, nil, inTransaction) -} diff --git a/go/vt/vtgate/gatewaytest/grpc_discovery_test.go b/go/vt/vtgate/gatewaytest/grpc_discovery_test.go index b14c5d5af1..6bcdec6e7c 100644 --- a/go/vt/vtgate/gatewaytest/grpc_discovery_test.go +++ b/go/vt/vtgate/gatewaytest/grpc_discovery_test.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/srvtopo" - "vitess.io/vitess/go/vt/vtgate" "vitess.io/vitess/go/vt/vtgate/gateway" "vitess.io/vitess/go/vt/vttablet/grpcqueryservice" "vitess.io/vitess/go/vt/vttablet/tabletconntest" @@ -90,77 +89,3 @@ func TestGRPCDiscovery(t *testing.T) { // run the test suite. TestSuite(t, "discovery-grpc", dg, service) } - -// TestL2VTGateDiscovery tests the hybrid gateway with a gRPC -// connection from the gateway to a l2vtgate in-process object. -func TestL2VTGateDiscovery(t *testing.T) { - flag.Set("tablet_protocol", "grpc") - flag.Set("gateway_implementation", "discoverygateway") - flag.Set("enable_forwarding", "true") - - // Fake services for the tablet, topo server. - service, ts, cell := CreateFakeServers(t) - - // Tablet: listen on a random port. - listener, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("Cannot listen: %v", err) - } - host := listener.Addr().(*net.TCPAddr).IP.String() - port := listener.Addr().(*net.TCPAddr).Port - defer listener.Close() - - // Tablet: create a gRPC server and listen on the port. - server := grpc.NewServer() - grpcqueryservice.Register(server, service) - go server.Serve(listener) - defer server.Stop() - - // L2VTGate: Create the discovery healthcheck, and the gateway. - // Wait for the right tablets to be present. - hc := discovery.NewHealthCheck(10*time.Second, 2*time.Minute) - rs := srvtopo.NewResilientServer(ts, "TestL2VTGateDiscovery") - l2vtgate := vtgate.Init(context.Background(), hc, rs, cell, 2, nil) - hc.AddTablet(&topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: cell, - Uid: 44, - }, - Keyspace: tabletconntest.TestTarget.Keyspace, - Shard: tabletconntest.TestTarget.Shard, - Type: tabletconntest.TestTarget.TabletType, - Hostname: host, - PortMap: map[string]int32{ - "grpc": int32(port), - }, - }, "test_tablet") - ctx := context.Background() - err = l2vtgate.Gateway().WaitForTablets(ctx, []topodatapb.TabletType{tabletconntest.TestTarget.TabletType}) - if err != nil { - t.Fatalf("WaitForTablets failed: %v", err) - } - - // L2VTGate: listen on a random port. - listener, err = net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("Cannot listen: %v", err) - } - defer listener.Close() - - // L2VTGate: create a gRPC server and listen on the port. - server = grpc.NewServer() - grpcqueryservice.Register(server, l2vtgate.L2VTGate()) - go server.Serve(listener) - defer server.Stop() - - // VTGate: create the HybridGateway, with no local gateway, - // and just the remote address in the l2vtgate pool. - hg, err := gateway.NewHybridGateway(nil, []string{listener.Addr().String()}, 2) - if err != nil { - t.Fatalf("gateway.NewHybridGateway() failed: %v", err) - } - defer hg.Close(ctx) - - // and run the test suite. - TestSuite(t, "l2vtgate-grpc", hg, service) -} diff --git a/go/vt/vtgate/l2vtgate.go b/go/vt/vtgate/l2vtgate.go deleted file mode 100644 index b33fd5f861..0000000000 --- a/go/vt/vtgate/l2vtgate.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2017 Google Inc. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgate - -import ( - "time" - - "golang.org/x/net/context" - - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/gateway" - "vitess.io/vitess/go/vt/vttablet/queryservice" - - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" -) - -var ( - l2VTGate *L2VTGate -) - -// L2VTGate implements queryservice.QueryService and forwards queries to -// the underlying gateway. -type L2VTGate struct { - queryservice.QueryService - timings *stats.MultiTimings - errorCounts *stats.CountersWithMultiLabels - gateway gateway.Gateway -} - -// RegisterL2VTGate defines the type of registration mechanism. -type RegisterL2VTGate func(queryservice.QueryService) - -// RegisterL2VTGates stores register funcs for L2VTGate server. -var RegisterL2VTGates []RegisterL2VTGate - -// initL2VTGate creates the single L2VTGate with the provided parameters. -func initL2VTGate(gw gateway.Gateway) *L2VTGate { - if l2VTGate != nil { - log.Fatalf("L2VTGate already initialized") - } - - l2VTGate = &L2VTGate{ - timings: stats.NewMultiTimings( - "QueryServiceCall", - "l2VTGate query service call timings", - []string{"Operation", "Keyspace", "ShardName", "DbType"}), - errorCounts: stats.NewCountersWithMultiLabels( - "QueryServiceCallErrorCount", - "Error count from calls to the query service", - []string{"Operation", "Keyspace", "ShardName", "DbType"}), - gateway: gw, - } - l2VTGate.QueryService = queryservice.Wrap( - gw, - func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) (err error) { - if target != nil { - startTime, statsKey := l2VTGate.startAction(name, target) - defer l2VTGate.endAction(startTime, statsKey, &err) - } - err, _ = inner(ctx, target, conn) - return err - }, - ) - servenv.OnRun(func() { - for _, f := range RegisterL2VTGates { - f(l2VTGate) - } - }) - return l2VTGate -} - -func (l *L2VTGate) startAction(name string, target *querypb.Target) (time.Time, []string) { - statsKey := []string{name, target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType)} - startTime := time.Now() - return startTime, statsKey -} - -func (l *L2VTGate) endAction(startTime time.Time, statsKey []string, err *error) { - if *err != nil { - // Don't increment the error counter for duplicate - // keys or bad queries, as those errors are caused by - // client queries and are not VTGate's fault. 
- ec := vterrors.Code(*err) - if ec != vtrpcpb.Code_ALREADY_EXISTS && ec != vtrpcpb.Code_INVALID_ARGUMENT { - l.errorCounts.Add(statsKey, 1) - } - } - l.timings.Record(statsKey, startTime) -} diff --git a/go/vt/vtgate/planbuilder/from.go b/go/vt/vtgate/planbuilder/from.go index af0319a3df..dd598cc951 100644 --- a/go/vt/vtgate/planbuilder/from.go +++ b/go/vt/vtgate/planbuilder/from.go @@ -149,10 +149,7 @@ func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTabl return err } rb, st := newRoute(sel, nil, nil) - rb.ERoute = &engine.Route{ - Opcode: engine.SelectDBA, - Keyspace: ks, - } + rb.ERoute = engine.NewSimpleRoute(engine.SelectDBA, ks) pb.bldr, pb.st = rb, st return nil } @@ -172,28 +169,20 @@ func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTabl _ = st.AddVindexTable(alias, table, rb) if !table.Keyspace.Sharded { - rb.ERoute = &engine.Route{ - Opcode: engine.SelectUnsharded, - Keyspace: table.Keyspace, - } + rb.ERoute = engine.NewSimpleRoute(engine.SelectUnsharded, table.Keyspace) return nil } if table.Pinned == nil { - rb.ERoute = &engine.Route{ - Opcode: engine.SelectScatter, - Keyspace: table.Keyspace, - TargetDestination: destTarget, - TargetTabletType: destTableType, - } + rb.ERoute = engine.NewSimpleRoute(engine.SelectScatter, table.Keyspace) + rb.ERoute.TargetDestination = destTarget + rb.ERoute.TargetTabletType = destTableType + return nil } // Pinned tables have their keyspace ids already assigned. // Use the Binary vindex, which is the identity function // for keyspace id. Currently only dual tables are pinned. - eRoute := &engine.Route{ - Opcode: engine.SelectEqualUnique, - Keyspace: table.Keyspace, - } + eRoute := engine.NewSimpleRoute(engine.SelectEqualUnique, table.Keyspace) eRoute.Vindex, _ = vindexes.NewBinary("binary", nil) eRoute.Values = []sqltypes.PlanValue{{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, table.Pinned)}} rb.ERoute = eRoute diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index 4ba900513b..5bef6cde7e 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -59,11 +59,11 @@ func buildInsertPlan(ins *sqlparser.Insert, vschema ContextVSchema) (*engine.Ins } func buildInsertUnshardedPlan(ins *sqlparser.Insert, table *vindexes.Table, vschema ContextVSchema) (*engine.Insert, error) { - eins := &engine.Insert{ - Opcode: engine.InsertUnsharded, - Table: table, - Keyspace: table.Keyspace, - } + eins := engine.NewSimpleInsert( + engine.InsertUnsharded, + table, + table.Keyspace, + ) var rows sqlparser.Values switch insertValues := ins.Rows.(type) { case *sqlparser.Select, *sqlparser.Union: @@ -99,11 +99,11 @@ func buildInsertUnshardedPlan(ins *sqlparser.Insert, table *vindexes.Table, vsch } func buildInsertShardedPlan(ins *sqlparser.Insert, table *vindexes.Table) (*engine.Insert, error) { - eins := &engine.Insert{ - Opcode: engine.InsertSharded, - Table: table, - Keyspace: table.Keyspace, - } + eins := engine.NewSimpleInsert( + engine.InsertSharded, + table, + table.Keyspace, + ) if ins.Ignore != "" { eins.Opcode = engine.InsertShardedIgnore } @@ -206,7 +206,7 @@ func modifyForAutoinc(ins *sqlparser.Insert, eins *engine.Insert) error { return nil } -// swapBindVariables swaps in bind variable names at the the specified +// swapBindVariables swaps in bind variable names at the specified // column position in the AST values and returns the converted values back. // Bind variable names are generated using baseName. 
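For illustration, the engine.NewSimpleRoute and engine.NewSimpleInsert helpers that the planbuilder hunks above switch to are presumably thin constructors along these lines (a sketch; their definitions are not part of this section):

package engine

import "vitess.io/vitess/go/vt/vtgate/vindexes"

// NewSimpleRoute would build a Route with just the opcode and keyspace set,
// so call sites no longer repeat the struct literal.
func NewSimpleRoute(opcode RouteOpcode, keyspace *vindexes.Keyspace) *Route {
	return &Route{Opcode: opcode, Keyspace: keyspace}
}

// NewSimpleInsert would do the same for the common Insert case.
func NewSimpleInsert(opcode InsertOpcode, table *vindexes.Table, keyspace *vindexes.Keyspace) *Insert {
	return &Insert{Opcode: opcode, Table: table, Keyspace: keyspace}
}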
func swapBindVariables(rows sqlparser.Values, colNum int, baseName string) (sqltypes.PlanValue, error) { diff --git a/go/vt/vtgate/planbuilder/ordered_aggregate.go b/go/vt/vtgate/planbuilder/ordered_aggregate.go index 905fe37bdc..30c4ec2831 100644 --- a/go/vt/vtgate/planbuilder/ordered_aggregate.go +++ b/go/vt/vtgate/planbuilder/ordered_aggregate.go @@ -272,7 +272,7 @@ func (oa *orderedAggregate) PushFilter(_ *primitiveBuilder, _ sqlparser.Expr, wh // be performed 'as they come'. In this respect, oa is the originator for // aggregate expressions like MAX, which will be added to symtab. The underlying // MAX sent to the route will not be added to symtab and will not be reachable by -// others. This functionality depends on the the PushOrderBy to request that +// others. This functionality depends on the PushOrderBy to request that // the rows be correctly ordered for a merge sort. func (oa *orderedAggregate) PushSelect(expr *sqlparser.AliasedExpr, origin builder) (rc *resultColumn, colnum int, err error) { if inner, ok := expr.Expr.(*sqlparser.FuncExpr); ok { diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index d6bcd90d66..c98686fba9 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -23,12 +23,10 @@ import ( "fmt" "io" "os" - "path" "strings" "testing" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/testfiles" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" @@ -334,8 +332,5 @@ func iterateExecFile(name string) (testCaseIterator chan testCase) { } func locateFile(name string) string { - if path.IsAbs(name) { - return name - } - return testfiles.Locate("vtgate/" + name) + return "testdata/" + name } diff --git a/data/test/vtgate/aggr_cases.txt b/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt similarity index 100% rename from data/test/vtgate/aggr_cases.txt rename to go/vt/vtgate/planbuilder/testdata/aggr_cases.txt diff --git a/data/test/vtgate/dml_cases.txt b/go/vt/vtgate/planbuilder/testdata/dml_cases.txt similarity index 100% rename from data/test/vtgate/dml_cases.txt rename to go/vt/vtgate/planbuilder/testdata/dml_cases.txt diff --git a/data/test/vtgate/filter_cases.txt b/go/vt/vtgate/planbuilder/testdata/filter_cases.txt similarity index 99% rename from data/test/vtgate/filter_cases.txt rename to go/vt/vtgate/planbuilder/testdata/filter_cases.txt index 48ae26d6f7..b9b9da4a4f 100644 --- a/data/test/vtgate/filter_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.txt @@ -936,6 +936,6 @@ # outer and inner subquery route reference the same "uu.id" name # but they refer to different things. The first reference is to the outermost query, -# and the second reference is to the the innermost 'from' subquery. +# and the second reference is to the innermost 'from' subquery. 
"select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select id from user_extra where user_id = 5) uu where uu.user_id = uu.id))" "unsupported: cross-shard correlated subquery" diff --git a/data/test/vtgate/from_cases.txt b/go/vt/vtgate/planbuilder/testdata/from_cases.txt similarity index 100% rename from data/test/vtgate/from_cases.txt rename to go/vt/vtgate/planbuilder/testdata/from_cases.txt diff --git a/data/test/vtgate/onecase.txt b/go/vt/vtgate/planbuilder/testdata/onecase.txt similarity index 100% rename from data/test/vtgate/onecase.txt rename to go/vt/vtgate/planbuilder/testdata/onecase.txt diff --git a/data/test/vtgate/postprocess_cases.txt b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt similarity index 100% rename from data/test/vtgate/postprocess_cases.txt rename to go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt diff --git a/data/test/vtgate/schema_test.json b/go/vt/vtgate/planbuilder/testdata/schema_test.json similarity index 100% rename from data/test/vtgate/schema_test.json rename to go/vt/vtgate/planbuilder/testdata/schema_test.json diff --git a/data/test/vtgate/select_cases.txt b/go/vt/vtgate/planbuilder/testdata/select_cases.txt similarity index 100% rename from data/test/vtgate/select_cases.txt rename to go/vt/vtgate/planbuilder/testdata/select_cases.txt diff --git a/data/test/vtgate/symtab_cases.txt b/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt similarity index 100% rename from data/test/vtgate/symtab_cases.txt rename to go/vt/vtgate/planbuilder/testdata/symtab_cases.txt diff --git a/data/test/vtgate/unsupported_cases.txt b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt similarity index 100% rename from data/test/vtgate/unsupported_cases.txt rename to go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt diff --git a/data/test/vtgate/vindex_func_cases.txt b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt similarity index 100% rename from data/test/vtgate/vindex_func_cases.txt rename to go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt diff --git a/data/test/vtgate/wireup_cases.txt b/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt similarity index 100% rename from data/test/vtgate/wireup_cases.txt rename to go/vt/vtgate/planbuilder/testdata/wireup_cases.txt diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index 32283678d3..da72bc5278 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -458,7 +458,7 @@ func (stc *ScatterConn) StreamExecute( fieldSent := false allErrors := stc.multiGo(ctx, "StreamExecute", rss, tabletType, func(rs *srvtopo.ResolvedShard, i int) error { - return rs.QueryService.StreamExecute(ctx, rs.Target, query, bindVars, options, func(qr *sqltypes.Result) error { + return rs.QueryService.StreamExecute(ctx, rs.Target, query, bindVars, 0, options, func(qr *sqltypes.Result) error { return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) @@ -484,7 +484,7 @@ func (stc *ScatterConn) StreamExecuteMulti( fieldSent := false allErrors := stc.multiGo(ctx, "StreamExecute", rss, tabletType, func(rs *srvtopo.ResolvedShard, i int) error { - return rs.QueryService.StreamExecute(ctx, rs.Target, query, bindVars[i], options, func(qr *sqltypes.Result) error { + return rs.QueryService.StreamExecute(ctx, rs.Target, query, bindVars[i], 0, options, func(qr *sqltypes.Result) error { return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) @@ -700,7 +700,7 @@ func 
injectShuffleQueryPartsRandomGenerator( return oldRandGen } -// shuffleQueryParts performs an in-place shuffle of the the given array. +// shuffleQueryParts performs an in-place shuffle of the given array. // The result is a psuedo-random permutation of the array chosen uniformally // from the space of all permutations. func shuffleQueryParts(splits []*vtgatepb.SplitQueryResponse_Part) { diff --git a/go/vt/vtgate/vindexes/numeric_static_map_test.go b/go/vt/vtgate/vindexes/numeric_static_map_test.go index fe9f0ae37c..78829c6d68 100644 --- a/go/vt/vtgate/vindexes/numeric_static_map_test.go +++ b/go/vt/vtgate/vindexes/numeric_static_map_test.go @@ -23,19 +23,14 @@ import ( "strings" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/testfiles" "vitess.io/vitess/go/vt/key" ) // createVindex creates the "numeric_static_map" vindex object which is used by // each test. -// -// IMPORTANT: This code is called per test and must not be called from init() -// because our internal implementation of testfiles.Locate() does not support to -// be called from init(). func createVindex() (Vindex, error) { m := make(map[string]string) - m["json_path"] = testfiles.Locate("vtgate/numeric_static_map_test.json") + m["json_path"] = "testdata/numeric_static_map_test.json" return CreateVindex("numeric_static_map", "numericStaticMap", m) } diff --git a/data/test/vtgate/numeric_static_map_test.json b/go/vt/vtgate/vindexes/testdata/numeric_static_map_test.json similarity index 100% rename from data/test/vtgate/numeric_static_map_test.json rename to go/vt/vtgate/vindexes/testdata/numeric_static_map_test.json diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 75305867e1..2b2e7b81d8 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -28,7 +28,6 @@ import ( "golang.org/x/net/context" "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" @@ -61,7 +60,6 @@ var ( queryPlanCacheSize = flag.Int64("gate_query_cache_size", 10000, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") legacyAutocommit = flag.Bool("legacy_autocommit", false, "DEPRECATED: set this flag to true to get the legacy behavior: all transactions will need an explicit begin, and DMLs outside transactions will return an error.") enableForwarding = flag.Bool("enable_forwarding", false, "if specified, this process will also expose a QueryService interface that allows other vtgates to talk through this vtgate to the underlying tablets.") - l2vtgateAddrs flagutil.StringListValue disableLocalGateway = flag.Bool("disable_local_gateway", false, "if specified, this process will not route any queries to local tablets in the local cell") ) @@ -118,7 +116,6 @@ type VTGate struct { resolver *Resolver txConn *TxConn gw gateway.Gateway - l2vtgate *L2VTGate // stats objects. // TODO(sougou): This needs to be cleaned up. There @@ -162,30 +159,12 @@ func Init(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, ce // Start with the gateway. If we can't reach the topology service, // we can't go on much further, so we log.Fatal out. 
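The ScatterConn.StreamExecute call sites earlier in this change pass a literal 0 for the new transactionID argument because they stream outside of any transaction; a caller that already holds a transaction passes its ID instead (see ExecuteWithTransactionalStreamer further down). A minimal sketch of the two call shapes, assuming any QueryService implementation:

package example

import (
	"golang.org/x/net/context"

	"vitess.io/vitess/go/sqltypes"
	querypb "vitess.io/vitess/go/vt/proto/query"
	"vitess.io/vitess/go/vt/vttablet/queryservice"
)

// streamTwice shows the widened StreamExecute signature: 0 for a plain
// stream, or an existing transaction ID for a stream that should observe
// that transaction.
func streamTwice(ctx context.Context, qs queryservice.QueryService, target *querypb.Target, transactionID int64) error {
	cb := func(qr *sqltypes.Result) error { return nil } // discard results
	if err := qs.StreamExecute(ctx, target, "select 1", nil, 0, nil, cb); err != nil {
		return err
	}
	return qs.StreamExecute(ctx, target, "select 1", nil, transactionID, nil, cb)
}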
var gw gateway.Gateway - var l2vtgate *L2VTGate if !*disableLocalGateway { gw = gateway.GetCreator()(hc, serv, cell, retryCount) gw.RegisterStats() if err := gateway.WaitForTablets(gw, tabletTypesToWait); err != nil { log.Fatalf("gateway.WaitForTablets failed: %v", err) } - - // l2vtgate gives access to the underlying Gateway - // from an exported QueryService interface. - if *enableForwarding { - l2vtgate = initL2VTGate(gw) - } - } - - // If we have other vtgate pools to connect to, create a - // HybridGateway to perform the routing. - if len(l2vtgateAddrs) > 0 { - hgw, err := gateway.NewHybridGateway(gw, l2vtgateAddrs, retryCount) - if err != nil { - log.Fatalf("gateway.NewHybridGateway failed: %v", err) - } - hgw.RegisterStats() - gw = hgw } // Check we have something to do. @@ -215,7 +194,6 @@ func Init(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, ce resolver: resolver, txConn: tc, gw: gw, - l2vtgate: l2vtgate, timings: stats.NewMultiTimings( "VtgateApi", "VtgateApi timings", @@ -295,11 +273,6 @@ func (vtg *VTGate) Gateway() gateway.Gateway { return vtg.gw } -// L2VTGate returns the L2VTGate object. Mostly used for tests. -func (vtg *VTGate) L2VTGate() *L2VTGate { - return vtg.l2vtgate -} - // Execute executes a non-streaming query. This is a V3 function. func (vtg *VTGate) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (newSession *vtgatepb.Session, qr *sqltypes.Result, err error) { // In this context, we don't care if we can't fully parse destination @@ -1195,7 +1168,3 @@ func unambiguousKeyspaceBSQ(queries []*vtgatepb.BoundShardQuery) string { return keyspace } } - -func init() { - flag.Var(&l2vtgateAddrs, "l2vtgate_addrs", "Specifies a comma-separated list of other l2 vtgate pools to connect to. 
These other vtgates must run with the --enable_forwarding flag") -} diff --git a/go/vt/vttablet/agentrpctest/test_agent_rpc.go b/go/vt/vttablet/agentrpctest/test_agent_rpc.go index 0bfa22d1e7..7039a216a4 100644 --- a/go/vt/vttablet/agentrpctest/test_agent_rpc.go +++ b/go/vt/vttablet/agentrpctest/test_agent_rpc.go @@ -52,6 +52,14 @@ type fakeRPCAgent struct { mu sync.Mutex } +func (fra *fakeRPCAgent) LockTables(ctx context.Context) error { + panic("implement me") +} + +func (fra *fakeRPCAgent) UnlockTables(ctx context.Context) error { + panic("implement me") +} + func (fra *fakeRPCAgent) setSlow(slow bool) { fra.mu.Lock() fra.slow = slow @@ -781,11 +789,26 @@ func (fra *fakeRPCAgent) StartSlave(ctx context.Context) error { return nil } +var testStartSlaveUntilAfterCalledWith = "" + +func (fra *fakeRPCAgent) StartSlaveUntilAfter(ctx context.Context, position string, waitTime time.Duration) error { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + testStartSlaveUntilAfterCalledWith = position + return nil +} + func agentRPCTestStartSlave(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { err := client.StartSlave(ctx, tablet) compareError(t, "StartSlave", err, true, testStartSlaveCalled) } +func agentRPCTestStartSlaveUntilAfter(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.StartSlaveUntilAfter(ctx, tablet, "test-position", time.Minute) + compareError(t, "StartSlaveUntilAfter", err, "test-position", testStartSlaveUntilAfterCalledWith) +} + func agentRPCTestStartSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { err := client.StartSlave(ctx, tablet) expectHandleRPCPanic(t, "StartSlave", true /*verbose*/, err) diff --git a/go/vt/vttablet/endtoend/compatibility_test.go b/go/vt/vttablet/endtoend/compatibility_test.go index 6edc6628e4..1bf9090463 100644 --- a/go/vt/vttablet/endtoend/compatibility_test.go +++ b/go/vt/vttablet/endtoend/compatibility_test.go @@ -794,7 +794,14 @@ func TestJSONType(t *testing.T) { }, } if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%v, want \n%v", prettyPrint(*qr), prettyPrint(want)) + // MariaDB 10.3 has different behavior. 
+ want2 := want.Copy() + want2.Fields[1].Type = sqltypes.Blob + want2.Fields[1].Charset = 33 + want2.Rows[0][1] = sqltypes.TestValue(sqltypes.Blob, "{\"foo\": \"bar\"}") + if !reflect.DeepEqual(*qr, *want2) { + t.Errorf("Execute:\n%v, want\n%v or\n%v", prettyPrint(*qr), prettyPrint(want), prettyPrint(*want2)) + } } } diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go index 782304b1cc..a590e08bad 100644 --- a/go/vt/vttablet/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -188,6 +188,7 @@ func (client *QueryClient) StreamExecuteWithOptions(query string, bindvars map[s &client.target, query, bindvars, + 0, options, func(res *sqltypes.Result) error { if result.Fields == nil { @@ -211,6 +212,7 @@ func (client *QueryClient) Stream(query string, bindvars map[string]*querypb.Bin &client.target, query, bindvars, + 0, &querypb.ExecuteOptions{IncludedFields: querypb.ExecuteOptions_ALL}, sendFunc, ) diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go index 5e961a9ae3..81fb606ba0 100644 --- a/go/vt/vttablet/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -364,7 +364,13 @@ func TestBindInSelect(t *testing.T) { }, } if !qr.Equal(want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(*want)) + // MariaDB 10.3 has different behavior. + want2 := want.Copy() + want2.Fields[0].Type = sqltypes.Int32 + want2.Rows[0][0] = sqltypes.NewInt32(1) + if !qr.Equal(want2) { + t.Errorf("Execute:\n%v, want\n%v or\n%v", prettyPrint(*qr), prettyPrint(*want), prettyPrint(*want2)) + } } // String bind var. @@ -382,7 +388,6 @@ func TestBindInSelect(t *testing.T) { Type: sqltypes.VarChar, ColumnLength: 12, Charset: 33, - Decimals: 31, Flags: 1, }}, RowsAffected: 1, @@ -392,6 +397,8 @@ func TestBindInSelect(t *testing.T) { }, }, } + // MariaDB 10.3 has different behavior. + qr.Fields[0].Decimals = 0 if !qr.Equal(want) { t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(*want)) } @@ -411,7 +418,6 @@ func TestBindInSelect(t *testing.T) { Type: sqltypes.VarChar, ColumnLength: 6, Charset: 33, - Decimals: 31, Flags: 1, }}, RowsAffected: 1, @@ -421,6 +427,8 @@ func TestBindInSelect(t *testing.T) { }, }, } + // MariaDB 10.3 has different behavior. 
+ qr.Fields[0].Decimals = 0 if !qr.Equal(want) { t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(*want)) } diff --git a/go/vt/vttablet/endtoend/queries_test.go b/go/vt/vttablet/endtoend/queries_test.go index 56021b831d..f407e08b91 100644 --- a/go/vt/vttablet/endtoend/queries_test.go +++ b/go/vt/vttablet/endtoend/queries_test.go @@ -1789,6 +1789,38 @@ func TestQueries(t *testing.T) { }, }, }, + &framework.MultiCase{ + Name: "impossible queries", + Cases: []framework.Testable{ + &framework.TestCase{ + Name: "specific column", + Query: "select eid from vitess_a where 1 != 1", + Rewritten: []string{ + "select eid from vitess_a where 1 != 1", + }, + RowsAffected: 0, + }, + &framework.TestCase{ + Name: "all columns", + Query: "select * from vitess_a where 1 != 1", + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + }, + RowsAffected: 0, + }, + &framework.TestCase{ + Name: "bind vars", + Query: "select :bv from vitess_a where 1 != 1", + BindVars: map[string]*querypb.BindVariable{ + "bv": sqltypes.Int64BindVariable(1), + }, + Rewritten: []string{ + "select 1 from vitess_a where 1 != 1 limit 10001", + }, + RowsAffected: 0, + }, + }, + }, } for _, tcase := range testCases { if err := tcase.Test("", client); err != nil { diff --git a/go/vt/vttablet/faketmclient/fake_client.go b/go/vt/vttablet/faketmclient/fake_client.go index 77b528d993..5ce41a95ad 100644 --- a/go/vt/vttablet/faketmclient/fake_client.go +++ b/go/vt/vttablet/faketmclient/fake_client.go @@ -93,6 +93,16 @@ func (client *FakeTabletManagerClient) GetPermissions(ctx context.Context, table return &tabletmanagerdatapb.Permissions{}, nil } +// LockTables is part of the tmclient.TabletManagerClient interface. +func (client *FakeTabletManagerClient) LockTables(ctx context.Context, tablet *topodatapb.Tablet) error { + return nil +} + +// UnlockTables is part of the tmclient.TabletManagerClient interface. +func (client *FakeTabletManagerClient) UnlockTables(ctx context.Context, tablet *topodatapb.Tablet) error { + return nil +} + // // Various read-write methods // @@ -186,6 +196,11 @@ func (client *FakeTabletManagerClient) StartSlave(ctx context.Context, tablet *t return nil } +// StartSlaveUntilAfter is part of the tmclient.TabletManagerClient interface. +func (client *FakeTabletManagerClient) StartSlaveUntilAfter(ctx context.Context, tablet *topodatapb.Tablet, position string, duration time.Duration) error { + return nil +} + // TabletExternallyReparented is part of the tmclient.TabletManagerClient interface. 
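The new "impossible queries" cases above cover statements whose WHERE clause can never match (1 != 1), the conventional way to fetch only the column definitions of a result. A rough standalone check in the same endtoend package (assuming the framework.NewClient and Execute helpers used elsewhere in these tests, and the package's usual TestMain fixture):

package endtoend

import (
	"testing"

	"vitess.io/vitess/go/vt/vttablet/endtoend/framework"
)

// TestImpossibleQueryReturnsOnlyFields verifies that an impossible WHERE
// clause yields field metadata and no rows.
func TestImpossibleQueryReturnsOnlyFields(t *testing.T) {
	client := framework.NewClient()
	qr, err := client.Execute("select eid from vitess_a where 1 != 1", nil)
	if err != nil {
		t.Fatal(err)
	}
	if len(qr.Rows) != 0 || len(qr.Fields) == 0 {
		t.Errorf("want only field metadata, got %v rows and %v fields", len(qr.Rows), len(qr.Fields))
	}
}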
func (client *FakeTabletManagerClient) TabletExternallyReparented(ctx context.Context, tablet *topodatapb.Tablet, externalID string) error { return nil diff --git a/go/vt/vttablet/grpcqueryservice/server.go b/go/vt/vttablet/grpcqueryservice/server.go index c3ada0faec..3ea8fb016f 100644 --- a/go/vt/vttablet/grpcqueryservice/server.go +++ b/go/vt/vttablet/grpcqueryservice/server.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/queryservice" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" queryservicepb "vitess.io/vitess/go/vt/proto/queryservice" ) @@ -75,7 +76,7 @@ func (q *query) StreamExecute(request *querypb.StreamExecuteRequest, stream quer request.EffectiveCallerId, request.ImmediateCallerId, ) - if err := q.server.StreamExecute(ctx, request.Target, request.Query.Sql, request.Query.BindVariables, request.Options, func(reply *sqltypes.Result) error { + if err := q.server.StreamExecute(ctx, request.Target, request.Query.Sql, request.Query.BindVariables, request.TransactionId, request.Options, func(reply *sqltypes.Result) error { return stream.Send(&querypb.StreamExecuteResponse{ Result: sqltypes.ResultToProto3(reply), }) @@ -375,6 +376,23 @@ func (q *query) UpdateStream(request *querypb.UpdateStreamRequest, stream querys return nil } +// VStream is part of the queryservice.QueryServer interface +func (q *query) VStream(request *binlogdatapb.VStreamRequest, stream queryservicepb.Query_VStreamServer) (err error) { + defer q.server.HandlePanic(&err) + ctx := callerid.NewContext(callinfo.GRPCCallInfo(stream.Context()), + request.EffectiveCallerId, + request.ImmediateCallerId, + ) + if err := q.server.VStream(ctx, request.Target, request.Position, request.Filter, func(events []*binlogdatapb.VEvent) error { + return stream.Send(&binlogdatapb.VStreamResponse{ + Events: events, + }) + }); err != nil { + return vterrors.ToGRPC(err) + } + return nil +} + // Register registers the implementation on the provide gRPC Server. func Register(s *grpc.Server, server queryservice.QueryService) { queryservicepb.RegisterQueryServer(s, &query{server}) diff --git a/go/vt/vttablet/grpctabletconn/conn.go b/go/vt/vttablet/grpctabletconn/conn.go index f4871b5a86..56667e7251 100644 --- a/go/vt/vttablet/grpctabletconn/conn.go +++ b/go/vt/vttablet/grpctabletconn/conn.go @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletconn" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" queryservicepb "vitess.io/vitess/go/vt/proto/queryservice" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -138,7 +139,7 @@ func (conn *gRPCQueryClient) ExecuteBatch(ctx context.Context, target *querypb.T } // StreamExecute executes the query and streams results back through callback. -func (conn *gRPCQueryClient) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { +func (conn *gRPCQueryClient) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { // All streaming clients should follow the code pattern below. // The first part of the function starts the stream while holding // a lock on conn.mu. 
The second part receives the data and calls @@ -165,7 +166,8 @@ func (conn *gRPCQueryClient) StreamExecute(ctx context.Context, target *querypb. Sql: query, BindVariables: bindVars, }, - Options: options, + Options: options, + TransactionId: transactionID, } stream, err := conn.c.StreamExecute(ctx, req) if err != nil { @@ -668,6 +670,50 @@ func (conn *gRPCQueryClient) UpdateStream(ctx context.Context, target *querypb.T } } +// VStream starts a VReplication stream. +func (conn *gRPCQueryClient) VStream(ctx context.Context, target *querypb.Target, position string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { + stream, err := func() (queryservicepb.Query_VStreamClient, error) { + conn.mu.RLock() + defer conn.mu.RUnlock() + if conn.cc == nil { + return nil, tabletconn.ConnClosed + } + + req := &binlogdatapb.VStreamRequest{ + Target: target, + EffectiveCallerId: callerid.EffectiveCallerIDFromContext(ctx), + ImmediateCallerId: callerid.ImmediateCallerIDFromContext(ctx), + Position: position, + Filter: filter, + } + stream, err := conn.c.VStream(ctx, req) + if err != nil { + return nil, tabletconn.ErrorFromGRPC(err) + } + return stream, nil + }() + if err != nil { + return err + } + for { + r, err := stream.Recv() + if err != nil { + return tabletconn.ErrorFromGRPC(err) + } + select { + case <-ctx.Done(): + return nil + default: + } + if err := send(r.Events); err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} + // HandlePanic is a no-op. func (conn *gRPCQueryClient) HandlePanic(err *error) { } diff --git a/go/vt/vttablet/grpctmclient/client.go b/go/vt/vttablet/grpctmclient/client.go index 0a0232c51b..7b83101e1b 100644 --- a/go/vt/vttablet/grpctmclient/client.go +++ b/go/vt/vttablet/grpctmclient/client.go @@ -344,6 +344,30 @@ func (client *Client) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet }, nil } +// LockTables is part of the tmclient.TabletManagerClient interface. +func (client *Client) LockTables(ctx context.Context, tablet *topodatapb.Tablet) error { + cc, c, err := client.dial(tablet) + if err != nil { + return err + } + defer cc.Close() + + _, err = c.LockTables(ctx, &tabletmanagerdatapb.LockTablesRequest{}) + return err +} + +// UnlockTables is part of the tmclient.TabletManagerClient interface. +func (client *Client) UnlockTables(ctx context.Context, tablet *topodatapb.Tablet) error { + cc, c, err := client.dial(tablet) + if err != nil { + return err + } + defer cc.Close() + + _, err = c.UnlockTables(ctx, &tabletmanagerdatapb.UnlockTablesRequest{}) + return err +} + // ExecuteFetchAsDba is part of the tmclient.TabletManagerClient interface. func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, query []byte, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { var c tabletmanagerservicepb.TabletManagerClient @@ -497,6 +521,20 @@ func (client *Client) StartSlave(ctx context.Context, tablet *topodatapb.Tablet) return err } +// StartSlaveUntilAfter is part of the tmclient.TabletManagerClient interface. 
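The StartSlaveUntilAfter client and server below exchange the wait timeout as a plain int64: both sides rely on time.Duration being an int64 nanosecond count, so int64(waitTime) on the client and time.Duration(request.WaitTimeout) on the server form a lossless round trip. A small sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	waitTime := 30 * time.Second
	onWire := int64(waitTime)        // client side: the WaitTimeout proto field (nanoseconds)
	decoded := time.Duration(onWire) // server side: back to a Duration
	fmt.Println(onWire, decoded == waitTime) // 30000000000 true
}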
+func (client *Client) StartSlaveUntilAfter(ctx context.Context, tablet *topodatapb.Tablet, position string, waitTime time.Duration) error { + cc, c, err := client.dial(tablet) + if err != nil { + return err + } + defer cc.Close() + _, err = c.StartSlaveUntilAfter(ctx, &tabletmanagerdatapb.StartSlaveUntilAfterRequest{ + Position: position, + WaitTimeout: int64(waitTime), + }) + return err +} + // TabletExternallyReparented is part of the tmclient.TabletManagerClient interface. func (client *Client) TabletExternallyReparented(ctx context.Context, tablet *topodatapb.Tablet, externalID string) error { cc, c, err := client.dial(tablet) diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go index 7054c37b72..bba4157a88 100644 --- a/go/vt/vttablet/grpctmserver/server.go +++ b/go/vt/vttablet/grpctmserver/server.go @@ -178,6 +178,22 @@ func (s *server) ApplySchema(ctx context.Context, request *tabletmanagerdatapb.A return response, err } +func (s *server) LockTables(ctx context.Context, req *tabletmanagerdatapb.LockTablesRequest) (*tabletmanagerdatapb.LockTablesResponse, error) { + err := s.agent.LockTables(ctx) + if err != nil { + return nil, err + } + return &tabletmanagerdatapb.LockTablesResponse{}, nil +} + +func (s *server) UnlockTables(ctx context.Context, req *tabletmanagerdatapb.UnlockTablesRequest) (*tabletmanagerdatapb.UnlockTablesResponse, error) { + err := s.agent.UnlockTables(ctx) + if err != nil { + return nil, err + } + return &tabletmanagerdatapb.UnlockTablesResponse{}, nil +} + func (s *server) ExecuteFetchAsDba(ctx context.Context, request *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (response *tabletmanagerdatapb.ExecuteFetchAsDbaResponse, err error) { defer s.agent.HandleRPCPanic(ctx, "ExecuteFetchAsDba", request, response, false /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) @@ -265,6 +281,13 @@ func (s *server) StartSlave(ctx context.Context, request *tabletmanagerdatapb.St return response, s.agent.StartSlave(ctx) } +func (s *server) StartSlaveUntilAfter(ctx context.Context, request *tabletmanagerdatapb.StartSlaveUntilAfterRequest) (response *tabletmanagerdatapb.StartSlaveUntilAfterResponse, err error) { + defer s.agent.HandleRPCPanic(ctx, "StartSlave", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.StartSlaveUntilAfterResponse{} + return response, s.agent.StartSlaveUntilAfter(ctx, request.Position, time.Duration(request.WaitTimeout)) +} + func (s *server) TabletExternallyReparented(ctx context.Context, request *tabletmanagerdatapb.TabletExternallyReparentedRequest) (response *tabletmanagerdatapb.TabletExternallyReparentedResponse, err error) { defer s.agent.HandleRPCPanic(ctx, "TabletExternallyReparented", request, response, false /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) diff --git a/go/vt/vttablet/queryservice/queryservice.go b/go/vt/vttablet/queryservice/queryservice.go index 225912ab26..52153a2941 100644 --- a/go/vt/vttablet/queryservice/queryservice.go +++ b/go/vt/vttablet/queryservice/queryservice.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/sqltypes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -77,7 +78,7 @@ type QueryService interface { // Query execution Execute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions) (*sqltypes.Result, error) - StreamExecute(ctx context.Context, 
target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error + StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error ExecuteBatch(ctx context.Context, target *querypb.Target, queries []*querypb.BoundQuery, asTransaction bool, transactionID int64, options *querypb.ExecuteOptions) ([]sqltypes.Result, error) // Combo methods, they also return the transactionID from the @@ -99,6 +100,9 @@ type QueryService interface { // UpdateStream streams updates from the provided position or timestamp. UpdateStream(ctx context.Context, target *querypb.Target, position string, timestamp int64, callback func(*querypb.StreamEvent) error) error + // VStream streams VReplication events based on the specified filter. + VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error + // StreamHealth streams health status. StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error @@ -133,7 +137,30 @@ func ExecuteWithStreamer(ctx context.Context, conn QueryService, target *querypb } go func() { defer close(rs.done) - rs.err = conn.StreamExecute(ctx, target, sql, bindVariables, options, func(qr *sqltypes.Result) error { + rs.err = conn.StreamExecute(ctx, target, sql, bindVariables, 0, options, func(qr *sqltypes.Result) error { + select { + case <-ctx.Done(): + return io.EOF + case rs.ch <- qr: + } + return nil + }) + if rs.err == nil { + rs.err = io.EOF + } + }() + return rs +} + +// ExecuteWithTransactionalStreamer does the same thing as ExecuteWithStreamer, but inside a transaction +func ExecuteWithTransactionalStreamer(ctx context.Context, conn QueryService, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions) sqltypes.ResultStream { + rs := &resultStreamer{ + done: make(chan struct{}), + ch: make(chan *sqltypes.Result), + } + go func() { + defer close(rs.done) + rs.err = conn.StreamExecute(ctx, target, sql, bindVariables, transactionID, options, func(qr *sqltypes.Result) error { select { case <-ctx.Done(): return io.EOF diff --git a/go/vt/vttablet/queryservice/wrapped.go b/go/vt/vttablet/queryservice/wrapped.go index 0ba474dff6..d0873b22af 100644 --- a/go/vt/vttablet/queryservice/wrapped.go +++ b/go/vt/vttablet/queryservice/wrapped.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vterrors" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) @@ -175,10 +176,10 @@ func (ws *wrappedService) Execute(ctx context.Context, target *querypb.Target, q return qr, err } -func (ws *wrappedService) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { +func (ws *wrappedService) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { return ws.wrapper(ctx, target, ws.impl, "StreamExecute", false, func(ctx context.Context, target 
*querypb.Target, conn QueryService) (error, bool) { streamingStarted := false - innerErr := conn.StreamExecute(ctx, target, query, bindVars, options, func(qr *sqltypes.Result) error { + innerErr := conn.StreamExecute(ctx, target, query, bindVars, transactionID, options, func(qr *sqltypes.Result) error { streamingStarted = true return callback(qr) }) @@ -250,6 +251,13 @@ func (ws *wrappedService) UpdateStream(ctx context.Context, target *querypb.Targ }) } +func (ws *wrappedService) VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { + return ws.wrapper(ctx, target, ws.impl, "UpdateStream", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.VStream(ctx, target, startPos, filter, send) + return innerErr, canRetry(ctx, innerErr) + }) +} + func (ws *wrappedService) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { return ws.wrapper(ctx, nil, ws.impl, "StreamHealth", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { innerErr := conn.StreamHealth(ctx, callback) diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index dc614c7d2b..36caa6ee66 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/queryservice" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -152,7 +153,7 @@ func (sbc *SandboxConn) ExecuteBatch(ctx context.Context, target *querypb.Target } // StreamExecute is part of the QueryService interface. -func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { +func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { sbc.ExecCount.Add(1) bv := make(map[string]*querypb.BindVariable) for k, v := range bindVars { @@ -354,7 +355,11 @@ func (sbc *SandboxConn) StreamHealth(ctx context.Context, callback func(*querypb // UpdateStream is part of the QueryService interface. func (sbc *SandboxConn) UpdateStream(ctx context.Context, target *querypb.Target, position string, timestamp int64, callback func(*querypb.StreamEvent) error) error { - // FIXME(alainjobart) implement, use in vtgate tests. + return fmt.Errorf("Not implemented in test") +} + +// VStream is part of the QueryService interface. 
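A caller of the new QueryService.VStream method supplies a start position, a binlogdata Filter, and a send callback that receives batches of VEvents until the stream ends or the context is cancelled. A hedged sketch (the Rule/Match filter shape is an assumption about how table filters are typically expressed):

package example

import (
	"golang.org/x/net/context"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
	querypb "vitess.io/vitess/go/vt/proto/query"
	"vitess.io/vitess/go/vt/vttablet/queryservice"
)

// watchTable streams replication events for one table starting at pos.
func watchTable(ctx context.Context, qs queryservice.QueryService, target *querypb.Target, pos string) error {
	filter := &binlogdatapb.Filter{
		Rules: []*binlogdatapb.Rule{{Match: "my_table"}},
	}
	return qs.VStream(ctx, target, pos, filter, func(events []*binlogdatapb.VEvent) error {
		for _, ev := range events {
			_ = ev // handle each event here
		}
		return nil
	})
}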
+func (sbc *SandboxConn) VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { return fmt.Errorf("Not implemented in test") } diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go index 813a48efd3..5a935e5867 100644 --- a/go/vt/vttablet/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -457,7 +458,7 @@ var StreamExecuteQueryResult2 = sqltypes.Result{ } // StreamExecute is part of the queryservice.QueryService interface -func (f *FakeQueryService) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { +func (f *FakeQueryService) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { if f.Panics && f.StreamExecutePanicsEarly { panic(fmt.Errorf("test-triggered panic early")) } @@ -850,6 +851,11 @@ func (f *FakeQueryService) UpdateStream(ctx context.Context, target *querypb.Tar return nil } +// VStream is part of the queryservice.QueryService interface +func (f *FakeQueryService) VStream(ctx context.Context, target *querypb.Target, position string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { + panic("not implemented") +} + // CreateFakeServer returns the fake server for the tests func CreateFakeServer(t *testing.T) *FakeQueryService { return &FakeQueryService{ diff --git a/go/vt/vttablet/tabletconntest/tabletconntest.go b/go/vt/vttablet/tabletconntest/tabletconntest.go index 6fb78fa752..8b25182816 100644 --- a/go/vt/vttablet/tabletconntest/tabletconntest.go +++ b/go/vt/vttablet/tabletconntest/tabletconntest.go @@ -479,7 +479,7 @@ func testStreamExecute(t *testing.T, conn queryservice.QueryService, f *FakeQuer ctx := context.Background() ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID) i := 0 - err := conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, TestExecuteOptions, func(qr *sqltypes.Result) error { + err := conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error { switch i { case 0: if len(qr.Rows) == 0 { @@ -515,7 +515,7 @@ func testStreamExecuteError(t *testing.T, conn queryservice.QueryService, f *Fak testErrorHelper(t, f, "StreamExecute", func(ctx context.Context) error { f.ErrorWait = make(chan struct{}) ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID) - return conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, TestExecuteOptions, func(qr *sqltypes.Result) error { + return conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error { // For some errors, the call can be retried. 
select { case <-f.ErrorWait: @@ -544,7 +544,7 @@ func testStreamExecutePanics(t *testing.T, conn queryservice.QueryService, f *Fa f.StreamExecutePanicsEarly = true testPanicHelper(t, f, "StreamExecute.Early", func(ctx context.Context) error { ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID) - return conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, TestExecuteOptions, func(qr *sqltypes.Result) error { + return conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error { return nil }) }) @@ -554,7 +554,7 @@ func testStreamExecutePanics(t *testing.T, conn queryservice.QueryService, f *Fa testPanicHelper(t, f, "StreamExecute.Late", func(ctx context.Context) error { f.PanicWait = make(chan struct{}) ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID) - return conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, TestExecuteOptions, func(qr *sqltypes.Result) error { + return conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error { // For some errors, the call can be retried. select { case <-f.PanicWait: diff --git a/go/vt/vttablet/tabletmanager/action_agent.go b/go/vt/vttablet/tabletmanager/action_agent.go index d2673b48a7..2663fe4ac1 100644 --- a/go/vt/vttablet/tabletmanager/action_agent.go +++ b/go/vt/vttablet/tabletmanager/action_agent.go @@ -47,6 +47,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" "golang.org/x/net/context" + "vitess.io/vitess/go/vt/dbconnpool" "github.com/golang/protobuf/proto" "vitess.io/vitess/go/history" @@ -204,6 +205,11 @@ type ActionAgent struct { // _slaveStopped remembers if we've been told to stop replicating. // If it's nil, we'll try to check for the slaveStoppedFile. 
_slaveStopped *bool + + // _lockTablesConnection is used to get and release the table read locks to pause replication + _lockTablesConnection *dbconnpool.DBConnection + _lockTablesTimer *time.Timer + _lockTablesTimeout *time.Duration } // NewActionAgent creates a new ActionAgent and registers all the diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go index 04f956afdf..1bef381a7e 100644 --- a/go/vt/vttablet/tabletmanager/rpc_agent.go +++ b/go/vt/vttablet/tabletmanager/rpc_agent.go @@ -66,6 +66,10 @@ type RPCAgent interface { ApplySchema(ctx context.Context, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) + LockTables(ctx context.Context) error + + UnlockTables(ctx context.Context) error + ExecuteFetchAsDba(ctx context.Context, query []byte, dbName string, maxrows int, disableBinlogs bool, reloadSchema bool) (*querypb.QueryResult, error) ExecuteFetchAsAllPrivs(ctx context.Context, query []byte, dbName string, maxrows int, reloadSchema bool) (*querypb.QueryResult, error) @@ -84,6 +88,8 @@ type RPCAgent interface { StartSlave(ctx context.Context) error + StartSlaveUntilAfter(ctx context.Context, position string, waitTime time.Duration) error + TabletExternallyReparented(ctx context.Context, externalID string) error GetSlaves(ctx context.Context) ([]string, error) diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index 2ad3cc2d98..8eeeb322ce 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -40,7 +40,15 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo return fmt.Errorf("cannot perform backup without my.cnf, please restart vttablet with a my.cnf file specified") } - // update our type to BACKUP + // Check tablet type current process has. + // During a network partition it is possible that from the topology perspective this is no longer the master, + // but the process didn't find out about this. + // It is not safe to take backups from tablet in this state + currentTablet := agent.Tablet() + if currentTablet.Type == topodatapb.TabletType_MASTER { + return fmt.Errorf("type MASTER cannot take backup, if you really need to do this, restart vttablet in replica mode") + } + tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return err @@ -49,6 +57,8 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo return fmt.Errorf("type MASTER cannot take backup, if you really need to do this, restart vttablet in replica mode") } originalType := tablet.Type + + // update our type to BACKUP if _, err := topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, topodatapb.TabletType_BACKUP); err != nil { return err } diff --git a/go/vt/vttablet/tabletmanager/rpc_lock_tables.go b/go/vt/vttablet/tabletmanager/rpc_lock_tables.go new file mode 100644 index 0000000000..66fda08395 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/rpc_lock_tables.go @@ -0,0 +1,144 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletmanager + +import ( + "errors" + "flag" + "fmt" + "strings" + "time" + + "vitess.io/vitess/go/vt/dbconnpool" + + "github.com/golang/glog" + "vitess.io/vitess/go/sqlescape" + + "golang.org/x/net/context" +) + +var ( + lockTablesTimeout = flag.Duration("lock_tables_timeout", 1*time.Minute, "How long to keep the table locked before timing out") +) + +// LockTables will lock all tables with read locks, effectively pausing replication while the lock is held (idempotent) +func (agent *ActionAgent) LockTables(ctx context.Context) error { + // get a connection + agent.mutex.Lock() + defer agent.mutex.Unlock() + + if agent._lockTablesConnection != nil { + // tables are already locked, bail out + return errors.New("tables already locked on this tablet") + } + + conn, err := agent.MysqlDaemon.GetDbaConnection() + if err != nil { + return err + } + + // FTWRL is preferable, so we'll try that first + _, err = conn.ExecuteFetch("FLUSH TABLES WITH READ LOCK", 0, false) + if err != nil { + // as fall back, we can lock each individual table as well. + // this requires slightly less privileges but achieves the same effect + err = agent.lockTablesUsingLockTables(conn) + if err != nil { + return err + } + } + glog.Infof("[%v] Tables locked", conn.ConnectionID) + + agent._lockTablesConnection = conn + agent._lockTablesTimer = time.AfterFunc(*lockTablesTimeout, func() { + // Here we'll sleep until the timeout time has elapsed. + // If the table locks have not been released yet, we'll release them here + agent.mutex.Lock() + defer agent.mutex.Unlock() + + // We need the mutex locked before we check this field + if agent._lockTablesConnection == conn { + glog.Errorf("table lock timed out and released the lock - something went wrong") + err = agent.unlockTablesHoldingMutex() + if err != nil { + glog.Errorf("failed to unlock tables: %v", err) + } + } + }) + + return nil +} + +func (agent *ActionAgent) lockTablesUsingLockTables(conn *dbconnpool.DBConnection) error { + glog.Warningf("failed to lock tables with FTWRL - falling back to LOCK TABLES") + + // Ensure schema engine is Open. If vttablet came up in a non_serving role, + // the schema engine may not have been initialized. 
Open() is idempotent, so this + // is always safe + se := agent.QueryServiceControl.SchemaEngine() + if err := se.Open(); err != nil { + return err + } + + tables := se.GetSchema() + tableNames := make([]string, 0, len(tables)) + for name := range tables { + if name == "dual" { + continue + } + tableNames = append(tableNames, fmt.Sprintf("%s READ", sqlescape.EscapeID(name))) + } + lockStatement := fmt.Sprintf("LOCK TABLES %v", strings.Join(tableNames, ", ")) + _, err := conn.ExecuteFetch(fmt.Sprintf("USE %s", agent.DBConfigs.DBName.Get()), 0, false) + if err != nil { + return err + } + + _, err = conn.ExecuteFetch(lockStatement, 0, false) + if err != nil { + return err + } + + return nil +} + +// UnlockTables will unlock all tables (idempotent) +func (agent *ActionAgent) UnlockTables(ctx context.Context) error { + agent.mutex.Lock() + defer agent.mutex.Unlock() + + if agent._lockTablesConnection == nil { + return fmt.Errorf("tables were not locked") + } + + return agent.unlockTablesHoldingMutex() +} + +func (agent *ActionAgent) unlockTablesHoldingMutex() error { + // We are cleaning up manually, let's kill the timer + agent._lockTablesTimer.Stop() + _, err := agent._lockTablesConnection.ExecuteFetch("UNLOCK TABLES", 0, false) + if err != nil { + return err + } + glog.Infof("[%v] Tables unlocked", agent._lockTablesConnection.ConnectionID) + agent._lockTablesConnection.Close() + agent._lockTablesConnection = nil + agent._lockTablesTimer = nil + + return nil +} diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index f11d2875bd..4b255b6863 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -144,6 +144,25 @@ func (agent *ActionAgent) StartSlave(ctx context.Context) error { return agent.MysqlDaemon.StartSlave(agent.hookExtraEnv()) } +// StartSlaveUntilAfter will start the replication and let it catch up +// until and including the transactions in `position` +func (agent *ActionAgent) StartSlaveUntilAfter(ctx context.Context, position string, waitTime time.Duration) error { + if err := agent.lock(ctx); err != nil { + return err + } + defer agent.unlock() + + waitCtx, cancel := context.WithTimeout(ctx, waitTime) + defer cancel() + + pos, err := mysql.DecodePosition(position) + if err != nil { + return err + } + + return agent.MysqlDaemon.StartSlaveUntilAfter(waitCtx, pos) +} + // GetSlaves returns the address of all the slaves func (agent *ActionAgent) GetSlaves(ctx context.Context) ([]string, error) { return mysqlctl.FindSlaves(agent.MysqlDaemon) diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index 784d104c1f..75d1c7c32d 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -131,8 +131,14 @@ func (ct *controller) run(ctx context.Context) { return default: } - log.Warningf("stream %v: %v, retrying after %v", ct.id, err, *retryDelay) - time.Sleep(*retryDelay) + log.Errorf("stream %v: %v, retrying after %v", ct.id, err, *retryDelay) + timer := time.NewTimer(*retryDelay) + select { + case <-ctx.Done(): + timer.Stop() + return + case <-timer.C: + } } } @@ -169,7 +175,8 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { } ct.sourceTablet.Set(tablet.Alias.String()) - if len(ct.source.Tables) > 0 { + switch { + case len(ct.source.Tables) > 0: // Table names can have search patterns. 
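LockTables above pairs the lock with a time.AfterFunc safety valve so that a caller which never issues UnlockTables cannot stall replication past lock_tables_timeout. A minimal standalone sketch of that acquire/auto-release pattern, using only the standard library and a hypothetical locker type in place of the agent and its dbconnpool connection:

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// locker illustrates the pattern used by LockTables/UnlockTables: one mutex
// guards both the "is locked" state and the safety timer, and the timer
// callback re-checks ownership before force-releasing.
type locker struct {
	mu     sync.Mutex
	locked bool
	timer  *time.Timer
}

func (l *locker) Lock(timeout time.Duration) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.locked {
		return errors.New("already locked")
	}
	l.locked = true
	l.timer = time.AfterFunc(timeout, func() {
		l.mu.Lock()
		defer l.mu.Unlock()
		if l.locked { // never released in time, clean up here
			fmt.Println("lock timed out, force releasing")
			l.release()
		}
	})
	return nil
}

func (l *locker) Unlock() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	if !l.locked {
		return errors.New("not locked")
	}
	l.release()
	return nil
}

// release must be called with mu held.
func (l *locker) release() {
	l.timer.Stop()
	l.locked = false
}

func main() {
	var l locker
	_ = l.Lock(50 * time.Millisecond)
	time.Sleep(100 * time.Millisecond) // let the safety timer fire
	fmt.Println(l.Unlock())            // "not locked": the timer already released it
}

The timer callback takes the same mutex and re-checks ownership before releasing, which is why a normal UnlockTables that already ran is left undisturbed; the real code compares the stored connection pointer rather than a boolean.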
Resolve them against the schema. tables, err := mysqlctl.ResolveTables(ct.mysqld, dbClient.DBName(), ct.source.Tables) if err != nil { @@ -178,9 +185,18 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { player := binlogplayer.NewBinlogPlayerTables(dbClient, tablet, tables, ct.id, ct.blpStats) return player.ApplyBinlogEvents(ctx) + case ct.source.KeyRange != nil: + player := binlogplayer.NewBinlogPlayerKeyRange(dbClient, tablet, ct.source.KeyRange, ct.id, ct.blpStats) + return player.ApplyBinlogEvents(ctx) + case ct.source.Filter != nil: + // VPlayer requires the timezone to be UTC. + if _, err := dbClient.ExecuteFetch("set @@session.time_zone = '+00:00'", 10000); err != nil { + return err + } + vplayer := newVPlayer(ct.id, &ct.source, tablet, ct.blpStats, dbClient, ct.mysqld) + return vplayer.Play(ctx) } - player := binlogplayer.NewBinlogPlayerKeyRange(dbClient, tablet, ct.source.KeyRange, ct.id, ct.blpStats) - return player.ApplyBinlogEvents(ctx) + return fmt.Errorf("missing source") } func (ct *controller) Stop() { diff --git a/go/vt/vttablet/tabletmanager/vreplication/planbuilder.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go similarity index 88% rename from go/vt/vttablet/tabletmanager/vreplication/planbuilder.go rename to go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index 22713df947..1a964e7b02 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/planbuilder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -23,8 +23,8 @@ import ( "vitess.io/vitess/go/vt/sqlparser" ) -// plan is the plan for vreplication control statements. -type plan struct { +// controllerPlan is the plan for vreplication control statements. +type controllerPlan struct { opcode int query string id int @@ -37,8 +37,8 @@ const ( selectQuery ) -// getPlan parses the input query and returns an appropriate plan. -func getPlan(query string) (*plan, error) { +// buildControllerPlan parses the input query and returns an appropriate plan. 
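buildControllerPlan's top-level dispatch is easy to restate in isolation. The sketch below is self-contained, uses plain strings for the opcodes, and omits the id extraction that the real insert/update/delete builders perform; it only shows how statement kinds against _vt.vreplication map to plan opcodes:

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

// classify mirrors the outer switch of buildControllerPlan: only
// insert/update/delete/select statements are accepted, everything else is
// rejected as an unsupported construct.
func classify(query string) (string, error) {
	stmt, err := sqlparser.Parse(query)
	if err != nil {
		return "", err
	}
	switch stmt.(type) {
	case *sqlparser.Insert:
		return "insertQuery", nil
	case *sqlparser.Update:
		return "updateQuery", nil
	case *sqlparser.Delete:
		return "deleteQuery", nil
	case *sqlparser.Select:
		return "selectQuery", nil
	default:
		return "", fmt.Errorf("unsupported construct: %v", sqlparser.String(stmt))
	}
}

func main() {
	for _, q := range []string{
		"insert into _vt.vreplication values(null)",
		"update _vt.vreplication set state='Running' where id = 1",
		"delete from _vt.vreplication where id = 1",
		"select * from _vt.vreplication where id = 1",
	} {
		op, err := classify(q)
		fmt.Println(op, err)
	}
}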
+func buildControllerPlan(query string) (*controllerPlan, error) { stmt, err := sqlparser.Parse(query) if err != nil { return nil, err @@ -57,7 +57,7 @@ func getPlan(query string) (*plan, error) { } } -func buildInsertPlan(ins *sqlparser.Insert) (*plan, error) { +func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { if ins.Action != sqlparser.InsertStr { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } @@ -99,13 +99,13 @@ func buildInsertPlan(ins *sqlparser.Insert) (*plan, error) { return nil, fmt.Errorf("id should not have a value: %v", sqlparser.String(ins)) } } - return &plan{ + return &controllerPlan{ opcode: insertQuery, query: sqlparser.String(ins), }, nil } -func buildUpdatePlan(upd *sqlparser.Update) (*plan, error) { +func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { if sqlparser.String(upd.TableExprs) != "_vt.vreplication" { return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(upd.TableExprs)) } @@ -123,14 +123,14 @@ func buildUpdatePlan(upd *sqlparser.Update) (*plan, error) { return nil, err } - return &plan{ + return &controllerPlan{ opcode: updateQuery, query: sqlparser.String(upd), id: id, }, nil } -func buildDeletePlan(del *sqlparser.Delete) (*plan, error) { +func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { if del.Targets != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) } @@ -149,18 +149,18 @@ func buildDeletePlan(del *sqlparser.Delete) (*plan, error) { return nil, err } - return &plan{ + return &controllerPlan{ opcode: deleteQuery, query: sqlparser.String(del), id: id, }, nil } -func buildSelectPlan(sel *sqlparser.Select) (*plan, error) { +func buildSelectPlan(sel *sqlparser.Select) (*controllerPlan, error) { if sqlparser.String(sel.From) != "_vt.vreplication" { return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(sel.From)) } - return &plan{ + return &controllerPlan{ opcode: selectQuery, query: sqlparser.String(sel), }, nil diff --git a/go/vt/vttablet/tabletmanager/vreplication/planbuilder_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go similarity index 96% rename from go/vt/vttablet/tabletmanager/vreplication/planbuilder_test.go rename to go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go index af89c949c6..533668a295 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/planbuilder_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go @@ -21,27 +21,27 @@ import ( "testing" ) -func TestPlanBuilder(t *testing.T) { +func TestControllerPlan(t *testing.T) { tcases := []struct { in string - plan *plan + plan *controllerPlan err string }{{ // Insert in: "insert into _vt.vreplication values(null)", - plan: &plan{ + plan: &controllerPlan{ opcode: insertQuery, query: "insert into _vt.vreplication values (null)", }, }, { in: "insert into _vt.vreplication(id) values(null)", - plan: &plan{ + plan: &controllerPlan{ opcode: insertQuery, query: "insert into _vt.vreplication(id) values (null)", }, }, { in: "insert into _vt.vreplication(workflow, id) values('', null)", - plan: &plan{ + plan: &controllerPlan{ opcode: insertQuery, query: "insert into _vt.vreplication(workflow, id) values ('', null)", }, @@ -79,7 +79,7 @@ func TestPlanBuilder(t *testing.T) { // Update }, { in: "update _vt.vreplication set state='Running' where id = 1", - plan: &plan{ + plan: &controllerPlan{ opcode: updateQuery, query: "update _vt.vreplication set state = 'Running' where id = 1", id: 1, @@ 
-115,7 +115,7 @@ func TestPlanBuilder(t *testing.T) { // Delete }, { in: "delete from _vt.vreplication where id = 1", - plan: &plan{ + plan: &controllerPlan{ opcode: deleteQuery, query: "delete from _vt.vreplication where id = 1", id: 1, @@ -154,7 +154,7 @@ func TestPlanBuilder(t *testing.T) { // Select }, { in: "select * from _vt.vreplication where id = 1", - plan: &plan{ + plan: &controllerPlan{ opcode: selectQuery, query: "select * from _vt.vreplication where id = 1", }, @@ -171,7 +171,7 @@ func TestPlanBuilder(t *testing.T) { err: "unsupported construct: set a = 1", }} for _, tcase := range tcases { - pl, err := getPlan(tcase.in) + pl, err := buildControllerPlan(tcase.in) if err != nil { if err.Error() != tcase.err { t.Errorf("getPlan(%v) error:\n%v, want\n%v", tcase.in, err, tcase.err) diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go index 52c2a0f0b6..e261444ee6 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go @@ -18,6 +18,7 @@ package vreplication import ( "errors" + "fmt" "testing" "time" @@ -51,14 +52,14 @@ var ( ) func TestControllerKeyRange(t *testing.T) { - ts := createTopo() - fbc := newFakeBinlogClient() - wantTablet := addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) + resetBinlogClient() + wantTablet := addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true) + defer deleteTablet(wantTablet) params := map[string]string{ "id": "1", "state": binlogplayer.BlpRunning, - "source": `keyspace:"ks" shard:"0" key_range: `, + "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), } dbClient := binlogplayer.NewMockDBClient(t) @@ -72,7 +73,7 @@ func TestControllerKeyRange(t *testing.T) { dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, ts, testCell, "replica", nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil) if err != nil { t.Fatal(err) } @@ -82,18 +83,18 @@ func TestControllerKeyRange(t *testing.T) { }() dbClient.Wait() - expectFBCRequest(t, fbc, wantTablet, testPos, nil, &topodatapb.KeyRange{End: []byte{0x80}}) + expectFBCRequest(t, wantTablet, testPos, nil, &topodatapb.KeyRange{End: []byte{0x80}}) } func TestControllerTables(t *testing.T) { - ts := createTopo() - wantTablet := addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) - fbc := newFakeBinlogClient() + wantTablet := addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true) + defer deleteTablet(wantTablet) + resetBinlogClient() params := map[string]string{ "id": "1", "state": binlogplayer.BlpRunning, - "source": `keyspace:"ks" shard:"0" tables:"table1" tables:"/funtables_/" `, + "source": fmt.Sprintf(`keyspace:"%s" shard:"0" tables:"table1" tables:"/funtables_/" `, env.KeyspaceName), } dbClient := binlogplayer.NewMockDBClient(t) @@ -132,7 +133,7 @@ func TestControllerTables(t *testing.T) { }, } - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, ts, testCell, "replica", nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil) if err != nil { t.Fatal(err) } @@ -142,7 +143,7 @@ func TestControllerTables(t *testing.T) { }() dbClient.Wait() - 
expectFBCRequest(t, fbc, wantTablet, testPos, []string{"table1", "funtables_one"}, nil) + expectFBCRequest(t, wantTablet, testPos, []string{"table1", "funtables_one"}, nil) } func TestControllerBadID(t *testing.T) { @@ -176,15 +177,15 @@ func TestControllerStopped(t *testing.T) { } func TestControllerOverrides(t *testing.T) { - ts := createTopo() - fbc := newFakeBinlogClient() - wantTablet := addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) + resetBinlogClient() + wantTablet := addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true) + defer deleteTablet(wantTablet) params := map[string]string{ "id": "1", "state": binlogplayer.BlpRunning, - "source": `keyspace:"ks" shard:"0" key_range: `, - "cell": testCell, + "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), + "cell": env.Cells[0], "tablet_types": "replica", } @@ -199,7 +200,7 @@ func TestControllerOverrides(t *testing.T) { dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, ts, testCell, "rdonly", nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil) if err != nil { t.Fatal(err) } @@ -209,22 +210,21 @@ func TestControllerOverrides(t *testing.T) { }() dbClient.Wait() - expectFBCRequest(t, fbc, wantTablet, testPos, nil, &topodatapb.KeyRange{End: []byte{0x80}}) + expectFBCRequest(t, wantTablet, testPos, nil, &topodatapb.KeyRange{End: []byte{0x80}}) } func TestControllerCanceledContext(t *testing.T) { - ts := createTopo() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) params := map[string]string{ "id": "1", "state": binlogplayer.BlpRunning, - "source": `keyspace:"ks" shard:"0" key_range: `, + "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), } ctx, cancel := context.WithCancel(context.Background()) cancel() - ct, err := newController(ctx, params, nil, nil, ts, testCell, "rdonly", nil) + ct, err := newController(ctx, params, nil, nil, env.TopoServ, env.Cells[0], "rdonly", nil) if err != nil { t.Fatal(err) } @@ -242,15 +242,14 @@ func TestControllerRetry(t *testing.T) { defer func() { *retryDelay = savedDelay }() *retryDelay = 10 * time.Millisecond - ts := createTopo() - _ = newFakeBinlogClient() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) + resetBinlogClient() + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) params := map[string]string{ "id": "1", "state": binlogplayer.BlpRunning, - "source": `keyspace:"ks" shard:"0" key_range: `, - "cell": testCell, + "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), + "cell": env.Cells[0], "tablet_types": "replica", } @@ -267,7 +266,7 @@ func TestControllerRetry(t *testing.T) { dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, ts, testCell, "rdonly", nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil) if err != nil { t.Fatal(err) } @@ -277,14 +276,14 @@ func TestControllerRetry(t *testing.T) { } func TestControllerStopPosition(t *testing.T) { - ts := 
createTopo() - fbc := newFakeBinlogClient() - wantTablet := addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) + resetBinlogClient() + wantTablet := addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true) + defer deleteTablet(wantTablet) params := map[string]string{ "id": "1", "state": binlogplayer.BlpRunning, - "source": `keyspace:"ks" shard:"0" key_range: `, + "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), } dbClient := binlogplayer.NewMockDBClient(t) @@ -312,7 +311,7 @@ func TestControllerStopPosition(t *testing.T) { dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, ts, testCell, "replica", nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil) if err != nil { t.Fatal(err) } @@ -329,5 +328,5 @@ func TestControllerStopPosition(t *testing.T) { } dbClient.Wait() - expectFBCRequest(t, fbc, wantTablet, testPos, nil, &topodatapb.KeyRange{End: []byte{0x80}}) + expectFBCRequest(t, wantTablet, testPos, nil, &topodatapb.KeyRange{End: []byte{0x80}}) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index 1b27247369..3577508abd 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -24,7 +24,6 @@ import ( "time" "golang.org/x/net/context" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -51,8 +50,7 @@ type Engine struct { isOpen bool controllers map[int]*controller // wg is used by in-flight functions that can run for long periods. - wg sync.WaitGroup - mustCreate bool + wg sync.WaitGroup // ctx is the root context for all controllers. ctx context.Context @@ -100,6 +98,34 @@ func (vre *Engine) Open(ctx context.Context) error { return nil } +// executeFetchMaybeCreateTable calls DBClient.ExecuteFetch and does one retry if +// there's a failure due to mysql.ERNoSuchTable or mysql.ERBadDb which can be fixed +// by re-creating the _vt.vreplication table. +func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, query string, maxrows int) (qr *sqltypes.Result, err error) { + qr, err = dbClient.ExecuteFetch(query, maxrows) + + if err == nil { + return + } + + // If it's a bad table or db, it could be because _vt.vreplication wasn't created. + // In that case we can try creating it again. + merr, isSQLErr := err.(*mysql.SQLError) + if !isSQLErr || !(merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb) { + return qr, err + } + + log.Info("Looks like _vt.vreplication table may not exist. Trying to recreate... ") + for _, query := range binlogplayer.CreateVReplicationTable() { + if _, merr := dbClient.ExecuteFetch(query, 0); merr != nil { + log.Warningf("Failed to ensure _vt.vreplication table exists: %v", merr) + return nil, err + } + } + + return dbClient.ExecuteFetch(query, maxrows) +} + func (vre *Engine) initAll() error { dbClient := vre.dbClientFactory() if err := dbClient.Connect(); err != nil { @@ -110,8 +136,7 @@ func (vre *Engine) initAll() error { rows, err := readAllRows(dbClient) if err != nil { // Handle Table not found. 
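The retry in executeFetchMaybeCreateTable above is deliberately narrow: only a missing table (mysql.ERNoSuchTable, 1146) or a missing database (mysql.ERBadDb, 1049) triggers re-creation of _vt.vreplication, the original statement is retried exactly once, and any other error is returned untouched. A self-contained sketch of that shape, with a stand-in error type instead of *mysql.SQLError and a hypothetical exec callback:

package main

import "fmt"

// sqlErr stands in for *mysql.SQLError; only the numeric code matters here.
type sqlErr struct{ num int }

func (e *sqlErr) Error() string { return fmt.Sprintf("mysql error %d", e.num) }

const (
	erNoSuchTable = 1146 // mysql.ERNoSuchTable
	erBadDb       = 1049 // mysql.ERBadDb
)

// execMaybeCreate runs the query once and, only if the failure looks like a
// missing table or database, runs the create statements and retries the
// original query a single time.
func execMaybeCreate(exec func(string) error, query string, createDDL []string) error {
	err := exec(query)
	if err == nil {
		return nil
	}
	se, ok := err.(*sqlErr)
	if !ok || (se.num != erNoSuchTable && se.num != erBadDb) {
		return err
	}
	for _, ddl := range createDDL {
		if cerr := exec(ddl); cerr != nil {
			return err // keep the original error, as the real code does
		}
	}
	return exec(query)
}

func main() {
	created := false
	exec := func(q string) error {
		if q == "CREATE TABLE t (id int)" {
			created = true
			return nil
		}
		if !created {
			return &sqlErr{num: erNoSuchTable}
		}
		return nil
	}
	fmt.Println(execMaybeCreate(exec, "select * from t", []string{"CREATE TABLE t (id int)"}))
}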
- if merr, ok := err.(*mysql.SQLError); ok && merr.Num == 1146 { - vre.mustCreate = true + if merr, ok := err.(*mysql.SQLError); ok && merr.Num == mysql.ERNoSuchTable { log.Info("_vt.vreplication table not found. Will create it later if needed") return nil } @@ -175,7 +200,7 @@ func (vre *Engine) Exec(query string) (*sqltypes.Result, error) { } defer vre.updateStats() - plan, err := getPlan(query) + plan, err := buildControllerPlan(query) if err != nil { return nil, err } @@ -185,25 +210,16 @@ func (vre *Engine) Exec(query string) (*sqltypes.Result, error) { } defer dbClient.Close() - if vre.mustCreate { - for _, query := range binlogplayer.CreateVReplicationTable() { - if _, err := dbClient.ExecuteFetch(query, 0); err != nil { - return nil, err - } - } - vre.mustCreate = false - } - // Change the database to ensure that these events don't get // replicated by another vreplication. This can happen when // we reverse replication. - if _, err := dbClient.ExecuteFetch("use _vt", 1); err != nil { + if _, err := vre.executeFetchMaybeCreateTable(dbClient, "use _vt", 1); err != nil { return nil, err } switch plan.opcode { case insertQuery: - qr, err := dbClient.ExecuteFetch(plan.query, 1) + qr, err := vre.executeFetchMaybeCreateTable(dbClient, plan.query, 1) if err != nil { return nil, err } @@ -228,7 +244,7 @@ func (vre *Engine) Exec(query string) (*sqltypes.Result, error) { ct.Stop() blpStats = ct.blpStats } - qr, err := dbClient.ExecuteFetch(plan.query, 1) + qr, err := vre.executeFetchMaybeCreateTable(dbClient, plan.query, 1) if err != nil { return nil, err } @@ -250,10 +266,10 @@ func (vre *Engine) Exec(query string) (*sqltypes.Result, error) { ct.Stop() delete(vre.controllers, plan.id) } - return dbClient.ExecuteFetch(plan.query, 1) + return vre.executeFetchMaybeCreateTable(dbClient, plan.query, 1) case selectQuery: // select queries are passed through. - return dbClient.ExecuteFetch(plan.query, 10000) + return vre.executeFetchMaybeCreateTable(dbClient, plan.query, 10000) } panic("unreachable") } @@ -265,14 +281,19 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int, pos string) error { return err } - vre.mu.Lock() - if !vre.isOpen { - vre.mu.Unlock() - return errors.New("vreplication engine is closed") + if err := func() error { + vre.mu.Lock() + defer vre.mu.Unlock() + if !vre.isOpen { + return errors.New("vreplication engine is closed") + } + + // Ensure that the engine won't be closed while this is running. + vre.wg.Add(1) + return nil + }(); err != nil { + return err } - // Ensure that the engine won't be closed while this is running. 
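The closure introduced in WaitForPos above is not just cosmetic: wg.Add has to run under the same mutex as the isOpen check, so that Close cannot slip in between the check and the Add, while the mutex itself must be released before the long wait begins. A reduced illustration with a hypothetical engine type:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type engine struct {
	mu     sync.Mutex
	isOpen bool
	wg     sync.WaitGroup
}

// waitForSomething registers itself with the WaitGroup while holding the
// mutex, then releases the mutex for the (potentially long) wait itself.
func (e *engine) waitForSomething() error {
	if err := func() error {
		e.mu.Lock()
		defer e.mu.Unlock()
		if !e.isOpen {
			return errors.New("engine is closed")
		}
		e.wg.Add(1) // registered under the lock, so close() will wait for us
		return nil
	}(); err != nil {
		return err
	}
	defer e.wg.Done()

	// ... the long-running wait happens here, outside the mutex ...
	return nil
}

func (e *engine) close() {
	e.mu.Lock()
	e.isOpen = false
	e.mu.Unlock()
	e.wg.Wait() // blocks until every in-flight waiter has called Done
}

func main() {
	e := &engine{isOpen: true}
	fmt.Println(e.waitForSomething()) // <nil>
	e.close()
	fmt.Println(e.waitForSomething()) // engine is closed
}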
- vre.wg.Add(1) - vre.mu.Unlock() defer vre.wg.Done() dbClient := vre.dbClientFactory() @@ -282,13 +303,13 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int, pos string) error { defer dbClient.Close() for { - qr, err := dbClient.ExecuteFetch(binlogplayer.ReadVReplicationPos(uint32(id)), 10) + qr, err := dbClient.ExecuteFetch(binlogplayer.ReadVReplicationStatus(uint32(id)), 10) switch { case err != nil: return err case len(qr.Rows) == 0: return fmt.Errorf("vreplication stream %d not found", id) - case len(qr.Rows) > 1 || len(qr.Rows[0]) != 1: + case len(qr.Rows) > 1 || len(qr.Rows[0]) != 3: return fmt.Errorf("unexpected result: %v", qr) } current, err := mysql.DecodePosition(qr.Rows[0][0].ToString()) @@ -300,6 +321,10 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int, pos string) error { return nil } + if qr.Rows[0][1].ToString() == binlogplayer.BlpStopped { + return fmt.Errorf("replication has stopped at %v before reaching position %v, message: %s", current, mPos, qr.Rows[0][2].ToString()) + } + select { case <-ctx.Done(): return ctx.Err() diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go index 24c2140e5d..eb106370b5 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go @@ -17,6 +17,7 @@ limitations under the License. package vreplication import ( + "fmt" "reflect" "testing" "time" @@ -26,23 +27,21 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) func TestEngineOpen(t *testing.T) { defer func() { globalStats = &vrStats{} }() - ts := createTopo() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) - _ = newFakeBinlogClient() + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + resetBinlogClient() dbClient := binlogplayer.NewMockDBClient(t) dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} // Test Insert - vre := NewEngine(ts, testCell, mysqld, dbClientFactory) + vre := NewEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory) if vre.IsOpen() { t.Errorf("IsOpen: %v, want false", vre.IsOpen()) } @@ -52,7 +51,7 @@ func TestEngineOpen(t *testing.T) { "id|state|source", "int64|varchar|varchar", ), - `1|Running|keyspace:"ks" shard:"0" key_range: `, + fmt.Sprintf(`1|Running|keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), ), nil) dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil) dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag from _vt.vreplication where id=1", testSettingsResponse, nil) @@ -82,16 +81,15 @@ func TestEngineOpen(t *testing.T) { func TestEngineExec(t *testing.T) { defer func() { globalStats = &vrStats{} }() - ts := createTopo() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) - _ = newFakeBinlogClient() + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + resetBinlogClient() dbClient := binlogplayer.NewMockDBClient(t) dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} // Test Insert - vre := NewEngine(ts, testCell, mysqld, dbClientFactory) + vre := NewEngine(env.TopoServ, env.Cells[0], mysqld, 
dbClientFactory) dbClient.ExpectRequest("select * from _vt.vreplication", &sqltypes.Result{}, nil) if err := vre.Open(context.Background()); err != nil { @@ -106,7 +104,7 @@ func TestEngineExec(t *testing.T) { "id|state|source", "int64|varchar|varchar", ), - `1|Running|keyspace:"ks" shard:"0" key_range: `, + fmt.Sprintf(`1|Running|keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), ), nil) dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil) dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag from _vt.vreplication where id=1", testSettingsResponse, nil) @@ -146,7 +144,7 @@ func TestEngineExec(t *testing.T) { "id|state|source", "int64|varchar|varchar", ), - `1|Running|keyspace:"ks" shard:"0" key_range: `, + fmt.Sprintf(`1|Running|keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), ), nil) dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil) dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag from _vt.vreplication where id=1", testSettingsResponse, nil) @@ -207,15 +205,14 @@ func TestEngineExec(t *testing.T) { func TestEngineBadInsert(t *testing.T) { defer func() { globalStats = &vrStats{} }() - ts := createTopo() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) - _ = newFakeBinlogClient() + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + resetBinlogClient() dbClient := binlogplayer.NewMockDBClient(t) dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} - vre := NewEngine(ts, testCell, mysqld, dbClientFactory) + vre := NewEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory) dbClient.ExpectRequest("select * from _vt.vreplication", &sqltypes.Result{}, nil) if err := vre.Open(context.Background()); err != nil { @@ -238,15 +235,14 @@ func TestEngineBadInsert(t *testing.T) { } func TestEngineSelect(t *testing.T) { - ts := createTopo() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) - _ = newFakeBinlogClient() + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + resetBinlogClient() dbClient := binlogplayer.NewMockDBClient(t) dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} - vre := NewEngine(ts, testCell, mysqld, dbClientFactory) + vre := NewEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory) dbClient.ExpectRequest("select * from _vt.vreplication", &sqltypes.Result{}, nil) if err := vre.Open(context.Background()); err != nil { @@ -261,7 +257,7 @@ func TestEngineSelect(t *testing.T) { "id|state|source|pos", "int64|varchar|varchar|varchar", ), - `1|Running|keyspace:"ks" shard:"0" key_range: |MariaDB/0-1-1083`, + fmt.Sprintf(`1|Running|keyspace:"%s" shard:"0" key_range: |MariaDB/0-1-1083`, env.KeyspaceName), ) dbClient.ExpectRequest(wantQuery, wantResult, nil) qr, err := vre.Exec(wantQuery) @@ -281,18 +277,22 @@ func TestWaitForPos(t *testing.T) { dbClient := binlogplayer.NewMockDBClient(t) mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} dbClientFactory := func() binlogplayer.DBClient { return dbClient } - vre := NewEngine(createTopo(), testCell, mysqld, dbClientFactory) + vre := NewEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory) dbClient.ExpectRequest("select * from _vt.vreplication", &sqltypes.Result{}, nil) if err 
:= vre.Open(context.Background()); err != nil { t.Fatal(err) } - dbClient.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ + dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), + sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(""), }}}, nil) - dbClient.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ + dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1084"), + sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(""), }}}, nil) start := time.Now() if err := vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084"); err != nil { @@ -307,7 +307,7 @@ func TestWaitForPosError(t *testing.T) { dbClient := binlogplayer.NewMockDBClient(t) mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} dbClientFactory := func() binlogplayer.DBClient { return dbClient } - vre := NewEngine(createTopo(), testCell, mysqld, dbClientFactory) + vre := NewEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory) err := vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") want := `vreplication engine is closed` @@ -326,14 +326,14 @@ func TestWaitForPosError(t *testing.T) { t.Errorf("WaitForPos: %v, want %v", err, want) } - dbClient.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{}}}, nil) + dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{}}}, nil) err = vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") want = "unexpected result: &{[] 0 0 [[]] }" if err == nil || err.Error() != want { t.Errorf("WaitForPos: %v, want %v", err, want) } - dbClient.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ + dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), }, { sqltypes.NewVarBinary("MariaDB/0-1-1083"), @@ -349,15 +349,17 @@ func TestWaitForPosCancel(t *testing.T) { dbClient := binlogplayer.NewMockDBClient(t) mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} dbClientFactory := func() binlogplayer.DBClient { return dbClient } - vre := NewEngine(createTopo(), testCell, mysqld, dbClientFactory) + vre := NewEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory) dbClient.ExpectRequest("select * from _vt.vreplication", &sqltypes.Result{}, nil) if err := vre.Open(context.Background()); err != nil { t.Fatal(err) } - dbClient.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ + dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), + sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(""), }}}, nil) ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -371,8 +373,10 @@ func TestWaitForPosCancel(t *testing.T) { time.Sleep(5 * time.Millisecond) vre.Close() }() - dbClient.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ + dbClient.ExpectRequest("select pos, state, message from 
_vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), + sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(""), }}}, nil) err = vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") want := "vreplication is closing: context canceled" @@ -384,35 +388,55 @@ func TestWaitForPosCancel(t *testing.T) { func TestCreateDBAndTable(t *testing.T) { defer func() { globalStats = &vrStats{} }() - ts := createTopo() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) - _ = newFakeBinlogClient() + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + resetBinlogClient() dbClient := binlogplayer.NewMockDBClient(t) dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: 3306} // Test Insert - vre := NewEngine(ts, testCell, mysqld, dbClientFactory) + vre := NewEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory) - notFound := mysql.SQLError{Num: 1146, Message: "not found"} - dbClient.ExpectRequest("select * from _vt.vreplication", nil, ¬Found) + tableNotFound := mysql.SQLError{Num: 1146, Message: "table not found"} + dbClient.ExpectRequest("select * from _vt.vreplication", nil, &tableNotFound) if err := vre.Open(context.Background()); err != nil { t.Fatal(err) } defer vre.Close() + // Missing db. Statement should get retried after creating everything. + dbNotFound := mysql.SQLError{Num: 1049, Message: "db not found"} + dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, &dbNotFound) + dbClient.ExpectRequest("CREATE DATABASE IF NOT EXISTS _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("DROP TABLE IF EXISTS _vt.blp_checkpoint", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + + // Non-recoverable error. + unrecoverableError := &mysql.SQLError{Num: 1234, Message: "random error"} + dbClient.ExpectRequest("select fail_query from _vt.vreplication", &sqltypes.Result{}, unrecoverableError) + + // Missing table. Statement should get retried after creating everything. + dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + dbClient.ExpectRequest("insert into _vt.vreplication values (null)", &sqltypes.Result{}, &tableNotFound) + + dbClient.ExpectRequest("CREATE DATABASE IF NOT EXISTS _vt", &sqltypes.Result{}, nil) + dbClient.ExpectRequest("DROP TABLE IF EXISTS _vt.blp_checkpoint", &sqltypes.Result{}, nil) + dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequest("insert into _vt.vreplication values (null)", &sqltypes.Result{InsertID: 1}, nil) + + // The rest of this test is normal with no db errors or extra queries. 
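For readers unfamiliar with the test style, each ExpectRequest call above appends to an ordered script of queries and canned responses that the code under test must consume exactly, which is how the create-and-retry sequence is pinned down. A stripped-down illustration of the pattern, using local stand-in types rather than the real binlogplayer.MockDBClient API:

package main

import "fmt"

// expectation pairs an expected query with the error to hand back.
type expectation struct {
	query string
	err   error
}

// scriptedClient records a failure if queries arrive out of order or unexpectedly.
type scriptedClient struct {
	script   []expectation
	failures []string
}

func (c *scriptedClient) ExpectRequest(query string, err error) {
	c.script = append(c.script, expectation{query: query, err: err})
}

func (c *scriptedClient) ExecuteFetch(query string) error {
	if len(c.script) == 0 {
		c.failures = append(c.failures, fmt.Sprintf("unexpected query: %s", query))
		return nil
	}
	exp := c.script[0]
	c.script = c.script[1:]
	if exp.query != query {
		c.failures = append(c.failures, fmt.Sprintf("got %q, want %q", query, exp.query))
	}
	return exp.err
}

func main() {
	c := &scriptedClient{}
	c.ExpectRequest("use _vt", nil)
	c.ExpectRequest("insert into _vt.vreplication values (null)", nil)
	_ = c.ExecuteFetch("use _vt")
	_ = c.ExecuteFetch("insert into _vt.vreplication values (null)")
	fmt.Println("failures:", c.failures)
}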
+ dbClient.ExpectRequest("select * from _vt.vreplication where id = 1", sqltypes.MakeTestResult( sqltypes.MakeTestFields( "id|state|source", "int64|varchar|varchar", ), - `1|Running|keyspace:"ks" shard:"0" key_range: `, + fmt.Sprintf(`1|Running|keyspace:"%s" shard:"0" key_range: `, env.KeyspaceName), ), nil) dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil) dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag from _vt.vreplication where id=1", testSettingsResponse, nil) @@ -421,6 +445,11 @@ func TestCreateDBAndTable(t *testing.T) { dbClient.ExpectRequestRE("update _vt.vreplication set pos='MariaDB/0-1-1235', time_updated=.*", testDMLResponse, nil) dbClient.ExpectRequest("commit", nil, nil) + _, err := vre.Exec("select fail_query from _vt.vreplication") + if err != unrecoverableError { + t.Errorf("Want: %v, Got: %v", unrecoverableError, err) + } + qr, err := vre.Exec("insert into _vt.vreplication values(null)") if err != nil { t.Fatal(err) diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index 559f285e1f..23c019bd63 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -18,58 +18,119 @@ package vreplication import ( "flag" + "fmt" + "os" "reflect" + "regexp" + "strings" "testing" + "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" + "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -const ( - testCell = "cell" - testKeyspace = "ks" - testShard = "0" +var ( + playerEngine *Engine + streamerEngine *vstreamer.Engine + env *testenv.Env + globalFBC = &fakeBinlogClient{} + vrepldb = "vrepl" + globalDBQueries = make(chan string, 1000) ) -// This file provides support functions for tests. -// It's capable of creating a single unsharded keyspace -// and allows you to add various tablet types. +func init() { + tabletconn.RegisterDialer("test", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + return &fakeTabletConn{ + QueryService: fakes.ErrorQueryService, + tablet: tablet, + }, nil + }) + flag.Set("tablet_protocol", "test") + + binlogplayer.RegisterClientFactory("test", func() binlogplayer.Client { return globalFBC }) + flag.Set("binlog_player_protocol", "test") +} + +func TestMain(m *testing.M) { + flag.Parse() // Do not remove this comment, import into google3 depends on it + + if testing.Short() { + os.Exit(m.Run()) + } + + exitCode := func() int { + var err error + env, err = testenv.Init() + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + defer env.Close() + + // engines cannot be initialized in testenv because it introduces + // circular dependencies. 
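The shape of TestMain here, computing the exit code inside a closure and only then calling os.Exit, matters because os.Exit terminates the process without running deferred cleanup such as the deferred env.Close. A minimal reminder of the pattern:

package main

import (
	"fmt"
	"os"
)

// run owns all the defers; by the time os.Exit is reached they have executed.
func run() int {
	cleanup := func() { fmt.Println("cleanup ran") }
	defer cleanup()

	// ... set up the environment and call m.Run() here ...
	return 0
}

func main() {
	os.Exit(run())
}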
+ streamerEngine = vstreamer.NewEngine(env.SrvTopo, env.SchemaEngine) + streamerEngine.InitDBConfig(env.Dbcfgs) + streamerEngine.Open(env.KeyspaceName, env.Cells[0]) + defer streamerEngine.Close() + + if err := env.Mysqld.ExecuteSuperQuery(context.Background(), fmt.Sprintf("create database %s", vrepldb)); err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + + if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "set @@global.innodb_lock_wait_timeout=1"); err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + + playerEngine = NewEngine(env.TopoServ, env.Cells[0], env.Mysqld, realDBClientFactory) + if err := playerEngine.Open(context.Background()); err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + defer playerEngine.Close() + + return m.Run() + }() + os.Exit(exitCode) +} + +func resetBinlogClient() { + globalFBC = &fakeBinlogClient{} +} //-------------------------------------- // Topos and tablets -func createTopo() *topo.Server { - ts := memorytopo.NewServer(testCell) - ctx := context.Background() - if err := ts.CreateKeyspace(ctx, testKeyspace, &topodatapb.Keyspace{}); err != nil { - panic(err) - } - if err := ts.CreateShard(ctx, testKeyspace, testShard); err != nil { - panic(err) - } - return ts -} - -func addTablet(ts *topo.Server, id int, shard string, tabletType topodatapb.TabletType, serving, healthy bool) *topodatapb.Tablet { +func addTablet(id int, shard string, tabletType topodatapb.TabletType, serving, healthy bool) *topodatapb.Tablet { t := newTablet(id, shard, tabletType, serving, healthy) - if err := ts.CreateTablet(context.Background(), t); err != nil { + if err := env.TopoServ.CreateTablet(context.Background(), t); err != nil { panic(err) } return t } +func deleteTablet(t *topodatapb.Tablet) { + env.TopoServ.DeleteTablet(context.Background(), t.Alias) +} + func newTablet(id int, shard string, tabletType topodatapb.TabletType, serving, healthy bool) *topodatapb.Tablet { stag := "not_serving" if serving { @@ -85,11 +146,11 @@ func newTablet(id int, shard string, tabletType topodatapb.TabletType, serving, } return &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ - Cell: testCell, + Cell: env.Cells[0], Uid: uint32(id), }, - Keyspace: testKeyspace, - Shard: testShard, + Keyspace: env.KeyspaceName, + Shard: env.ShardName, KeyRange: kr, Type: tabletType, Tags: map[string]string{ @@ -132,6 +193,11 @@ func (ftc *fakeTabletConn) StreamHealth(ctx context.Context, callback func(*quer return nil } +// VStream directly calls into the pre-initialized engine. 
+func (ftc *fakeTabletConn) VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { + return streamerEngine.Stream(ctx, startPos, filter, send) +} + //-------------------------------------- // Binlog Client to TabletManager @@ -145,11 +211,6 @@ type fakeBinlogClient struct { lastCharset *binlogdatapb.Charset } -func newFakeBinlogClient() *fakeBinlogClient { - globalFBC = &fakeBinlogClient{} - return globalFBC -} - func (fbc *fakeBinlogClient) Dial(tablet *topodatapb.Tablet) error { fbc.lastTablet = tablet return nil @@ -198,37 +259,138 @@ func (t *btStream) Recv() (*binlogdatapb.BinlogTransaction, error) { return nil, t.ctx.Err() } -func expectFBCRequest(t *testing.T, fbc *fakeBinlogClient, tablet *topodatapb.Tablet, pos string, tables []string, kr *topodatapb.KeyRange) { +func expectFBCRequest(t *testing.T, tablet *topodatapb.Tablet, pos string, tables []string, kr *topodatapb.KeyRange) { t.Helper() - if !proto.Equal(tablet, fbc.lastTablet) { - t.Errorf("Request tablet: %v, want %v", fbc.lastTablet, tablet) + if !proto.Equal(tablet, globalFBC.lastTablet) { + t.Errorf("Request tablet: %v, want %v", globalFBC.lastTablet, tablet) } - if pos != fbc.lastPos { - t.Errorf("Request pos: %v, want %v", fbc.lastPos, pos) + if pos != globalFBC.lastPos { + t.Errorf("Request pos: %v, want %v", globalFBC.lastPos, pos) } - if !reflect.DeepEqual(tables, fbc.lastTables) { - t.Errorf("Request tables: %v, want %v", fbc.lastTables, tables) + if !reflect.DeepEqual(tables, globalFBC.lastTables) { + t.Errorf("Request tables: %v, want %v", globalFBC.lastTables, tables) } - if !proto.Equal(kr, fbc.lastKeyRange) { - t.Errorf("Request KeyRange: %v, want %v", fbc.lastKeyRange, kr) + if !proto.Equal(kr, globalFBC.lastKeyRange) { + t.Errorf("Request KeyRange: %v, want %v", globalFBC.lastKeyRange, kr) } } //-------------------------------------- -// init +// DBCLient wrapper -// globalFBC is set by newFakeBinlogClient, which is then returned by the client factory below. 
-var globalFBC *fakeBinlogClient - -func init() { - tabletconn.RegisterDialer("test", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { - return &fakeTabletConn{ - QueryService: fakes.ErrorQueryService, - tablet: tablet, - }, nil - }) - flag.Set("tablet_protocol", "test") - - binlogplayer.RegisterClientFactory("test", func() binlogplayer.Client { return globalFBC }) - flag.Set("binlog_player_protocol", "test") +func realDBClientFactory() binlogplayer.DBClient { + return &realDBClient{} +} + +type realDBClient struct { + conn *mysql.Conn + nolog bool +} + +func (dbc *realDBClient) DBName() string { + return vrepldb +} + +func (dbc *realDBClient) Connect() error { + app := env.Dbcfgs.AppWithDB() + app.DbName = vrepldb + conn, err := mysql.Connect(context.Background(), app) + if err != nil { + return err + } + dbc.conn = conn + return nil +} + +func (dbc *realDBClient) Begin() error { + _, err := dbc.ExecuteFetch("begin", 10000) + return err +} + +func (dbc *realDBClient) Commit() error { + _, err := dbc.ExecuteFetch("commit", 10000) + return err +} + +func (dbc *realDBClient) Rollback() error { + _, err := dbc.ExecuteFetch("rollback", 10000) + return err +} + +func (dbc *realDBClient) Close() { + dbc.conn.Close() + dbc.conn = nil +} + +func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { + if strings.HasPrefix(query, "use") { + return nil, nil + } + qr, err := dbc.conn.ExecuteFetch(query, 10000, true) + if !strings.HasPrefix(query, "select") && !strings.HasPrefix(query, "set") && !dbc.nolog { + globalDBQueries <- query + } + return qr, err +} + +func expectDBClientQueries(t *testing.T, queries []string) { + t.Helper() + failed := false + for i, query := range queries { + if failed { + t.Errorf("no query received, expecting %s", query) + continue + } + var got string + select { + case got = <-globalDBQueries: + var match bool + if query[0] == '/' { + result, err := regexp.MatchString(query[1:], got) + if err != nil { + panic(err) + } + match = result + } else { + match = (got == query) + } + if !match { + t.Errorf("query:\n%q, does not match query %d:\n%q", got, i, query) + } + case <-time.After(5 * time.Second): + t.Errorf("no query received, expecting %s", query) + failed = true + } + } + for { + select { + case got := <-globalDBQueries: + t.Errorf("unexpected query: %s", got) + default: + return + } + } +} + +func expectData(t *testing.T, table string, values [][]string) { + t.Helper() + + qr, err := env.Mysqld.FetchSuperQuery(context.Background(), fmt.Sprintf("select * from %s.%s", vrepldb, table)) + if err != nil { + t.Error(err) + return + } + if len(values) != len(qr.Rows) { + t.Fatalf("row counts don't match: %v, want %v", qr.Rows, values) + } + for i, row := range values { + if len(row) != len(qr.Rows[i]) { + t.Fatalf("Too few columns, result: %v, row: %d, want: %v", qr.Rows[i], i, row) + } + for j, val := range row { + if got := qr.Rows[i][j].ToString(); got != val { + t.Errorf("Mismatch at (%d, %d): %v, want %s", i, j, qr.Rows[i][j], val) + } + } + } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/player_plan.go b/go/vt/vttablet/tabletmanager/vreplication/player_plan.go new file mode 100644 index 0000000000..77ecde44d5 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/player_plan.go @@ -0,0 +1,245 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "fmt" + "strings" + + "vitess.io/vitess/go/vt/sqlparser" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +// PlayerPlan is the execution plan for a player stream. +type PlayerPlan struct { + VStreamFilter *binlogdatapb.Filter + TablePlans map[string]*TablePlan +} + +// TablePlan is the execution plan for a table within a player stream. +// There are two incarnations of this per table. The first one is built +// while analyzing the initial stream request. A tentative plan is built +// without knowing the table info. The second incarnation is built when +// we receive the field info for a table. At that time, we copy the +// original TablePlan into a separate map and populate the Fields and +// PKCols members. +type TablePlan struct { + Name string + ColExprs []*ColExpr `json:",omitempty"` + OnInsert InsertType `json:",omitempty"` + + Fields []*querypb.Field `json:",omitempty"` + PKCols []*ColExpr `json:",omitempty"` +} + +func (tp *TablePlan) findCol(name sqlparser.ColIdent) *ColExpr { + for _, cExpr := range tp.ColExprs { + if cExpr.ColName.Equal(name) { + return cExpr + } + } + return nil +} + +// ColExpr describes the processing to be performed to +// compute the value of the target table column. +type ColExpr struct { + ColName sqlparser.ColIdent + ColNum int + Operation Operation `json:",omitempty"` + IsGrouped bool `json:",omitempty"` +} + +// Operation is the opcode for the ColExpr. +type Operation int + +// The following values are the various ColExpr opcodes. +const ( + OpNone = Operation(iota) + OpCount + OpSum +) + +// InsertType describes the type of insert statement to generate. +type InsertType int + +// The following values are the various insert types.
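To make the insert types concrete: buildTablePlan below marks a ColExpr as grouped when it appears in the GROUP BY clause, and the insert type then follows from whether every selected column is grouped. A compressed restatement of that rule with stand-in types (the real code works on *ColExpr and the InsertType constants declared just below):

package main

import "fmt"

type colExpr struct {
	name      string
	isGrouped bool
}

// onInsert mirrors the decision in buildTablePlan: with a GROUP BY present,
// a plan whose every column is grouped gets InsertIgnore, while any
// non-grouped column (typically an aggregate such as count or sum) switches
// it to InsertOndup.
func onInsert(hasGroupBy bool, cols []colExpr) string {
	if !hasGroupBy {
		return "InsertNormal"
	}
	for _, c := range cols {
		if !c.isGrouped {
			return "InsertOndup"
		}
	}
	return "InsertIgnore"
}

func main() {
	// "select c1, c2, c3 from t2 group by c3, c1": partial group by
	fmt.Println(onInsert(true, []colExpr{{"c1", true}, {"c2", false}, {"c3", true}}))
	// "select c1, c2, c3 from t2 group by c3, c1, c2": full group by
	fmt.Println(onInsert(true, []colExpr{{"c1", true}, {"c2", true}, {"c3", true}}))
}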
+const ( + InsertNormal = InsertType(iota) + InsertOndup + InsertIgnore +) + +func buildPlayerPlan(filter *binlogdatapb.Filter) (*PlayerPlan, error) { + plan := &PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: make([]*binlogdatapb.Rule, len(filter.Rules)), + }, + TablePlans: make(map[string]*TablePlan), + } + for i, rule := range filter.Rules { + if strings.HasPrefix(rule.Match, "/") { + plan.VStreamFilter.Rules[i] = rule + continue + } + sendRule, tplan, err := buildTablePlan(rule) + if err != nil { + return nil, err + } + plan.VStreamFilter.Rules[i] = sendRule + plan.TablePlans[sendRule.Match] = tplan + } + return plan, nil +} + +func buildTablePlan(rule *binlogdatapb.Rule) (*binlogdatapb.Rule, *TablePlan, error) { + statement, err := sqlparser.Parse(rule.Filter) + if err != nil { + return nil, nil, err + } + sel, ok := statement.(*sqlparser.Select) + if !ok { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(statement)) + } + if sel.Distinct != "" { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + if len(sel.From) > 1 { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + node, ok := sel.From[0].(*sqlparser.AliasedTableExpr) + if !ok { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + fromTable := sqlparser.GetTableName(node.Expr) + if fromTable.IsEmpty() { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + + if _, ok := sel.SelectExprs[0].(*sqlparser.StarExpr); ok { + if len(sel.SelectExprs) != 1 { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + sendRule := &binlogdatapb.Rule{ + Match: fromTable.String(), + Filter: rule.Filter, + } + return sendRule, &TablePlan{Name: rule.Match}, nil + } + + tplan := &TablePlan{ + Name: rule.Match, + } + sendSelect := &sqlparser.Select{ + From: sel.From, + Where: sel.Where, + } + for _, expr := range sel.SelectExprs { + selExpr, cExpr, err := analyzeExpr(expr) + if err != nil { + return nil, nil, err + } + if selExpr != nil { + sendSelect.SelectExprs = append(sendSelect.SelectExprs, selExpr) + cExpr.ColNum = len(sendSelect.SelectExprs) - 1 + } + tplan.ColExprs = append(tplan.ColExprs, cExpr) + } + + if sel.GroupBy != nil { + if err := analyzeGroupBy(sel.GroupBy, tplan); err != nil { + return nil, nil, err + } + tplan.OnInsert = InsertIgnore + for _, cExpr := range tplan.ColExprs { + if !cExpr.IsGrouped { + tplan.OnInsert = InsertOndup + break + } + } + } + sendRule := &binlogdatapb.Rule{ + Match: fromTable.String(), + Filter: sqlparser.String(sendSelect), + } + return sendRule, tplan, nil +} + +func analyzeExpr(selExpr sqlparser.SelectExpr) (sqlparser.SelectExpr, *ColExpr, error) { + aliased, ok := selExpr.(*sqlparser.AliasedExpr) + if !ok { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(selExpr)) + } + as := aliased.As + if as.IsEmpty() { + as = sqlparser.NewColIdent(sqlparser.String(aliased.Expr)) + } + switch expr := aliased.Expr.(type) { + case *sqlparser.ColName: + return selExpr, &ColExpr{ColName: as}, nil + case *sqlparser.FuncExpr: + if expr.Distinct || len(expr.Exprs) != 1 { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } + if aliased.As.IsEmpty() { + return nil, nil, fmt.Errorf("expression needs an alias: %v", sqlparser.String(expr)) + } + switch fname := expr.Name.Lowered(); fname { + case "month", "day", "hour": + return selExpr, &ColExpr{ColName: as}, nil + case "count": + if _, ok := expr.Exprs[0].(*sqlparser.StarExpr); !ok { + return 
nil, nil, fmt.Errorf("only count(*) is supported: %v", sqlparser.String(expr)) + } + return nil, &ColExpr{ColName: as, Operation: OpCount}, nil + case "sum": + aInner, ok := expr.Exprs[0].(*sqlparser.AliasedExpr) + if !ok { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } + innerCol, ok := aInner.Expr.(*sqlparser.ColName) + if !ok { + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } + return &sqlparser.AliasedExpr{Expr: innerCol}, &ColExpr{ColName: as, Operation: OpSum}, nil + default: + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } + default: + return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } +} + +func analyzeGroupBy(groupBy sqlparser.GroupBy, tplan *TablePlan) error { + for _, expr := range groupBy { + colname, ok := expr.(*sqlparser.ColName) + if !ok { + return fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } + cExpr := tplan.findCol(colname.Name) + if cExpr == nil { + return fmt.Errorf("group by expression does not reference an alias in the select list: %v", sqlparser.String(expr)) + } + if cExpr.Operation != OpNone { + return fmt.Errorf("group by expression is not allowed to reference an aggregate expression: %v", sqlparser.String(expr)) + } + cExpr.IsGrouped = true + } + return nil +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/player_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/player_plan_test.go new file mode 100644 index 0000000000..bf5066002f --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/player_plan_test.go @@ -0,0 +1,435 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "encoding/json" + "testing" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/sqlparser" +) + +func TestPlayerPlan(t *testing.T) { + testcases := []struct { + input *binlogdatapb.Filter + plan *PlayerPlan + err string + }{{ + // Regular expression + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + }, + plan: &PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + }, + TablePlans: map[string]*TablePlan{}, + }, + }, { + // '*' expression + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t2", + }}, + }, + plan: &PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select * from t2", + }}, + }, + TablePlans: map[string]*TablePlan{ + "t2": { + Name: "t1", + }, + }, + }, + }, { + // Explicit columns + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select c1, c2 from t2", + }}, + }, + plan: &PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select c1, c2 from t2", + }}, + }, + TablePlans: map[string]*TablePlan{ + "t2": { + Name: "t1", + ColExprs: []*ColExpr{{ + ColName: sqlparser.NewColIdent("c1"), + ColNum: 0, + }, { + ColName: sqlparser.NewColIdent("c2"), + ColNum: 1, + }}, + }, + }, + }, + }, { + // func expr + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select hour(c1) as hc1, day(c2) as dc2 from t2", + }}, + }, + plan: &PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select hour(c1) as hc1, day(c2) as dc2 from t2", + }}, + }, + TablePlans: map[string]*TablePlan{ + "t2": { + Name: "t1", + ColExprs: []*ColExpr{{ + ColName: sqlparser.NewColIdent("hc1"), + ColNum: 0, + }, { + ColName: sqlparser.NewColIdent("dc2"), + ColNum: 1, + }}, + }, + }, + }, + }, { + // count expr + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select hour(c1) as hc1, count(*) as c, day(c2) as dc2 from t2", + }}, + }, + plan: &PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select hour(c1) as hc1, day(c2) as dc2 from t2", + }}, + }, + TablePlans: map[string]*TablePlan{ + "t2": { + Name: "t1", + ColExprs: []*ColExpr{{ + ColName: sqlparser.NewColIdent("hc1"), + ColNum: 0, + }, { + ColName: sqlparser.NewColIdent("c"), + Operation: OpCount, + }, { + ColName: sqlparser.NewColIdent("dc2"), + ColNum: 1, + }}, + }, + }, + }, + }, { + // sum expr + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select hour(c1) as hc1, sum(c3) as s, day(c2) as dc2 from t2", + }}, + }, + plan: &PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select hour(c1) as hc1, c3, day(c2) as dc2 from t2", + }}, + }, + TablePlans: map[string]*TablePlan{ + "t2": { + Name: "t1", + ColExprs: []*ColExpr{{ + ColName: sqlparser.NewColIdent("hc1"), + ColNum: 0, + }, { + ColName: sqlparser.NewColIdent("s"), + ColNum: 1, + Operation: OpSum, + }, { + ColName: sqlparser.NewColIdent("dc2"), + ColNum: 2, + }}, + }, + }, + }, + }, { + // partial group by + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select c1, c2, c3 from t2 group by c3, c1", + }}, + }, + plan: 
&PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select c1, c2, c3 from t2", + }}, + }, + TablePlans: map[string]*TablePlan{ + "t2": { + Name: "t1", + ColExprs: []*ColExpr{{ + ColName: sqlparser.NewColIdent("c1"), + ColNum: 0, + IsGrouped: true, + }, { + ColName: sqlparser.NewColIdent("c2"), + ColNum: 1, + }, { + ColName: sqlparser.NewColIdent("c3"), + ColNum: 2, + IsGrouped: true, + }}, + OnInsert: InsertOndup, + }, + }, + }, + }, { + // full group by + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select c1, c2, c3 from t2 group by c3, c1, c2", + }}, + }, + plan: &PlayerPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select c1, c2, c3 from t2", + }}, + }, + TablePlans: map[string]*TablePlan{ + "t2": { + Name: "t1", + ColExprs: []*ColExpr{{ + ColName: sqlparser.NewColIdent("c1"), + ColNum: 0, + IsGrouped: true, + }, { + ColName: sqlparser.NewColIdent("c2"), + ColNum: 1, + IsGrouped: true, + }, { + ColName: sqlparser.NewColIdent("c3"), + ColNum: 2, + IsGrouped: true, + }}, + OnInsert: InsertIgnore, + }, + }, + }, + }, { + // syntax error + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "bad query", + }}, + }, + err: "syntax error at position 4 near 'bad'", + }, { + // not a select + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "update t1 set val=1", + }}, + }, + err: "unexpected: update t1 set val = 1", + }, { + // no distinct + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select distinct c1 from t1", + }}, + }, + err: "unexpected: select distinct c1 from t1", + }, { + // no ',' join + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1, t2", + }}, + }, + err: "unexpected: select * from t1, t2", + }, { + // no join + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 join t2", + }}, + }, + err: "unexpected: select * from t1 join t2", + }, { + // no subqueries + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from (select * from t2) as a", + }}, + }, + err: "unexpected: select * from (select * from t2) as a", + }, { + // cannot combine '*' with other + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select *, c1 from t1", + }}, + }, + err: "unexpected: select *, c1 from t1", + }, { + // cannot combine '*' with other (different code path) + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select c1, * from t1", + }}, + }, + err: "unexpected: *", + }, { + // no distinct in func + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select hour(distinct c1) from t1", + }}, + }, + err: "unexpected: hour(distinct c1)", + }, { + // funcs need alias + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select hour(c1) from t1", + }}, + }, + err: "expression needs an alias: hour(c1)", + }, { + // only count(*) + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select count(c1) as c from t1", + }}, + }, + err: "only count(*) is supported: count(c1)", + }, { + // no sum(*) + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select sum(*) as c from t1", + }}, + 
}, + err: "unexpected: sum(*)", + }, { + // no complex expr in sum + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select sum(a + b) as c from t1", + }}, + }, + err: "unexpected: sum(a + b)", + }, { + // unsupported func + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select foo(a) as c from t1", + }}, + }, + err: "unexpected: foo(a)", + }, { + // no complex expr in select + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select a + b from t1", + }}, + }, + err: "unexpected: a + b", + }, { + // no complex expr in group by + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select a from t1 group by a + 1", + }}, + }, + err: "unexpected: a + 1", + }, { + // group by does not reference alias + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select a as b from t1 group by a", + }}, + }, + err: "group by expression does not reference an alias in the select list: a", + }, { + // cannot group by aggr + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select count(*) as a from t1 group by a", + }}, + }, + err: "group by expression is not allowed to reference an aggregate expression: a", + }} + + for _, tcase := range testcases { + plan, err := buildPlayerPlan(tcase.input) + gotPlan, _ := json.Marshal(plan) + wantPlan, _ := json.Marshal(tcase.plan) + if string(gotPlan) != string(wantPlan) { + t.Errorf("Filter(%v):\n%s, want\n%s", tcase.input, gotPlan, wantPlan) + } + gotErr := "" + if err != nil { + gotErr = err.Error() + } + if gotErr != tcase.err { + t.Errorf("Filter err(%v): %s, want %v", tcase.input, gotErr, tcase.err) + } + } +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/relaylog.go b/go/vt/vttablet/tabletmanager/vreplication/relaylog.go new file mode 100644 index 0000000000..f0714390d2 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/relaylog.go @@ -0,0 +1,154 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "io" + "sync" + "time" + + "golang.org/x/net/context" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +type relayLog struct { + ctx context.Context + maxItems int + maxSize int + + // mu controls all variables below and is shared by canAccept and hasItems. + // Broadcasting must be done while holding mu. This is mainly necessary because both + // conditions depend on ctx.Done(), which can change state asynchronously. + mu sync.Mutex + curSize int + items [][]*binlogdatapb.VEvent + timedout bool + err error + // canAccept is true if: curSize<=maxSize, len(items)0, ctx is not Done, and interuptFetch is false. 
+ hasItems sync.Cond +} + +func newRelayLog(ctx context.Context, maxItems, maxSize int) *relayLog { + rl := &relayLog{ + ctx: ctx, + maxItems: maxItems, + maxSize: maxSize, + } + rl.canAccept.L = &rl.mu + rl.hasItems.L = &rl.mu + + // Any time context is done, wake up all waiters to make them exit. + go func() { + <-ctx.Done() + rl.mu.Lock() + defer rl.mu.Unlock() + rl.canAccept.Broadcast() + rl.hasItems.Broadcast() + }() + return rl +} + +func (rl *relayLog) Send(events []*binlogdatapb.VEvent) error { + rl.mu.Lock() + defer rl.mu.Unlock() + + if err := rl.checkDone(); err != nil { + return err + } + for rl.curSize > rl.maxSize || len(rl.items) >= rl.maxItems { + rl.canAccept.Wait() + if err := rl.checkDone(); err != nil { + return err + } + } + rl.items = append(rl.items, events) + rl.curSize += eventsSize(events) + rl.hasItems.Broadcast() + return nil +} + +func (rl *relayLog) Fetch() ([][]*binlogdatapb.VEvent, error) { + rl.mu.Lock() + defer rl.mu.Unlock() + + if err := rl.checkDone(); err != nil { + return nil, err + } + cancelTimer := rl.startTimer() + defer cancelTimer() + for len(rl.items) == 0 && !rl.timedout { + rl.hasItems.Wait() + if err := rl.checkDone(); err != nil { + return nil, err + } + } + rl.timedout = false + items := rl.items + rl.items = nil + rl.curSize = 0 + rl.canAccept.Broadcast() + return items, nil +} + +func (rl *relayLog) checkDone() error { + select { + case <-rl.ctx.Done(): + return io.EOF + default: + } + return nil +} + +func (rl *relayLog) startTimer() (cancel func()) { + timer := time.NewTimer(idleTimeout) + timerDone := make(chan struct{}) + go func() { + select { + case <-timer.C: + rl.mu.Lock() + defer rl.mu.Unlock() + rl.timedout = true + rl.hasItems.Broadcast() + case <-timerDone: + } + }() + return func() { + timer.Stop() + close(timerDone) + } +} + +func eventsSize(events []*binlogdatapb.VEvent) int { + size := 0 + for _, event := range events { + if event.Type != binlogdatapb.VEventType_ROW { + continue + } + for _, rowChange := range event.RowEvent.RowChanges { + if rowChange.Before != nil { + size += len(rowChange.Before.Values) + } + if rowChange.After != nil { + size += len(rowChange.After.Values) + } + } + } + return size +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/retryable_client.go b/go/vt/vttablet/tabletmanager/vreplication/retryable_client.go new file mode 100644 index 0000000000..f5e8eaa4ef --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/retryable_client.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" +) + +// retryableClient is a wrapper on binlogplayer.DBClient. +// It allows us to retry a failed transactions on lock errors. 
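+//
+// A typical (illustrative) sequence, assuming a connected binlogplayer.DBClient named conn:
+//
+//	rt := &retryableClient{DBClient: conn}
+//	_ = rt.Begin()
+//	_, _ = rt.ExecuteFetch("insert into t1 set id=1, val='aaa'", 0)
+//	// On a deadlock or lock-wait-timeout error, the caller rolls back and then
+//	// replays the buffered statements before continuing:
+//	_ = rt.Rollback()
+//	_ = rt.Retry()
+//	_ = rt.Commit()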
+type retryableClient struct { + binlogplayer.DBClient + InTransaction bool + queries []string +} + +func (rt *retryableClient) Begin() error { + if rt.InTransaction { + return nil + } + if err := rt.DBClient.Begin(); err != nil { + return err + } + rt.queries = append(rt.queries, "begin") + rt.InTransaction = true + return nil +} + +func (rt *retryableClient) Commit() error { + if err := rt.DBClient.Commit(); err != nil { + return err + } + rt.InTransaction = false + rt.queries = nil + return nil +} + +func (rt *retryableClient) Rollback() error { + if err := rt.DBClient.Rollback(); err != nil { + return err + } + rt.InTransaction = false + // Don't reset queries to allow for vplayer to retry. + return nil +} + +func (rt *retryableClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { + if !rt.InTransaction { + rt.queries = []string{query} + } else { + rt.queries = append(rt.queries, query) + } + return rt.DBClient.ExecuteFetch(query, maxrows) +} + +func (rt *retryableClient) Retry() error { + for _, q := range rt.queries { + if q == "begin" { + if err := rt.Begin(); err != nil { + return err + } + continue + } + if _, err := rt.DBClient.ExecuteFetch(q, 10000); err != nil { + return err + } + } + return nil +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/tablet_picker_test.go b/go/vt/vttablet/tabletmanager/vreplication/tablet_picker_test.go index 3bbec75cb5..e3e4daf5e7 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/tablet_picker_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/tablet_picker_test.go @@ -17,6 +17,7 @@ limitations under the License. package vreplication import ( + "fmt" "testing" "github.com/golang/protobuf/proto" @@ -26,11 +27,10 @@ import ( ) func TestPickSimple(t *testing.T) { - ts := createTopo() - defer ts.Close() - want := addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) + want := addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true) + defer deleteTablet(want) - tp, err := newTabletPicker(ts, testCell, testKeyspace, testShard, "replica") + tp, err := newTabletPicker(env.TopoServ, env.Cells[0], env.KeyspaceName, env.ShardName, "replica") if err != nil { t.Fatal(err) } @@ -46,12 +46,12 @@ func TestPickSimple(t *testing.T) { } func TestPickFromTwoHealthy(t *testing.T) { - ts := createTopo() - defer ts.Close() - want1 := addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, true, true) - want2 := addTablet(ts, 101, "0", topodatapb.TabletType_RDONLY, true, true) + want1 := addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true) + defer deleteTablet(want1) + want2 := addTablet(101, "0", topodatapb.TabletType_RDONLY, true, true) + defer deleteTablet(want2) - tp, err := newTabletPicker(ts, testCell, testKeyspace, testShard, "replica,rdonly") + tp, err := newTabletPicker(env.TopoServ, env.Cells[0], env.KeyspaceName, env.ShardName, "replica,rdonly") if err != nil { t.Fatal(err) } @@ -65,7 +65,7 @@ func TestPickFromTwoHealthy(t *testing.T) { t.Errorf("Pick:\n%v, want\n%v", tablet, want1) } - tp, err = newTabletPicker(ts, testCell, testKeyspace, testShard, "rdonly,replica") + tp, err = newTabletPicker(env.TopoServ, env.Cells[0], env.KeyspaceName, env.ShardName, "rdonly,replica") if err != nil { t.Fatal(err) } @@ -81,12 +81,11 @@ func TestPickFromTwoHealthy(t *testing.T) { } func TestPickFromSomeUnhealthy(t *testing.T) { - ts := createTopo() - defer ts.Close() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, false, false) - want := addTablet(ts, 101, "0", topodatapb.TabletType_RDONLY, 
false, true) + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, false, false)) + want := addTablet(101, "0", topodatapb.TabletType_RDONLY, false, true) + defer deleteTablet(want) - tp, err := newTabletPicker(ts, testCell, testKeyspace, testShard, "replica,rdonly") + tp, err := newTabletPicker(env.TopoServ, env.Cells[0], env.KeyspaceName, env.ShardName, "replica,rdonly") if err != nil { t.Fatal(err) } @@ -102,24 +101,22 @@ func TestPickFromSomeUnhealthy(t *testing.T) { } func TestPickError(t *testing.T) { - ts := createTopo() - defer ts.Close() - _ = addTablet(ts, 100, "0", topodatapb.TabletType_REPLICA, false, false) + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, false, false)) - _, err := newTabletPicker(ts, testCell, testKeyspace, testShard, "badtype") + _, err := newTabletPicker(env.TopoServ, env.Cells[0], env.KeyspaceName, env.ShardName, "badtype") want := "failed to parse list of tablet types: badtype" if err == nil || err.Error() != want { t.Errorf("newTabletPicker err: %v, want %v", err, want) } - tp, err := newTabletPicker(ts, testCell, testKeyspace, testShard, "replica,rdonly") + tp, err := newTabletPicker(env.TopoServ, env.Cells[0], env.KeyspaceName, env.ShardName, "replica,rdonly") if err != nil { t.Fatal(err) } defer tp.Close() _, err = tp.Pick(context.Background()) - want = "can't find any healthy source tablet for ks 0 [REPLICA RDONLY]" + want = fmt.Sprintf("can't find any healthy source tablet for %s 0 [REPLICA RDONLY]", env.KeyspaceName) if err == nil || err.Error() != want { t.Errorf("Pick err: %v, want %v", err, want) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go new file mode 100644 index 0000000000..fd4bc4746e --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -0,0 +1,641 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "errors" + "fmt" + "io" + "time" + + "golang.org/x/net/context" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var ( + idleTimeout = 1 * time.Second + dbLockRetryDelay = 1 * time.Second + relayLogMaxSize = 10000 + relayLogMaxItems = 1000 +) + +type vplayer struct { + id uint32 + source *binlogdatapb.BinlogSource + sourceTablet *topodatapb.Tablet + stats *binlogplayer.Stats + dbClient *retryableClient + // mysqld is used to fetch the local schema. + mysqld mysqlctl.MysqlDaemon + + pos mysql.Position + // unsavedGTID when we receive a GTID event and reset + // if it gets saved. 
If Fetch returns on idleTimeout, + // we save the last unsavedGTID. + unsavedGTID *binlogdatapb.VEvent + // timeLastSaved is set every time a GTID is saved. + timeLastSaved time.Time + stopPos mysql.Position + + // pplan is built based on the source Filter at the beginning. + pplan *PlayerPlan + // tplans[table] is built for each table based on pplan and schema info + // about the table. + tplans map[string]*TablePlan +} + +func newVPlayer(id uint32, source *binlogdatapb.BinlogSource, sourceTablet *topodatapb.Tablet, stats *binlogplayer.Stats, dbClient binlogplayer.DBClient, mysqld mysqlctl.MysqlDaemon) *vplayer { + return &vplayer{ + id: id, + source: source, + sourceTablet: sourceTablet, + stats: stats, + dbClient: &retryableClient{DBClient: dbClient}, + mysqld: mysqld, + timeLastSaved: time.Now(), + tplans: make(map[string]*TablePlan), + } +} + +func (vp *vplayer) Play(ctx context.Context) error { + if err := vp.setState(binlogplayer.BlpRunning, ""); err != nil { + return err + } + if err := vp.play(ctx); err != nil { + msg := err.Error() + vp.stats.History.Add(&binlogplayer.StatsHistoryRecord{ + Time: time.Now(), + Message: msg, + }) + if err := vp.setState(binlogplayer.BlpError, msg); err != nil { + return err + } + return err + } + return nil +} + +func (vp *vplayer) play(ctx context.Context) error { + startPos, stopPos, _, _, err := binlogplayer.ReadVRSettings(vp.dbClient, vp.id) + if err != nil { + return vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("error reading VReplication settings: %v", err)) + } + vp.pos, err = mysql.DecodePosition(startPos) + if err != nil { + return vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("error decoding start position %v: %v", startPos, err)) + } + if stopPos != "" { + vp.stopPos, err = mysql.DecodePosition(stopPos) + if err != nil { + return vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("error decoding stop position %v: %v", stopPos, err)) + } + } + if !vp.stopPos.IsZero() { + if vp.pos.AtLeast(vp.stopPos) { + return vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stop position %v already reached: %v", vp.pos, vp.stopPos)) + } + } + log.Infof("Starting VReplication player id: %v, startPos: %v, stop: %v, source: %v, filter: %v", vp.id, startPos, vp.stopPos, vp.sourceTablet, vp.source) + + plan, err := buildPlayerPlan(vp.source.Filter) + if err != nil { + return err + } + vp.pplan = plan + + vsClient, err := tabletconn.GetDialer()(vp.sourceTablet, grpcclient.FailFast(false)) + if err != nil { + return fmt.Errorf("error dialing tablet: %v", err) + } + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + relay := newRelayLog(ctx, relayLogMaxItems, relayLogMaxSize) + + target := &querypb.Target{ + Keyspace: vp.sourceTablet.Keyspace, + Shard: vp.sourceTablet.Shard, + TabletType: vp.sourceTablet.Type, + } + log.Infof("Sending vstream command: %v", plan.VStreamFilter) + streamErr := make(chan error, 1) + go func() { + streamErr <- vsClient.VStream(ctx, target, startPos, plan.VStreamFilter, func(events []*binlogdatapb.VEvent) error { + return relay.Send(events) + }) + }() + + applyErr := make(chan error, 1) + go func() { + applyErr <- vp.applyEvents(ctx, relay) + }() + + select { + case err := <-applyErr: + defer func() { + // cancel and wait for the other thread to finish. + cancel() + <-streamErr + }() + // If the apply thread ends with io.EOF, it means either the Engine + // is shutting down and canceled the context, or stop position was reached. + // If so, we return nil which will cause the controller to not retry. 
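+		// (applyEvent signals this by returning io.EOF when the stop position is reached
+		// or when an OnDDLAction_STOP DDL is handled, so io.EOF here is the normal
+		// completion path rather than a failure.)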
+ if err == io.EOF { + return nil + } + return err + case err := <-streamErr: + defer func() { + // cancel and wait for the other thread to finish. + cancel() + <-applyErr + }() + // If context is done, don't return an error. + select { + case <-ctx.Done(): + return nil + default: + } + // If the stream ends normally we have to return an error indicating + // that the controller has to retry a different vttablet. + if err == nil || err == io.EOF { + return errors.New("vstream ended") + } + return err + } +} + +func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { + for { + items, err := relay.Fetch() + if err != nil { + return err + } + // Filtered replication often ends up receiving a large number of empty transactions. + // This is required because the player needs to know the latest position of the source. + // This allows it to stop at that position if requested. + // This position also needs to be saved, which will allow an external request + // to check if a required position has been reached. + // However, this leads to a large number of empty commits which not only slow + // down the replay, but also generate binlog bloat on the target. + // In order to mitigate this problem, empty transactions are saved at most + // once every idleTimeout. + // This covers two situations: + // 1. Fetch was idle for idleTimeout. + // 2. We've been receiving empty events for longer than idleTimeout. + // In both cases, now > timeLastSaved. If so, any unsaved GTID should be saved. + if time.Now().Sub(vp.timeLastSaved) >= idleTimeout && vp.unsavedGTID != nil { + // Although unlikely, we should not save if a transaction is still open. + // This can happen if a large transaction is split as multiple events. + if !vp.dbClient.InTransaction { + if err := vp.updatePos(vp.unsavedGTID.Timestamp); err != nil { + return err + } + } + } + for i, events := range items { + for j, event := range events { + mustSave := false + switch event.Type { + case binlogdatapb.VEventType_COMMIT: + if vp.pos.Equal(vp.stopPos) { + mustSave = true + break + } + if hasAnotherCommit(items, i, j+1) { + continue + } + } + if err := vp.applyEvent(ctx, event, mustSave); err != nil { + return err + } + } + } + } +} + +func hasAnotherCommit(items [][]*binlogdatapb.VEvent, i, j int) bool { + for i < len(items) { + for j < len(items[i]) { + // We ignore GTID, BEGIN, FIELD and ROW. + switch items[i][j].Type { + case binlogdatapb.VEventType_COMMIT: + return true + case binlogdatapb.VEventType_DDL: + return false + } + j++ + } + j = 0 + i++ + } + return false +} + +func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, mustSave bool) error { + switch event.Type { + case binlogdatapb.VEventType_GTID: + pos, err := mysql.DecodePosition(event.Gtid) + if err != nil { + return err + } + vp.pos = pos + vp.unsavedGTID = event + if vp.stopPos.IsZero() { + return nil + } + if !vp.pos.Equal(vp.stopPos) && vp.pos.AtLeast(vp.stopPos) { + // Code is unreachable, but bad data can cause this to happen. + if err := vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("next event position %v exceeds stop pos %v, exiting without applying", vp.pos, vp.stopPos)); err != nil { + return err + } + return io.EOF + } + case binlogdatapb.VEventType_BEGIN: + // No-op: begin is called as needed. 
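+		// (The FIELD and ROW cases below call dbClient.Begin() themselves, and
+		// retryableClient.Begin is a no-op when a transaction is already open; an empty
+		// GTID/BEGIN/COMMIT sequence therefore never opens a real transaction and is only
+		// recorded through updatePos.)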
+ case binlogdatapb.VEventType_COMMIT: + if mustSave { + if err := vp.dbClient.Begin(); err != nil { + return err + } + } + + if !vp.dbClient.InTransaction { + return nil + } + if err := vp.updatePos(event.Timestamp); err != nil { + return err + } + posReached := !vp.stopPos.IsZero() && vp.pos.Equal(vp.stopPos) + if posReached { + if err := vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { + return err + } + } + if err := vp.dbClient.Commit(); err != nil { + return err + } + if posReached { + return io.EOF + } + case binlogdatapb.VEventType_FIELD: + if err := vp.dbClient.Begin(); err != nil { + return err + } + if err := vp.updatePlan(event.FieldEvent); err != nil { + return err + } + case binlogdatapb.VEventType_ROW: + if err := vp.dbClient.Begin(); err != nil { + return err + } + if err := vp.applyRowEvent(ctx, event.RowEvent); err != nil { + return err + } + case binlogdatapb.VEventType_DDL: + if vp.dbClient.InTransaction { + return fmt.Errorf("unexpected state: DDL encountered in the middle of a transaction: %v", event.Ddl) + } + switch vp.source.OnDdl { + case binlogdatapb.OnDDLAction_IGNORE: + // no-op + case binlogdatapb.OnDDLAction_STOP: + if err := vp.dbClient.Begin(); err != nil { + return err + } + if err := vp.updatePos(event.Timestamp); err != nil { + return err + } + if err := vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at DDL %s", event.Ddl)); err != nil { + return err + } + if err := vp.dbClient.Commit(); err != nil { + return err + } + return io.EOF + case binlogdatapb.OnDDLAction_EXEC: + if err := vp.exec(ctx, event.Ddl); err != nil { + return err + } + if err := vp.updatePos(event.Timestamp); err != nil { + return err + } + case binlogdatapb.OnDDLAction_EXEC_IGNORE: + if err := vp.exec(ctx, event.Ddl); err != nil { + log.Infof("Ignoring error: %v for DDL: %s", err, event.Ddl) + } + if err := vp.updatePos(event.Timestamp); err != nil { + return err + } + } + } + return nil +} + +func (vp *vplayer) setState(state, message string) error { + return binlogplayer.SetVReplicationState(vp.dbClient, vp.id, state, message) +} + +func (vp *vplayer) updatePlan(fieldEvent *binlogdatapb.FieldEvent) error { + prelim := vp.pplan.TablePlans[fieldEvent.TableName] + tplan := &TablePlan{ + Name: fieldEvent.TableName, + } + if prelim != nil { + *tplan = *prelim + } + tplan.Fields = fieldEvent.Fields + + if tplan.ColExprs == nil { + tplan.ColExprs = make([]*ColExpr, len(tplan.Fields)) + for i, field := range tplan.Fields { + tplan.ColExprs[i] = &ColExpr{ + ColName: sqlparser.NewColIdent(field.Name), + ColNum: i, + } + } + } else { + for _, cExpr := range tplan.ColExprs { + if cExpr.ColNum >= len(tplan.Fields) { + // Unreachable code. + return fmt.Errorf("columns received from vreplication: %v, do not match expected: %v", tplan.Fields, tplan.ColExprs) + } + } + } + + pkcols, err := vp.mysqld.GetPrimaryKeyColumns(vp.dbClient.DBName(), tplan.Name) + if err != nil { + return fmt.Errorf("error fetching pk columns for %s: %v", tplan.Name, err) + } + if len(pkcols) == 0 { + // If the table doesn't have a PK, then we treat all columns as PK. 
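+		// (Every selected column then ends up in PKCols, so generated UPDATE and DELETE
+		// statements match on all columns, e.g. roughly
+		//   delete from nopk where id=1 and val='bbb'
+		// as exercised by the nopk cases in vplayer_test.go.)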
+ pkcols, err = vp.mysqld.GetColumns(vp.dbClient.DBName(), tplan.Name) + if err != nil { + return fmt.Errorf("error fetching pk columns for %s: %v", tplan.Name, err) + } + } + for _, pkcol := range pkcols { + found := false + for i, cExpr := range tplan.ColExprs { + if cExpr.ColName.EqualString(pkcol) { + found = true + tplan.PKCols = append(tplan.PKCols, &ColExpr{ + ColName: cExpr.ColName, + ColNum: i, + }) + break + } + } + if !found { + return fmt.Errorf("primary key column %s missing from select list for table %s", pkcol, tplan.Name) + } + } + vp.tplans[fieldEvent.TableName] = tplan + return nil +} + +func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.RowEvent) error { + tplan := vp.tplans[rowEvent.TableName] + if tplan == nil { + return fmt.Errorf("unexpected event on table %s", rowEvent.TableName) + } + for _, change := range rowEvent.RowChanges { + if err := vp.applyRowChange(ctx, tplan, change); err != nil { + return err + } + } + return nil +} + +func (vp *vplayer) applyRowChange(ctx context.Context, tplan *TablePlan, rowChange *binlogdatapb.RowChange) error { + // MakeRowTrusted is needed here because because Proto3ToResult is not convenient. + var before, after []sqltypes.Value + if rowChange.Before != nil { + before = sqltypes.MakeRowTrusted(tplan.Fields, rowChange.Before) + } + if rowChange.After != nil { + after = sqltypes.MakeRowTrusted(tplan.Fields, rowChange.After) + } + var query string + switch { + case before == nil && after != nil: + query = vp.generateInsert(tplan, after) + case before != nil && after != nil: + query = vp.generateUpdate(tplan, before, after) + case before != nil && after == nil: + query = vp.generateDelete(tplan, before) + case before == nil && after == nil: + // unreachable + } + if query == "" { + return nil + } + return vp.exec(ctx, query) +} + +func (vp *vplayer) generateInsert(tplan *TablePlan, after []sqltypes.Value) string { + sql := sqlparser.NewTrackedBuffer(nil) + if tplan.OnInsert == InsertIgnore { + sql.Myprintf("insert ignore into %v set ", sqlparser.NewTableIdent(tplan.Name)) + } else { + sql.Myprintf("insert into %v set ", sqlparser.NewTableIdent(tplan.Name)) + } + vp.writeInsertValues(sql, tplan, after) + if tplan.OnInsert == InsertOndup { + sql.Myprintf(" on duplicate key update ") + _ = vp.writeUpdateValues(sql, tplan, nil, after) + } + return sql.String() +} + +func (vp *vplayer) generateUpdate(tplan *TablePlan, before, after []sqltypes.Value) string { + if tplan.OnInsert == InsertIgnore { + return vp.generateInsert(tplan, after) + } + sql := sqlparser.NewTrackedBuffer(nil) + sql.Myprintf("update %v set ", sqlparser.NewTableIdent(tplan.Name)) + if ok := vp.writeUpdateValues(sql, tplan, before, after); !ok { + return "" + } + sql.Myprintf(" where ") + vp.writeWhereValues(sql, tplan, before) + return sql.String() +} + +func (vp *vplayer) generateDelete(tplan *TablePlan, before []sqltypes.Value) string { + sql := sqlparser.NewTrackedBuffer(nil) + switch tplan.OnInsert { + case InsertOndup: + return vp.generateUpdate(tplan, before, nil) + case InsertIgnore: + return "" + default: // insertNormal + sql.Myprintf("delete from %v where ", sqlparser.NewTableIdent(tplan.Name)) + vp.writeWhereValues(sql, tplan, before) + } + return sql.String() +} + +func (vp *vplayer) writeInsertValues(sql *sqlparser.TrackedBuffer, tplan *TablePlan, after []sqltypes.Value) { + separator := "" + for _, cExpr := range tplan.ColExprs { + sql.Myprintf("%s%v=", separator, cExpr.ColName) + separator = ", " + if cExpr.Operation == OpCount { 
+ sql.WriteString("1") + } else { + if cExpr.Operation == OpSum && after[cExpr.ColNum].IsNull() { + sql.WriteString("0") + } else { + encodeValue(sql, after[cExpr.ColNum]) + } + } + } +} + +// writeUpdateValues returns true if at least one value was set. Otherwise, it returns false. +func (vp *vplayer) writeUpdateValues(sql *sqlparser.TrackedBuffer, tplan *TablePlan, before, after []sqltypes.Value) bool { + separator := "" + hasSet := false + for _, cExpr := range tplan.ColExprs { + if cExpr.IsGrouped { + continue + } + if len(before) != 0 && len(after) != 0 { + if cExpr.Operation == OpCount { + continue + } + bef := before[cExpr.ColNum] + aft := after[cExpr.ColNum] + // If both are null, there's no change + if bef.IsNull() && aft.IsNull() { + continue + } + // If any one of them is null, something has changed. + if bef.IsNull() || aft.IsNull() { + goto mustSet + } + // Compare content only if none are null. + if bef.ToString() == aft.ToString() { + continue + } + } + mustSet: + sql.Myprintf("%s%v=", separator, cExpr.ColName) + separator = ", " + hasSet = true + if cExpr.Operation == OpCount || cExpr.Operation == OpSum { + sql.Myprintf("%v", cExpr.ColName) + } + if len(before) != 0 { + switch cExpr.Operation { + case OpNone: + if len(after) == 0 { + sql.WriteString("NULL") + } + case OpCount: + sql.WriteString("-1") + case OpSum: + if !before[cExpr.ColNum].IsNull() { + sql.WriteString("-") + encodeValue(sql, before[cExpr.ColNum]) + } + } + } + if len(after) != 0 { + switch cExpr.Operation { + case OpNone: + encodeValue(sql, after[cExpr.ColNum]) + case OpCount: + sql.WriteString("+1") + case OpSum: + if !after[cExpr.ColNum].IsNull() { + sql.WriteString("+") + encodeValue(sql, after[cExpr.ColNum]) + } + } + } + } + return hasSet +} + +func (vp *vplayer) writeWhereValues(sql *sqlparser.TrackedBuffer, tplan *TablePlan, before []sqltypes.Value) { + separator := "" + for _, cExpr := range tplan.PKCols { + sql.Myprintf("%s%v=", separator, cExpr.ColName) + separator = " and " + encodeValue(sql, before[cExpr.ColNum]) + } +} + +func (vp *vplayer) updatePos(ts int64) error { + updatePos := binlogplayer.GenerateUpdatePos(vp.id, vp.pos, time.Now().Unix(), ts) + if _, err := vp.dbClient.ExecuteFetch(updatePos, 0); err != nil { + vp.dbClient.Rollback() + return fmt.Errorf("error %v updating position", err) + } + vp.unsavedGTID = nil + vp.timeLastSaved = time.Now() + return nil +} + +func (vp *vplayer) exec(ctx context.Context, sql string) error { + vp.stats.Timings.Record("query", time.Now()) + _, err := vp.dbClient.ExecuteFetch(sql, 0) + for err != nil { + // 1213: deadlock, 1205: lock wait timeout + if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == 1213 || sqlErr.Number() == 1205 { + log.Infof("retryable error: %v, waiting for %v and retrying", sqlErr, dbLockRetryDelay) + if err := vp.dbClient.Rollback(); err != nil { + return err + } + time.Sleep(dbLockRetryDelay) + // Check context here. Otherwise this can become an infinite loop. + select { + case <-ctx.Done(): + return io.EOF + default: + } + err = vp.dbClient.Retry() + continue + } + return err + } + return nil +} + +func encodeValue(sql *sqlparser.TrackedBuffer, value sqltypes.Value) { + // This is currently a separate function because special handling + // may be needed for certain types. + // Previously, this function used to convert timestamp to the session + // time zone, but we now set the session timezone to UTC. So, the timestamp + // value we receive as UTC can be sent as is. + // TODO(sougou): handle BIT data type here? 
+ value.EncodeSQL(sql) +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go new file mode 100644 index 0000000000..1996015d82 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go @@ -0,0 +1,1270 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "flag" + "fmt" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +func TestPlayerFilters(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table src1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), primary key(id))", vrepldb), + "create table src2(id int, val1 int, val2 int, primary key(id))", + fmt.Sprintf("create table %s.dst2(id int, val1 int, sval2 int, rcount int, primary key(id))", vrepldb), + "create table src3(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.dst3(id int, val varbinary(128), primary key(id))", vrepldb), + "create table yes(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.yes(id int, val varbinary(128), primary key(id))", vrepldb), + "create table no(id int, val varbinary(128), primary key(id))", + "create table nopk(id int, val varbinary(128))", + fmt.Sprintf("create table %s.nopk(id int, val varbinary(128))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table src1", + fmt.Sprintf("drop table %s.dst1", vrepldb), + "drop table src2", + fmt.Sprintf("drop table %s.dst2", vrepldb), + "drop table src3", + fmt.Sprintf("drop table %s.dst3", vrepldb), + "drop table yes", + fmt.Sprintf("drop table %s.yes", vrepldb), + "drop table no", + "drop table nopk", + fmt.Sprintf("drop table %s.nopk", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "dst1", + Filter: "select * from src1", + }, { + Match: "dst2", + Filter: "select id, val1, sum(val2) as sval2, count(*) as rcount from src2 group by id", + }, { + Match: "dst3", + Filter: "select id, val from src3 group by id, val", + }, { + Match: "/yes", + }, { + Match: "/nopk", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + testcases := []struct { + input string + output []string + table string + data [][]string + }{{ + // insert with insertNormal + input: "insert into src1 values(1, 'aaa')", + output: []string{ + "begin", + "insert into dst1 set id=1, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst1", + data: [][]string{ + {"1", "aaa"}, + }, + }, { + // update with insertNormal + input: "update src1 set val='bbb'", + 
output: []string{ + "begin", + "update dst1 set val='bbb' where id=1", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst1", + data: [][]string{ + {"1", "bbb"}, + }, + }, { + // delete with insertNormal + input: "delete from src1 where id=1", + output: []string{ + "begin", + "delete from dst1 where id=1", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst1", + data: [][]string{}, + }, { + // insert with insertOnDup + input: "insert into src2 values(1, 2, 3)", + output: []string{ + "begin", + "insert into dst2 set id=1, val1=2, sval2=3, rcount=1 on duplicate key update val1=2, sval2=sval2+3, rcount=rcount+1", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst2", + data: [][]string{ + {"1", "2", "3", "1"}, + }, + }, { + // update with insertOnDup + input: "update src2 set val1=5, val2=1 where id=1", + output: []string{ + "begin", + "update dst2 set val1=5, sval2=sval2-3+1 where id=1", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst2", + data: [][]string{ + {"1", "5", "1", "1"}, + }, + }, { + // delete with insertOnDup + input: "delete from src2 where id=1", + output: []string{ + "begin", + "update dst2 set val1=NULL, sval2=sval2-1, rcount=rcount-1 where id=1", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst2", + data: [][]string{ + {"1", "", "0", "0"}, + }, + }, { + // insert with insertIgnore + input: "insert into src3 values(1, 'aaa')", + output: []string{ + "begin", + "insert ignore into dst3 set id=1, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst3", + data: [][]string{ + {"1", "aaa"}, + }, + }, { + // update with insertIgnore + input: "update src3 set val='bbb'", + output: []string{ + "begin", + "insert ignore into dst3 set id=1, val='bbb'", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst3", + data: [][]string{ + {"1", "aaa"}, + }, + }, { + // delete with insertIgnore + input: "delete from src3 where id=1", + output: []string{ + "begin", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "dst3", + data: [][]string{ + {"1", "aaa"}, + }, + }, { + // insert: regular expression filter + input: "insert into yes values(1, 'aaa')", + output: []string{ + "begin", + "insert into yes set id=1, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "yes", + data: [][]string{ + {"1", "aaa"}, + }, + }, { + // update: regular expression filter + input: "update yes set val='bbb'", + output: []string{ + "begin", + "update yes set val='bbb' where id=1", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "yes", + data: [][]string{ + {"1", "bbb"}, + }, + }, { + // table should not match a rule + input: "insert into no values(1, 'aaa')", + output: []string{}, + }, { + // nopk: insert + input: "insert into nopk values(1, 'aaa')", + output: []string{ + "begin", + "insert into nopk set id=1, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "nopk", + data: [][]string{ + {"1", "aaa"}, + }, + }, { + // nopk: update + input: "update nopk set val='bbb' where id=1", + output: []string{ + "begin", + "update nopk set val='bbb' where id=1 and val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "nopk", + data: [][]string{ + {"1", "bbb"}, + }, + }, { + // nopk: delete + input: "delete from nopk where id=1", + output: []string{ + "begin", + "delete from nopk where id=1 and val='bbb'", + "/update _vt.vreplication set pos=", + "commit", + }, + table: "nopk", + data: 
[][]string{}, + }} + + for _, tcases := range testcases { + execStatements(t, []string{tcases.input}) + expectDBClientQueries(t, tcases.output) + if tcases.table != "" { + expectData(t, tcases.table, tcases.data) + } + } +} + +func TestPlayerUpdates(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table t1(id int, grouped int, ungrouped int, summed int, primary key(id))", + fmt.Sprintf("create table %s.t1(id int, grouped int, ungrouped int, summed int, rcount int, primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select id, grouped, ungrouped, sum(summed) as summed, count(*) as rcount from t1 group by id, grouped", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + testcases := []struct { + input string + output string + table string + data [][]string + }{{ + // Start with all nulls + input: "insert into t1 values(1, null, null, null)", + output: "insert into t1 set id=1, grouped=null, ungrouped=null, summed=0, rcount=1 on duplicate key update ungrouped=null, summed=summed, rcount=rcount+1", + table: "t1", + data: [][]string{ + {"1", "", "", "0", "1"}, + }, + }, { + // null to null values + input: "update t1 set grouped=1 where id=1", + output: "", + table: "t1", + data: [][]string{ + {"1", "", "", "0", "1"}, + }, + }, { + // null to non-null values + input: "update t1 set ungrouped=1, summed=1 where id=1", + output: "update t1 set ungrouped=1, summed=summed+1 where id=1", + table: "t1", + data: [][]string{ + {"1", "", "1", "1", "1"}, + }, + }, { + // non-null to non-null values + input: "update t1 set ungrouped=2, summed=2 where id=1", + output: "update t1 set ungrouped=2, summed=summed-1+2 where id=1", + table: "t1", + data: [][]string{ + {"1", "", "2", "2", "1"}, + }, + }, { + // non-null to null values + input: "update t1 set ungrouped=null, summed=null where id=1", + output: "update t1 set ungrouped=null, summed=summed-2 where id=1", + table: "t1", + data: [][]string{ + {"1", "", "", "0", "1"}, + }, + }, { + // insert non-null values + input: "insert into t1 values(2, 2, 3, 4)", + output: "insert into t1 set id=2, grouped=2, ungrouped=3, summed=4, rcount=1 on duplicate key update ungrouped=3, summed=summed+4, rcount=rcount+1", + table: "t1", + data: [][]string{ + {"1", "", "", "0", "1"}, + {"2", "2", "3", "4", "1"}, + }, + }, { + // delete non-null values + input: "delete from t1 where id=2", + output: "update t1 set ungrouped=NULL, summed=summed-4, rcount=rcount-1 where id=2", + table: "t1", + data: [][]string{ + {"1", "", "", "0", "1"}, + {"2", "2", "", "0", "0"}, + }, + }} + + for _, tcases := range testcases { + execStatements(t, []string{tcases.input}) + output := []string{ + "begin", + tcases.output, + "/update _vt.vreplication set pos=", + "commit", + } + if tcases.output == "" { + output = []string{ + "begin", + "/update _vt.vreplication set pos=", + "commit", + } + } + expectDBClientQueries(t, output) + if tcases.table != "" { + expectData(t, tcases.table, tcases.data) + } + } +} + +func TestPlayerTypes(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table vitess_ints(tiny tinyint, tinyu tinyint unsigned, 
small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny))", + fmt.Sprintf("create table %s.vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny))", vrepldb), + "create table vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id))", + fmt.Sprintf("create table %s.vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id))", vrepldb), + "create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb))", + fmt.Sprintf("create table %s.vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb))", vrepldb), + "create table vitess_misc(id int, b bit(8), d date, dt datetime, t time, g geometry, primary key(id))", + fmt.Sprintf("create table %s.vitess_misc(id int, b bit(8), d date, dt datetime, t time, g geometry, primary key(id))", vrepldb), + "create table vitess_null(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.vitess_null(id int, val varbinary(128), primary key(id))", vrepldb), + "create table src1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.src1(id int, val varbinary(128), primary key(id))", vrepldb), + "create table binary_pk(b binary(4), val varbinary(4), primary key(b))", + fmt.Sprintf("create table %s.binary_pk(b binary(4), val varbinary(4), primary key(b))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table vitess_ints", + fmt.Sprintf("drop table %s.vitess_ints", vrepldb), + "drop table vitess_fracts", + fmt.Sprintf("drop table %s.vitess_fracts", vrepldb), + "drop table vitess_strings", + fmt.Sprintf("drop table %s.vitess_strings", vrepldb), + "drop table vitess_misc", + fmt.Sprintf("drop table %s.vitess_misc", vrepldb), + "drop table vitess_null", + fmt.Sprintf("drop table %s.vitess_null", vrepldb), + "drop table binary_pk", + fmt.Sprintf("drop table %s.binary_pk", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + testcases := []struct { + input string + output string + table string + data [][]string + }{{ + input: "insert into vitess_ints values(-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)", + output: "insert into vitess_ints set tiny=-128, tinyu=255, small=-32768, smallu=65535, medium=-8388608, mediumu=16777215, normal=-2147483648, normalu=4294967295, big=-9223372036854775808, bigu=18446744073709551615, y=2012", + table: "vitess_ints", + data: [][]string{ + {"-128", "255", "-32768", "65535", "-8388608", "16777215", "-2147483648", "4294967295", "-9223372036854775808", "18446744073709551615", "2012"}, + }, + }, { + input: "insert into vitess_fracts values(1, 1.99, 2.99, 3.99, 4.99)", + output: "insert into vitess_fracts set id=1, deci=1.99, num=2.99, f=3.99E+00, d=4.99E+00", + table: "vitess_fracts", + data: [][]string{ + {"1", "1.99", 
"2.99", "3.99", "4.99"}, + }, + }, { + input: "insert into vitess_strings values('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a', 'a,b')", + output: "insert into vitess_strings set vb='a', c='b', vc='c', b='d\\0\\0\\0', tb='e', bl='f', ttx='g', tx='h', en='1', s='3'", + table: "vitess_strings", + data: [][]string{ + {"a", "b", "c", "d\x00\x00\x00", "e", "f", "g", "h", "a", "a,b"}, + }, + }, { + input: "insert into vitess_misc values(1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))", + output: "insert into vitess_misc set id=1, b=b'00000001', d='2012-01-01', dt='2012-01-01 15:45:45', t='15:45:45', g='\\0\\0\\0\\0\x01\x01\\0\\0\\0\\0\\0\\0\\0\\0\\0\xf0?\\0\\0\\0\\0\\0\\0\\0@'", + table: "vitess_misc", + data: [][]string{ + {"1", "\x01", "2012-01-01", "2012-01-01 15:45:45", "15:45:45", "\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@"}, + }, + }, { + input: "insert into vitess_null values(1, null)", + output: "insert into vitess_null set id=1, val=null", + table: "vitess_null", + data: [][]string{ + {"1", ""}, + }, + }, { + input: "insert into binary_pk values('a', 'aaa')", + output: "insert into binary_pk set b='a\\0\\0\\0', val='aaa'", + table: "binary_pk", + data: [][]string{ + {"a\x00\x00\x00", "aaa"}, + }, + }, { + // Binary pk is a special case: https://github.com/vitessio/vitess/issues/3984 + input: "update binary_pk set val='bbb' where b='a\\0\\0\\0'", + output: "update binary_pk set val='bbb' where b='a\\0\\0\\0'", + table: "binary_pk", + data: [][]string{ + {"a\x00\x00\x00", "bbb"}, + }, + }} + + for _, tcases := range testcases { + execStatements(t, []string{tcases.input}) + want := []string{ + "begin", + tcases.output, + "/update _vt.vreplication set pos=", + "commit", + } + expectDBClientQueries(t, want) + if tcases.table != "" { + expectData(t, tcases.table, tcases.data) + } + } +} + +func TestPlayerDDL(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + execStatements(t, []string{ + "create table dummy(id int, primary key(id))", + fmt.Sprintf("create table %s.dummy(id int, primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table dummy", + fmt.Sprintf("drop table %s.dummy", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + // Issue a dummy change to ensure vreplication is initialized. Otherwise there + // is a race between the DDLs and the schema loader of vstreamer. + // Root cause seems to be with MySQL where t1 shows up in information_schema before + // the actual table is created. 
+ execStatements(t, []string{"insert into dummy values(1)"}) + expectDBClientQueries(t, []string{ + "begin", + "insert into dummy set id=1", + "/update _vt.vreplication set pos=", + "commit", + }) + + execStatements(t, []string{"create table t1(id int, primary key(id))"}) + execStatements(t, []string{"drop table t1"}) + expectDBClientQueries(t, []string{}) + cancel() + + cancel, id := startVReplication(t, filter, binlogdatapb.OnDDLAction_STOP, "") + execStatements(t, []string{"create table t1(id int, primary key(id))"}) + pos1 := masterPosition(t) + execStatements(t, []string{"drop table t1"}) + pos2 := masterPosition(t) + // The stop position must be the GTID of the first DDL + expectDBClientQueries(t, []string{ + "begin", + fmt.Sprintf("/update _vt.vreplication set pos='%s'", pos1), + "/update _vt.vreplication set state='Stopped'", + "commit", + }) + // Restart vreplication + if _, err := playerEngine.Exec(fmt.Sprintf(`update _vt.vreplication set state = 'Running', message='' where id=%d`, id)); err != nil { + t.Fatal(err) + } + // It should stop at the next DDL + expectDBClientQueries(t, []string{ + "/update.*'Running'", + "/update.*'Running'", + "begin", + fmt.Sprintf("/update.*'%s'", pos2), + "/update _vt.vreplication set state='Stopped'", + "commit", + }) + cancel() + + execStatements(t, []string{fmt.Sprintf("create table %s.t2(id int, primary key(id))", vrepldb)}) + cancel, _ = startVReplication(t, filter, binlogdatapb.OnDDLAction_EXEC, "") + execStatements(t, []string{"create table t1(id int, primary key(id))"}) + expectDBClientQueries(t, []string{ + "create table t1(id int, primary key(id))", + "/update _vt.vreplication set pos=", + }) + execStatements(t, []string{"create table t2(id int, primary key(id))"}) + expectDBClientQueries(t, []string{ + "create table t2(id int, primary key(id))", + "/update _vt.vreplication set state='Error'", + }) + cancel() + + // Don't test drop. + // MySQL rewrites them by uppercasing, which may be version specific. 
+ execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + "drop table t2", + fmt.Sprintf("drop table %s.t2", vrepldb), + }) + + execStatements(t, []string{fmt.Sprintf("create table %s.t2(id int, primary key(id))", vrepldb)}) + cancel, _ = startVReplication(t, filter, binlogdatapb.OnDDLAction_EXEC_IGNORE, "") + execStatements(t, []string{"create table t1(id int, primary key(id))"}) + expectDBClientQueries(t, []string{ + "create table t1(id int, primary key(id))", + "/update _vt.vreplication set pos=", + }) + execStatements(t, []string{"create table t2(id int, primary key(id))"}) + expectDBClientQueries(t, []string{ + "create table t2(id int, primary key(id))", + "/update _vt.vreplication set pos=", + }) + cancel() + + execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + "drop table t2", + fmt.Sprintf("drop table %s.t2", vrepldb), + }) +} + +func TestPlayerStopPos(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table yes(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.yes(id int, val varbinary(128), primary key(id))", vrepldb), + "create table no(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table yes", + fmt.Sprintf("drop table %s.yes", vrepldb), + "drop table no", + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/yes", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + startPos := masterPosition(t) + query := binlogplayer.CreateVReplicationStopped("test", bls, startPos) + qr, err := playerEngine.Exec(query) + if err != nil { + t.Fatal(err) + } + id := uint32(qr.InsertID) + for q := range globalDBQueries { + if strings.HasPrefix(q, "insert into _vt.vreplication") { + break + } + } + + // Test normal stop. + execStatements(t, []string{ + "insert into yes values(1, 'aaa')", + }) + stopPos := masterPosition(t) + query = binlogplayer.StartVReplicationUntil(id, stopPos) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDBClientQueries(t, []string{ + "/update.*'Running'", // done by Engine + "/update.*'Running'", // done by vplayer on start + "begin", + "insert into yes set id=1, val='aaa'", + fmt.Sprintf("/update.*'%s'", stopPos), + "/update.*'Stopped'", + "commit", + }) + + // Test stopping at empty transaction. + execStatements(t, []string{ + "insert into no values(2, 'aaa')", + "insert into no values(3, 'aaa')", + }) + stopPos = masterPosition(t) + execStatements(t, []string{ + "insert into no values(4, 'aaa')", + }) + query = binlogplayer.StartVReplicationUntil(id, stopPos) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDBClientQueries(t, []string{ + "/update.*'Running'", // done by Engine + "/update.*'Running'", // done by vplayer on start + "begin", + // Since 'no' generates empty transactions that are skipped by + // vplayer, a commit is done only for the stop position event. + fmt.Sprintf("/update.*'%s'", stopPos), + "/update.*'Stopped'", + "commit", + }) + + // Test stopping when position is already reached. 
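+	// (vplayer.play detects pos.AtLeast(stopPos) before it starts streaming and moves
+	// straight to the Stopped state with an "already reached" message, so no transaction
+	// is replayed.)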
+ query = binlogplayer.StartVReplicationUntil(id, stopPos) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDBClientQueries(t, []string{ + "/update.*'Running'", // done by Engine + "/update.*'Running'", // done by vplayer on start + "/update.*'Stopped'.*already reached", + }) +} + +func TestPlayerIdleUpdate(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + savedIdleTimeout := idleTimeout + defer func() { idleTimeout = savedIdleTimeout }() + idleTimeout = 100 * time.Millisecond + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.t1(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + execStatements(t, []string{ + "insert into t1 values(1, 'aaa')", + }) + start := time.Now() + expectDBClientQueries(t, []string{ + "begin", + "insert into t1 set id=1, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }) + // The above write will generate a new binlog event, and + // that event will loopback into player as an empty event. + // But it must not get saved until idleTimeout has passed. + // The exact positions are hard to verify because of this + // loopback mechanism. + expectDBClientQueries(t, []string{ + "/update _vt.vreplication set pos=", + }) + if duration := time.Now().Sub(start); duration < idleTimeout { + t.Errorf("duration: %v, must be at least %v", duration, idleTimeout) + } +} + +func TestPlayerSplitTransaction(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + flag.Set("vstream_packet_size", "10") + defer flag.Set("vstream_packet_size", "10000") + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.t1(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + execStatements(t, []string{ + "begin", + "insert into t1 values(1, '123456')", + "insert into t1 values(2, '789012')", + "commit", + }) + // Because the packet size is 10, this is received as two events, + // but still combined as one transaction. 
+ expectDBClientQueries(t, []string{ + "begin", + "insert into t1 set id=1, val='123456'", + "insert into t1 set id=2, val='789012'", + "/update _vt.vreplication set pos=", + "commit", + }) +} + +func TestPlayerLockErrors(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.t1(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + execStatements(t, []string{ + "begin", + "insert into t1 values(1, 'aaa')", + "insert into t1 values(2, 'bbb')", + "commit", + }) + expectDBClientQueries(t, []string{ + "begin", + "insert into t1 set id=1, val='aaa'", + "insert into t1 set id=2, val='bbb'", + "/update _vt.vreplication set pos=", + "commit", + }) + + vconn := &realDBClient{nolog: true} + if err := vconn.Connect(); err != nil { + t.Error(err) + } + defer vconn.Close() + + // Start a transaction and lock the second row. + if _, err := vconn.ExecuteFetch("begin", 1); err != nil { + t.Error(err) + } + if _, err := vconn.ExecuteFetch("update t1 set val='bbb' where id=2", 1); err != nil { + t.Error(err) + } + + execStatements(t, []string{ + "begin", + "update t1 set val='ccc' where id=1", + "update t1 set val='ccc' where id=2", + "commit", + }) + // The innodb lock wait timeout is set to 1s. + expectDBClientQueries(t, []string{ + "begin", + "update t1 set val='ccc' where id=1", + "update t1 set val='ccc' where id=2", + "rollback", + }) + + // Release the lock, and watch the retry go through. + _, _ = vconn.ExecuteFetch("rollback", 1) + expectDBClientQueries(t, []string{ + "begin", + "update t1 set val='ccc' where id=1", + "update t1 set val='ccc' where id=2", + "/update _vt.vreplication set pos=", + "commit", + }) +} + +func TestPlayerCancelOnLock(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.t1(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + execStatements(t, []string{ + "begin", + "insert into t1 values(1, 'aaa')", + "commit", + }) + expectDBClientQueries(t, []string{ + "begin", + "insert into t1 set id=1, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }) + + vconn := &realDBClient{nolog: true} + if err := vconn.Connect(); err != nil { + t.Error(err) + } + defer vconn.Close() + + // Start a transaction and lock the row. 
+ if _, err := vconn.ExecuteFetch("begin", 1); err != nil { + t.Error(err) + } + if _, err := vconn.ExecuteFetch("update t1 set val='bbb' where id=1", 1); err != nil { + t.Error(err) + } + + execStatements(t, []string{ + "begin", + "update t1 set val='ccc' where id=1", + "commit", + }) + // The innodb lock wait timeout is set to 1s. + expectDBClientQueries(t, []string{ + "begin", + "update t1 set val='ccc' where id=1", + "rollback", + }) + + // VReplication should not get stuck if you cancel now. + done := make(chan bool) + go func() { + cancel() + close(done) + }() + select { + case <-done: + case <-time.After(5 * time.Second): + t.Error("cancel is hung") + } +} + +func TestPlayerBatching(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.t1(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_EXEC, "") + defer cancel() + + execStatements(t, []string{ + "insert into t1 values(1, 'aaa')", + }) + expectDBClientQueries(t, []string{ + "begin", + "insert into t1 set id=1, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }) + + vconn := &realDBClient{nolog: true} + if err := vconn.Connect(); err != nil { + t.Error(err) + } + defer vconn.Close() + + // Start a transaction and lock the row. + if _, err := vconn.ExecuteFetch("begin", 1); err != nil { + t.Error(err) + } + if _, err := vconn.ExecuteFetch("update t1 set val='bbb' where id=1", 1); err != nil { + t.Error(err) + } + + // create one transaction + execStatements(t, []string{ + "update t1 set val='ccc' where id=1", + }) + // Wait for the begin. The update will be blocked. + expectDBClientQueries(t, []string{ + "begin", + }) + + // Create two more transactions. They will go and wait in the relayLog. + execStatements(t, []string{ + "insert into t1 values(2, 'aaa')", + "insert into t1 values(3, 'aaa')", + "create table t2(id int, val varbinary(128), primary key(id))", + "drop table t2", + }) + + // Release the lock. + _, _ = vconn.ExecuteFetch("rollback", 1) + // First transaction will complete. The other two + // transactions must be batched into one. But the + // DDLs should be on their own. + expectDBClientQueries(t, []string{ + "update t1 set val='ccc' where id=1", + "/update _vt.vreplication set pos=", + "commit", + "begin", + "insert into t1 set id=2, val='aaa'", + "insert into t1 set id=3, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + "create table t2(id int, val varbinary(128), primary key(id))", + "/update _vt.vreplication set pos=", + "/", // drop table is rewritten by mysql. Don't check. 
+ "/update _vt.vreplication set pos=", + }) +} + +func TestPlayerRelayLogMaxSize(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + for i := 0; i < 2; i++ { + // First iteration checks max size, second checks max items + func() { + switch i { + case 0: + savedSize := relayLogMaxSize + defer func() { relayLogMaxSize = savedSize }() + relayLogMaxSize = 10 + case 1: + savedLen := relayLogMaxItems + defer func() { relayLogMaxItems = savedLen }() + relayLogMaxItems = 2 + } + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.t1(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + execStatements(t, []string{ + "insert into t1 values(1, '123456')", + }) + expectDBClientQueries(t, []string{ + "begin", + "insert into t1 set id=1, val='123456'", + "/update _vt.vreplication set pos=", + "commit", + }) + + vconn := &realDBClient{nolog: true} + if err := vconn.Connect(); err != nil { + t.Error(err) + } + defer vconn.Close() + + // Start a transaction and lock the row. + if _, err := vconn.ExecuteFetch("begin", 1); err != nil { + t.Error(err) + } + if _, err := vconn.ExecuteFetch("update t1 set val='bbb' where id=1", 1); err != nil { + t.Error(err) + } + + // create one transaction + execStatements(t, []string{ + "update t1 set val='ccc' where id=1", + }) + // Wait for the begin. The update will be blocked. + expectDBClientQueries(t, []string{ + "begin", + }) + + // Create two more transactions. They will go and wait in the relayLog. + execStatements(t, []string{ + "insert into t1 values(2, '789012')", + "insert into t1 values(3, '345678')", + "insert into t1 values(4, '901234')", + }) + + // Release the lock. + _, _ = vconn.ExecuteFetch("rollback", 1) + // First transaction will complete. The other two + // transactions must be batched into one. The last transaction + // will wait to be sent to the relay until the player fetches + // them. 
+ expectDBClientQueries(t, []string{ + "update t1 set val='ccc' where id=1", + "/update _vt.vreplication set pos=", + "commit", + "begin", + "insert into t1 set id=2, val='789012'", + "insert into t1 set id=3, val='345678'", + "/update _vt.vreplication set pos=", + "commit", + "begin", + "insert into t1 set id=4, val='901234'", + "/update _vt.vreplication set pos=", + "commit", + }) + }() + } +} + +func TestRestartOnVStreamEnd(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + savedDelay := *retryDelay + defer func() { *retryDelay = savedDelay }() + *retryDelay = 1 * time.Millisecond + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.t1(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + execStatements(t, []string{ + "insert into t1 values(1, 'aaa')", + }) + expectDBClientQueries(t, []string{ + "begin", + "insert into t1 set id=1, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }) + + streamerEngine.Close() + expectDBClientQueries(t, []string{ + "/update.*'Error'.*vstream ended", + }) + if err := streamerEngine.Open(env.KeyspaceName, env.ShardName); err != nil { + t.Fatal(err) + } + + execStatements(t, []string{ + "insert into t1 values(2, 'aaa')", + }) + expectDBClientQueries(t, []string{ + "/update.*'Running'", + "begin", + "insert into t1 set id=2, val='aaa'", + "/update _vt.vreplication set pos=", + "commit", + }) +} + +func TestTimestamp(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table t1(id int, ts timestamp, dt datetime)", + fmt.Sprintf("create table %s.t1(id int, ts timestamp, dt datetime)", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + cancel, _ := startVReplication(t, filter, binlogdatapb.OnDDLAction_IGNORE, "") + defer cancel() + + qr, err := env.Mysqld.FetchSuperQuery(context.Background(), "select now()") + if err != nil { + t.Fatal(err) + } + want := qr.Rows[0][0].ToString() + t.Logf("want: %s", want) + + execStatements(t, []string{ + fmt.Sprintf("insert into t1 values(1, '%s', '%s')", want, want), + }) + expectDBClientQueries(t, []string{ + "begin", + // The insert value for ts will be in UTC. + // We'll check the row instead. 
+ "/insert into t1 set id=", + "/update _vt.vreplication set pos=", + "commit", + }) + + expectData(t, "t1", [][]string{{"1", want, want}}) +} + +func execStatements(t *testing.T, queries []string) { + t.Helper() + if err := env.Mysqld.ExecuteSuperQueryList(context.Background(), queries); err != nil { + t.Error(err) + } +} + +func startVReplication(t *testing.T, filter *binlogdatapb.Filter, onddl binlogdatapb.OnDDLAction, pos string) (cancelFunc func(), id int) { + t.Helper() + + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: onddl, + } + if pos == "" { + pos = masterPosition(t) + } + query := binlogplayer.CreateVReplication("test", bls, pos, 9223372036854775807, 9223372036854775807, 0) + qr, err := playerEngine.Exec(query) + if err != nil { + t.Fatal(err) + } + // Eat all the initialization queries + for q := range globalDBQueries { + if strings.HasPrefix(q, "update") { + break + } + } + return func() { + t.Helper() + query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDBClientQueries(t, []string{ + "/delete", + }) + }, int(qr.InsertID) +} + +func masterPosition(t *testing.T) string { + t.Helper() + pos, err := env.Mysqld.MasterPosition() + if err != nil { + t.Fatal(err) + } + return mysql.EncodePosition(pos) +} diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go index 8c020f143a..abeef3e68b 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -37,7 +37,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) -// BinlogFormat is used for for specifying the binlog format. +// BinlogFormat is used for specifying the binlog format. type BinlogFormat int // The following constants specify the possible binlog format values. @@ -231,8 +231,8 @@ var ( ) // VerifyMode is a helper method to verify mysql is running with -// sql_mode = STRICT_TRANS_TABLES and autocommit=ON. It also returns -// the current binlog format. +// sql_mode = STRICT_TRANS_TABLES or STRICT_ALL_TABLES and autocommit=ON. +// It also returns the current binlog format. 
func (dbc *DBConn) VerifyMode(strictTransTables bool) (BinlogFormat, error) { if strictTransTables { qr, err := dbc.conn.ExecuteFetch(getModeSQL, 2, false) @@ -242,8 +242,9 @@ func (dbc *DBConn) VerifyMode(strictTransTables bool) (BinlogFormat, error) { if len(qr.Rows) != 1 { return 0, fmt.Errorf("incorrect rowcount received for %s: %d", getModeSQL, len(qr.Rows)) } - if !strings.Contains(qr.Rows[0][0].ToString(), "STRICT_TRANS_TABLES") { - return 0, fmt.Errorf("require sql_mode to be STRICT_TRANS_TABLES: got '%s'", qr.Rows[0][0].ToString()) + sqlMode := qr.Rows[0][0].ToString() + if !(strings.Contains(sqlMode, "STRICT_TRANS_TABLES") || strings.Contains(sqlMode, "STRICT_ALL_TABLES")) { + return 0, fmt.Errorf("require sql_mode to be STRICT_TRANS_TABLES or STRICT_ALL_TABLES: got '%s'", qr.Rows[0][0].ToString()) } } qr, err := dbc.conn.ExecuteFetch(getAutocommit, 2, false) diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go index ba7f25a2f2..594f2d80d3 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go @@ -102,7 +102,7 @@ func TestDBConnExec(t *testing.T) { startCounts = tabletenv.MySQLStats.Counts() - // Set the connection fail flag and and try again. + // Set the connection fail flag and try again. // This time the initial query fails as does the reconnect attempt. db.EnableConnFail() _, err = dbConn.Exec(ctx, sql, 1, false) diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go index 7c96b1dd64..02272c1aae 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -277,9 +277,9 @@ func (cp *Pool) IdleClosed() int64 { } func (cp *Pool) isCallerIDAppDebug(ctx context.Context) bool { - callerID := callerid.ImmediateCallerIDFromContext(ctx) - if cp.appDebugParams.Uname == "" { + if cp.appDebugParams == nil || cp.appDebugParams.Uname == "" { return false } + callerID := callerid.ImmediateCallerIDFromContext(ctx) return callerID != nil && callerID.Username == cp.appDebugParams.Uname } diff --git a/go/vt/vttablet/tabletserver/planbuilder/dml.go b/go/vt/vttablet/tabletserver/planbuilder/dml.go index 13544efe65..c1cadcc4d7 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/dml.go +++ b/go/vt/vttablet/tabletserver/planbuilder/dml.go @@ -202,6 +202,14 @@ func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan return nil, err } + if sel.Where != nil { + comp, ok := sel.Where.Expr.(*sqlparser.ComparisonExpr) + if ok && comp.IsImpossible() { + plan.PlanID = PlanSelectImpossible + return plan, nil + } + } + // Check if it's a NEXT VALUE statement. 
if nextVal, ok := sel.SelectExprs[0].(sqlparser.Nextval); ok { if table.Type != schema.Sequence { @@ -466,7 +474,14 @@ func analyzeInsertMessage(ins *sqlparser.Insert, plan *Plan, table *schema.Table return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "subquery not allowed for message table: %s", table.Name.String()) } if ins.OnDup != nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "'on duplicate key' construct not allowed for message table: %s", table.Name.String()) + // only allow 'on duplicate key' where time_scheduled and id are not referenced + ts := sqlparser.NewColIdent("time_scheduled") + id := sqlparser.NewColIdent("id") + for _, updateExpr := range ins.OnDup { + if updateExpr.Name.Name.Equal(ts) || updateExpr.Name.Name.Equal(id) { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "'on duplicate key' cannot reference time_scheduled or id for message table: %s", table.Name.String()) + } + } } if len(ins.Columns) == 0 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "column list must be specified for message table insert: %s", table.Name.String()) diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index bbeecd963e..befeae05e4 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -83,6 +83,8 @@ const ( PlanOtherAdmin // PlanMessageStream is used for streaming messages. PlanMessageStream + // PlanSelectImpossible is used for where or having clauses that can never be true. + PlanSelectImpossible // NumPlans stores the total number of plans NumPlans ) @@ -105,6 +107,7 @@ var planName = [NumPlans]string{ "OTHER_READ", "OTHER_ADMIN", "MESSAGE_STREAM", + "SELECT_IMPOSSIBLE", } func (pt PlanType) String() string { @@ -126,7 +129,7 @@ func PlanByName(s string) (pt PlanType, ok bool) { // IsSelect returns true if PlanType is about a select query. func (pt PlanType) IsSelect() bool { - return pt == PlanPassSelect || pt == PlanSelectLock + return pt == PlanPassSelect || pt == PlanSelectLock || pt == PlanSelectImpossible } // MarshalJSON returns a json string for PlanType. 
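
Aside: the new PlanSelectImpossible plan targets the `where 1 != 1` probes that clients send when they only want field metadata. The following is a minimal sketch, illustrative only (the query text and the package-main wrapper are not part of this change), of how such a query trips the IsImpossible check that analyzeSelect now relies on:

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// A typical field-probe query: the WHERE clause can never be true.
	stmt, err := sqlparser.Parse("select id, val from t1 where 1 != 1")
	if err != nil {
		panic(err)
	}
	sel := stmt.(*sqlparser.Select)
	comp, ok := sel.Where.Expr.(*sqlparser.ComparisonExpr)
	// This is the same check analyzeSelect performs before assigning PlanSelectImpossible.
	fmt.Println(ok && comp.IsImpossible()) // expected output: true
}

When such a plan is chosen, the query_executor.go hunk further down short-circuits the request from the cached fields when they are available, so the query never reaches MySQL.
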
@@ -140,22 +143,23 @@ func (pt PlanType) MinRole() tableacl.Role { } var tableACLRoles = map[PlanType]tableacl.Role{ - PlanPassSelect: tableacl.READER, - PlanSelectLock: tableacl.READER, - PlanSet: tableacl.READER, - PlanPassDML: tableacl.WRITER, - PlanDMLPK: tableacl.WRITER, - PlanDMLSubquery: tableacl.WRITER, - PlanInsertPK: tableacl.WRITER, - PlanInsertSubquery: tableacl.WRITER, - PlanInsertMessage: tableacl.WRITER, - PlanDDL: tableacl.ADMIN, - PlanSelectStream: tableacl.READER, - PlanOtherRead: tableacl.READER, - PlanOtherAdmin: tableacl.ADMIN, - PlanUpsertPK: tableacl.WRITER, - PlanNextval: tableacl.WRITER, - PlanMessageStream: tableacl.WRITER, + PlanPassSelect: tableacl.READER, + PlanSelectLock: tableacl.READER, + PlanSet: tableacl.READER, + PlanPassDML: tableacl.WRITER, + PlanDMLPK: tableacl.WRITER, + PlanDMLSubquery: tableacl.WRITER, + PlanInsertPK: tableacl.WRITER, + PlanInsertSubquery: tableacl.WRITER, + PlanInsertMessage: tableacl.WRITER, + PlanDDL: tableacl.ADMIN, + PlanSelectStream: tableacl.READER, + PlanOtherRead: tableacl.READER, + PlanOtherAdmin: tableacl.ADMIN, + PlanUpsertPK: tableacl.WRITER, + PlanNextval: tableacl.WRITER, + PlanMessageStream: tableacl.WRITER, + PlanSelectImpossible: tableacl.READER, } //_______________________________________________ diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go index f6f2e671a0..9f8b95debc 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go @@ -25,13 +25,11 @@ import ( "io/ioutil" "log" "os" - "path" "path/filepath" "strings" "testing" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/testfiles" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -112,7 +110,7 @@ func TestPlan(t *testing.T) { } func TestCustom(t *testing.T) { - testSchemas := testfiles.Glob("tabletserver/*_schema.json") + testSchemas, _ := filepath.Glob("testdata/*_schema.json") if len(testSchemas) == 0 { t.Log("No schemas to test") return @@ -325,8 +323,5 @@ func iterateExecFile(name string) (testCaseIterator chan testCase) { } func locateFile(name string) string { - if path.IsAbs(name) { - return name - } - return testfiles.Locate("tabletserver/" + name) + return "testdata/" + name } diff --git a/data/test/tabletserver/ddl_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/ddl_cases.txt similarity index 100% rename from data/test/tabletserver/ddl_cases.txt rename to go/vt/vttablet/tabletserver/planbuilder/testdata/ddl_cases.txt diff --git a/data/test/tabletserver/exec_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt similarity index 97% rename from data/test/tabletserver/exec_cases.txt rename to go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt index d4d1a296b5..9a60c082c7 100644 --- a/data/test/tabletserver/exec_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt @@ -1015,13 +1015,42 @@ options:PassthroughDMLs ] } +# message multi-value upsert +"insert into msg(time_scheduled, id, message) values(1, 2, 'aa'), (3, 4, 'bb') on duplicate key update message = values(message)" +{ + "PlanID": "INSERT_MESSAGE", + "TableName": "msg", + "Permissions": [ + { + "TableName": "msg", + "Role": 1 + } + ], + "FullQuery": "insert into msg(time_scheduled, id, message) values (1, 2, 'aa'), (3, 4, 'bb') on duplicate key update message = values(message)", + "OuterQuery": "insert into 
msg(time_scheduled, id, message, time_next, time_created, epoch) values (1, 2, 'aa', 1, :#time_now, 0), (3, 4, 'bb', 3, :#time_now, 0) on duplicate key update message = values(message)", + "PKValues": [ + [ + 1, + 3 + ], + [ + 2, + 4 + ] + ] +} + # message insert subquery "insert into msg(time_scheduled, id, message) select * from a" "subquery not allowed for message table: msg" -# message insert upsert key -"insert into msg(time_scheduled, id, message) Values(1, 2, 'aa') on duplicate key update message='bb'" -"'on duplicate key' construct not allowed for message table: msg" +# message insert upsert time_scheduled +"insert into msg(time_scheduled, id, message) Values(1, 2, 'aa') on duplicate key update time_scheduled=123" +"'on duplicate key' cannot reference time_scheduled or id for message table: msg" + +# message insert upsert id +"insert into msg(time_scheduled, id, message) Values(1, 2, 'aa') on duplicate key update id=123" +"'on duplicate key' cannot reference time_scheduled or id for message table: msg" # message insert without column list "insert into msg values(1)" diff --git a/data/test/tabletserver/schema_test.json b/go/vt/vttablet/tabletserver/planbuilder/testdata/schema_test.json similarity index 100% rename from data/test/tabletserver/schema_test.json rename to go/vt/vttablet/tabletserver/planbuilder/testdata/schema_test.json diff --git a/data/test/tabletserver/stream_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt similarity index 100% rename from data/test/tabletserver/stream_cases.txt rename to go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go index 882cbe0dd5..739d823982 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -40,7 +40,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) -func TestStrictTransTables(t *testing.T) { +func TestStrictMode(t *testing.T) { db := fakesqldb.New(t) defer db.Close() for query, result := range schematest.Queries() { @@ -61,7 +61,7 @@ func TestStrictTransTables(t *testing.T) { } qe.Close() - // Check that we fail if STRICT_TRANS_TABLES is not set. + // Check that we fail if STRICT_TRANS_TABLES or STRICT_ALL_TABLES is not set. 
db.AddQuery( "select @@global.sql_mode", &sqltypes.Result{ @@ -72,7 +72,7 @@ func TestStrictTransTables(t *testing.T) { qe = NewQueryEngine(DummyChecker, schema.NewEngine(DummyChecker, config), config) qe.InitDBConfig(dbcfgs) err := qe.Open() - wantErr := "require sql_mode to be STRICT_TRANS_TABLES: got ''" + wantErr := "require sql_mode to be STRICT_TRANS_TABLES or STRICT_ALL_TABLES: got ''" if err == nil || err.Error() != wantErr { t.Errorf("Open: %v, want %s", err, wantErr) } diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index 847b8923e2..6e8747e952 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -106,6 +106,17 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { return qre.execDDL() case planbuilder.PlanNextval: return qre.execNextval() + case planbuilder.PlanSelectImpossible: + if qre.plan.Fields != nil { + return &sqltypes.Result{ + Fields: qre.plan.Fields, + RowsAffected: 0, + InsertID: 0, + Rows: nil, + Extras: nil, + }, nil + } + break } if qre.transactionID != 0 { @@ -137,7 +148,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { return qre.execUpsertPK(conn) case planbuilder.PlanSet: return qre.txFetch(conn, qre.plan.FullQuery, qre.bindVars, nil, nil, true, true) - case planbuilder.PlanPassSelect, planbuilder.PlanSelectLock: + case planbuilder.PlanPassSelect, planbuilder.PlanSelectLock, planbuilder.PlanSelectImpossible: return qre.execDirect(conn) default: // handled above: @@ -151,7 +162,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { } } else { switch qre.plan.PlanID { - case planbuilder.PlanPassSelect: + case planbuilder.PlanPassSelect, planbuilder.PlanSelectImpossible: return qre.execSelect() case planbuilder.PlanSelectLock: return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "%s disallowed outside transaction", qre.plan.PlanID.String()) @@ -209,11 +220,23 @@ func (qre *QueryExecutor) Stream(callback func(*sqltypes.Result) error) error { return err } - conn, err := qre.getStreamConn() - if err != nil { - return err + // if we have a transaction id, let's use the txPool for this query + var conn *connpool.DBConn + if qre.transactionID != 0 { + txConn, err := qre.tsv.te.txPool.Get(qre.transactionID, "for streaming query") + if err != nil { + return err + } + defer txConn.Recycle() + conn = txConn.DBConn + } else { + dbConn, err := qre.getStreamConn() + if err != nil { + return err + } + defer dbConn.Recycle() + conn = dbConn } - defer conn.Recycle() qd := NewQueryDetail(qre.logStats.Ctx, conn) qre.tsv.qe.streamQList.Add(qd) @@ -326,6 +349,11 @@ func (qre *QueryExecutor) checkPermissions() error { return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "disallowed due to rule: %s", desc) } + // Skip ACL check for queries against the dummy dual table + if qre.plan.TableName().String() == "dual" { + return nil + } + // Skip the ACL check if the connecting user is an exempted superuser. // Necessary to whitelist e.g. direct vtworker access. 
if qre.tsv.qe.exemptACL != nil && qre.tsv.qe.exemptACL.IsMember(&querypb.VTGateCallerID{Username: username}) { diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go index 84d0d02f45..334c026a28 100644 --- a/go/vt/vttablet/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -1242,6 +1242,31 @@ func TestQueryExecutorPlanPassSelect(t *testing.T) { } } +func TestQueryExecutorPlanSelectImpossible(t *testing.T) { + db := setUpQueryExecutorTest(t) + defer db.Close() + query := "select * from test_table where 1 != 1" + want := &sqltypes.Result{ + Fields: getTestTableFields(), + } + db.AddQuery(query, want) + db.AddQuery("select * from test_table where 1 != 1", &sqltypes.Result{ + Fields: getTestTableFields(), + }) + ctx := context.Background() + tsv := newTestTabletServer(ctx, noFlags, db) + qre := newTestQueryExecutor(ctx, tsv, query, 0) + defer tsv.StopService() + checkPlanID(t, planbuilder.PlanSelectImpossible, qre.plan.PlanID) + got, err := qre.Execute() + if err != nil { + t.Fatalf("qre.Execute() = %v, want nil", err) + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("got: %v, want: %v", got, want) + } +} + func TestQueryExecutorPlanPassSelectSqlSelectLimit(t *testing.T) { db := setUpQueryExecutorTest(t) defer db.Close() @@ -1706,6 +1731,53 @@ func TestQueryExecutorTableAclNoPermission(t *testing.T) { } } +func TestQueryExecutorTableAclDualTableExempt(t *testing.T) { + aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) + tableacl.Register(aclName, &simpleacl.Factory{}) + tableacl.SetDefaultACL(aclName) + db := setUpQueryExecutorTest(t) + defer db.Close() + + username := "Sleve McDichael" + callerID := &querypb.VTGateCallerID{ + Username: username, + } + ctx := callerid.NewContext(context.Background(), nil, callerID) + + config := &tableaclpb.Config{ + TableGroups: []*tableaclpb.TableGroupSpec{}, + } + + if err := tableacl.InitFromProto(config); err != nil { + t.Fatalf("unable to load tableacl config, error: %v", err) + } + + // enable Config.StrictTableAcl + tsv := newTestTabletServer(ctx, enableStrictTableACL, db) + query := "select * from test_table where 1 != 1" + qre := newTestQueryExecutor(ctx, tsv, query, 0) + defer tsv.StopService() + checkPlanID(t, planbuilder.PlanSelectImpossible, qre.plan.PlanID) + // query should fail because nobody has read access to test_table + _, err := qre.Execute() + if code := vterrors.Code(err); code != vtrpcpb.Code_PERMISSION_DENIED { + t.Fatalf("qre.Execute: %v, want %v", code, vtrpcpb.Code_PERMISSION_DENIED) + } + wanterr := "table acl error" + if !strings.Contains(err.Error(), wanterr) { + t.Fatalf("qre.Execute: %v, want %s", err, wanterr) + } + + // table acl should be ignored when querying against dual table + query = "select @@version_comment from dual limit 1" + ctx = callerid.NewContext(context.Background(), nil, callerID) + qre = newTestQueryExecutor(ctx, tsv, query, 0) + _, err = qre.Execute() + if err != nil { + t.Fatalf("qre.Execute: %v, want: nil", err) + } +} + func TestQueryExecutorTableAclExemptACL(t *testing.T) { aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) tableacl.Register(aclName, &simpleacl.Factory{}) @@ -2112,6 +2184,20 @@ func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[s {sqltypes.NewVarBinary("0")}, }, }, + "select @@version_comment from dual where 1 != 1": { + Fields: []*querypb.Field{{ + Type: sqltypes.VarChar, + }}, + }, + "select @@version_comment from dual limit 1": { + 
Fields: []*querypb.Field{{ + Type: sqltypes.VarChar, + }}, + RowsAffected: 1, + Rows: [][]sqltypes.Value{ + {sqltypes.NewVarBinary("fakedb server")}, + }, + }, "show variables like 'binlog_format'": { Fields: []*querypb.Field{{ Type: sqltypes.VarChar, diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index e0f59421bd..630d117d94 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -85,6 +85,14 @@ func NewEngine(checker connpool.MySQLChecker, config tabletenv.TabletConfig) *En http.Handle("/debug/schema", se) http.HandleFunc("/schemaz", func(w http.ResponseWriter, r *http.Request) { + // Ensure schema engine is Open. If vttablet came up in a non_serving role, + // the schema engine may not have been initialized. + err := se.Open() + if err != nil { + w.Write([]byte(err.Error())) + return + } + schemazHandler(se.GetSchema(), w, r) }) }) @@ -483,6 +491,14 @@ func (se *Engine) ServeHTTP(response http.ResponseWriter, request *http.Request) } func (se *Engine) handleHTTPSchema(response http.ResponseWriter, request *http.Request) { + // Ensure schema engine is Open. If vttablet came up in a non_serving role, + // the schema engine may not have been initialized. + err := se.Open() + if err != nil { + response.Write([]byte(err.Error())) + return + } + response.Header().Set("Content-Type", "application/json; charset=utf-8") b, err := json.MarshalIndent(se.GetSchema(), "", " ") if err != nil { diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index cc6eac7df9..f0fa5e9d2e 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -99,7 +99,7 @@ func init() { flag.BoolVar(&Config.HeartbeatEnable, "heartbeat_enable", DefaultQsConfig.HeartbeatEnable, "If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.") flag.DurationVar(&Config.HeartbeatInterval, "heartbeat_interval", DefaultQsConfig.HeartbeatInterval, "How frequently to read and write replication heartbeat.") - flag.BoolVar(&Config.EnforceStrictTransTables, "enforce_strict_trans_tables", DefaultQsConfig.EnforceStrictTransTables, "If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database.") + flag.BoolVar(&Config.EnforceStrictTransTables, "enforce_strict_trans_tables", DefaultQsConfig.EnforceStrictTransTables, "If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. 
Otherwise MySQL may alter your supplied values before saving them to the database.")
 	flag.BoolVar(&Config.EnableConsolidator, "enable-consolidator", DefaultQsConfig.EnableConsolidator, "This option enables the query consolidator.")
 }
diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go
index e698969816..2b2a4ad6a9 100644
--- a/go/vt/vttablet/tabletserver/tabletserver.go
+++ b/go/vt/vttablet/tabletserver/tabletserver.go
@@ -46,6 +46,7 @@ import (
 	"vitess.io/vitess/go/vt/log"
 	"vitess.io/vitess/go/vt/logutil"
 	"vitess.io/vitess/go/vt/sqlparser"
+	"vitess.io/vitess/go/vt/srvtopo"
 	"vitess.io/vitess/go/vt/tableacl"
 	"vitess.io/vitess/go/vt/topo"
 	"vitess.io/vitess/go/vt/vterrors"
@@ -60,7 +61,9 @@ import (
 	"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
 	"vitess.io/vitess/go/vt/vttablet/tabletserver/txserializer"
 	"vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler"
+	"vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer"

+	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
 	querypb "vitess.io/vitess/go/vt/proto/query"
 	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
 	vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
@@ -155,7 +158,6 @@ type TabletServer struct {
 	target    querypb.Target
 	alsoAllow []topodatapb.TabletType
 	requests  sync.WaitGroup
-	beginRequests sync.WaitGroup

 	// The following variables should be initialized only once
 	// before starting the tabletserver.
@@ -166,10 +168,12 @@ type TabletServer struct {
 	se       *schema.Engine
 	qe       *QueryEngine
 	te       *TxEngine
+	teCtrl   TxPoolController
 	hw       *heartbeat.Writer
 	hr       *heartbeat.Reader
 	messager *messager.Engine
 	watcher  *ReplicationWatcher
+	vstreamer *vstreamer.Engine
 	updateStreamList *binlog.StreamList

 	// checkMySQLThrottler is used to throttle the number of
@@ -208,7 +212,45 @@ func NewServer(topoServer *topo.Server, alias topodatapb.TabletAlias) *TabletSer
 	return NewTabletServer(tabletenv.Config, topoServer, alias)
 }

+// TxPoolController is how the tablet server interacts with the tx-pool.
+// It is responsible for keeping its own state - knowing when different types
+// of transactions are allowed, and how to do state transitions.
+type TxPoolController interface {
+	// Stop will stop accepting any new transactions. Transactions are immediately aborted.
+	Stop() error
+
+	// AcceptReadWrite will start accepting all transactions. If transitioning from RO mode,
+	// transactions might need to be rolled back before new transactions can be accepted.
+	AcceptReadWrite() error
+
+	// AcceptReadOnly will start accepting read-only transactions, but not full read and write
+	// transactions. If the engine is currently accepting full read and write transactions,
+	// they need to be given a chance to clean up before they are forcefully rolled back.
+	AcceptReadOnly() error
+
+	// InitDBConfig must be called before Init.
+	InitDBConfig(dbcfgs *dbconfigs.DBConfigs)
+
+	// Init must be called once when vttablet starts for setting
+	// up the metadata tables.
+	Init() error
+
+	// StopGently will change the state to NotServing but will first wait for transactions to wrap up.
+	StopGently()
+
+	// Begin begins a transaction, and returns the associated transaction id.
+	// Subsequent statements can access the connection through the transaction id.
+	Begin(ctx context.Context, options *querypb.ExecuteOptions) (int64, error)
+
+	// Commit commits the specified transaction.
+	Commit(ctx context.Context, transactionID int64, mc messageCommitter) error
+
+	// Rollback rolls back the specified transaction.
+ Rollback(ctx context.Context, transactionID int64) error +} + var tsOnce sync.Once +var srvTopoServer srvtopo.Server // NewTabletServerWithNilTopoServer is typically used in tests that // don't need a topoServer member. @@ -233,6 +275,7 @@ func NewTabletServer(config tabletenv.TabletConfig, topoServer *topo.Server, ali tsv.se = schema.NewEngine(tsv, config) tsv.qe = NewQueryEngine(tsv, tsv.se, config) tsv.te = NewTxEngine(tsv, config) + tsv.teCtrl = tsv.te tsv.hw = heartbeat.NewWriter(tsv, alias, config) tsv.hr = heartbeat.NewReader(tsv, config) tsv.txThrottler = txthrottler.CreateTxThrottlerFromTabletConfig(topoServer) @@ -243,6 +286,7 @@ func NewTabletServer(config tabletenv.TabletConfig, topoServer *topo.Server, ali // So that vtcombo doesn't even call it once, on the first tablet. // And we can remove the tsOnce variable. tsOnce.Do(func() { + srvTopoServer = srvtopo.NewResilientServer(topoServer, "TabletSrvTopo") stats.NewGaugeFunc("TabletState", "Tablet server state", func() int64 { tsv.mu.Lock() state := tsv.state @@ -260,6 +304,8 @@ func NewTabletServer(config tabletenv.TabletConfig, topoServer *topo.Server, ali stats.NewGaugeDurationFunc("QueryPoolTimeout", "Tablet server timeout to get a connection from the query pool", tsv.qe.connTimeout.Get) stats.NewGaugeDurationFunc("BeginTimeout", "Tablet server begin timeout", tsv.BeginTimeout.Get) }) + // TODO(sougou): move this up once the stats naming problem is fixed. + tsv.vstreamer = vstreamer.NewEngine(srvTopoServer, tsv.se) return tsv } @@ -343,11 +389,12 @@ func (tsv *TabletServer) InitDBConfig(target querypb.Target, dbcfgs *dbconfigs.D tsv.se.InitDBConfig(tsv.dbconfigs) tsv.qe.InitDBConfig(tsv.dbconfigs) - tsv.te.InitDBConfig(tsv.dbconfigs) + tsv.teCtrl.InitDBConfig(tsv.dbconfigs) tsv.hw.InitDBConfig(tsv.dbconfigs) tsv.hr.InitDBConfig(tsv.dbconfigs) tsv.messager.InitDBConfig(tsv.dbconfigs) tsv.watcher.InitDBConfig(tsv.dbconfigs) + tsv.vstreamer.InitDBConfig(tsv.dbconfigs) return nil } @@ -374,8 +421,7 @@ func (tsv *TabletServer) InitACL(tableACLConfigFile string, enforceTableACLConfi sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGHUP) go func() { - for { - <-sigChan + for range sigChan { tsv.initACL(tableACLConfigFile, enforceTableACLConfig) } }() @@ -505,7 +551,7 @@ func (tsv *TabletServer) fullStart() (err error) { if err := tsv.qe.Open(); err != nil { return err } - if err := tsv.te.Init(); err != nil { + if err := tsv.teCtrl.Init(); err != nil { return err } if err := tsv.hw.Init(tsv.target); err != nil { @@ -513,32 +559,30 @@ func (tsv *TabletServer) fullStart() (err error) { } tsv.hr.Init(tsv.target) tsv.updateStreamList.Init() + tsv.vstreamer.Open(tsv.target.Keyspace, tsv.alias.Cell) return tsv.serveNewType() } func (tsv *TabletServer) serveNewType() (err error) { + // Wait for in-flight transactional requests to complete + // before rolling back everything. In this state new + // transactional requests are not allowed. So, we can + // be sure that the tx pool won't change after the wait. if tsv.target.TabletType == topodatapb.TabletType_MASTER { + tsv.teCtrl.AcceptReadWrite() if err := tsv.txThrottler.Open(tsv.target.Keyspace, tsv.target.Shard); err != nil { return err } tsv.watcher.Close() - tsv.te.Open() tsv.messager.Open() tsv.hr.Close() tsv.hw.Open() } else { + tsv.teCtrl.AcceptReadOnly() tsv.messager.Close() tsv.hr.Open() tsv.hw.Close() - - // Wait for in-flight transactional requests to complete - // before rolling back everything. In this state new - // transactional requests are not allowed. 
So, we can - // be sure that the tx pool won't change after the wait. - tsv.beginRequests.Wait() - tsv.te.Close(true) tsv.watcher.Open() - tsv.txThrottler.Close() // Reset the sequences. tsv.se.MakeNonMaster() @@ -573,6 +617,7 @@ func (tsv *TabletServer) StopService() { log.Infof("Executing complete shutdown.") tsv.waitForShutdown() + tsv.vstreamer.Close() tsv.qe.Close() tsv.se.Close() tsv.hw.Close() @@ -587,9 +632,8 @@ func (tsv *TabletServer) waitForShutdown() { // we have the assurance that only non-begin transactional calls // will be allowed. They will enable the conclusion of outstanding // transactions. - tsv.beginRequests.Wait() tsv.messager.Close() - tsv.te.Close(false) + tsv.teCtrl.StopGently() tsv.qe.streamQList.TerminateAll() tsv.updateStreamList.Stop() tsv.watcher.Close() @@ -603,7 +647,7 @@ func (tsv *TabletServer) closeAll() { tsv.messager.Close() tsv.hr.Close() tsv.hw.Close() - tsv.te.Close(true) + tsv.teCtrl.StopGently() tsv.watcher.Close() tsv.updateStreamList.Stop() tsv.qe.Close() @@ -735,7 +779,7 @@ func (tsv *TabletServer) Begin(ctx context.Context, target *querypb.Target, opti // TODO(erez): I think this should be RESOURCE_EXHAUSTED. return vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "Transaction throttled") } - transactionID, err = tsv.te.txPool.Begin(ctx, options) + transactionID, err = tsv.teCtrl.Begin(ctx, options) logStats.TransactionID = transactionID return err }, @@ -752,7 +796,7 @@ func (tsv *TabletServer) Commit(ctx context.Context, target *querypb.Target, tra func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tabletenv.QueryStats.Record("COMMIT", time.Now()) logStats.TransactionID = transactionID - return tsv.te.txPool.Commit(ctx, transactionID, tsv.messager) + return tsv.teCtrl.Commit(ctx, transactionID, tsv.messager) }, ) } @@ -766,7 +810,7 @@ func (tsv *TabletServer) Rollback(ctx context.Context, target *querypb.Target, t func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tabletenv.QueryStats.Record("ROLLBACK", time.Now()) logStats.TransactionID = transactionID - return tsv.te.txPool.Rollback(ctx, transactionID) + return tsv.teCtrl.Rollback(ctx, transactionID) }, ) } @@ -963,7 +1007,7 @@ func (tsv *TabletServer) Execute(ctx context.Context, target *querypb.Target, sq // StreamExecute executes the query and streams the result. // The first QueryResult will have Fields set (and Rows nil). // The subsequent QueryResult will have Rows set (and Fields nil). 
-func (tsv *TabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (err error) { +func (tsv *TabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (err error) { return tsv.execRequest( ctx, 0, "StreamExecute", sql, bindVariables, @@ -981,6 +1025,7 @@ func (tsv *TabletServer) StreamExecute(ctx context.Context, target *querypb.Targ query: query, marginComments: comments, bindVars: bindVariables, + transactionID: transactionID, options: options, plan: plan, ctx: ctx, @@ -1030,7 +1075,7 @@ func (tsv *TabletServer) ExecuteBatch(ctx context.Context, target *querypb.Targe return nil, err } defer tsv.endRequest(false) - defer tsv.handlePanicAndSendLogStats("batch", nil, &err, nil) + defer tsv.handlePanicAndSendLogStats("batch", nil, nil) if asTransaction { transactionID, err = tsv.Begin(ctx, target, options) @@ -1233,7 +1278,7 @@ func (tsv *TabletServer) execDML(ctx context.Context, target *querypb.Target, qu return 0, err } defer tsv.endRequest(true) - defer tsv.handlePanicAndSendLogStats("ack", nil, &err, nil) + defer tsv.handlePanicAndSendLogStats("ack", nil, nil) query, bv, err := queryGenerator() if err != nil { @@ -1263,6 +1308,40 @@ func (tsv *TabletServer) execDML(ctx context.Context, target *querypb.Target, qu return int64(qr.RowsAffected), nil } +// VStream streams VReplication events. +func (tsv *TabletServer) VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { + // This code is partially duplicated from startRequest. This is because + // is allowed even if the tablet is in non-serving state. + err := func() error { + tsv.mu.Lock() + defer tsv.mu.Unlock() + + if target != nil { + // a valid target needs to be used + switch { + case target.Keyspace != tsv.target.Keyspace: + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid keyspace %v", target.Keyspace) + case target.Shard != tsv.target.Shard: + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid shard %v", target.Shard) + case target.TabletType != tsv.target.TabletType: + for _, otherType := range tsv.alsoAllow { + if target.TabletType == otherType { + return nil + } + } + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow) + } + } else if !tabletenv.IsLocalContext(ctx) { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No target") + } + return nil + }() + if err != nil { + return err + } + return tsv.vstreamer.Stream(ctx, startPos, filter, send) +} + // SplitQuery splits a query + bind variables into smaller queries that return a // subset of rows from the original query. This is the new version that supports multiple // split columns and multiple split algortihms. 
@@ -1291,7 +1370,6 @@ func (tsv *TabletServer) SplitQuery( if err := validateSplitQueryParameters( target, query, - splitColumns, splitCount, numRowsPerQueryPart, algorithm, @@ -1327,7 +1405,7 @@ func (tsv *TabletServer) SplitQuery( return splits, err } -// execRequest performs verfications, sets up the necessary environments +// execRequest performs verifications, sets up the necessary environments // and calls the supplied function for executing the request. func (tsv *TabletServer) execRequest( ctx context.Context, timeout time.Duration, @@ -1339,7 +1417,7 @@ func (tsv *TabletServer) execRequest( logStats.Target = target logStats.OriginalSQL = sql logStats.BindVariables = bindVariables - defer tsv.handlePanicAndSendLogStats(sql, bindVariables, &err, logStats) + defer tsv.handlePanicAndSendLogStats(sql, bindVariables, logStats) if err = tsv.startRequest(ctx, target, isBegin, allowOnShutdown); err != nil { return err } @@ -1360,7 +1438,6 @@ func (tsv *TabletServer) execRequest( func (tsv *TabletServer) handlePanicAndSendLogStats( sql string, bindVariables map[string]*querypb.BindVariable, - err *error, logStats *tabletenv.LogStats, ) { if x := recover(); x != nil { @@ -1371,7 +1448,6 @@ func (tsv *TabletServer) handlePanicAndSendLogStats( tb.Stack(4) /* Skip the last 4 boiler-plate frames. */) log.Errorf(errorMessage) terr := vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "%s", errorMessage) - *err = terr tabletenv.InternalErrors.Add("Panic", 1) if logStats != nil { logStats.Error = terr @@ -1390,7 +1466,7 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin return nil } - errCode := tsv.convertErrorCode(err) + errCode := convertErrorCode(err) tabletenv.ErrorStats.Add(errCode.String(), 1) callerID := "" @@ -1490,7 +1566,7 @@ func truncateSQLAndBindVars(sql string, bindVariables map[string]*querypb.BindVa return fmt.Sprintf("Sql: %q, %s", truncatedQuery, bv) } -func (tsv *TabletServer) convertErrorCode(err error) vtrpcpb.Code { +func convertErrorCode(err error) vtrpcpb.Code { errCode := vterrors.Code(err) sqlErr, ok := err.(*mysql.SQLError) if !ok { @@ -1569,7 +1645,6 @@ func (tsv *TabletServer) convertErrorCode(err error) vtrpcpb.Code { func validateSplitQueryParameters( target *querypb.Target, query *querypb.BoundQuery, - splitColumns []string, splitCount int64, numRowsPerQueryPart int64, algorithm querypb.SplitQueryRequest_Algorithm, @@ -1880,8 +1955,6 @@ verifyTarget: return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid keyspace %v", target.Keyspace) case target.Shard != tsv.target.Shard: return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid shard %v", target.Shard) - case isBegin && tsv.target.TabletType != topodatapb.TabletType_MASTER: - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "transactional statement disallowed on non-master tablet: %v", tsv.target.TabletType) case target.TabletType != tsv.target.TabletType: for _, otherType := range tsv.alsoAllow { if target.TabletType == otherType { @@ -1896,20 +1969,12 @@ verifyTarget: ok: tsv.requests.Add(1) - // If it's a begin, we should make the shutdown code - // wait for the call to end before it waits for tx empty. - if isBegin { - tsv.beginRequests.Add(1) - } return nil } // endRequest unregisters the current request (a waitgroup) as done. 
func (tsv *TabletServer) endRequest(isBegin bool) { tsv.requests.Done() - if isBegin { - tsv.beginRequests.Done() - } } func (tsv *TabletServer) registerDebugHealthHandler() { diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index b2ba5a51e5..4151045a98 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -17,14 +17,12 @@ limitations under the License. package tabletserver import ( - "expvar" "fmt" "io" "io/ioutil" "math/rand" "os" "reflect" - "strconv" "strings" "sync" "syscall" @@ -429,29 +427,6 @@ func TestTabletServerCheckMysqlFailInvalidConn(t *testing.T) { } } -func TestTabletServerCheckMysqlInUnintialized(t *testing.T) { - testUtils := newTestUtils() - config := testUtils.newQueryServiceConfig() - tsv := NewTabletServerWithNilTopoServer(config) - // TabletServer start request fail because we are in StateNotConnected; - // however, isMySQLReachable should return true. Here, we always assume - // MySQL is healthy unless we've verified it is not. - if !tsv.isMySQLReachable() { - t.Fatalf("isMySQLReachable should return true") - } - tabletState := expvar.Get("TabletState") - if tabletState == nil { - t.Fatal("TabletState should be exposed") - } - varzState, err := strconv.Atoi(tabletState.String()) - if err != nil { - t.Fatalf("invalid state reported by expvar, should be a valid state code, but got: %s", tabletState.String()) - } - if varzState != StateNotConnected { - t.Fatalf("queryservice should be in %d state, but exposed varz reports: %d", StateNotConnected, varzState) - } -} - func TestTabletServerReconnect(t *testing.T) { db := setUpTabletServerTest(t) defer db.Close() @@ -575,19 +550,6 @@ func TestTabletServerTarget(t *testing.T) { t.Errorf("err: %v, must contain %s", err, want) } - // Disallow tx statements if non-master. - tsv.SetServingType(topodatapb.TabletType_REPLICA, true, nil) - _, err = tsv.Begin(ctx, &target1, nil) - want = "transactional statement disallowed on non-master tablet" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must contain %s", err, want) - } - err = tsv.Commit(ctx, &target1, 1) - want = "invalid tablet type: MASTER" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must contain %s", err, want) - } - // Disallow all if service is stopped. tsv.StopService() _, err = tsv.Execute(ctx, &target1, "select * from test_table limit 1000", nil, 0, nil) @@ -597,46 +559,50 @@ func TestTabletServerTarget(t *testing.T) { } } -func TestTabletServerStopWithPrepare(t *testing.T) { - // Reuse code from tx_executor_test. 
- _, tsv, db := newTestTxExecutor(t) +func TestBeginOnReplica(t *testing.T) { + db := setUpTabletServerTest(t) + db.AddQuery("set transaction isolation level REPEATABLE READ", &sqltypes.Result{}) + db.AddQuery("start transaction with consistent snapshot, read only", &sqltypes.Result{}) defer db.Close() + testUtils := newTestUtils() + config := testUtils.newQueryServiceConfig() + tsv := NewTabletServerWithNilTopoServer(config) + dbcfgs := testUtils.newDBConfigs(db) + target1 := querypb.Target{ + Keyspace: "test_keyspace", + Shard: "test_shard", + TabletType: topodatapb.TabletType_REPLICA, + } + err := tsv.StartService(target1, dbcfgs) + if err != nil { + t.Fatalf("StartService failed: %v", err) + } + defer tsv.StopService() + + tsv.SetServingType(topodatapb.TabletType_REPLICA, true, nil) ctx := context.Background() - target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} - transactionID, err := tsv.Begin(ctx, &target, nil) - if err != nil { - t.Error(err) + options := querypb.ExecuteOptions{ + TransactionIsolation: querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY, } - if _, err := tsv.Execute(ctx, &target, "update test_table set name = 2 where pk = 1", nil, transactionID, nil); err != nil { - t.Error(err) - } - if err = tsv.Prepare(ctx, &target, transactionID, "aa"); err != nil { - t.Error(err) - } - ch := make(chan bool) - go func() { - tsv.StopService() - ch <- true - }() + txID, err := tsv.Begin(ctx, &target1, &options) - // StopService must wait for the prepared transaction to resolve. - select { - case <-ch: - t.Fatal("ch should not fire") - case <-time.After(10 * time.Millisecond): - } - if len(tsv.te.preparedPool.conns) != 1 { - t.Errorf("len(tsv.te.preparedPool.conns): %d, want 1", len(tsv.te.preparedPool.conns)) + if err != nil { + t.Errorf("err: %v, failed to create read only tx on replica", err) } - // RollbackPrepared will allow StopService to complete. - err = tsv.RollbackPrepared(ctx, &target, "aa", 0) + err = tsv.Rollback(ctx, &target1, txID) if err != nil { - t.Error(err) + t.Errorf("err: %v, failed to rollback read only tx", err) } - <-ch - if len(tsv.te.preparedPool.conns) != 0 { - t.Errorf("len(tsv.te.preparedPool.conns): %d, want 0", len(tsv.te.preparedPool.conns)) + + // test that RW transactions are refused + options = querypb.ExecuteOptions{ + TransactionIsolation: querypb.ExecuteOptions_DEFAULT, + } + _, err = tsv.Begin(ctx, &target1, &options) + + if err == nil { + t.Error("expected write tx to be refused") } } @@ -687,20 +653,28 @@ func TestTabletServerMasterToReplica(t *testing.T) { <-ch } -func TestTabletServerReplicaToMaster(t *testing.T) { +func TestTabletServerRedoLogIsKeptBetweenRestarts(t *testing.T) { // Reuse code from tx_executor_test. 
_, tsv, db := newTestTxExecutor(t) defer db.Close() defer tsv.StopService() tsv.SetServingType(topodatapb.TabletType_REPLICA, true, nil) + + turnOnTxEngine := func() { + tsv.SetServingType(topodatapb.TabletType_MASTER, true, nil) + } + turnOffTxEngine := func() { + tsv.SetServingType(topodatapb.TabletType_REPLICA, true, nil) + } + tpc := tsv.te.twoPC db.AddQuery(tpc.readAllRedo, &sqltypes.Result{}) - tsv.SetServingType(topodatapb.TabletType_MASTER, true, nil) + turnOnTxEngine() if len(tsv.te.preparedPool.conns) != 0 { t.Errorf("len(tsv.te.preparedPool.conns): %d, want 0", len(tsv.te.preparedPool.conns)) } - tsv.SetServingType(topodatapb.TabletType_REPLICA, true, nil) + turnOffTxEngine() db.AddQuery(tpc.readAllRedo, &sqltypes.Result{ Fields: []*querypb.Field{ @@ -716,7 +690,7 @@ func TestTabletServerReplicaToMaster(t *testing.T) { sqltypes.NewVarBinary("update test_table set name = 2 where pk in (1) /* _stream test_table (pk ) (1 ); */"), }}, }) - tsv.SetServingType(topodatapb.TabletType_MASTER, true, nil) + turnOnTxEngine() if len(tsv.te.preparedPool.conns) != 1 { t.Errorf("len(tsv.te.preparedPool.conns): %d, want 1", len(tsv.te.preparedPool.conns)) } @@ -725,7 +699,10 @@ func TestTabletServerReplicaToMaster(t *testing.T) { if !reflect.DeepEqual(got, want) { t.Errorf("Prepared queries: %v, want %v", got, want) } - tsv.SetServingType(topodatapb.TabletType_REPLICA, true, nil) + turnOffTxEngine() + if v := len(tsv.te.preparedPool.conns); v != 0 { + t.Errorf("len(tsv.te.preparedPool.conns): %d, want 0", v) + } tsv.te.txPool.lastID.Set(1) // Ensure we continue past errors. @@ -753,7 +730,7 @@ func TestTabletServerReplicaToMaster(t *testing.T) { sqltypes.NewVarBinary("unused"), }}, }) - tsv.SetServingType(topodatapb.TabletType_MASTER, true, nil) + turnOnTxEngine() if len(tsv.te.preparedPool.conns) != 1 { t.Errorf("len(tsv.te.preparedPool.conns): %d, want 1", len(tsv.te.preparedPool.conns)) } @@ -770,7 +747,10 @@ func TestTabletServerReplicaToMaster(t *testing.T) { if v := tsv.te.txPool.lastID.Get(); v != 20 { t.Errorf("tsv.te.txPool.lastID.Get(): %d, want 20", v) } - tsv.SetServingType(topodatapb.TabletType_REPLICA, true, nil) + turnOffTxEngine() + if v := len(tsv.te.preparedPool.conns); v != 0 { + t.Errorf("len(tsv.te.preparedPool.conns): %d, want 0", v) + } } func TestTabletServerCreateTransaction(t *testing.T) { @@ -1182,7 +1162,7 @@ func TestTabletServerStreamExecute(t *testing.T) { defer tsv.StopService() ctx := context.Background() callback := func(*sqltypes.Result) error { return nil } - if err := tsv.StreamExecute(ctx, &target, executeSQL, nil, nil, callback); err != nil { + if err := tsv.StreamExecute(ctx, &target, executeSQL, nil, 0, nil, callback); err != nil { t.Fatalf("TabletServer.StreamExecute should success: %s, but get error: %v", executeSQL, err) } @@ -2512,11 +2492,10 @@ func TestTabletServerSplitQueryEqualSplitsOnStringColumn(t *testing.T) { func TestHandleExecUnknownError(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "TestHandleExecError") - var err error testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() tsv := NewTabletServerWithNilTopoServer(config) - defer tsv.handlePanicAndSendLogStats("select * from test_table", nil, &err, logStats) + defer tsv.handlePanicAndSendLogStats("select * from test_table", nil, logStats) panic("unknown exec error") } diff --git a/go/vt/vttablet/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go index 17f0405fb8..8ecd19b653 100644 --- 
a/go/vt/vttablet/tabletserver/tx_engine.go +++ b/go/vt/vttablet/tabletserver/tx_engine.go @@ -17,9 +17,13 @@ limitations under the License. package tabletserver import ( + "fmt" "sync" "time" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "golang.org/x/net/context" "vitess.io/vitess/go/timer" @@ -35,15 +39,58 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) -// TxEngine handles transactions. +type txEngineState int + +// The TxEngine can be in any of these states +const ( + NotServing txEngineState = iota + Transitioning + AcceptingReadAndWrite + AcceptingReadOnly +) + +func (state txEngineState) String() string { + names := [...]string{ + "NotServing", + "Transitioning", + "AcceptReadWrite", + "AcceptingReadOnly"} + + if state < NotServing || state > AcceptingReadOnly { + return fmt.Sprintf("Unknown - %d", int(state)) + } + + return names[state] +} + +// TxEngine is responsible for handling the tx-pool and keeping read-write, read-only or not-serving +// states. It will start and shut down the underlying tx-pool as required. +// It does this in a concurrently safe way. type TxEngine struct { + // the following four fields are interconnected. `state` and `nextState` should be protected by the + // `stateLock` + // + // `nextState` is used when state is Transitioning. This means that in order to change the state of + // the transaction engine, we had to close transactions. `nextState` is the state we'll end up in + // once the transactions are closed + // while transitioning, `transitionSignal` will contain an open channel. Once the transition is + // over, the channel is closed to signal to any waiting goroutines that the state change is done. + stateLock sync.Mutex + state txEngineState + nextState txEngineState + transitionSignal chan struct{} + + // beginRequests is used to make sure that we do not make a state + // transition while creating new transactions + beginRequests sync.WaitGroup + dbconfigs *dbconfigs.DBConfigs - isOpen, twopcEnabled bool - shutdownGracePeriod time.Duration - coordinatorAddress string - abandonAge time.Duration - ticks *timer.Timer + twopcEnabled bool + shutdownGracePeriod time.Duration + coordinatorAddress string + abandonAge time.Duration + ticks *timer.Timer txPool *TxPool preparedPool *TxPreparedPool @@ -103,9 +150,205 @@ func NewTxEngine(checker connpool.MySQLChecker, config tabletenv.TabletConfig) * checker, ) te.twoPC = NewTwoPC(readPool) + te.transitionSignal = make(chan struct{}) + // By immediately closing this channel, all state changes can simply be made blocking by issuing the + // state change desired, and then selecting on this channel. It will contain an open channel while + // transitioning. + close(te.transitionSignal) + te.nextState = -1 + te.state = NotServing return te } +// Stop will stop accepting any new transactions. Transactions are immediately aborted. +func (te *TxEngine) Stop() error { + te.beginRequests.Wait() + te.stateLock.Lock() + + switch te.state { + case NotServing: + // Nothing to do. 
We are already stopped or stopping + te.stateLock.Unlock() + return nil + + case AcceptingReadAndWrite: + return te.transitionTo(NotServing) + + case AcceptingReadOnly: + // We are not master, so it's safe to kill all read-only transactions + te.close(true) + te.state = NotServing + te.stateLock.Unlock() + return nil + + case Transitioning: + te.nextState = NotServing + te.stateLock.Unlock() + te.blockUntilEndOfTransition() + return nil + + default: + te.stateLock.Unlock() + return te.unknownStateError() + } +} + +// AcceptReadWrite will start accepting all transactions. +// If transitioning from RO mode, transactions might need to be +// rolled back before new transactions can be accepted. +func (te *TxEngine) AcceptReadWrite() error { + te.beginRequests.Wait() + te.stateLock.Lock() + + switch te.state { + case AcceptingReadAndWrite: + // Nothing to do + te.stateLock.Unlock() + return nil + + case NotServing: + te.state = AcceptingReadAndWrite + te.open() + te.stateLock.Unlock() + return nil + + case Transitioning: + te.nextState = AcceptingReadAndWrite + te.stateLock.Unlock() + te.blockUntilEndOfTransition() + return nil + + case AcceptingReadOnly: + // We need to restart the tx-pool to make sure we handle 2PC correctly + te.close(true) + te.state = AcceptingReadAndWrite + te.open() + te.stateLock.Unlock() + return nil + + default: + return te.unknownStateError() + } +} + +// AcceptReadOnly will start accepting read-only transactions, but not full read and write transactions. +// If the engine is currently accepting full read and write transactions, they need to +// be rolled back. +func (te *TxEngine) AcceptReadOnly() error { + te.beginRequests.Wait() + te.stateLock.Lock() + switch te.state { + case AcceptingReadOnly: + // Nothing to do + te.stateLock.Unlock() + return nil + + case NotServing: + te.state = AcceptingReadOnly + te.open() + te.stateLock.Unlock() + return nil + + case AcceptingReadAndWrite: + return te.transitionTo(AcceptingReadOnly) + + case Transitioning: + te.nextState = AcceptingReadOnly + te.stateLock.Unlock() + te.blockUntilEndOfTransition() + return nil + + default: + te.stateLock.Unlock() + return te.unknownStateError() + } +} + +// Begin begins a transaction, and returns the associated transaction id. +// Subsequent statements can access the connection through the transaction id. +func (te *TxEngine) Begin(ctx context.Context, options *querypb.ExecuteOptions) (int64, error) { + te.stateLock.Lock() + + canOpenTransactions := te.state == AcceptingReadOnly || te.state == AcceptingReadAndWrite + if !canOpenTransactions { + // We are not in a state where we can start new transactions. Abort. + te.stateLock.Unlock() + return 0, vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "tx engine can't accept new transactions in state %v", te.state) + } + + isWriteTransaction := options == nil || options.TransactionIsolation != querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY + if te.state == AcceptingReadOnly && isWriteTransaction { + te.stateLock.Unlock() + return 0, vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "tx engine can only accept read-only transactions in current state") + } + + // By calling Add() on beginRequests, we block others from initiating state + // changes until we have finished adding this transaction + te.beginRequests.Add(1) + te.stateLock.Unlock() + + defer te.beginRequests.Done() + return te.txPool.Begin(ctx, options) +} + +// Commit commits the specified transaction. 
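As an aside on the transitionSignal idiom used by Stop, AcceptReadWrite and AcceptReadOnly above: the channel is kept closed whenever no transition is in progress, so "wait for the current transition" is just a receive that returns immediately; while a transition runs, a fresh open channel is installed, and close() at the end releases every waiter at once. Below is a minimal, self-contained sketch of that pattern only; the gate type and its names are hypothetical and are not part of this patch.

package main

import (
	"fmt"
	"sync"
)

// gate is an illustrative stand-in for the transitionSignal/stateLock pair.
type gate struct {
	mu     sync.Mutex
	signal chan struct{}
}

// newGate starts in the "no transition in progress" state: the channel is
// already closed, so Wait returns immediately.
func newGate() *gate {
	g := &gate{signal: make(chan struct{})}
	close(g.signal)
	return g
}

// begin installs a fresh open channel; Wait now blocks until end is called.
func (g *gate) begin() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.signal = make(chan struct{})
}

// end closes the channel, releasing every goroutine blocked in Wait.
func (g *gate) end() {
	g.mu.Lock()
	defer g.mu.Unlock()
	close(g.signal)
}

// Wait blocks while a transition is in progress and returns once it is done.
func (g *gate) Wait() {
	g.mu.Lock()
	ch := g.signal
	g.mu.Unlock()
	<-ch
}

func main() {
	g := newGate()
	g.Wait() // returns immediately: the channel starts out closed

	g.begin()
	done := make(chan struct{})
	go func() {
		g.Wait() // blocks until end() closes the channel
		fmt.Println("transition finished")
		close(done)
	}()
	g.end()
	<-done
}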
+func (te *TxEngine) Commit(ctx context.Context, transactionID int64, mc messageCommitter) error { + return te.txPool.Commit(ctx, transactionID, mc) +} + +// Rollback rolls back the specified transaction. +func (te *TxEngine) Rollback(ctx context.Context, transactionID int64) error { + return te.txPool.Rollback(ctx, transactionID) +} + +func (te *TxEngine) unknownStateError() error { + return vterrors.Errorf(vtrpc.Code_INTERNAL, "unknown state %v", te.state) +} + +func (te *TxEngine) blockUntilEndOfTransition() error { + select { + case <-te.transitionSignal: + return nil + } +} + +func (te *TxEngine) transitionTo(nextState txEngineState) error { + te.state = Transitioning + te.nextState = nextState + te.transitionSignal = make(chan struct{}) + te.stateLock.Unlock() + + // We do this outside the lock so others can see our state while we close up waiting transactions + te.close(true) + + te.stateLock.Lock() + defer func() { + // we use a lambda to make it clear in which order things need to happen + te.stateLock.Unlock() + close(te.transitionSignal) + }() + + if te.state != Transitioning { + return vterrors.Errorf(vtrpc.Code_INTERNAL, "this should never happen. the goroutine starting the transition should also finish it") + } + + // Once we reach this point, it's as if our state is NotServing, + // and we need to decide what the next step is + switch te.nextState { + case AcceptingReadAndWrite, AcceptingReadOnly: + te.state = te.nextState + te.open() + case NotServing: + te.state = NotServing + case Transitioning: + return vterrors.Errorf(vtrpc.Code_INTERNAL, "this should never happen. nextState cannot be transitioning") + } + + te.nextState = -1 + + return nil +} + // InitDBConfig must be called before Init. func (te *TxEngine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) { te.dbconfigs = dbcfgs @@ -120,28 +363,32 @@ func (te *TxEngine) Init() error { return nil } -// Open opens the TxEngine. If 2pc is enabled, it restores +// open opens the TxEngine. If 2pc is enabled, it restores // all previously prepared transactions from the redo log. -func (te *TxEngine) Open() { - if te.isOpen { - return - } +// this should only be called when the state is already locked +func (te *TxEngine) open() { te.txPool.Open(te.dbconfigs.AppWithDB(), te.dbconfigs.DbaWithDB(), te.dbconfigs.AppDebugWithDB()) - if !te.twopcEnabled { - te.isOpen = true - return - } - te.twoPC.Open(te.dbconfigs) - if err := te.prepareFromRedo(); err != nil { - // If this operation fails, we choose to raise an alert and - // continue anyway. Serving traffic is considered more important - // than blocking everything for the sake of a few transactions. - tabletenv.InternalErrors.Add("TwopcResurrection", 1) - log.Errorf("Could not prepare transactions: %v", err) + if te.twopcEnabled && te.state == AcceptingReadAndWrite { + te.twoPC.Open(te.dbconfigs) + if err := te.prepareFromRedo(); err != nil { + // If this operation fails, we choose to raise an alert and + // continue anyway. Serving traffic is considered more important + // than blocking everything for the sake of a few transactions. 
+ tabletenv.InternalErrors.Add("TwopcResurrection", 1) + log.Errorf("Could not prepare transactions: %v", err) + } + te.startWatchdog() } - te.startWatchdog() - te.isOpen = true +} + +// StopGently will disregard common rules for when to kill transactions +// and wait forever for transactions to wrap up +func (te *TxEngine) StopGently() { + te.stateLock.Lock() + defer te.stateLock.Unlock() + te.close(false) + te.state = NotServing } // Close closes the TxEngine. If the immediate flag is on, @@ -150,10 +397,7 @@ func (te *TxEngine) Open() { // to conclude. If a shutdown grace period was specified, // the transactions are rolled back if they're not resolved // by that time. -func (te *TxEngine) Close(immediate bool) { - if !te.isOpen { - return - } +func (te *TxEngine) close(immediate bool) { // Shut down functions are idempotent. // No need to check if 2pc is enabled. te.stopWatchdog() @@ -177,6 +421,7 @@ func (te *TxEngine) Close(immediate bool) { } if te.shutdownGracePeriod <= 0 { // No grace period was specified. Never rollback. + te.rollbackPrepared() log.Info("No grace period specified: performing normal wait.") return } @@ -184,9 +429,8 @@ func (te *TxEngine) Close(immediate bool) { defer tmr.Stop() select { case <-tmr.C: - // The grace period has passed. Rollback, but don't touch the 2pc transactions. - log.Info("Grace period exceeded: rolling back non-2pc transactions now.") - te.txPool.RollbackNonBusy(tabletenv.LocalContext()) + log.Info("Grace period exceeded: rolling back now.") + te.rollbackTransactions() case <-poolEmpty: // The pool cleared before the timer kicked in. Just return. log.Info("Transactions completed before grace period: shutting down.") @@ -200,7 +444,6 @@ func (te *TxEngine) Close(immediate bool) { te.txPool.Close() te.twoPC.Close() - te.isOpen = false } // prepareFromRedo replays and prepares the transactions @@ -268,11 +511,18 @@ outer: // serving type. func (te *TxEngine) rollbackTransactions() { ctx := tabletenv.LocalContext() + for _, c := range te.preparedPool.FetchAll() { + te.txPool.LocalConclude(ctx, c) + } // The order of rollbacks is currently not material because // we don't allow new statements or commits during // this function. In case of any such change, this will // have to be revisited. te.txPool.RollbackNonBusy(ctx) +} + +func (te *TxEngine) rollbackPrepared() { + ctx := tabletenv.LocalContext() for _, c := range te.preparedPool.FetchAll() { te.txPool.LocalConclude(ctx, c) } diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go index e570abed32..8cbfef5556 100644 --- a/go/vt/vttablet/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -17,9 +17,15 @@ limitations under the License. package tabletserver import ( + "fmt" + "strings" + "sync" "testing" "time" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "golang.org/x/net/context" @@ -40,49 +46,49 @@ func TestTxEngineClose(t *testing.T) { te.InitDBConfig(dbcfgs) // Normal close. - te.Open() + te.open() start := time.Now() - te.Close(false) + te.close(false) if diff := time.Now().Sub(start); diff > 500*time.Millisecond { t.Errorf("Close time: %v, must be under 0.5s", diff) } // Normal close with timeout wait. 
- te.Open() + te.open() c, err := te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } c.Recycle() start = time.Now() - te.Close(false) + te.close(false) if diff := time.Now().Sub(start); diff < 500*time.Millisecond { t.Errorf("Close time: %v, must be over 0.5s", diff) } // Immediate close. - te.Open() + te.open() c, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } c.Recycle() start = time.Now() - te.Close(true) + te.close(true) if diff := time.Now().Sub(start); diff > 500*time.Millisecond { t.Errorf("Close time: %v, must be under 0.5s", diff) } // Normal close with short grace period. te.shutdownGracePeriod = 250 * time.Millisecond - te.Open() + te.open() c, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } c.Recycle() start = time.Now() - te.Close(false) + te.close(false) if diff := time.Now().Sub(start); diff > 500*time.Millisecond { t.Errorf("Close time: %v, must be under 0.5s", diff) } @@ -92,7 +98,7 @@ func TestTxEngineClose(t *testing.T) { // Normal close with short grace period, but pool gets empty early. te.shutdownGracePeriod = 250 * time.Millisecond - te.Open() + te.open() c, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) @@ -107,7 +113,7 @@ func TestTxEngineClose(t *testing.T) { te.txPool.LocalConclude(ctx, c) }() start = time.Now() - te.Close(false) + te.close(false) if diff := time.Now().Sub(start); diff > 250*time.Millisecond { t.Errorf("Close time: %v, must be under 0.25s", diff) } @@ -116,7 +122,7 @@ func TestTxEngineClose(t *testing.T) { } // Immediate close, but connection is in use. - te.Open() + te.open() c, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) @@ -126,7 +132,7 @@ func TestTxEngineClose(t *testing.T) { te.txPool.LocalConclude(ctx, c) }() start = time.Now() - te.Close(true) + te.close(true) if diff := time.Now().Sub(start); diff > 250*time.Millisecond { t.Errorf("Close time: %v, must be under 0.25s", diff) } @@ -134,3 +140,358 @@ func TestTxEngineClose(t *testing.T) { t.Errorf("Close time: %v, must be over 0.1", diff) } } + +type TxType int + +const ( + NoTx TxType = iota + ReadOnlyAccepted + WriteAccepted + ReadOnlyRejected + WriteRejected +) + +func (t TxType) String() string { + names := [...]string{ + "no transaction", + "read only transaction accepted", + "write transaction accepted", + "read only transaction rejected", + "write transaction rejected", + } + + if t < NoTx || t > WriteRejected { + return "unknown" + } + + return names[t] +} + +type TestCase struct { + startState txEngineState + TxEngineStates []txEngineState + tx TxType + stateAssertion func(state txEngineState) error +} + +func (test TestCase) String() string { + var sb strings.Builder + sb.WriteString("start from ") + sb.WriteString(test.startState.String()) + sb.WriteString(" with ") + sb.WriteString(test.tx.String()) + + for _, change := range test.TxEngineStates { + sb.WriteString(" change state to ") + sb.WriteString(change.String()) + } + + return sb.String() +} + +func changeState(te *TxEngine, state txEngineState) error { + switch state { + case AcceptingReadAndWrite: + return te.AcceptReadWrite() + case AcceptingReadOnly: + return te.AcceptReadOnly() + case NotServing: + return te.Stop() + default: + return fmt.Errorf("don't know how to do that: %v", state) + } +} + +func TestWithInnerTests(outerT *testing.T) { + + tests := []TestCase{ + // Start from RW and test all single hop transitions 
with and without tx + {AcceptingReadAndWrite, []txEngineState{ + NotServing}, + NoTx, assertEndStateIs(NotServing)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadAndWrite}, + NoTx, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadOnly}, + NoTx, assertEndStateIs(AcceptingReadOnly)}, + + {AcceptingReadAndWrite, []txEngineState{ + NotServing}, + WriteAccepted, assertEndStateIs(NotServing)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadAndWrite}, + WriteAccepted, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadOnly}, + WriteAccepted, assertEndStateIs(AcceptingReadOnly)}, + + {AcceptingReadAndWrite, []txEngineState{ + NotServing}, + ReadOnlyAccepted, assertEndStateIs(NotServing)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadAndWrite}, + ReadOnlyAccepted, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadOnly}, + ReadOnlyAccepted, assertEndStateIs(AcceptingReadOnly)}, + + // Start from RW and test all transitions with and without tx, plus a concurrent Stop() + {AcceptingReadAndWrite, []txEngineState{ + NotServing, + NotServing}, + NoTx, assertEndStateIs(NotServing)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadAndWrite, + NotServing}, + NoTx, assertEndStateIs(NotServing)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadOnly, + NotServing}, + NoTx, assertEndStateIs(NotServing)}, + + {AcceptingReadAndWrite, []txEngineState{ + NotServing, + NotServing}, + WriteAccepted, assertEndStateIs(NotServing)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadAndWrite, + NotServing}, + WriteAccepted, assertEndStateIs(NotServing)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadOnly, + NotServing}, + WriteAccepted, assertEndStateIs(NotServing)}, + + // Start from RW and test all transitions with and without tx, plus a concurrent ReadOnly() + {AcceptingReadAndWrite, []txEngineState{ + NotServing, + AcceptingReadOnly}, + NoTx, assertEndStateIs(AcceptingReadOnly)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadAndWrite, + AcceptingReadOnly}, + NoTx, assertEndStateIs(AcceptingReadOnly)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadOnly, + AcceptingReadOnly}, + NoTx, assertEndStateIs(AcceptingReadOnly)}, + + {AcceptingReadAndWrite, []txEngineState{ + NotServing, + AcceptingReadOnly}, + WriteAccepted, assertEndStateIs(AcceptingReadOnly)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadAndWrite, + AcceptingReadOnly}, + WriteAccepted, assertEndStateIs(AcceptingReadOnly)}, + + {AcceptingReadAndWrite, []txEngineState{ + AcceptingReadOnly, + AcceptingReadOnly}, + WriteAccepted, assertEndStateIs(AcceptingReadOnly)}, + + // Start from RO and test all single hop transitions with and without tx + {AcceptingReadOnly, []txEngineState{ + NotServing}, + NoTx, assertEndStateIs(NotServing)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadAndWrite}, + NoTx, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadOnly}, + NoTx, assertEndStateIs(AcceptingReadOnly)}, + + {AcceptingReadOnly, []txEngineState{ + NotServing}, + WriteRejected, assertEndStateIs(NotServing)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadAndWrite}, + WriteRejected, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadOnly}, + WriteRejected, 
assertEndStateIs(AcceptingReadOnly)}, + + // Start from RO and test all transitions with and without tx, plus a concurrent Stop() + {AcceptingReadOnly, []txEngineState{ + NotServing, + NotServing}, + NoTx, assertEndStateIs(NotServing)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadAndWrite, + NotServing}, + NoTx, assertEndStateIs(NotServing)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadOnly, + NotServing}, + NoTx, assertEndStateIs(NotServing)}, + + {AcceptingReadOnly, []txEngineState{ + NotServing, + NotServing}, + WriteRejected, assertEndStateIs(NotServing)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadAndWrite, + NotServing}, + WriteRejected, assertEndStateIs(NotServing)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadOnly, + NotServing}, + WriteRejected, assertEndStateIs(NotServing)}, + + // Start from RO and test all transitions with and without tx, plus a concurrent ReadWrite() + {AcceptingReadOnly, []txEngineState{ + NotServing, + AcceptingReadAndWrite}, + NoTx, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadAndWrite, + AcceptingReadAndWrite}, + NoTx, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadOnly, + AcceptingReadAndWrite}, + NoTx, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadOnly, []txEngineState{ + NotServing, + AcceptingReadAndWrite}, + WriteRejected, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadAndWrite, + AcceptingReadAndWrite}, + WriteRejected, assertEndStateIs(AcceptingReadAndWrite)}, + + {AcceptingReadOnly, []txEngineState{ + AcceptingReadOnly, + AcceptingReadAndWrite}, + WriteRejected, assertEndStateIs(AcceptingReadAndWrite)}, + + // Make sure that all transactions are rejected when we are not serving + {NotServing, []txEngineState{}, + WriteRejected, assertEndStateIs(NotServing)}, + + {NotServing, []txEngineState{}, + ReadOnlyRejected, assertEndStateIs(NotServing)}, + } + + for _, test := range tests { + outerT.Run(test.String(), func(t *testing.T) { + + db := setUpQueryExecutorTest(t) + db.AddQuery("set transaction isolation level REPEATABLE READ", &sqltypes.Result{}) + db.AddQuery("start transaction with consistent snapshot, read only", &sqltypes.Result{}) + defer db.Close() + te := setupTxEngine(db) + + failIfError(t, + changeState(te, test.startState)) + + switch test.tx { + case NoTx: + // nothing to do + case WriteAccepted: + failIfError(t, + startTransaction(te, true)) + case ReadOnlyAccepted: + failIfError(t, + startTransaction(te, false)) + case WriteRejected: + err := startTransaction(te, true) + if err == nil { + t.Fatalf("expected an error to be returned when opening write transaction, but got nil") + } + case ReadOnlyRejected: + err := startTransaction(te, false) + if err == nil { + t.Fatalf("expected an error to be returned when opening read transaction, but got nil") + } + default: + t.Fatalf("don't know how to [%v]", test.tx) + } + + wg := sync.WaitGroup{} + for _, newState := range test.TxEngineStates { + wg.Add(1) + go func(s txEngineState) { + defer wg.Done() + + failIfError(t, + changeState(te, s)) + }(newState) + + // We give the state changes a chance to get started + time.Sleep(10 * time.Millisecond) + } + + // Let's wait for all transitions to wrap up + wg.Wait() + + failIfError(t, + test.stateAssertion(te.state)) + }) + } +} + +func setupTxEngine(db *fakesqldb.DB) *TxEngine { + testUtils := newTestUtils() + dbcfgs := 
testUtils.newDBConfigs(db) + config := tabletenv.DefaultQsConfig + config.TransactionCap = 10 + config.TransactionTimeout = 0.5 + config.TxShutDownGracePeriod = 0 + te := NewTxEngine(nil, config) + te.InitDBConfig(dbcfgs) + return te +} + +func failIfError(t *testing.T, err error) { + if err != nil { + t.Logf("%+v", err) + t.FailNow() + } +} + +func assertEndStateIs(expected txEngineState) func(actual txEngineState) error { + return func(actual txEngineState) error { + if actual != expected { + return fmt.Errorf("expected the end state to be %v, but it was %v", expected, actual) + } + return nil + } +} + +func startTransaction(te *TxEngine, writeTransaction bool) error { + options := &querypb.ExecuteOptions{} + if writeTransaction { + options.TransactionIsolation = querypb.ExecuteOptions_DEFAULT + } else { + options.TransactionIsolation = querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY + } + _, err := te.Begin(context.Background(), options) + return err +} diff --git a/go/vt/vttablet/tabletserver/tx_pool.go b/go/vt/vttablet/tabletserver/tx_pool.go index 8d610becb6..0c47f93f33 100644 --- a/go/vt/vttablet/tabletserver/tx_pool.go +++ b/go/vt/vttablet/tabletserver/tx_pool.go @@ -54,15 +54,22 @@ const ( const txLogInterval = time.Duration(1 * time.Minute) +type queries struct { + setIsolationLevel string + openTransaction string +} + var ( txOnce sync.Once txStats = stats.NewTimings("Transactions", "Transaction stats", "operation") - txIsolations = map[querypb.ExecuteOptions_TransactionIsolation]string{ - querypb.ExecuteOptions_REPEATABLE_READ: "set transaction isolation level REPEATABLE READ", - querypb.ExecuteOptions_READ_COMMITTED: "set transaction isolation level READ COMMITTED", - querypb.ExecuteOptions_READ_UNCOMMITTED: "set transaction isolation level READ UNCOMMITTED", - querypb.ExecuteOptions_SERIALIZABLE: "set transaction isolation level SERIALIZABLE", + txIsolations = map[querypb.ExecuteOptions_TransactionIsolation]queries{ + querypb.ExecuteOptions_DEFAULT: {setIsolationLevel: "", openTransaction: "begin"}, + querypb.ExecuteOptions_REPEATABLE_READ: {setIsolationLevel: "REPEATABLE READ", openTransaction: "begin"}, + querypb.ExecuteOptions_READ_COMMITTED: {setIsolationLevel: "READ COMMITTED", openTransaction: "begin"}, + querypb.ExecuteOptions_READ_UNCOMMITTED: {setIsolationLevel: "READ UNCOMMITTED", openTransaction: "begin"}, + querypb.ExecuteOptions_SERIALIZABLE: {setIsolationLevel: "SERIALIZABLE", openTransaction: "begin"}, + querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY: {setIsolationLevel: "REPEATABLE READ", openTransaction: "start transaction with consistent snapshot, read only"}, } ) @@ -171,7 +178,7 @@ func (axp *TxPool) RollbackNonBusy(ctx context.Context) { func (axp *TxPool) transactionKiller() { defer tabletenv.LogError() - for _, v := range axp.activePool.GetOutdated(time.Duration(axp.Timeout()), "for rollback") { + for _, v := range axp.activePool.GetOutdated(time.Duration(axp.Timeout()), "for tx killer rollback") { conn := v.(*TxConnection) log.Warningf("killing transaction (exceeded timeout: %v): %s", axp.Timeout(), conn.Format(nil)) tabletenv.KillStats.Add("Transactions", 1) @@ -232,14 +239,18 @@ func (axp *TxPool) Begin(ctx context.Context, options *querypb.ExecuteOptions) ( return 0, err } - if query, ok := txIsolations[options.GetTransactionIsolation()]; ok { - if _, err := conn.Exec(ctx, query, 1, false); err != nil { + if queries, ok := txIsolations[options.GetTransactionIsolation()]; ok { + if queries.setIsolationLevel != "" { + if _, err := 
conn.Exec(ctx, "set transaction isolation level "+queries.setIsolationLevel, 1, false); err != nil { + return 0, err + } + } + + if _, err := conn.Exec(ctx, queries.openTransaction, 1, false); err != nil { return 0, err } - } - - if _, err := conn.Exec(ctx, "begin", 1, false); err != nil { - return 0, err + } else { + return 0, fmt.Errorf("don't know how to open a transaction of this type: %v", options.GetTransactionIsolation()) } beginSucceeded = true diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go new file mode 100644 index 0000000000..9e0e014bd9 --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go @@ -0,0 +1,237 @@ +/* +Copyright 2018 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vstreamer + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "net/http" + "sync" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" +) + +var ( + once sync.Once + vschemaErrors *stats.Counter + vschemaUpdates *stats.Counter +) + +// Engine is the engine for handling vreplication streaming requests. +type Engine struct { + // cp is initialized by InitDBConfig + cp *mysql.ConnParams + + // mu protects isOpen, streamers, streamIdx and kschema. + mu sync.Mutex + + isOpen bool + // wg is incremented for every Stream, and decremented on end. + // Close waits for all current streams to end by waiting on wg. + wg sync.WaitGroup + streamers map[int]*vstreamer + streamIdx int + + // watcherOnce is used for initializing kschema + // and setting up the vschema watch. It's guaranteed that + // no stream will start until kschema is initialized by + // the first call through watcherOnce. + watcherOnce sync.Once + kschema *vindexes.KeyspaceSchema + + // The following members are initialized once at the beginning. + ts srvtopo.Server + se *schema.Engine + keyspace string + cell string +} + +// NewEngine creates a new Engine. +// Initialization sequence is: NewEngine->InitDBConfig->Open. +// Open and Close can be called multiple times and are idempotent. +func NewEngine(ts srvtopo.Server, se *schema.Engine) *Engine { + vse := &Engine{ + streamers: make(map[int]*vstreamer), + kschema: &vindexes.KeyspaceSchema{}, + ts: ts, + se: se, + } + once.Do(func() { + vschemaErrors = stats.NewCounter("VSchemaErrors", "Count of VSchema errors") + vschemaUpdates = stats.NewCounter("VSchemaUpdates", "Count of VSchema updates. Does not include errors") + http.Handle("/debug/tablet_vschema", vse) + }) + return vse +} + +// InitDBConfig saves the required info from dbconfigs for future use. 
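The NewEngine comment above fixes the call order for this engine (NewEngine -> InitDBConfig -> Open, with Close at shutdown). Here is a small illustrative sketch of that wiring, assuming the caller already holds a srvtopo.Server, a schema.Engine and the tablet's DBConfigs; the helper name startVStreamer and the keyspace/cell literals are placeholders, not part of this change.

// startVStreamer wires up an Engine in the documented order. Illustrative only.
func startVStreamer(ts srvtopo.Server, se *schema.Engine, dbcfgs *dbconfigs.DBConfigs) (*Engine, error) {
	vse := NewEngine(ts, se) // first call also registers stats and the /debug/tablet_vschema page
	vse.InitDBConfig(dbcfgs) // stash the dba connection params before any stream starts
	if err := vse.Open("test_keyspace", "cell1"); err != nil {
		return nil, err
	}
	// Streams can now be served via vse.Stream(...); vse.Close() cancels them
	// and waits for them to finish.
	return vse, nil
}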
+func (vse *Engine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) { + vse.cp = dbcfgs.DbaWithDB() +} + +// Open starts the Engine service. +func (vse *Engine) Open(keyspace, cell string) error { + vse.mu.Lock() + defer vse.mu.Unlock() + if vse.isOpen { + return nil + } + vse.isOpen = true + vse.keyspace = keyspace + vse.cell = cell + return nil +} + +// Close closes the Engine service. +func (vse *Engine) Close() { + func() { + vse.mu.Lock() + defer vse.mu.Unlock() + if !vse.isOpen { + return + } + for _, s := range vse.streamers { + // cancel is non-blocking. + s.Cancel() + } + vse.isOpen = false + }() + + // Wait only after releasing the lock because the end of every + // stream will use the lock to remove the entry from streamers. + vse.wg.Wait() +} + +func (vse *Engine) vschema() *vindexes.KeyspaceSchema { + vse.mu.Lock() + defer vse.mu.Unlock() + return vse.kschema +} + +// Stream starts a new stream. +func (vse *Engine) Stream(ctx context.Context, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { + // Ensure kschema is initialized and the watcher is started. + // Starting of the watcher has to be delayed till the first call to Stream + // because this overhead should be incurred only if someone uses this feature. + vse.watcherOnce.Do(vse.setWatch) + + // Create stream and add it to the map. + streamer, idx, err := func() (*vstreamer, int, error) { + vse.mu.Lock() + defer vse.mu.Unlock() + if !vse.isOpen { + return nil, 0, errors.New("VStreamer is not open") + } + streamer := newVStreamer(ctx, vse.cp, vse.se, startPos, filter, vse.kschema, send) + idx := vse.streamIdx + vse.streamers[idx] = streamer + vse.streamIdx++ + // Now that we've added the stream, increment wg. + // This must be done before releasing the lock. + vse.wg.Add(1) + return streamer, idx, nil + }() + if err != nil { + return err + } + + // Remove stream from map and decrement wg when it ends. + defer func() { + vse.mu.Lock() + defer vse.mu.Unlock() + delete(vse.streamers, idx) + vse.wg.Done() + }() + + // No lock is held while streaming, but wg is incremented. + return streamer.Stream() +} + +// ServeHTTP shows the current VSchema. +func (vse *Engine) ServeHTTP(response http.ResponseWriter, request *http.Request) { + if err := acl.CheckAccessHTTP(request, acl.DEBUGGING); err != nil { + acl.SendError(response, err) + return + } + response.Header().Set("Content-Type", "application/json; charset=utf-8") + vs := vse.vschema() + if vs == nil || vs.Keyspace == nil { + response.Write([]byte("{}")) + } + b, err := json.MarshalIndent(vs, "", " ") + if err != nil { + response.Write([]byte(err.Error())) + return + } + buf := bytes.NewBuffer(nil) + json.HTMLEscape(buf, b) + response.Write(buf.Bytes()) +} + +func (vse *Engine) setWatch() { + // WatchSrvVSchema does not return until the inner func has been called at least once. + vse.ts.WatchSrvVSchema(context.TODO(), vse.cell, func(v *vschemapb.SrvVSchema, err error) { + var kschema *vindexes.KeyspaceSchema + switch { + case err == nil: + kschema, err = vindexes.BuildKeyspaceSchema(v.Keyspaces[vse.keyspace], vse.keyspace) + if err != nil { + log.Errorf("Error building vschema %s: %v", vse.keyspace, err) + vschemaErrors.Add(1) + return + } + case topo.IsErrType(err, topo.NoNode): + // No-op. 
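+ // The SrvVSchema simply doesn't exist yet in this cell; leave kschema nil
+ // so the blank keyspace schema below is installed.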
+ default: + log.Errorf("Error fetching vschema %s: %v", vse.keyspace, err) + vschemaErrors.Add(1) + return + } + + if kschema == nil { + kschema = &vindexes.KeyspaceSchema{ + Keyspace: &vindexes.Keyspace{ + Name: vse.keyspace, + }, + } + } + + // Broadcast the change to all streamers. + vse.mu.Lock() + defer vse.mu.Unlock() + vse.kschema = kschema + b, _ := json.MarshalIndent(kschema, "", " ") + log.Infof("Updated KSchema: %s", b) + for _, s := range vse.streamers { + s.SetKSchema(kschema) + } + vschemaUpdates.Add(1) + }) +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine_test.go b/go/vt/vttablet/tabletserver/vstreamer/engine_test.go new file mode 100644 index 0000000000..c88a9aeef4 --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/engine_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vstreamer + +import ( + "encoding/json" + "testing" + "time" + + "golang.org/x/net/context" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +var shardedVSchema = `{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id1", + "name": "hash" + } + ] + } + } +}` + +func TestUpdateVSchema(t *testing.T) { + if testing.Short() { + t.Skip() + } + + defer env.SetVSchema("{}") + + // We have to start at least one stream to start the vschema watcher. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*/", + }}, + } + + _ = startStream(ctx, t, filter) + cancel() + + startCount := expectUpdateCount(t, 1) + + if err := env.SetVSchema(shardedVSchema); err != nil { + t.Fatal(err) + } + expectUpdateCount(t, startCount+1) + + want := `{ + "sharded": true, + "tables": { + "t1": { + "name": "t1", + "column_vindexes": [ + { + "columns": [ + "id1" + ], + "type": "hash", + "name": "hash", + "vindex": {} + } + ], + "ordered": [ + { + "columns": [ + "id1" + ], + "type": "hash", + "name": "hash", + "vindex": {} + } + ] + } + }, + "vindexes": { + "hash": {} + } +}` + b, err := json.MarshalIndent(engine.vschema(), "", " ") + if err != nil { + t.Fatal(err) + } + if got := string(b); got != want { + t.Errorf("vschema:\n%s, want:\n%s", got, want) + } +} + +func expectUpdateCount(t *testing.T, wantCount int64) int64 { + for i := 0; i < 10; i++ { + gotCount := vschemaUpdates.Get() + if gotCount >= wantCount { + return gotCount + } + if i == 9 { + t.Fatalf("update count: %d, want %d", gotCount, wantCount) + } + time.Sleep(10 * time.Millisecond) + } + panic("unreachable") +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/main_test.go b/go/vt/vttablet/tabletserver/vstreamer/main_test.go new file mode 100644 index 0000000000..224091078f --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/main_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vstreamer + +import ( + "flag" + "fmt" + "os" + "testing" + + "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" +) + +var ( + engine *Engine + env *testenv.Env +) + +func TestMain(m *testing.M) { + flag.Parse() // Do not remove this comment, import into google3 depends on it + + if testing.Short() { + os.Exit(m.Run()) + } + + exitCode := func() int { + var err error + env, err = testenv.Init() + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + defer env.Close() + + // engine cannot be initialized in testenv because it introduces + // circular dependencies. + engine = NewEngine(env.SrvTopo, env.SchemaEngine) + engine.InitDBConfig(env.Dbcfgs) + engine.Open(env.KeyspaceName, env.Cells[0]) + defer engine.Close() + + return m.Run() + }() + os.Exit(exitCode) +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go new file mode 100644 index 0000000000..f9920e791f --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -0,0 +1,422 @@ +/* +Copyright 2018 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vstreamer + +import ( + "fmt" + "regexp" + "strings" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// Plan represents the streaming plan for a table. +type Plan struct { + Table *Table + ColExprs []ColExpr + VindexColumn int + Vindex vindexes.Vindex + KeyRange *topodatapb.KeyRange +} + +// ColExpr represents a column expression. +type ColExpr struct { + ColNum int + Alias sqlparser.ColIdent + Type querypb.Type + Operation Operation +} + +// Operation represents the operation to be performed on a column. +type Operation int + +// The following are the supported operations on a column. +const ( + OpNone = Operation(iota) + OpMonth + OpDay + OpHour +) + +// Table contains the metadata for a table. The +// name is derived from mysql's Table_map_log_event. +type Table struct { + *mysql.TableMap + Columns []schema.TableColumn +} + +// The filter function needs the ability to perform expression evaluations. This is +// because the consumer of vstream is not just VPlayer. 
It can also be a dumb client +// like a mysql client that's subscribing to changes. This ability allows users +// to directly pull events by sending a complex select query. The same reasoning +// applies to where clauses. For now, only simple functions like hour are supported, +// but this can be expanded in the future. +func (plan *Plan) filter(values []sqltypes.Value) (bool, []sqltypes.Value, error) { + result := make([]sqltypes.Value, len(plan.ColExprs)) + for i, colExpr := range plan.ColExprs { + switch colExpr.Operation { + case OpMonth: + v, _ := sqltypes.ToInt64(values[colExpr.ColNum]) + t := time.Unix(v, 0).UTC() + s := fmt.Sprintf("%d%02d", t.Year(), t.Month()) + result[i] = sqltypes.NewVarBinary(s) + case OpDay: + v, _ := sqltypes.ToInt64(values[colExpr.ColNum]) + t := time.Unix(v, 0).UTC() + s := fmt.Sprintf("%d%02d%02d", t.Year(), t.Month(), t.Day()) + result[i] = sqltypes.NewVarBinary(s) + case OpHour: + v, _ := sqltypes.ToInt64(values[colExpr.ColNum]) + t := time.Unix(v, 0).UTC() + s := fmt.Sprintf("%d%02d%02d%02d", t.Year(), t.Month(), t.Day(), t.Hour()) + result[i] = sqltypes.NewVarBinary(s) + default: + result[i] = values[colExpr.ColNum] + } + } + if plan.Vindex == nil { + return true, result, nil + } + + // Filter by Vindex. + destinations, err := plan.Vindex.Map(nil, []sqltypes.Value{result[plan.VindexColumn]}) + if err != nil { + return false, nil, err + } + if len(destinations) != 1 { + return false, nil, fmt.Errorf("mapping row to keyspace id returned an invalid array of destinations: %v", key.DestinationsString(destinations)) + } + ksid, ok := destinations[0].(key.DestinationKeyspaceID) + if !ok || len(ksid) == 0 { + return false, nil, fmt.Errorf("could not map %v to a keyspace id, got destination %v", result[plan.VindexColumn], destinations[0]) + } + if !key.KeyRangeContains(plan.KeyRange, ksid) { + return false, nil, nil + } + return true, result, nil +} + +func mustSendDDL(query mysql.Query, dbname string, filter *binlogdatapb.Filter) bool { + if query.Database != "" && query.Database != dbname { + return false + } + ast, err := sqlparser.Parse(query.SQL) + // If there was a parsing error, we send it through. Hopefully, + // the recipient can handle it. 
+ if err != nil { + return true + } + switch stmt := ast.(type) { + case *sqlparser.DBDDL: + return false + case *sqlparser.DDL: + if !stmt.Table.IsEmpty() { + return tableMatches(stmt.Table, dbname, filter) + } + for _, table := range stmt.FromTables { + if tableMatches(table, dbname, filter) { + return true + } + } + for _, table := range stmt.ToTables { + if tableMatches(table, dbname, filter) { + return true + } + } + return false + } + return true +} + +func tableMatches(table sqlparser.TableName, dbname string, filter *binlogdatapb.Filter) bool { + if !table.Qualifier.IsEmpty() && table.Qualifier.String() != dbname { + return false + } + for _, rule := range filter.Rules { + switch { + case strings.HasPrefix(rule.Match, "/"): + expr := strings.Trim(rule.Match, "/") + result, err := regexp.MatchString(expr, table.Name.String()) + if err != nil { + return true + } + if !result { + continue + } + return true + case table.Name.String() == rule.Match: + return true + } + } + return false +} + +func buildPlan(ti *Table, kschema *vindexes.KeyspaceSchema, filter *binlogdatapb.Filter) (*Plan, error) { + for _, rule := range filter.Rules { + switch { + case strings.HasPrefix(rule.Match, "/"): + expr := strings.Trim(rule.Match, "/") + result, err := regexp.MatchString(expr, ti.Name) + if err != nil { + return nil, err + } + if !result { + continue + } + return buildREPlan(ti, kschema, rule.Filter) + case rule.Match == ti.Name: + return buildTablePlan(ti, kschema, rule.Filter) + } + } + return nil, nil +} + +func buildREPlan(ti *Table, kschema *vindexes.KeyspaceSchema, filter string) (*Plan, error) { + plan := &Plan{ + Table: ti, + } + plan.ColExprs = make([]ColExpr, len(ti.Columns)) + for i, col := range ti.Columns { + plan.ColExprs[i].ColNum = i + plan.ColExprs[i].Alias = col.Name + plan.ColExprs[i].Type = col.Type + } + if filter == "" { + return plan, nil + } + + // We need to additionally set VindexColumn, Vindex and KeyRange + // based on the Primary Vindex of the table. + // Find table in kschema. + table := kschema.Tables[ti.Name] + if table == nil { + return nil, fmt.Errorf("no vschema definition for table %s", ti.Name) + } + // Get Primary Vindex. + if len(table.ColumnVindexes) == 0 { + return nil, fmt.Errorf("table %s has no primary vindex", ti.Name) + } + // findColumn can be used here because result column list is same + // as source. + colnum, err := findColumn(ti, table.ColumnVindexes[0].Columns[0]) + if err != nil { + return nil, err + } + plan.VindexColumn = colnum + plan.Vindex = table.ColumnVindexes[0].Vindex + + // Parse keyrange. 
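+ // The filter is a sharding spec such as "-80" or "80-c0", as accepted by
+ // key.ParseShardingSpec; it must describe exactly one keyrange.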
+ keyranges, err := key.ParseShardingSpec(filter) + if err != nil { + return nil, err + } + if len(keyranges) != 1 { + return nil, fmt.Errorf("error parsing keyrange: %v", filter) + } + plan.KeyRange = keyranges[0] + return plan, nil +} + +func buildTablePlan(ti *Table, kschema *vindexes.KeyspaceSchema, query string) (*Plan, error) { + statement, err := sqlparser.Parse(query) + if err != nil { + return nil, err + } + plan := &Plan{ + Table: ti, + } + sel, ok := statement.(*sqlparser.Select) + if !ok { + return nil, fmt.Errorf("unexpected: %v", sqlparser.String(statement)) + } + if len(sel.From) > 1 { + return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + node, ok := sel.From[0].(*sqlparser.AliasedTableExpr) + if !ok { + return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + fromTable := sqlparser.GetTableName(node.Expr) + if fromTable.IsEmpty() { + return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + if fromTable.String() != ti.Name { + return nil, fmt.Errorf("unexpected: select expression table %v does not match the table entry name %s", sqlparser.String(fromTable), ti.Name) + } + + if _, ok := sel.SelectExprs[0].(*sqlparser.StarExpr); !ok { + for _, expr := range sel.SelectExprs { + cExpr, err := analyzeExpr(ti, expr) + if err != nil { + return nil, err + } + plan.ColExprs = append(plan.ColExprs, cExpr) + } + } else { + if len(sel.SelectExprs) != 1 { + return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + } + plan.ColExprs = make([]ColExpr, len(ti.Columns)) + for i, col := range ti.Columns { + plan.ColExprs[i].ColNum = i + plan.ColExprs[i].Alias = col.Name + plan.ColExprs[i].Type = col.Type + } + } + + if sel.Where == nil { + return plan, nil + } + + // Filter by Vindex. + funcExpr, ok := sel.Where.Expr.(*sqlparser.FuncExpr) + if !ok { + return nil, fmt.Errorf("unexpected where clause: %v", sqlparser.String(sel.Where)) + } + if !funcExpr.Name.EqualString("in_keyrange") { + return nil, fmt.Errorf("unexpected where clause: %v", sqlparser.String(sel.Where)) + } + if len(funcExpr.Exprs) != 3 { + return nil, fmt.Errorf("unexpected where clause: %v", sqlparser.String(sel.Where)) + } + aexpr, ok := funcExpr.Exprs[0].(*sqlparser.AliasedExpr) + if !ok { + return nil, fmt.Errorf("unexpected: %v", sqlparser.String(funcExpr)) + } + colname, ok := aexpr.Expr.(*sqlparser.ColName) + if !ok { + return nil, fmt.Errorf("unsupported: %v", sqlparser.String(funcExpr)) + } + found := false + for i, cExpr := range plan.ColExprs { + if cExpr.Alias.Equal(colname.Name) { + found = true + plan.VindexColumn = i + break + } + } + if !found { + return nil, fmt.Errorf("keyrange expression does not reference a column in the select list: %v", sqlparser.String(colname)) + } + vtype, err := selString(funcExpr.Exprs[1]) + if err != nil { + return nil, err + } + plan.Vindex, err = vindexes.CreateVindex(vtype, vtype, map[string]string{}) + if err != nil { + return nil, err + } + if !plan.Vindex.IsUnique() || !plan.Vindex.IsFunctional() { + return nil, fmt.Errorf("vindex must be Unique and Functional to be used for VReplication: %s", vtype) + } + kr, err := selString(funcExpr.Exprs[2]) + if err != nil { + return nil, err + } + keyranges, err := key.ParseShardingSpec(kr) + if err != nil { + return nil, err + } + if len(keyranges) != 1 { + return nil, fmt.Errorf("unexpected where clause: %v", sqlparser.String(sel.Where)) + } + plan.KeyRange = keyranges[0] + return plan, nil +} + +func analyzeExpr(ti *Table, selExpr sqlparser.SelectExpr) (cExpr ColExpr, err error) { + 
aliased, ok := selExpr.(*sqlparser.AliasedExpr) + if !ok { + return ColExpr{}, fmt.Errorf("unexpected: %v", sqlparser.String(selExpr)) + } + as := aliased.As + if as.IsEmpty() { + as = sqlparser.NewColIdent(sqlparser.String(aliased.Expr)) + } + switch expr := aliased.Expr.(type) { + case *sqlparser.ColName: + colnum, err := findColumn(ti, expr.Name) + if err != nil { + return ColExpr{}, err + } + return ColExpr{ColNum: colnum, Alias: as, Type: ti.Columns[colnum].Type}, nil + case *sqlparser.FuncExpr: + if expr.Distinct || len(expr.Exprs) != 1 { + return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(expr)) + } + switch fname := expr.Name.Lowered(); fname { + case "month", "day", "hour": + aInner, ok := expr.Exprs[0].(*sqlparser.AliasedExpr) + if !ok { + return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(expr)) + } + innerCol, ok := aInner.Expr.(*sqlparser.ColName) + if !ok { + return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(expr)) + } + colnum, err := findColumn(ti, innerCol.Name) + if err != nil { + return ColExpr{}, err + } + switch fname { + case "month": + return ColExpr{ColNum: colnum, Alias: as, Type: sqltypes.VarBinary, Operation: OpMonth}, nil + case "day": + return ColExpr{ColNum: colnum, Alias: as, Type: sqltypes.VarBinary, Operation: OpDay}, nil + case "hour": + return ColExpr{ColNum: colnum, Alias: as, Type: sqltypes.VarBinary, Operation: OpHour}, nil + default: + panic("unreachable") + } + default: + return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(expr)) + } + default: + return ColExpr{}, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } +} + +func selString(expr sqlparser.SelectExpr) (string, error) { + aexpr, ok := expr.(*sqlparser.AliasedExpr) + if !ok { + return "", fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } + val, ok := aexpr.Expr.(*sqlparser.SQLVal) + if !ok { + return "", fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } + return string(val.Val), nil +} + +func findColumn(ti *Table, name sqlparser.ColIdent) (int, error) { + for i, col := range ti.Columns { + if name.Equal(col.Name) { + return i, nil + } + } + return 0, fmt.Errorf("column %s not found in table %s", sqlparser.String(name), ti.Name) +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go new file mode 100644 index 0000000000..f9f357e24e --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go @@ -0,0 +1,464 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vstreamer + +import ( + "fmt" + "reflect" + "testing" + + "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" +) + +var testKSChema *vindexes.KeyspaceSchema + +func init() { + input := `{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + }, + "lookup": { + "type": "lookup" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] + } + } +}` + var kspb vschemapb.Keyspace + if err := json2.Unmarshal([]byte(input), &kspb); err != nil { + panic(fmt.Errorf("Unmarshal failed: %v", err)) + } + kschema, err := vindexes.BuildKeyspaceSchema(&kspb, "ks") + if err != nil { + panic(err) + } + testKSChema = kschema +} + +func TestMustSendDDL(t *testing.T) { + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/t1.*/", + }, { + Match: "t2", + }}, + } + testcases := []struct { + sql string + db string + output bool + }{{ + sql: "create database db", + output: false, + }, { + sql: "create table foo(id int)", + output: false, + }, { + sql: "create table db.foo(id int)", + output: false, + }, { + sql: "create table mydb.foo(id int)", + output: false, + }, { + sql: "create table t1a(id int)", + output: true, + }, { + sql: "create table db.t1a(id int)", + output: false, + }, { + sql: "create table mydb.t1a(id int)", + output: true, + }, { + sql: "rename table t1a to foo, foo to bar", + output: true, + }, { + sql: "rename table foo to t1a, foo to bar", + output: true, + }, { + sql: "rename table foo to bar, t1a to bar", + output: true, + }, { + sql: "rename table foo to bar, bar to foo", + output: false, + }, { + sql: "drop table t1a, foo", + output: true, + }, { + sql: "drop table foo, t1a", + output: true, + }, { + sql: "drop table foo, bar", + output: false, + }, { + sql: "bad query", + output: true, + }, { + sql: "select * from t", + output: true, + }, { + sql: "drop table t2", + output: true, + }, { + sql: "create table t1a(id int)", + db: "db", + output: false, + }, { + sql: "create table t1a(id int)", + db: "mydb", + output: true, + }} + for _, tcase := range testcases { + q := mysql.Query{SQL: tcase.sql, Database: tcase.db} + got := mustSendDDL(q, "mydb", filter) + if got != tcase.output { + t.Errorf("%v: %v, want %v", q, got, tcase.output) + } + } +} + +func TestPlanbuilder(t *testing.T) { + t1 := &Table{ + TableMap: &mysql.TableMap{ + Name: "t1", + }, + Columns: []schema.TableColumn{{ + Name: sqlparser.NewColIdent("id"), + Type: sqltypes.Int64, + }, { + Name: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }}, + } + // t1alt has no id column + t1alt := &Table{ + TableMap: &mysql.TableMap{ + Name: "t1", + }, + Columns: []schema.TableColumn{{ + Name: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }}, + } + t2 := &Table{ + TableMap: &mysql.TableMap{ + Name: "t2", + }, + Columns: []schema.TableColumn{{ + Name: sqlparser.NewColIdent("id"), + Type: sqltypes.Int64, + }, { + Name: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }}, + } + + testcases := []struct { + inTable *Table + inRule *binlogdatapb.Rule + outPlan *Plan + outErr string + }{{ + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "/.*/"}, + outPlan: &Plan{ + ColExprs: []ColExpr{{ + ColNum: 0, + Alias: sqlparser.NewColIdent("id"), 
+ Type: sqltypes.Int64, + }, { + ColNum: 1, + Alias: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }}, + }, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "-80"}, + outPlan: &Plan{ + ColExprs: []ColExpr{{ + ColNum: 0, + Alias: sqlparser.NewColIdent("id"), + Type: sqltypes.Int64, + }, { + ColNum: 1, + Alias: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }}, + VindexColumn: 0, + }, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from t1"}, + outPlan: &Plan{ + ColExprs: []ColExpr{{ + ColNum: 0, + Alias: sqlparser.NewColIdent("id"), + Type: sqltypes.Int64, + }, { + ColNum: 1, + Alias: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }}, + }, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1"}, + outPlan: &Plan{ + ColExprs: []ColExpr{{ + ColNum: 0, + Alias: sqlparser.NewColIdent("id"), + Type: sqltypes.Int64, + }, { + ColNum: 1, + Alias: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }}, + }, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select val, id from t1"}, + outPlan: &Plan{ + ColExprs: []ColExpr{{ + ColNum: 1, + Alias: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }, { + ColNum: 0, + Alias: sqlparser.NewColIdent("id"), + Type: sqltypes.Int64, + }}, + }, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select val, id from t1 where in_keyrange(id, 'hash', '-80')"}, + outPlan: &Plan{ + ColExprs: []ColExpr{{ + ColNum: 1, + Alias: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }, { + ColNum: 0, + Alias: sqlparser.NewColIdent("id"), + Type: sqltypes.Int64, + }}, + VindexColumn: 1, + }, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, month(val) m, day(id), hour(val) from t1 where in_keyrange(m, 'hash', '-80')"}, + outPlan: &Plan{ + ColExprs: []ColExpr{{ + ColNum: 0, + Alias: sqlparser.NewColIdent("id"), + Type: sqltypes.Int64, + }, { + ColNum: 1, + Alias: sqlparser.NewColIdent("val"), + Type: sqltypes.VarBinary, + }, { + ColNum: 1, + Alias: sqlparser.NewColIdent("m"), + Type: sqltypes.VarBinary, + Operation: OpMonth, + }, { + ColNum: 0, + Alias: sqlparser.NewColIdent("day(id)"), + Type: sqltypes.VarBinary, + Operation: OpDay, + }, { + ColNum: 1, + Alias: sqlparser.NewColIdent("hour(val)"), + Type: sqltypes.VarBinary, + Operation: OpHour, + }}, + VindexColumn: 2, + }, + }, { + inTable: t2, + inRule: &binlogdatapb.Rule{Match: "/t1/"}, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "/*/"}, + outErr: "error parsing regexp: missing argument to repetition operator: `*`", + }, { + inTable: t2, + inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "-80"}, + outErr: `no vschema definition for table t2`, + }, { + inTable: t1alt, + inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "-80"}, + outErr: `column id not found in table t1`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "80"}, + outErr: `malformed spec: doesn't define a range: "80"`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "-80-"}, + outErr: `error parsing keyrange: -80-`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "bad query"}, + outErr: `syntax error at position 4 near 'bad'`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "delete from t1"}, + outErr: `unexpected: delete from t1`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", 
Filter: "select * from t1, t2"}, + outErr: `unexpected: select * from t1, t2`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from t1 join t2"}, + outErr: `unexpected: select * from t1 join t2`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from a.t1"}, + outErr: `unexpected: select * from a.t1`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from t2"}, + outErr: `unexpected: select expression table t2 does not match the table entry name t1`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select *, id from t1"}, + outErr: `unexpected: select *, id from t1`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where id=1"}, + outErr: `unexpected where clause: where id = 1`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where max(id)"}, + outErr: `unexpected where clause: where max(id)`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id)"}, + outErr: `unexpected where clause: where in_keyrange(id)`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(*, 'hash', '-80')"}, + outErr: `unexpected: in_keyrange(*, 'hash', '-80')`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(1, 'hash', '-80')"}, + outErr: `unsupported: in_keyrange(1, 'hash', '-80')`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(none, 'hash', '-80')"}, + outErr: `keyrange expression does not reference a column in the select list: none`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 'lookup', '-80')"}, + outErr: `vindex must be Unique and Functional to be used for VReplication: lookup`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 'hash', '80')"}, + outErr: `malformed spec: doesn't define a range: "80"`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 'hash', '-80-')"}, + outErr: `unexpected where clause: where in_keyrange(id, 'hash', '-80-')`, + }, { + // analyzeExpr tests. 
+ inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, * from t1"}, + outErr: `unexpected: *`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select none from t1"}, + outErr: `column none not found in table t1`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(distinct a) from t1"}, + outErr: `unsupported: hour(distinct a)`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(a, b) from t1"}, + outErr: `unsupported: hour(a, b)`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(*) from t1"}, + outErr: `unsupported: hour(*)`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(val+1) from t1"}, + outErr: `unsupported: hour(val + 1)`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(none) from t1"}, + outErr: `column none not found in table t1`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, max(val) from t1"}, + outErr: `unsupported: max(val)`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id+1, val from t1"}, + outErr: `unexpected: id + 1`, + }, { + // selString + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, *, '-80')"}, + outErr: `unexpected: *`, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 1+1, '-80')"}, + outErr: `unexpected: 1 + 1`, + }} + + for _, tcase := range testcases { + plan, err := buildPlan(tcase.inTable, testKSChema, &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{tcase.inRule}, + }) + if plan != nil { + plan.Table = nil + plan.Vindex = nil + plan.KeyRange = nil + if !reflect.DeepEqual(tcase.outPlan, plan) { + t.Errorf("Plan(%v, %v):\n%v, want\n%v", tcase.inTable, tcase.inRule, plan, tcase.outPlan) + } + } + gotErr := "" + if err != nil { + gotErr = err.Error() + } + if gotErr != tcase.outErr { + t.Errorf("Plan(%v, %v) err: %v, want %v", tcase.inTable, tcase.inRule, err, tcase.outErr) + } + + } +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go new file mode 100644 index 0000000000..557fa40a60 --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go @@ -0,0 +1,143 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testenv supplies test functions for testing vstreamer. 
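+//
+// Init starts a vttest.LocalCluster (a real MySQL instance configured for
+// row-based replication via config/mycnf/rbr.cnf), an in-memory topo server
+// and a schema engine, which together are enough to exercise the streamer
+// against real binlog events.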
+package testenv + +import ( + "context" + "fmt" + "os" + "path" + + "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttest" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vttestpb "vitess.io/vitess/go/vt/proto/vttest" +) + +// Env contains all the env vars for a test against a mysql instance. +type Env struct { + cluster *vttest.LocalCluster + + KeyspaceName string + ShardName string + Cells []string + + TopoServ *topo.Server + SrvTopo srvtopo.Server + Dbcfgs *dbconfigs.DBConfigs + Mysqld *mysqlctl.Mysqld + SchemaEngine *schema.Engine +} + +type checker struct{} + +var _ = connpool.MySQLChecker(checker{}) + +func (checker) CheckMySQL() {} + +// Init initializes an Env. +func Init() (*Env, error) { + te := &Env{ + KeyspaceName: "vttest", + ShardName: "0", + Cells: []string{"cell1"}, + } + + ctx := context.Background() + te.TopoServ = memorytopo.NewServer(te.Cells...) + if err := te.TopoServ.CreateKeyspace(ctx, te.KeyspaceName, &topodatapb.Keyspace{}); err != nil { + return nil, err + } + if err := te.TopoServ.CreateShard(ctx, te.KeyspaceName, te.ShardName); err != nil { + panic(err) + } + te.SrvTopo = srvtopo.NewResilientServer(te.TopoServ, "TestTopo") + + cfg := vttest.Config{ + Topology: &vttestpb.VTTestTopology{ + Keyspaces: []*vttestpb.Keyspace{ + { + Name: te.KeyspaceName, + Shards: []*vttestpb.Shard{ + { + Name: "0", + DbNameOverride: "vttest", + }, + }, + }, + }, + }, + ExtraMyCnf: []string{path.Join(os.Getenv("VTTOP"), "config/mycnf/rbr.cnf")}, + OnlyMySQL: true, + } + te.cluster = &vttest.LocalCluster{ + Config: cfg, + } + if err := te.cluster.Setup(); err != nil { + os.RemoveAll(te.cluster.Config.SchemaDir) + return nil, fmt.Errorf("could not launch mysql: %v", err) + } + + te.Dbcfgs = dbconfigs.NewTestDBConfigs(te.cluster.MySQLConnParams(), te.cluster.MySQLAppDebugConnParams(), te.cluster.DbName()) + te.Mysqld = mysqlctl.NewMysqld(te.Dbcfgs) + te.SchemaEngine = schema.NewEngine(checker{}, tabletenv.DefaultQsConfig) + te.SchemaEngine.InitDBConfig(te.Dbcfgs) + + // The first vschema should not be empty. Leads to Node not found error. + // TODO(sougou): need to fix the bug. + if err := te.SetVSchema(`{"sharded": true}`); err != nil { + te.Close() + return nil, err + } + + return te, nil +} + +// Close tears down TestEnv. +func (te *Env) Close() { + te.SchemaEngine.Close() + te.Mysqld.Close() + te.cluster.TearDown() + os.RemoveAll(te.cluster.Config.SchemaDir) +} + +// SetVSchema sets the vschema for the test keyspace. 
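+// The string is unmarshalled into a vschemapb.Keyspace, saved to the topo
+// server, and the serving vschema is rebuilt for all test cells. An
+// illustrative call from a test (sketch only, not part of this change):
+//
+//	if err := env.SetVSchema(`{"sharded": true}`); err != nil {
+//		t.Fatal(err)
+//	}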
+func (te *Env) SetVSchema(vs string) error { + ctx := context.Background() + logger := logutil.NewConsoleLogger() + var kspb vschemapb.Keyspace + if err := json2.Unmarshal([]byte(vs), &kspb); err != nil { + return err + } + if err := te.TopoServ.SaveVSchema(ctx, te.KeyspaceName, &kspb); err != nil { + return err + } + return topotools.RebuildVSchema(ctx, logger, te.TopoServ, te.Cells) +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go new file mode 100644 index 0000000000..4bf378d9f8 --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -0,0 +1,449 @@ +/* +Copyright 2018 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vstreamer + +import ( + "context" + "flag" + "fmt" + "io" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +var packetSize = flag.Int("vstream_packet_size", 10000, "Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount.") + +type vstreamer struct { + ctx context.Context + cancel func() + + cp *mysql.ConnParams + se *schema.Engine + startPos string + filter *binlogdatapb.Filter + send func([]*binlogdatapb.VEvent) error + + // A kschema is a VSchema for just one keyspace. + kevents chan *vindexes.KeyspaceSchema + kschema *vindexes.KeyspaceSchema + plans map[uint64]*Plan + + // format and pos are updated by parseEvent. + format mysql.BinlogFormat + pos mysql.Position +} + +func newVStreamer(ctx context.Context, cp *mysql.ConnParams, se *schema.Engine, startPos string, filter *binlogdatapb.Filter, kschema *vindexes.KeyspaceSchema, send func([]*binlogdatapb.VEvent) error) *vstreamer { + ctx, cancel := context.WithCancel(ctx) + return &vstreamer{ + ctx: ctx, + cancel: cancel, + cp: cp, + se: se, + startPos: startPos, + filter: filter, + send: send, + kevents: make(chan *vindexes.KeyspaceSchema, 1), + kschema: kschema, + plans: make(map[uint64]*Plan), + } +} + +// SetKSchema updates all existing against the new kschema. +func (vs *vstreamer) SetKSchema(kschema *vindexes.KeyspaceSchema) { + // Since vs.Stream is a single-threaded loop. We just send an event to + // that thread, which helps us avoid mutexes to update the plans. + select { + case vs.kevents <- kschema: + case <-vs.ctx.Done(): + } +} + +func (vs *vstreamer) Cancel() { + vs.cancel() +} + +// Stream runs a single-threaded loop. +func (vs *vstreamer) Stream() error { + defer vs.cancel() + + pos, err := mysql.DecodePosition(vs.startPos) + if err != nil { + return err + } + vs.pos = pos + + // Ensure se is Open. If vttablet came up in a non_serving role, + // the schema engine may not have been initialized. 
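+ // Errors from here on are passed through wrapError so that the position
+ // reached so far is included in the message and logged.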
+ if err := vs.se.Open(); err != nil { + return wrapError(err, vs.pos) + } + + conn, err := binlog.NewSlaveConnection(vs.cp) + if err != nil { + return wrapError(err, vs.pos) + } + defer conn.Close() + + events, err := conn.StartBinlogDumpFromPosition(vs.ctx, vs.pos) + if err != nil { + return wrapError(err, vs.pos) + } + err = vs.parseEvents(vs.ctx, events) + return wrapError(err, vs.pos) +} + +func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.BinlogEvent) error { + // bufferAndTransmit uses bufferedEvents and curSize to buffer events. + var ( + bufferedEvents []*binlogdatapb.VEvent + curSize int + ) + // Buffering only takes row lenghts into consideration. + // Length of other events is considered negligible. + // If a new row event causes the packet size to be exceeded, + // all existing rows are sent without the new row. + // If a single row exceeds the packet size, it will be in its own packet. + bufferAndTransmit := func(vevent *binlogdatapb.VEvent) error { + switch vevent.Type { + case binlogdatapb.VEventType_GTID, binlogdatapb.VEventType_BEGIN, binlogdatapb.VEventType_FIELD: + // We never have to send GTID, BEGIN or FIELD events on their own. + bufferedEvents = append(bufferedEvents, vevent) + case binlogdatapb.VEventType_COMMIT, binlogdatapb.VEventType_DDL: + // COMMIT and DDL are terminal. There may be no more events after + // these for a long time. So, we have to send whatever we have. + bufferedEvents = append(bufferedEvents, vevent) + vevents := bufferedEvents + bufferedEvents = nil + curSize = 0 + return vs.send(vevents) + case binlogdatapb.VEventType_ROW: + // ROW events happen inside transactions. So, we can chunk them. + // Buffer everything until packet size is reached, and then send. + newSize := 0 + for _, rowChange := range vevent.RowEvent.RowChanges { + if rowChange.Before != nil { + newSize += len(rowChange.Before.Values) + } + if rowChange.After != nil { + newSize += len(rowChange.After.Values) + } + } + if curSize+newSize > *packetSize { + vevents := bufferedEvents + bufferedEvents = []*binlogdatapb.VEvent{vevent} + curSize = newSize + return vs.send(vevents) + } + curSize += newSize + bufferedEvents = append(bufferedEvents, vevent) + default: + return fmt.Errorf("unexpected event: %v", vevent) + } + return nil + } + + // Main loop: calls bufferAndTransmit as events arrive. + for { + select { + case ev, ok := <-events: + if !ok { + select { + case <-ctx.Done(): + return nil + default: + } + return fmt.Errorf("unexpected server EOF") + } + vevents, err := vs.parseEvent(ev) + if err != nil { + return err + } + for _, vevent := range vevents { + if err := bufferAndTransmit(vevent); err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("error sending event: %v", err) + } + } + case vs.kschema = <-vs.kevents: + if err := vs.rebuildPlans(); err != nil { + return err + } + case <-ctx.Done(): + return nil + } + } +} + +func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, error) { + // Validate the buffer before reading fields from it. + if !ev.IsValid() { + return nil, fmt.Errorf("can't parse binlog event: invalid data: %#v", ev) + } + + // We need to keep checking for FORMAT_DESCRIPTION_EVENT even after we've + // seen one, because another one might come along (e.g. on log rotate due to + // binlog settings change) that changes the format. 
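+ // Returning no events below simply skips this control event; vs.format is
+ // updated as a side effect.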
+ if ev.IsFormatDescription() { + var err error + vs.format, err = ev.Format() + if err != nil { + return nil, fmt.Errorf("can't parse FORMAT_DESCRIPTION_EVENT: %v, event data: %#v", err, ev) + } + return nil, nil + } + + // We can't parse anything until we get a FORMAT_DESCRIPTION_EVENT that + // tells us the size of the event header. + if vs.format.IsZero() { + // The only thing that should come before the FORMAT_DESCRIPTION_EVENT + // is a fake ROTATE_EVENT, which the master sends to tell us the name + // of the current log file. + if ev.IsRotate() { + return nil, nil + } + return nil, fmt.Errorf("got a real event before FORMAT_DESCRIPTION_EVENT: %#v", ev) + } + + // Strip the checksum, if any. We don't actually verify the checksum, so discard it. + ev, _, err := ev.StripChecksum(vs.format) + if err != nil { + return nil, fmt.Errorf("can't strip checksum from binlog event: %v, event data: %#v", err, ev) + } + var vevents []*binlogdatapb.VEvent + switch { + case ev.IsGTID(): + gtid, hasBegin, err := ev.GTID(vs.format) + if err != nil { + return nil, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev) + } + if hasBegin { + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_BEGIN, + }) + } + vs.pos = mysql.AppendGTID(vs.pos, gtid) + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_GTID, + Gtid: mysql.EncodePosition(vs.pos), + }) + case ev.IsXID(): + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_COMMIT, + }) + case ev.IsQuery(): + q, err := ev.Query(vs.format) + if err != nil { + return nil, fmt.Errorf("can't get query from binlog event: %v, event data: %#v", err, ev) + } + switch cat := sqlparser.Preview(q.SQL); cat { + case sqlparser.StmtBegin: + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_BEGIN, + }) + case sqlparser.StmtCommit: + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_COMMIT, + }) + case sqlparser.StmtDDL: + if mustSendDDL(q, vs.cp.DbName, vs.filter) { + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_DDL, + Ddl: q.SQL, + }) + } else { + vevents = append(vevents, + &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_BEGIN, + }, + &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_COMMIT, + }) + } + // Proactively reload schema. + // If the DDL adds a column, comparing with an older snapshot of the + // schema will make us think that a column was dropped and error out. + vs.se.Reload(vs.ctx) + case sqlparser.StmtOther: + // These are DBA statements like REPAIR that can be ignored. + default: + return nil, fmt.Errorf("unexpected statement type %s in row-based replication: %q", sqlparser.StmtType(cat), q.SQL) + } + case ev.IsTableMap(): + // This is very frequent. It precedes every row event. + id := ev.TableID(vs.format) + tm, err := ev.TableMap(vs.format) + if err != nil { + return nil, err + } + // We have to build a plan only for new ids. 
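+ // A nil entry is also meaningful: it marks a table id that was already
+ // seen but is filtered out (different database or no matching rule), so
+ // later events for that id can be skipped cheaply.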
+ if _, ok := vs.plans[id]; ok { + return nil, nil + } + if tm.Database != "" && tm.Database != vs.cp.DbName { + vs.plans[id] = nil + return nil, nil + } + ti := vs.se.GetTable(sqlparser.NewTableIdent(tm.Name)) + if ti == nil { + return nil, fmt.Errorf("unknown table %v in schema", tm.Name) + } + if len(ti.Columns) < len(tm.Types) { + return nil, fmt.Errorf("cannot determine table columns for %s: event has %d columns, current schema has %d: %#v", tm.Name, len(tm.Types), len(ti.Columns), ev) + } + table := &Table{ + TableMap: tm, + // Columns should be truncated to match those in tm. + Columns: ti.Columns[:len(tm.Types)], + } + plan, err := buildPlan(table, vs.kschema, vs.filter) + if err != nil { + return nil, err + } + vs.plans[id] = plan + if plan == nil { + return nil, nil + } + fields := make([]*querypb.Field, len(plan.ColExprs)) + for i, ce := range plan.ColExprs { + fields[i] = &querypb.Field{ + Name: ce.Alias.String(), + Type: ce.Type, + } + } + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_FIELD, + FieldEvent: &binlogdatapb.FieldEvent{ + TableName: plan.Table.Name, + Fields: fields, + }, + }) + case ev.IsWriteRows() || ev.IsDeleteRows() || ev.IsUpdateRows(): + // The existence of before and after images can be used to + // identify statememt types. It's also possible that the + // before and after images end up going to different shards. + // If so, an update will be treated as delete on one shard + // and insert on the other. + id := ev.TableID(vs.format) + plan := vs.plans[id] + if plan == nil { + return nil, nil + } + rows, err := ev.Rows(vs.format, plan.Table.TableMap) + if err != nil { + return nil, err + } + rowChanges := make([]*binlogdatapb.RowChange, 0, len(rows.Rows)) + for _, row := range rows.Rows { + beforeOK, beforeValues, err := vs.extractRowAndFilter(plan, row.Identify, rows.IdentifyColumns, row.NullIdentifyColumns) + if err != nil { + return nil, err + } + afterOK, afterValues, err := vs.extractRowAndFilter(plan, row.Data, rows.DataColumns, row.NullColumns) + if err != nil { + return nil, err + } + if !beforeOK && !afterOK { + continue + } + rowChange := &binlogdatapb.RowChange{} + if beforeOK { + rowChange.Before = sqltypes.RowToProto3(beforeValues) + } + if afterOK { + rowChange.After = sqltypes.RowToProto3(afterValues) + } + rowChanges = append(rowChanges, rowChange) + } + if len(rowChanges) != 0 { + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_ROW, + RowEvent: &binlogdatapb.RowEvent{ + TableName: plan.Table.Name, + RowChanges: rowChanges, + }, + }) + } + } + for _, vevent := range vevents { + vevent.Timestamp = int64(ev.Timestamp()) + } + return vevents, nil +} + +func (vs *vstreamer) rebuildPlans() error { + for id, plan := range vs.plans { + if plan == nil { + // If a table has no plan, a kschema change will not + // cause that to change. 
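+ // In other words, only tables that are currently streamed get re-planned
+ // here; an excluded table stays excluded until a new table id is seen.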
+ continue + } + newPlan, err := buildPlan(plan.Table, vs.kschema, vs.filter) + if err != nil { + return err + } + vs.plans[id] = newPlan + } + return nil +} + +func (vs *vstreamer) extractRowAndFilter(plan *Plan, data []byte, dataColumns, nullColumns mysql.Bitmap) (bool, []sqltypes.Value, error) { + if len(data) == 0 { + return false, nil, nil + } + values := make([]sqltypes.Value, dataColumns.Count()) + valueIndex := 0 + pos := 0 + for colNum := 0; colNum < dataColumns.Count(); colNum++ { + if !dataColumns.Bit(colNum) { + return false, nil, fmt.Errorf("partial row image encountered: ensure binlog_row_image is set to 'full'") + } + if nullColumns.Bit(valueIndex) { + valueIndex++ + continue + } + value, l, err := mysql.CellValue(data, pos, plan.Table.Types[colNum], plan.Table.Metadata[colNum], plan.Table.Columns[colNum].Type) + if err != nil { + return false, nil, err + } + pos += l + values[colNum] = value + valueIndex++ + } + return plan.filter(values) +} + +func wrapError(err error, stopPos mysql.Position) error { + if err != nil { + err = fmt.Errorf("stream error @ %v: %v", stopPos, err) + log.Error(err) + return err + } + log.Infof("stream ended @ %v", stopPos) + return nil +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go new file mode 100644 index 0000000000..517e19534a --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go @@ -0,0 +1,1001 @@ +/* +Copyright 2018 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vstreamer + +import ( + "fmt" + "strings" + "testing" + + "golang.org/x/net/context" + + "vitess.io/vitess/go/mysql" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +type testcase struct { + input interface{} + output [][]string +} + +func TestStatements(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table stream1(id int, val varbinary(128), primary key(id))", + "create table stream2(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table stream1", + "drop table stream2", + }) + engine.se.Reload(context.Background()) + + testcases := []testcase{{ + input: []string{ + "begin", + "insert into stream1 values (1, 'aaa')", + "update stream1 set val='bbb' where id = 1", + "commit", + }, + // MySQL issues GTID->BEGIN. + // MariaDB issues BEGIN->GTID. + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `type:ROW row_event: after: > > `, + `commit`, + }}, + }, { + // Normal DDL. + input: "alter table stream1 change column val val varbinary(128)", + output: [][]string{{ + `gtid`, + `type:DDL ddl:"alter table stream1 change column val val varbinary(128)" `, + }}, + }, { + // DDL padded with comments. 
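+ // The comments are expected to survive in the forwarded DDL event.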
+ input: " /* prefix */ alter table stream1 change column val val varbinary(256) /* suffix */ ", + output: [][]string{{ + `gtid`, + `type:DDL ddl:"/* prefix */ alter table stream1 change column val val varbinary(256) /* suffix */" `, + }}, + }, { + // Multiple tables, and multiple rows changed per statement. + input: []string{ + "begin", + "insert into stream1 values (2, 'bbb')", + "insert into stream2 values (1, 'aaa')", + "update stream1 set val='ccc'", + "delete from stream1", + "commit", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `type:ROW row_event: after: > ` + + `row_changes: after: > > `, + `type:ROW row_event: > ` + + `row_changes: > > `, + `commit`, + }}, + }, { + // truncate is a DDL + input: "truncate table stream2", + output: [][]string{{ + `gtid`, + `type:DDL ddl:"truncate table stream2" `, + }}, + }, { + // repair, optimize and analyze show up in binlog stream, but ignored by vitess. + input: "repair table stream2", + }, { + input: "optimize table stream2", + }, { + input: "analyze table stream2", + }, { + // select, set, show, analyze and describe don't get logged. + input: "select * from stream1", + }, { + input: "set @val=1", + }, { + input: "show tables", + }, { + input: "analyze table stream1", + }, { + input: "describe stream1", + }} + runCases(t, nil, testcases) +} + +func TestRegexp(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table yes_stream(id int, val varbinary(128), primary key(id))", + "create table no_stream(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table yes_stream", + "drop table no_stream", + }) + engine.se.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/yes.*/", + }}, + } + + testcases := []testcase{{ + input: []string{ + "begin", + "insert into yes_stream values (1, 'aaa')", + "insert into no_stream values (2, 'bbb')", + "update yes_stream set val='bbb' where id = 1", + "update no_stream set val='bbb' where id = 2", + "commit", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `type:ROW row_event: after: > > `, + `commit`, + }}, + }} + runCases(t, filter, testcases) +} + +func TestREKeyrange(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*/", + Filter: "-80", + }}, + } + ch := startStream(ctx, t, filter) + + if err := env.SetVSchema(shardedVSchema); err != nil { + t.Fatal(err) + } + defer env.SetVSchema("{}") + + // 1, 2, 3 and 5 are in shard -80. + // 4 and 6 are in shard 80-. + input := []string{ + "begin", + "insert into t1 values (1, 4, 'aaa')", + "insert into t1 values (4, 1, 'bbb')", + // Stay in shard. + "update t1 set id1 = 2 where id1 = 1", + // Move from -80 to 80-. + "update t1 set id1 = 6 where id1 = 2", + // Move from 80- to -80. 
+ "update t1 set id1 = 3 where id1 = 4", + "commit", + } + execStatements(t, input) + expectLog(ctx, t, input, ch, [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: fields: > `, + `type:ROW row_event: > > `, + `type:ROW row_event: after: > > `, + `type:ROW row_event: > > `, + `type:ROW row_event: > > `, + `commit`, + }}) + + // Switch the vschema to make id2 the primary vindex. + altVSchema := `{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id2", + "name": "hash" + } + ] + } + } +}` + if err := env.SetVSchema(altVSchema); err != nil { + t.Fatal(err) + } + + // Only the first insert should be sent. + input = []string{ + "begin", + "insert into t1 values (4, 1, 'aaa')", + "insert into t1 values (1, 4, 'aaa')", + "commit", + } + execStatements(t, input) + expectLog(ctx, t, input, ch, [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:ROW row_event: > > `, + `commit`, + }}) +} + +func TestSelectFilter(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select id2, val from t1 where in_keyrange(id2, 'hash', '-80')", + }}, + } + + testcases := []testcase{{ + input: []string{ + "begin", + "insert into t1 values (4, 1, 'aaa')", + "insert into t1 values (2, 4, 'aaa')", + "commit", + }, + // MySQL issues GTID->BEGIN. + // MariaDB issues BEGIN->GTID. + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }} + runCases(t, filter, testcases) +} + +func TestSelectExpressions(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table expr_test(id int, val bigint, primary key(id))", + }) + defer execStatements(t, []string{ + "drop table expr_test", + }) + engine.se.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "expr_test", + Filter: "select id, val, month(val), day(val), hour(val) from expr_test", + }}, + } + + testcases := []testcase{{ + input: []string{ + "begin", + "insert into expr_test values (1, 1546392881)", + "commit", + }, + // MySQL issues GTID->BEGIN. + // MariaDB issues BEGIN->GTID. + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }} + runCases(t, filter, testcases) +} + +func TestDDLAddColumn(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table ddl_test1(id int, val1 varbinary(128), primary key(id))", + "create table ddl_test2(id int, val1 varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table ddl_test1", + "drop table ddl_test2", + }) + + // Record position before the next few statements. + pos := masterPosition(t) + execStatements(t, []string{ + "begin", + "insert into ddl_test1 values(1, 'aaa')", + "insert into ddl_test2 values(1, 'aaa')", + "commit", + // Adding columns is allowed. 
+ "alter table ddl_test1 add column val2 varbinary(128)", + "alter table ddl_test2 add column val2 varbinary(128)", + "begin", + "insert into ddl_test1 values(2, 'bbb', 'ccc')", + "insert into ddl_test2 values(2, 'bbb', 'ccc')", + "commit", + }) + engine.se.Reload(context.Background()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Test RE as well as select-based filters. + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "ddl_test2", + Filter: "select * from ddl_test2", + }, { + Match: "/.*/", + }}, + } + + ch := make(chan []*binlogdatapb.VEvent) + go func() { + defer close(ch) + if err := vstream(ctx, t, pos, filter, ch); err != nil { + t.Fatal(err) + } + }() + expectLog(ctx, t, "ddls", ch, [][]string{{ + // Current schema has 3 columns, but they'll be truncated to match the two columns in the event. + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `commit`, + }, { + `gtid`, + `type:DDL ddl:"alter table ddl_test1 add column val2 varbinary(128)" `, + }, { + `gtid`, + `type:DDL ddl:"alter table ddl_test2 add column val2 varbinary(128)" `, + }, { + // The plan will be updated to now include the third column + // because the new table map will have three columns. + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: fields: > `, + `type:ROW row_event: > > `, + `type:FIELD field_event: fields: fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}) +} + +func TestDDLDropColumn(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatement(t, "create table ddl_test2(id int, val1 varbinary(128), val2 varbinary(128), primary key(id))") + defer execStatement(t, "drop table ddl_test2") + + // Record position before the next few statements. + pos := masterPosition(t) + execStatements(t, []string{ + "insert into ddl_test2 values(1, 'aaa', 'ccc')", + // Adding columns is allowed. + "alter table ddl_test2 drop column val2", + "insert into ddl_test2 values(2, 'bbb')", + }) + engine.se.Reload(context.Background()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan []*binlogdatapb.VEvent) + go func() { + for range ch { + } + }() + defer close(ch) + err := vstream(ctx, t, pos, nil, ch) + want := "cannot determine table columns" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("err: %v, must contain %s", err, want) + } +} + +func TestUnsentDDL(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatement(t, "create table unsent(id int, val varbinary(128), primary key(id))") + + testcases := []testcase{{ + input: []string{ + "drop table unsent", + }, + // An unsent DDL is sent as an empty transaction. + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `commit`, + }}, + }} + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/none/", + }}, + } + runCases(t, filter, testcases) +} + +func TestBuffering(t *testing.T) { + if testing.Short() { + t.Skip() + } + + savedSize := *packetSize + *packetSize = 10 + defer func() { *packetSize = savedSize }() + + execStatement(t, "create table packet_test(id int, val varbinary(128), primary key(id))") + defer execStatement(t, "drop table packet_test") + engine.se.Reload(context.Background()) + + testcases := []testcase{{ + // All rows in one packet. 
+ input: []string{ + "begin", + "insert into packet_test values (1, '123')", + "insert into packet_test values (2, '456')", + "commit", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }, { + // A new row causes packet size to be exceeded. + // Also test deletes + input: []string{ + "begin", + "insert into packet_test values (3, '123456')", + "insert into packet_test values (4, '789012')", + "delete from packet_test where id=3", + "delete from packet_test where id=4", + "commit", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:ROW row_event: > > `, + }, { + `type:ROW row_event: > > `, + }, { + `type:ROW row_event: > > `, + }, { + `type:ROW row_event: > > `, + `commit`, + }}, + }, { + // A single row is itself bigger than the packet size. + input: []string{ + "begin", + "insert into packet_test values (5, '123456')", + "insert into packet_test values (6, '12345678901')", + "insert into packet_test values (7, '23456')", + "commit", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:ROW row_event: > > `, + }, { + `type:ROW row_event: > > `, + }, { + `type:ROW row_event: > > `, + `commit`, + }}, + }, { + // An update packet is bigger because it has a before and after image. + input: []string{ + "begin", + "insert into packet_test values (8, '123')", + "update packet_test set val='456' where id=8", + "commit", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:ROW row_event: > > `, + }, { + `type:ROW row_event: after: > > `, + `commit`, + }}, + }, { + // DDL is in its own packet + input: []string{ + "alter table packet_test change val val varchar(128)", + }, + output: [][]string{{ + `gtid`, + `type:DDL ddl:"alter table packet_test change val val varchar(128)" `, + }}, + }} + runCases(t, nil, testcases) +} + +func TestTypes(t *testing.T) { + if testing.Short() { + t.Skip() + } + + // Modeled after vttablet endtoend compatibility tests. 
+ execStatements(t, []string{ + "create table vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny))", + "create table vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id))", + "create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb))", + "create table vitess_misc(id int, b bit(8), d date, dt datetime, t time, g geometry, primary key(id))", + "create table vitess_null(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table vitess_ints", + "drop table vitess_fracts", + "drop table vitess_strings", + "drop table vitess_misc", + "drop table vitess_null", + }) + engine.se.Reload(context.Background()) + + testcases := []testcase{{ + input: []string{ + "insert into vitess_ints values(-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }, { + input: []string{ + "insert into vitess_fracts values(1, 1.99, 2.99, 3.99, 4.99)", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }, { + // TODO(sougou): validate that binary and char data generate correct DMLs on the other end. + input: []string{ + "insert into vitess_strings values('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a', 'a,b')", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }, { + // TODO(sougou): validate that the geometry value generates the correct DMLs on the other end. + input: []string{ + "insert into vitess_misc values(1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: ` + + `fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }, { + input: []string{ + "insert into vitess_null values(1, null)", + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }} + runCases(t, nil, testcases) +} + +func TestJSON(t *testing.T) { + t.Skip("This test is disabled because every flavor of mysql has a different behavior.") + + // JSON is supported only after mysql57. + if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "create table vitess_json(id int default 1, val json, primary key(id))"); err != nil { + // If it's a syntax error, MySQL is an older version. Skip this test. 
+ if strings.Contains(err.Error(), "syntax") { + return + } + t.Fatal(err) + } + defer execStatement(t, "drop table vitess_json") + engine.se.Reload(context.Background()) + + testcases := []testcase{{ + input: []string{ + `insert into vitess_json values(1, '{"foo": "bar"}')`, + }, + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `type:FIELD field_event: fields: > `, + `type:ROW row_event: > > `, + `commit`, + }}, + }} + runCases(t, nil, testcases) +} + +func TestExternalTable(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create database external", + "create table external.ext(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop database external", + }) + engine.se.Reload(context.Background()) + + testcases := []testcase{{ + input: []string{ + "begin", + "insert into external.ext values (1, 'aaa')", + "commit", + }, + // External table events don't get sent. + output: [][]string{{ + `gtid|begin`, + `gtid|begin`, + `commit`, + }}, + }} + runCases(t, nil, testcases) +} + +func TestMinimalMode(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table t1(id int, val1 varbinary(128), val2 varbinary(128), primary key(id))", + "insert into t1 values(1, 'aaa', 'bbb')", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + + // Record position before the next few statements. + pos := masterPosition(t) + execStatements(t, []string{ + "set @@session.binlog_row_image='minimal'", + "update t1 set val1='bbb' where id=1", + "set @@session.binlog_row_image='full'", + }) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan []*binlogdatapb.VEvent) + go func() { + for evs := range ch { + t.Errorf("received: %v", evs) + } + }() + defer close(ch) + err := vstream(ctx, t, pos, nil, ch) + want := "partial row image encountered" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("err: %v, must contain '%s'", err, want) + } +} + +func TestStatementMode(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + "create table t1(id int, val1 varbinary(128), val2 varbinary(128), primary key(id))", + "insert into t1 values(1, 'aaa', 'bbb')", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + + // Record position before the next few statements. 
+ pos := masterPosition(t) + execStatements(t, []string{ + "set @@session.binlog_format='statement'", + "update t1 set val1='bbb' where id=1", + "set @@session.binlog_format='row'", + }) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan []*binlogdatapb.VEvent) + go func() { + for evs := range ch { + t.Errorf("received: %v", evs) + } + }() + defer close(ch) + err := vstream(ctx, t, pos, nil, ch) + want := "unexpected statement type" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("err: %v, must contain '%s'", err, want) + } +} + +func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase) { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := startStream(ctx, t, filter) + + for _, tcase := range testcases { + switch input := tcase.input.(type) { + case []string: + execStatements(t, input) + case string: + execStatement(t, input) + default: + t.Fatalf("unexpected input: %#v", input) + } + expectLog(ctx, t, tcase.input, ch, tcase.output) + } + cancel() + if evs, ok := <-ch; ok { + t.Fatalf("unexpected evs: %v", evs) + } +} + +func expectLog(ctx context.Context, t *testing.T, input interface{}, ch <-chan []*binlogdatapb.VEvent, output [][]string) { + t.Helper() + + for _, wantset := range output { + var evs []*binlogdatapb.VEvent + var ok bool + select { + case evs, ok = <-ch: + if !ok { + t.Fatal("stream ended early") + } + case <-ctx.Done(): + t.Fatal("stream ended early") + } + if len(wantset) != len(evs) { + t.Fatalf("%v: evs\n%v, want\n%v", input, evs, wantset) + } + for i, want := range wantset { + switch want { + case "gtid|begin": + if evs[i].Type != binlogdatapb.VEventType_GTID && evs[i].Type != binlogdatapb.VEventType_BEGIN { + t.Fatalf("%v (%d): event: %v, want gtid or begin", input, i, evs[i]) + } + case "gtid": + if evs[i].Type != binlogdatapb.VEventType_GTID { + t.Fatalf("%v (%d): event: %v, want gtid", input, i, evs[i]) + } + case "commit": + if evs[i].Type != binlogdatapb.VEventType_COMMIT { + t.Fatalf("%v (%d): event: %v, want commit", input, i, evs[i]) + } + default: + if evs[i].Timestamp == 0 { + t.Fatalf("evs[%d].Timestamp: 0, want non-zero", i) + } + evs[i].Timestamp = 0 + if got := fmt.Sprintf("%v", evs[i]); got != want { + t.Fatalf("%v (%d): event:\n%q, want\n%q", input, i, got, want) + } + } + } + } +} + +func startStream(ctx context.Context, t *testing.T, filter *binlogdatapb.Filter) <-chan []*binlogdatapb.VEvent { + pos := masterPosition(t) + + ch := make(chan []*binlogdatapb.VEvent) + go func() { + defer close(ch) + if err := vstream(ctx, t, pos, filter, ch); err != nil { + t.Error(err) + } + }() + return ch +} + +func vstream(ctx context.Context, t *testing.T, pos string, filter *binlogdatapb.Filter, ch chan []*binlogdatapb.VEvent) error { + if filter == nil { + filter = &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*/", + }}, + } + } + return engine.Stream(ctx, pos, filter, func(evs []*binlogdatapb.VEvent) error { + t.Logf("Received events: %v", evs) + select { + case ch <- evs: + case <-ctx.Done(): + return fmt.Errorf("stream ended early") + } + return nil + }) +} + +func execStatement(t *testing.T, query string) { + t.Helper() + if err := env.Mysqld.ExecuteSuperQuery(context.Background(), query); err != nil { + t.Fatal(err) + } +} + +func execStatements(t *testing.T, queries []string) { + t.Helper() + if err := env.Mysqld.ExecuteSuperQueryList(context.Background(), queries); err != nil { + t.Fatal(err) + } +} + 
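+// masterPosition returns the current master binlog position. Tests record it
+// before executing statements so the stream can be started just ahead of the
+// statements under test.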
+func masterPosition(t *testing.T) string { + t.Helper() + pos, err := env.Mysqld.MasterPosition() + if err != nil { + t.Fatal(err) + } + return mysql.EncodePosition(pos) +} diff --git a/go/vt/vttablet/tmclient/rpc_client_api.go b/go/vt/vttablet/tmclient/rpc_client_api.go index d02115afb4..77c344d35d 100644 --- a/go/vt/vttablet/tmclient/rpc_client_api.go +++ b/go/vt/vttablet/tmclient/rpc_client_api.go @@ -89,6 +89,10 @@ type TabletManagerClient interface { // ApplySchema will apply a schema change ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) + LockTables(ctx context.Context, tablet *topodatapb.Tablet) error + + UnlockTables(ctx context.Context, tablet *topodatapb.Tablet) error + // ExecuteFetchAsDba executes a query remotely using the DBA pool. // If usePool is set, a connection pool may be used to make the // query faster. Close() should close the pool in that case. @@ -122,6 +126,9 @@ type TabletManagerClient interface { // StartSlave starts the mysql replication StartSlave(ctx context.Context, tablet *topodatapb.Tablet) error + // StartSlaveUntilAfter starts replication until after the position specified + StartSlaveUntilAfter(ctx context.Context, tablet *topodatapb.Tablet, position string, duration time.Duration) error + // TabletExternallyReparented tells a tablet it is now the master, after an // external tool has already promoted the underlying mysqld to master and // reparented the other mysqld servers to it. diff --git a/go/vt/worker/defaults.go b/go/vt/worker/defaults.go index 7bf60295f7..adf2ccf354 100644 --- a/go/vt/worker/defaults.go +++ b/go/vt/worker/defaults.go @@ -46,11 +46,12 @@ const ( // StreamExecute response. As of 06/2015, the default for it was 32 kB. // Note that higher values for this flag --destination_pack_count will // increase memory consumption in vtworker, vttablet and mysql. 
- defaultDestinationPackCount = 10 - defaultDestinationWriterCount = 20 - defaultMinHealthyRdonlyTablets = 2 - defaultDestTabletType = "RDONLY" - defaultParallelDiffsCount = 8 - defaultMaxTPS = throttler.MaxRateModuleDisabled - defaultMaxReplicationLag = throttler.ReplicationLagModuleDisabled + defaultDestinationPackCount = 10 + defaultDestinationWriterCount = 20 + defaultMinHealthyTablets = 2 + defaultDestTabletType = "RDONLY" + defaultParallelDiffsCount = 8 + defaultMaxTPS = throttler.MaxRateModuleDisabled + defaultMaxReplicationLag = throttler.ReplicationLagModuleDisabled + defaultUseConsistentSnapshot = false ) diff --git a/go/vt/worker/diff_utils.go b/go/vt/worker/diff_utils.go index 43b469fd21..4ff6ebc92a 100644 --- a/go/vt/worker/diff_utils.go +++ b/go/vt/worker/diff_utils.go @@ -27,6 +27,8 @@ import ( "time" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + "vitess.io/vitess/go/vt/wrangler" "golang.org/x/net/context" @@ -36,6 +38,7 @@ import ( "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -91,6 +94,61 @@ func NewQueryResultReaderForTablet(ctx context.Context, ts *topo.Server, tabletA }, nil } +// NewTransactionalQueryResultReaderForTablet creates a new QueryResultReader for +// the provided tablet / sql query, and runs it in an existing transaction +func NewTransactionalQueryResultReaderForTablet(ctx context.Context, ts *topo.Server, tabletAlias *topodatapb.TabletAlias, sql string, txID int64) (*QueryResultReader, error) { + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + tablet, err := ts.GetTablet(shortCtx, tabletAlias) + cancel() + if err != nil { + return nil, err + } + + conn, err := tabletconn.GetDialer()(tablet.Tablet, grpcclient.FailFast(false)) + if err != nil { + return nil, err + } + + stream := queryservice.ExecuteWithTransactionalStreamer(ctx, conn, &querypb.Target{ + Keyspace: tablet.Tablet.Keyspace, + Shard: tablet.Tablet.Shard, + TabletType: tablet.Tablet.Type, + }, sql, make(map[string]*querypb.BindVariable), txID, nil) + + // read the columns, or grab the error + cols, err := stream.Recv() + if err != nil { + return nil, fmt.Errorf("Cannot read Fields for query '%v': %v", sql, err) + } + + return &QueryResultReader{ + output: stream, + fields: cols.Fields, + conn: conn, + }, nil +} + +// RollbackTransaction rolls back the transaction +func RollbackTransaction(ctx context.Context, ts *topo.Server, tabletAlias *topodatapb.TabletAlias, txID int64) error { + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + tablet, err := ts.GetTablet(shortCtx, tabletAlias) + cancel() + if err != nil { + return err + } + + conn, err := tabletconn.GetDialer()(tablet.Tablet, grpcclient.FailFast(false)) + if err != nil { + return err + } + + return conn.Rollback(ctx, &querypb.Target{ + Keyspace: tablet.Tablet.Keyspace, + Shard: tablet.Tablet.Shard, + TabletType: tablet.Tablet.Type, + }, txID) +} + // Next returns the next result on the stream. It implements ResultReader. func (qrr *QueryResultReader) Next() (*sqltypes.Result, error) { return qrr.output.Recv() @@ -102,8 +160,8 @@ func (qrr *QueryResultReader) Fields() []*querypb.Field { } // Close closes the connection to the tablet. 
-func (qrr *QueryResultReader) Close(ctx context.Context) error { - return qrr.conn.Close(ctx) +func (qrr *QueryResultReader) Close(ctx context.Context) { + qrr.conn.Close(ctx) } // v3KeyRangeFilter is a sqltypes.ResultStream implementation that filters @@ -201,6 +259,26 @@ func TableScan(ctx context.Context, log logutil.Logger, ts *topo.Server, tabletA return NewQueryResultReaderForTablet(ctx, ts, tabletAlias, sql) } +// TransactionalTableScan does the same thing as TableScan, but runs inside a transaction +func TransactionalTableScan(ctx context.Context, log logutil.Logger, ts *topo.Server, tabletAlias *topodatapb.TabletAlias, txID int64, td *tabletmanagerdatapb.TableDefinition) (*QueryResultReader, error) { + sql := fmt.Sprintf("SELECT %v FROM %v", strings.Join(escapeAll(orderedColumns(td)), ", "), sqlescape.EscapeID(td.Name)) + if len(td.PrimaryKeyColumns) > 0 { + sql += fmt.Sprintf(" ORDER BY %v", strings.Join(escapeAll(td.PrimaryKeyColumns), ", ")) + } + log.Infof("SQL query for %v/%v: %v", topoproto.TabletAliasString(tabletAlias), td.Name, sql) + return NewTransactionalQueryResultReaderForTablet(ctx, ts, tabletAlias, sql, txID) +} + +// CreateTargetFrom is a helper function +func CreateTargetFrom(tablet *topodatapb.Tablet) *query.Target { + return &query.Target{ + Cell: tablet.Alias.Cell, + Keyspace: tablet.Keyspace, + Shard: tablet.Shard, + TabletType: tablet.Type, + } +} + // TableScanByKeyRange returns a QueryResultReader that gets all the // rows from a table that match the supplied KeyRange, ordered by // Primary Key. The returned columns are ordered with the Primary Key @@ -572,3 +650,129 @@ func (rd *RowDiffer) Go(log logutil.Logger) (dr DiffReport, err error) { advanceRight = true } } + +// createTransactions returns an array of transactions that all share the same view of the data. 
+// It will check that no new transactions have been seen between the creation of the underlying transactions, +// to guarantee that all TransactionalTableScanner are pointing to the same point +func createTransactions(ctx context.Context, numberOfScanners int, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, queryService queryservice.QueryService, target *query.Target, tabletInfo *topodatapb.Tablet) ([]int64, error) { + scanners := make([]int64, numberOfScanners) + for i := 0; i < numberOfScanners; i++ { + + tx, err := queryService.Begin(ctx, target, &query.ExecuteOptions{ + // Make sure our tx is not killed by tx sniper + Workload: query.ExecuteOptions_DBA, + TransactionIsolation: query.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY, + }) + if err != nil { + return nil, fmt.Errorf("could not open transaction on %v\n%v", topoproto.TabletAliasString(tabletInfo.Alias), err) + } + + // Remember to rollback the transactions + cleaner.Record("CloseTransaction", topoproto.TabletAliasString(tabletInfo.Alias), func(ctx context.Context, wr *wrangler.Wrangler) error { + queryService, err := tabletconn.GetDialer()(tabletInfo, true) + if err != nil { + return err + } + return queryService.Rollback(ctx, target, tx) + }) + + scanners[i] = tx + } + + return scanners, nil +} + +// TableScanner is a simple abstraction that allows a TableScanner user to remain impervious +// by the transactionality of the connection +type TableScanner interface { + ScanTable(ctx context.Context, td *tabletmanagerdatapb.TableDefinition) (*QueryResultReader, error) +} + +// TransactionalTableScanner works inside of a transaction set up with CONSISTENT SNAPSHOT +type TransactionalTableScanner struct { + wr *wrangler.Wrangler + cleaner *wrangler.Cleaner + tabletAlias *topodatapb.TabletAlias + queryService queryservice.QueryService + tx int64 +} + +// ScanTable performs a full table scan, ordered by the primary keys, if any +func (tt TransactionalTableScanner) ScanTable(ctx context.Context, td *tabletmanagerdatapb.TableDefinition) (*QueryResultReader, error) { + return TransactionalTableScan(ctx, tt.wr.Logger(), tt.wr.TopoServer(), tt.tabletAlias, tt.tx, td) +} + +// NonTransactionalTableScanner just passes through the queries, and relies on paused replication traffic taking care of the consistent snapshot part +type NonTransactionalTableScanner struct { + wr *wrangler.Wrangler + cleaner *wrangler.Cleaner + tabletAlias *topodatapb.TabletAlias + queryService queryservice.QueryService +} + +// ScanTable performs a full table scan, ordered by the primary keys, if any +func (ntts NonTransactionalTableScanner) ScanTable(ctx context.Context, td *tabletmanagerdatapb.TableDefinition) (*QueryResultReader, error) { + return TableScan(ctx, ntts.wr.Logger(), ntts.wr.TopoServer(), ntts.tabletAlias, td) +} + +// CreateConsistentTableScanners will momentarily stop updates on the tablet, and then create connections that are all +// consistent snapshots of the same point in the transaction history +func CreateConsistentTableScanners(ctx context.Context, tablet *topo.TabletInfo, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, numberOfScanners int) ([]TableScanner, string, error) { + txs, gtid, err := CreateConsistentTransactions(ctx, tablet, wr, cleaner, numberOfScanners) + if err != nil { + return nil, "", err + } + + queryService, err := tabletconn.GetDialer()(tablet.Tablet, true) + defer queryService.Close(ctx) + + scanners := make([]TableScanner, numberOfScanners) + for i, tx := range txs { + scanners[i] = TransactionalTableScanner{ + wr: 
wr, + cleaner: cleaner, + tabletAlias: tablet.Alias, + queryService: queryService, + tx: tx, + } + } + + return scanners, gtid, nil +} + +// CreateConsistentTransactions creates a number of consistent snapshot transactions, +// all starting from the same spot in the tx log +func CreateConsistentTransactions(ctx context.Context, tablet *topo.TabletInfo, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, numberOfScanners int) ([]int64, string, error) { + tm := tmclient.NewTabletManagerClient() + defer tm.Close() + + // Lock all tables with a read lock to pause replication + err := tm.LockTables(ctx, tablet.Tablet) + if err != nil { + return nil, "", fmt.Errorf("could not lock tables on %v\n%v", topoproto.TabletAliasString(tablet.Tablet.Alias), err) + } + defer func() { + tm := tmclient.NewTabletManagerClient() + defer tm.Close() + tm.UnlockTables(ctx, tablet.Tablet) + wr.Logger().Infof("tables unlocked on %v", topoproto.TabletAliasString(tablet.Tablet.Alias)) + }() + + wr.Logger().Infof("tables locked on %v", topoproto.TabletAliasString(tablet.Tablet.Alias)) + target := CreateTargetFrom(tablet.Tablet) + + // Create transactions + queryService, err := tabletconn.GetDialer()(tablet.Tablet, true) + defer queryService.Close(ctx) + connections, err := createTransactions(ctx, numberOfScanners, wr, cleaner, queryService, target, tablet.Tablet) + if err != nil { + return nil, "", fmt.Errorf("failed to create transactions on %v: %v", topoproto.TabletAliasString(tablet.Tablet.Alias), err) + } + wr.Logger().Infof("transactions created on %v", topoproto.TabletAliasString(tablet.Tablet.Alias)) + executedGtid, err := tm.MasterPosition(ctx, tablet.Tablet) + if err != nil { + return nil, "", fmt.Errorf("could not read executed GTID set on %v\n%v", topoproto.TabletAliasString(tablet.Tablet.Alias), err) + } + + return connections, executedGtid, nil +} diff --git a/go/vt/worker/legacy_split_clone_cmd.go b/go/vt/worker/legacy_split_clone_cmd.go index ff48c291d4..ab73cca390 100644 --- a/go/vt/worker/legacy_split_clone_cmd.go +++ b/go/vt/worker/legacy_split_clone_cmd.go @@ -92,7 +92,7 @@ func commandLegacySplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag sourceReaderCount := subFlags.Int("source_reader_count", defaultSourceReaderCount, "number of concurrent streaming queries to use on the source") destinationPackCount := subFlags.Int("destination_pack_count", defaultDestinationPackCount, "number of packets to pack in one destination insert") destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination") - minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets before taking out one") + minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyTablets, "minimum number of healthy RDONLY tablets before taking out one") maxTPS := subFlags.Int64("max_tps", defaultMaxTPS, "if non-zero, limit copy to maximum number of (write) transactions/second on the destination (unlimited by default)") if err := subFlags.Parse(args); err != nil { return nil, err @@ -146,7 +146,7 @@ func interactiveLegacySplitClone(ctx context.Context, wi *Instance, wr *wrangler result["DefaultSourceReaderCount"] = fmt.Sprintf("%v", defaultSourceReaderCount) result["DefaultDestinationPackCount"] = fmt.Sprintf("%v", defaultDestinationPackCount) result["DefaultDestinationWriterCount"] = fmt.Sprintf("%v", defaultDestinationWriterCount) 
- result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets) + result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyTablets) result["DefaultMaxTPS"] = fmt.Sprintf("%v", defaultMaxTPS) return nil, legacySplitCloneTemplate2, result, nil } diff --git a/go/vt/worker/legacy_split_clone_test.go b/go/vt/worker/legacy_split_clone_test.go index 8754f0c7d6..1bdd773fb8 100644 --- a/go/vt/worker/legacy_split_clone_test.go +++ b/go/vt/worker/legacy_split_clone_test.go @@ -238,7 +238,7 @@ type legacyTestQueryService struct { *fakes.StreamHealthQueryService } -func (sq *legacyTestQueryService) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { +func (sq *legacyTestQueryService) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { // Custom parsing of the query we expect. min := legacySplitCloneTestMin max := legacySplitCloneTestMax diff --git a/go/vt/worker/multi_split_diff.go b/go/vt/worker/multi_split_diff.go index dbf899883f..56ec444d4f 100644 --- a/go/vt/worker/multi_split_diff.go +++ b/go/vt/worker/multi_split_diff.go @@ -19,28 +19,37 @@ package worker import ( "fmt" "html/template" + "sort" "sync" - - "vitess.io/vitess/go/vt/vterrors" + "time" "golang.org/x/net/context" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vttablet/queryservice" + "vitess.io/vitess/go/vt/vttablet/tabletconn" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/wrangler" - "sort" - - "time" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" ) +// One of these per paralell runner +type Scanners struct { + // this is how we get data from the source shard + sourceScanner TableScanner + + // this is how we get data from the destination shards - we'll have one scanner per shard we are reading from + destinationScanner []TableScanner +} + // MultiSplitDiffWorker executes a diff between a destination shard and its // source shards in a shard split case. type MultiSplitDiffWorker struct { @@ -51,10 +60,12 @@ type MultiSplitDiffWorker struct { keyspace string shard string excludeTables []string - minHealthyRdonlyTablets int + minHealthyTablets int parallelDiffsCount int waitForFixedTimeRatherThanGtidSet bool cleaner *wrangler.Cleaner + useConsistentSnapshot bool + tabletType topodatapb.TabletType // populated during WorkerStateInit, read-only after that keyspaceInfo *topo.KeyspaceInfo @@ -65,21 +76,24 @@ type MultiSplitDiffWorker struct { // populated during WorkerStateFindTargets, read-only after that sourceAlias *topodatapb.TabletAlias destinationAliases []*topodatapb.TabletAlias // matches order of destinationShards + scanners []Scanners } // NewMultiSplitDiffWorker returns a new MultiSplitDiffWorker object. 
-func NewMultiSplitDiffWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string, minHealthyRdonlyTablets, parallelDiffsCount int, waitForFixedTimeRatherThanGtidSet bool) Worker { +func NewMultiSplitDiffWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string, minHealthyTablets, parallelDiffsCount int, waitForFixedTimeRatherThanGtidSet bool, useConsistentSnapshot bool, tabletType topodatapb.TabletType) Worker { return &MultiSplitDiffWorker{ - waitForFixedTimeRatherThanGtidSet: waitForFixedTimeRatherThanGtidSet, StatusWorker: NewStatusWorker(), wr: wr, cell: cell, keyspace: keyspace, shard: shard, excludeTables: excludeTables, - minHealthyRdonlyTablets: minHealthyRdonlyTablets, + minHealthyTablets: minHealthyTablets, parallelDiffsCount: parallelDiffsCount, cleaner: &wrangler.Cleaner{}, + useConsistentSnapshot: useConsistentSnapshot, + waitForFixedTimeRatherThanGtidSet: waitForFixedTimeRatherThanGtidSet, + tabletType: tabletType, } } @@ -144,7 +158,7 @@ func (msdw *MultiSplitDiffWorker) Run(ctx context.Context) error { func (msdw *MultiSplitDiffWorker) run(ctx context.Context) error { // first state: read what we need to do if err := msdw.init(ctx); err != nil { - return fmt.Errorf("init() failed: %v", err) + return vterrors.Wrap(err, "init() failed") } if err := checkDone(ctx); err != nil { return err @@ -152,15 +166,15 @@ func (msdw *MultiSplitDiffWorker) run(ctx context.Context) error { // second state: find targets if err := msdw.findTargets(ctx); err != nil { - return fmt.Errorf("findTargets() failed: %v", err) + return vterrors.Wrap(err, "findTargets() failed") } if err := checkDone(ctx); err != nil { return err } // third phase: synchronize replication - if err := msdw.synchronizeReplication(ctx); err != nil { - return fmt.Errorf("synchronizeReplication() failed: %v", err) + if err := msdw.synchronizeSrcAndDestTxState(ctx); err != nil { + return vterrors.Wrap(err, "synchronizeSrcAndDestTxState() failed") } if err := checkDone(ctx); err != nil { return err @@ -168,7 +182,7 @@ func (msdw *MultiSplitDiffWorker) run(ctx context.Context) error { // fourth phase: diff if err := msdw.diff(ctx); err != nil { - return fmt.Errorf("diff() failed: %v", err) + return vterrors.Wrap(err, "diff() failed") } return checkDone(ctx) @@ -179,27 +193,33 @@ func (msdw *MultiSplitDiffWorker) run(ctx context.Context) error { func (msdw *MultiSplitDiffWorker) init(ctx context.Context) error { msdw.SetState(WorkerStateInit) + if msdw.useConsistentSnapshot { + msdw.wr.Logger().Infof("splitting using consistent snapshot") + } else { + msdw.wr.Logger().Infof("splitting using STOP SLAVE") + } + var err error shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) msdw.keyspaceInfo, err = msdw.wr.TopoServer().GetKeyspace(shortCtx, msdw.keyspace) cancel() if err != nil { - return fmt.Errorf("cannot read keyspace %v: %v", msdw.keyspace, err) + return vterrors.Wrapf(err, "cannot read keyspace %v", msdw.keyspace) } shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) msdw.shardInfo, err = msdw.wr.TopoServer().GetShard(shortCtx, msdw.keyspace, msdw.shard) cancel() if err != nil { - return fmt.Errorf("cannot read shard %v/%v: %v", msdw.keyspace, msdw.shard, err) + return vterrors.Wrapf(err, "cannot read shard %v/%v", msdw.keyspace, msdw.shard) } if !msdw.shardInfo.HasMaster() { - return fmt.Errorf("shard %v/%v has no master", msdw.keyspace, msdw.shard) + return vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "shard %v/%v has no master", msdw.keyspace, 
msdw.shard) } destinationShards, err := msdw.findDestinationShards(ctx) if err != nil { - return fmt.Errorf("findDestinationShards() failed for %v/%v/%v: %v", msdw.cell, msdw.keyspace, msdw.shard, err) + return vterrors.Wrapf(err, "findDestinationShards() failed for %v/%v/%v", msdw.cell, msdw.keyspace, msdw.shard) } msdw.destinationShards = destinationShards @@ -226,7 +246,7 @@ func (msdw *MultiSplitDiffWorker) findDestinationShards(ctx context.Context) ([] } if len(resultArray) == 0 { - return nil, fmt.Errorf("there are no destination shards") + return nil, vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "there are no destination shards") } return resultArray, nil } @@ -253,7 +273,7 @@ func (msdw *MultiSplitDiffWorker) findShardsInKeyspace(ctx context.Context, keys msdw.sourceUID = uid first = false } else if msdw.sourceUID != uid { - return nil, fmt.Errorf("found a source ID that was different, aborting. %v vs %v", msdw.sourceUID, uid) + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "found a source ID that was different, aborting. %v vs %v", msdw.sourceUID, uid) } resultArray = append(resultArray, shardInfo) @@ -287,47 +307,34 @@ func (msdw *MultiSplitDiffWorker) getShardInfo(ctx context.Context, keyspace str // - mark them all as 'worker' pointing back to us func (msdw *MultiSplitDiffWorker) findTargets(ctx context.Context) error { msdw.SetState(WorkerStateFindTargets) - var err error - // find an appropriate tablet in the source shard - msdw.sourceAlias, err = FindWorkerTablet( - ctx, - msdw.wr, - msdw.cleaner, - nil, /* tsc */ - msdw.cell, - msdw.keyspace, - msdw.shard, - 1, /* minHealthyTablets */ - topodatapb.TabletType_RDONLY) - if err != nil { - return fmt.Errorf("FindWorkerTablet() failed for %v/%v/%v: %v", msdw.cell, msdw.keyspace, msdw.shard, err) + var finderFunc func(keyspace string, shard string) (*topodatapb.TabletAlias, error) + if msdw.tabletType == topodatapb.TabletType_RDONLY { + finderFunc = func(keyspace string, shard string) (*topodatapb.TabletAlias, error) { + return FindWorkerTablet(ctx, msdw.wr, msdw.cleaner, nil /*tsc*/, msdw.cell, keyspace, shard, 1, topodatapb.TabletType_RDONLY) + } + } else { + finderFunc = func(keyspace string, shard string) (*topodatapb.TabletAlias, error) { + return FindHealthyTablet(ctx, msdw.wr, nil /*tsc*/, msdw.cell, keyspace, shard, 1, msdw.tabletType) + } + } + + msdw.sourceAlias, err = finderFunc(msdw.keyspace, msdw.shard) + if err != nil { + return vterrors.Wrapf(err, "finding source failed for %v/%v/%v", msdw.cell, msdw.keyspace, msdw.shard) } - // find an appropriate tablet in each destination shard msdw.destinationAliases = make([]*topodatapb.TabletAlias, len(msdw.destinationShards)) for i, destinationShard := range msdw.destinationShards { keyspace := destinationShard.Keyspace() shard := destinationShard.ShardName() - destinationAlias, err := FindWorkerTablet( - ctx, - msdw.wr, - msdw.cleaner, - nil, /* tsc */ - msdw.cell, - keyspace, - shard, - msdw.minHealthyRdonlyTablets, - topodatapb.TabletType_RDONLY) + destinationAlias, err := finderFunc(keyspace, shard) if err != nil { - return fmt.Errorf("FindWorkerTablet() failed for %v/%v/%v: %v", msdw.cell, keyspace, shard, err) + return vterrors.Wrapf(err, "finding destination failed for %v/%v/%v", msdw.cell, keyspace, shard) } msdw.destinationAliases[i] = destinationAlias } - if err != nil { - return fmt.Errorf("FindWorkerTablet() failed for %v/%v/%v: %v", msdw.cell, msdw.keyspace, msdw.shard, err) - } return nil } @@ -335,44 +342,44 @@ func (msdw *MultiSplitDiffWorker) 
findTargets(ctx context.Context) error { // ask the master of the destination shard to pause filtered replication, // and return the source binlog positions // (add a cleanup task to restart filtered replication on master) -func (msdw *MultiSplitDiffWorker) stopReplicationOnAllDestinationMasters(ctx context.Context, masterInfos []*topo.TabletInfo) ([]string, error) { +func (msdw *MultiSplitDiffWorker) stopVreplicationOnAll(ctx context.Context, tabletInfo []*topo.TabletInfo) ([]string, error) { destVreplicationPos := make([]string, len(msdw.destinationShards)) for i, shardInfo := range msdw.destinationShards { - masterInfo := masterInfos[i] + tablet := tabletInfo[i].Tablet - msdw.wr.Logger().Infof("Stopping master binlog replication on %v", shardInfo.MasterAlias) + msdw.wr.Logger().Infof("stopping master binlog replication on %v", shardInfo.MasterAlias) shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - _, err := msdw.wr.TabletManagerClient().VReplicationExec(shortCtx, masterInfo.Tablet, binlogplayer.StopVReplication(msdw.sourceUID, "for split diff")) + _, err := msdw.wr.TabletManagerClient().VReplicationExec(shortCtx, tablet, binlogplayer.StopVReplication(msdw.sourceUID, "for split diff")) cancel() if err != nil { - return nil, fmt.Errorf("VReplicationExec(stop) for %v failed: %v", shardInfo.MasterAlias, err) + return nil, vterrors.Wrapf(err, "VReplicationExec(stop) for %v failed", shardInfo.MasterAlias) } - wrangler.RecordVReplicationAction(msdw.cleaner, masterInfo.Tablet, binlogplayer.StartVReplication(msdw.sourceUID)) + wrangler.RecordVReplicationAction(msdw.cleaner, tablet, binlogplayer.StartVReplication(msdw.sourceUID)) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - p3qr, err := msdw.wr.TabletManagerClient().VReplicationExec(shortCtx, masterInfo.Tablet, binlogplayer.ReadVReplicationPos(msdw.sourceUID)) + p3qr, err := msdw.wr.TabletManagerClient().VReplicationExec(shortCtx, tablet, binlogplayer.ReadVReplicationPos(msdw.sourceUID)) cancel() if err != nil { - return nil, fmt.Errorf("VReplicationExec(stop) for %v failed: %v", msdw.shardInfo.MasterAlias, err) + return nil, vterrors.Wrapf(err, "VReplicationExec(stop) for %v failed", msdw.shardInfo.MasterAlias) } qr := sqltypes.Proto3ToResult(p3qr) if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { - return nil, fmt.Errorf("unexpected result while reading position: %v", qr) + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "unexpected result while reading position: %v", qr) } destVreplicationPos[i] = qr.Rows[0][0].ToString() if err != nil { - return nil, fmt.Errorf("StopBlp for %v failed: %v", msdw.shardInfo.MasterAlias, err) + return nil, vterrors.Wrapf(err, "StopBlp for %v failed", msdw.shardInfo.MasterAlias) } } return destVreplicationPos, nil } -func (msdw *MultiSplitDiffWorker) getTabletInfoForShard(ctx context.Context, shardInfo *topo.ShardInfo) (*topo.TabletInfo, error) { +func (msdw *MultiSplitDiffWorker) getMasterTabletInfoForShard(ctx context.Context, shardInfo *topo.ShardInfo) (*topo.TabletInfo, error) { shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) masterInfo, err := msdw.wr.TopoServer().GetTablet(shortCtx, shardInfo.MasterAlias) cancel() if err != nil { - return nil, fmt.Errorf("synchronizeReplication: cannot get Tablet record for master %v: %v", msdw.shardInfo.MasterAlias, err) + return nil, vterrors.Wrapf(err, "synchronizeSrcAndDestTxState: cannot get Tablet record for master %v", msdw.shardInfo.MasterAlias) } return masterInfo, nil } @@ -381,7 +388,7 @@ 
func (msdw *MultiSplitDiffWorker) getTabletInfoForShard(ctx context.Context, sha // destination masters. Return the reached position // (add a cleanup task to restart binlog replication on the source tablet, and // change the existing ChangeSlaveType cleanup action to 'spare' type) -func (msdw *MultiSplitDiffWorker) stopReplicationOnSourceRdOnlyTabletAt(ctx context.Context, destVreplicationPos []string) (string, error) { +func (msdw *MultiSplitDiffWorker) stopReplicationOnSourceTabletAt(ctx context.Context, destVreplicationPos []string) (string, error) { shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) sourceTablet, err := msdw.wr.TopoServer().GetTablet(shortCtx, msdw.sourceAlias) cancel() @@ -397,12 +404,8 @@ func (msdw *MultiSplitDiffWorker) stopReplicationOnSourceRdOnlyTabletAt(ctx cont // if we make StopSlaveMinimum take multiple blp positions then this will be a lot more efficient because you just // check for each position using WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS and then stop replication. - msdw.wr.Logger().Infof("Stopping slave %v at a minimum of %v", msdw.sourceAlias, vreplicationPos) - // read the tablet - sourceTablet, err := msdw.wr.TopoServer().GetTablet(shortCtx, msdw.sourceAlias) - if err != nil { - return "", err - } + msdw.wr.Logger().Infof("stopping slave %v at a minimum of %v", msdw.sourceAlias, vreplicationPos) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) msdw.wr.TabletManagerClient().StartSlave(shortCtx, sourceTablet.Tablet) cancel() @@ -414,7 +417,7 @@ func (msdw *MultiSplitDiffWorker) stopReplicationOnSourceRdOnlyTabletAt(ctx cont mysqlPos, err = msdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet.Tablet, vreplicationPos, *remoteActionsTimeout) cancel() if err != nil { - return "", fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", msdw.sourceAlias, vreplicationPos, err) + return "", vterrors.Wrapf(err, "cannot stop slave %v at right binlog position %v", msdw.sourceAlias, vreplicationPos) } } // change the cleaner actions from ChangeSlaveType(rdonly) @@ -425,27 +428,28 @@ func (msdw *MultiSplitDiffWorker) stopReplicationOnSourceRdOnlyTabletAt(ctx cont } // ask the master of the destination shard to resume filtered replication -// up to the new list of positions, and return its binlog position. -func (msdw *MultiSplitDiffWorker) resumeReplicationOnDestinationMasterUntil(ctx context.Context, shardInfo *topo.ShardInfo, mysqlPos string, masterInfo *topo.TabletInfo) (string, error) { - msdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", shardInfo.MasterAlias, mysqlPos) +// up to the specified source position, and return the destination position. 
+func (msdw *MultiSplitDiffWorker) stopVreplicationAt(ctx context.Context, shardInfo *topo.ShardInfo, sourcePosition string, masterInfo *topo.TabletInfo) (string, error) { + msdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", shardInfo.MasterAlias, sourcePosition) shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - _, err := msdw.wr.TabletManagerClient().VReplicationExec(shortCtx, masterInfo.Tablet, binlogplayer.StartVReplicationUntil(msdw.sourceUID, mysqlPos)) + _, err := msdw.wr.TabletManagerClient().VReplicationExec(shortCtx, masterInfo.Tablet, binlogplayer.StartVReplicationUntil(msdw.sourceUID, sourcePosition)) cancel() if err != nil { - return "", fmt.Errorf("VReplication(start until) for %v until %v failed: %v", shardInfo.MasterAlias, mysqlPos, err) + return "", vterrors.Wrapf(err, "VReplication(start until) for %v until %v failed", shardInfo.MasterAlias, sourcePosition) } + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - if err := msdw.wr.TabletManagerClient().VReplicationWaitForPos(shortCtx, masterInfo.Tablet, int(msdw.sourceUID), mysqlPos); err != nil { - cancel() - return "", fmt.Errorf("VReplicationWaitForPos for %v until %v failed: %v", shardInfo.MasterAlias, mysqlPos, err) - } + err = msdw.wr.TabletManagerClient().VReplicationWaitForPos(shortCtx, masterInfo.Tablet, int(msdw.sourceUID), sourcePosition) cancel() + if err != nil { + return "", vterrors.Wrapf(err, "VReplicationWaitForPos for %v until %v failed", shardInfo.MasterAlias, sourcePosition) + } shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) masterPos, err := msdw.wr.TabletManagerClient().MasterPosition(shortCtx, masterInfo.Tablet) cancel() if err != nil { - return "", fmt.Errorf("MasterPosition for %v failed: %v", msdw.shardInfo.MasterAlias, err) + return "", vterrors.Wrapf(err, "MasterPosition for %v failed", msdw.shardInfo.MasterAlias) } return masterPos, nil } @@ -454,11 +458,11 @@ func (msdw *MultiSplitDiffWorker) resumeReplicationOnDestinationMasterUntil(ctx // binlog position, and stop its replication. // (add a cleanup task to restart binlog replication on it, and change // the existing ChangeSlaveType cleanup action to 'spare' type) -func (msdw *MultiSplitDiffWorker) stopReplicationOnDestinationRdOnlys(ctx context.Context, destinationAlias *topodatapb.TabletAlias, masterPos string) error { +func (msdw *MultiSplitDiffWorker) stopReplicationAt(ctx context.Context, destinationAlias *topodatapb.TabletAlias, masterPos string) error { if msdw.waitForFixedTimeRatherThanGtidSet { - msdw.wr.Logger().Infof("Workaround for broken GTID set in destination RDONLY. Just waiting for 1 minute for %v and assuming replication has caught up. (should be at %v)", destinationAlias, masterPos) + msdw.wr.Logger().Infof("workaround for broken GTID set in destination RDONLY. Just waiting for 1 minute for %v and assuming replication has caught up. 
(should be at %v)", destinationAlias, masterPos) } else { - msdw.wr.Logger().Infof("Waiting for destination tablet %v to catch up to %v", destinationAlias, masterPos) + msdw.wr.Logger().Infof("waiting for destination tablet %v to catch up to %v", destinationAlias, masterPos) } shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) destinationTablet, err := msdw.wr.TopoServer().GetTablet(shortCtx, destinationAlias) @@ -479,7 +483,7 @@ func (msdw *MultiSplitDiffWorker) stopReplicationOnDestinationRdOnlys(ctx contex } cancel() if err != nil { - return fmt.Errorf("StopSlaveMinimum for %v at %v failed: %v", destinationAlias, masterPos, err) + return vterrors.Wrapf(err, "StopSlaveMinimum for %v at %v failed", destinationAlias, masterPos) } wrangler.RecordStartSlaveAction(msdw.cleaner, destinationTablet.Tablet) return nil @@ -487,87 +491,208 @@ func (msdw *MultiSplitDiffWorker) stopReplicationOnDestinationRdOnlys(ctx contex // restart filtered replication on the destination master. // (remove the cleanup task that does the same) -func (msdw *MultiSplitDiffWorker) restartReplicationOn(ctx context.Context, shardInfo *topo.ShardInfo, masterInfo *topo.TabletInfo) error { - msdw.wr.Logger().Infof("Restarting filtered replication on master %v", shardInfo.MasterAlias) +func (msdw *MultiSplitDiffWorker) startVreplication(ctx context.Context, shardInfo *topo.ShardInfo, masterInfo *topo.TabletInfo) error { + msdw.wr.Logger().Infof("restarting filtered replication on master %v", shardInfo.MasterAlias) shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) _, err := msdw.wr.TabletManagerClient().VReplicationExec(shortCtx, masterInfo.Tablet, binlogplayer.StartVReplication(msdw.sourceUID)) if err != nil { - return fmt.Errorf("VReplicationExec(start) failed for %v: %v", shardInfo.MasterAlias, err) + return vterrors.Wrapf(err, "VReplicationExec(start) failed for %v", shardInfo.MasterAlias) } cancel() return nil } -// synchronizeReplication phase: -// At this point, the source and the destination tablet are stopped at the same -// point. 
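To make the consistent-snapshot path concrete: the ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY isolation requested in createTransactions corresponds, on MySQL/InnoDB, to opening a read-only transaction with a consistent snapshot, so every connection prepared this way while writes are briefly held off (the read lock taken by LockTables in CreateConsistentTransactions) observes the same point in the transaction history. A small standalone sketch of the idea against a plain MySQL connection; the driver import and the withSnapshot helper are illustrative assumptions, and the exact statements vttablet issues are not shown in this diff:

import (
	"context"
	"database/sql"

	_ "github.com/go-sql-driver/mysql" // any MySQL driver would do
)

// withSnapshot pins a consistent snapshot on one pooled connection and runs fn against it.
// Several connections prepared like this, while writes are paused, all see identical data,
// which is what lets the worker diff the same tables from multiple goroutines safely.
func withSnapshot(ctx context.Context, db *sql.DB, fn func(conn *sql.Conn) error) error {
	conn, err := db.Conn(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()
	if _, err := conn.ExecContext(ctx, "START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY"); err != nil {
		return err
	}
	// The transaction never writes, so rolling back merely releases the snapshot.
	defer func() { _, _ = conn.ExecContext(ctx, "ROLLBACK") }()
	return fn(conn)
}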
+func (msdw *MultiSplitDiffWorker) createNonTransactionalTableScanners(ctx context.Context, queryService queryservice.QueryService, source *topo.TabletInfo) ([]TableScanner, error) { + // If we are not using consistent snapshot, we'll use the NonTransactionalTableScanner, + // which does not have any instance state and so can be used by all connections + scanners := make([]TableScanner, msdw.parallelDiffsCount) + scanner := NonTransactionalTableScanner{ + queryService: queryService, + cleaner: msdw.cleaner, + wr: msdw.wr, + tabletAlias: source.Alias, + } -func (msdw *MultiSplitDiffWorker) synchronizeReplication(ctx context.Context) error { + for i := 0; i < msdw.parallelDiffsCount; i++ { + scanners[i] = scanner + } + + return scanners, nil +} + +func (msdw *MultiSplitDiffWorker) useTransactionScanners(ctx context.Context, source *topo.TabletInfo) (string, []TableScanner, error) { + connections, pos, err := CreateConsistentTableScanners(ctx, source, msdw.wr, msdw.cleaner, msdw.parallelDiffsCount) + if err != nil { + return "", nil, vterrors.Wrapf(err, "failed to create %d transactional connections", msdw.parallelDiffsCount) + } + return pos, connections, nil +} +func (msdw *MultiSplitDiffWorker) useNonTransactionalScanners(ctx context.Context, source *topo.TabletInfo, destVreplicationPos []string) (string, []TableScanner, error) { + pos, err := msdw.stopReplicationOnSourceTabletAt(ctx, destVreplicationPos) + if err != nil { + return "", nil, err + } + + queryService, err := tabletconn.GetDialer()(source.Tablet, true) + if err != nil { + return "", nil, vterrors.Wrapf(err, "failed to instantiate query service for %v", source.Tablet) + } + sourceScanners, err := msdw.createNonTransactionalTableScanners(ctx, queryService, source) + if err != nil { + return "", nil, err + } + + return pos, sourceScanners, nil +} + +// synchronizeSrcAndDestTxState phase: +// After this point, the source and the destination tablet are stopped at the same point. +func (msdw *MultiSplitDiffWorker) synchronizeSrcAndDestTxState(ctx context.Context) error { msdw.SetState(WorkerStateSyncReplication) var err error + // 1. Find all the tablets we will need to work with masterInfos := make([]*topo.TabletInfo, len(msdw.destinationAliases)) for i, shardInfo := range msdw.destinationShards { - masterInfos[i], err = msdw.getTabletInfoForShard(ctx, shardInfo) + masterInfos[i], err = msdw.getMasterTabletInfoForShard(ctx, shardInfo) if err != nil { return err } } - destVreplicationPos, err := msdw.stopReplicationOnAllDestinationMasters(ctx, masterInfos) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + source, err := msdw.wr.TopoServer().GetTablet(shortCtx, msdw.sourceAlias) + cancel() + + var sourcePosition string + + // 2. Stop replication on destination + destVreplicationPos, err := msdw.stopVreplicationOnAll(ctx, masterInfos) if err != nil { return err } - mysqlPos, err := msdw.stopReplicationOnSourceRdOnlyTabletAt(ctx, destVreplicationPos) + // 3. 
Pause updates on the source and create consistent snapshot connections + var scanners []TableScanner + if msdw.useConsistentSnapshot { + sourcePosition, scanners, err = msdw.useTransactionScanners(ctx, source) + } else { + sourcePosition, scanners, err = msdw.useNonTransactionalScanners(ctx, source, destVreplicationPos) + } if err != nil { return err } + msdw.scanners = make([]Scanners, msdw.parallelDiffsCount) + for i := 0; i < msdw.parallelDiffsCount; i++ { + // We'll create one of these Scanners struct per thread we want to run in parallel + scanner := scanners[i] + i2 := Scanners{ + sourceScanner: scanner, + destinationScanner: make([]TableScanner, len(msdw.destinationShards)), + } + msdw.scanners[i] = i2 + } + + // 4. Make sure all replicas have caught up with the master for i, shardInfo := range msdw.destinationShards { masterInfo := masterInfos[i] destinationAlias := msdw.destinationAliases[i] - masterPos, err := msdw.resumeReplicationOnDestinationMasterUntil(ctx, shardInfo, mysqlPos, masterInfo) + destinationPosition, err := msdw.stopVreplicationAt(ctx, shardInfo, sourcePosition, masterInfo) if err != nil { return err } - err = msdw.stopReplicationOnDestinationRdOnlys(ctx, destinationAlias, masterPos) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + destTabletInfo, err := msdw.wr.TopoServer().GetTablet(shortCtx, destinationAlias) + cancel() if err != nil { - return err + return vterrors.Wrapf(err, "waitForDestinationTabletToReach: cannot get Tablet record for master %v", msdw.shardInfo.MasterAlias) } - err = msdw.restartReplicationOn(ctx, shardInfo, masterInfo) - if err != nil { - return err + queryService, err := tabletconn.GetDialer()(source.Tablet, true) + var destScanners []TableScanner + + if msdw.useConsistentSnapshot { + // loop to wait for the destinationAlias tablet in shardInfo to have reached destinationPosition + err = msdw.waitForDestinationTabletToReach(ctx, destTabletInfo.Tablet, destinationPosition) + if err != nil { + return err + } + + destScanners, _, err = CreateConsistentTableScanners(ctx, destTabletInfo, msdw.wr, msdw.cleaner, msdw.parallelDiffsCount) + if err != nil { + return err + } + } else { + err = msdw.stopReplicationAt(ctx, destinationAlias, destinationPosition) + if err != nil { + return vterrors.Wrapf(err, "failed to stop replication on %v at position %v", destinationAlias, destinationPosition) + } + destScanners, err = msdw.createNonTransactionalTableScanners(ctx, queryService, destTabletInfo) + if err != nil { + return vterrors.Wrapf(err, "failed to stop create table scanners for %v using %v", destTabletInfo, queryService) + } } + + // 5. 
Spread out destination scanners between the goroutines + for j := 0; j < msdw.parallelDiffsCount; j++ { + msdw.scanners[j].destinationScanner[i] = destScanners[j] + } + + err = msdw.startVreplication(ctx, shardInfo, masterInfo) + if err != nil { + return vterrors.Wrapf(err, "failed to restart vreplication for shard %v on tablet %v", shardInfo, masterInfo) + } + } - return nil } -func (msdw *MultiSplitDiffWorker) diffSingleTable(ctx context.Context, wg *sync.WaitGroup, tableDefinition *tabletmanagerdatapb.TableDefinition, keyspaceSchema *vindexes.KeyspaceSchema) error { +func (msdw *MultiSplitDiffWorker) waitForDestinationTabletToReach(ctx context.Context, tablet *topodatapb.Tablet, mysqlPos string) error { + for i := 0; i < 20; i++ { + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + pos, err := msdw.wr.TabletManagerClient().MasterPosition(shortCtx, tablet) + cancel() + if err != nil { + return vterrors.Wrapf(err, "get MasterPosition for %v failed", tablet) + } + + if pos == mysqlPos { + return nil + } + time.Sleep(time.Second) + } + return vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "failed to reach transaction position after multiple attempts. it is safe to try again") +} + +func (msdw *MultiSplitDiffWorker) diffSingleTable(ctx context.Context, wg *sync.WaitGroup, tableDefinition *tabletmanagerdatapb.TableDefinition, keyspaceSchema *vindexes.KeyspaceSchema, sourceScanner TableScanner, destinationScanners []TableScanner) error { msdw.wr.Logger().Infof("Starting the diff on table %v", tableDefinition.Name) - sourceQueryResultReader, err := TableScan(ctx, msdw.wr.Logger(), msdw.wr.TopoServer(), msdw.sourceAlias, tableDefinition) + if len(destinationScanners) != len(msdw.destinationAliases) { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "did not receive the expected amount of destination connections") + } + + sourceQueryResultReader, err := sourceScanner.ScanTable(ctx, tableDefinition) if err != nil { - return fmt.Errorf("TableScan(source) failed: %v", err) + return err } defer sourceQueryResultReader.Close(ctx) destinationQueryResultReaders := make([]ResultReader, len(msdw.destinationAliases)) - for i, destinationAlias := range msdw.destinationAliases { - destinationQueryResultReader, err := TableScan(ctx, msdw.wr.Logger(), msdw.wr.TopoServer(), destinationAlias, tableDefinition) + for i := range msdw.destinationAliases { + scanner := destinationScanners[i] + destinationQueryResultReader, err := scanner.ScanTable(ctx, tableDefinition) if err != nil { - return fmt.Errorf("TableScan(destination) failed: %v", err) + return vterrors.Wrapf(err, "TableScan(destination) on %v failed", tableDefinition.String()) } // For the first result scanner, let's check the PKs are of types that we can work with if i == 0 { err = CheckValidTypesForResultMerger(destinationQueryResultReader.fields, len(tableDefinition.PrimaryKeyColumns)) if err != nil { - return fmt.Errorf("invalid types for multi split diff. use the regular split diff instead %v", err.Error()) + return vterrors.Wrapf(err, "invalid types for multi split diff. use the regular split diff instead") } } @@ -579,35 +704,41 @@ func (msdw *MultiSplitDiffWorker) diffSingleTable(ctx context.Context, wg *sync. } mergedResultReader, err := NewResultMerger(destinationQueryResultReaders, len(tableDefinition.PrimaryKeyColumns)) if err != nil { - return fmt.Errorf("NewResultMerger failed: %v", err) + return err } // Create the row differ. 
differ, err := NewRowDiffer(sourceQueryResultReader, mergedResultReader, tableDefinition) if err != nil { - return fmt.Errorf("NewRowDiffer() failed: %v", err) + return err } // And run the diff. report, err := differ.Go(msdw.wr.Logger()) if err != nil { - return fmt.Errorf("Differ.Go failed: %v", err.Error()) + return err } if report.HasDifferences() { - return fmt.Errorf("table %v has differences: %v", tableDefinition.Name, report.String()) + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "table %v has differences: %v", tableDefinition.Name, report.String()) } - msdw.wr.Logger().Infof("Table %v checks out (%v rows processed, %v qps)", tableDefinition.Name, report.processedRows, report.processingQPS) + msdw.wr.Logger().Infof("table %v checks out (%v rows processed, %v qps)", tableDefinition.Name, report.processedRows, report.processingQPS) return nil } -func (msdw *MultiSplitDiffWorker) tableDiffingConsumer(ctx context.Context, wg *sync.WaitGroup, tableChan chan *tabletmanagerdatapb.TableDefinition, rec *concurrency.AllErrorRecorder, keyspaceSchema *vindexes.KeyspaceSchema) { +func (msdw *MultiSplitDiffWorker) tableDiffingConsumer(ctx context.Context, wg *sync.WaitGroup, tableChan chan *tabletmanagerdatapb.TableDefinition, rec *concurrency.AllErrorRecorder, keyspaceSchema *vindexes.KeyspaceSchema, sourceScanner TableScanner, destinationScanners []TableScanner) { + if destinationScanners == nil || sourceScanner == nil { + err := vterrors.Errorf(vtrpc.Code_INTERNAL, "should not be nil %v %v", destinationScanners, sourceScanner) + msdw.markAsWillFail(rec, err) + msdw.wr.Logger().Errorf("%v", err) + return + } defer wg.Done() for tableDefinition := range tableChan { - err := msdw.diffSingleTable(ctx, wg, tableDefinition, keyspaceSchema) + err := msdw.diffSingleTable(ctx, wg, tableDefinition, keyspaceSchema, sourceScanner, destinationScanners) if err != nil { msdw.markAsWillFail(rec, err) msdw.wr.Logger().Errorf("%v", err) @@ -616,7 +747,7 @@ func (msdw *MultiSplitDiffWorker) tableDiffingConsumer(ctx context.Context, wg * } func (msdw *MultiSplitDiffWorker) gatherSchemaInfo(ctx context.Context) ([]*tabletmanagerdatapb.SchemaDefinition, *tabletmanagerdatapb.SchemaDefinition, error) { - msdw.wr.Logger().Infof("Gathering schema information...") + msdw.wr.Logger().Infof("gathering schema information...") wg := sync.WaitGroup{} rec := &concurrency.AllErrorRecorder{} @@ -631,9 +762,11 @@ func (msdw *MultiSplitDiffWorker) gatherSchemaInfo(ctx context.Context) ([]*tabl destinationSchemaDefinition, err := msdw.wr.GetSchema( shortCtx, destinationAlias, nil /* tables */, msdw.excludeTables, false /* includeViews */) cancel() - msdw.markAsWillFail(rec, err) + if err != nil { + msdw.markAsWillFail(rec, err) + } destinationSchemaDefinitions[i] = destinationSchemaDefinition - msdw.wr.Logger().Infof("Got schema from destination %v", destinationAlias) + msdw.wr.Logger().Infof("got schema from destination %v", destinationAlias) wg.Done() }(i, destinationAlias) } @@ -644,8 +777,10 @@ func (msdw *MultiSplitDiffWorker) gatherSchemaInfo(ctx context.Context) ([]*tabl sourceSchemaDefinition, err = msdw.wr.GetSchema( shortCtx, msdw.sourceAlias, nil /* tables */, msdw.excludeTables, false /* includeViews */) cancel() - msdw.markAsWillFail(rec, err) - msdw.wr.Logger().Infof("Got schema from source %v", msdw.sourceAlias) + if err != nil { + msdw.markAsWillFail(rec, err) + } + msdw.wr.Logger().Infof("got schema from source %v", msdw.sourceAlias) wg.Done() }() @@ -657,8 +792,8 @@ func (msdw *MultiSplitDiffWorker) 
gatherSchemaInfo(ctx context.Context) ([]*tabl return destinationSchemaDefinitions, sourceSchemaDefinition, nil } -func (msdw *MultiSplitDiffWorker) diffSchemaInformation(ctx context.Context, destinationSchemaDefinitions []*tabletmanagerdatapb.SchemaDefinition, sourceSchemaDefinition *tabletmanagerdatapb.SchemaDefinition) { - msdw.wr.Logger().Infof("Diffing the schema...") +func (msdw *MultiSplitDiffWorker) diffSchemaInformation(ctx context.Context, destinationSchemaDefinitions []*tabletmanagerdatapb.SchemaDefinition, sourceSchemaDefinition *tabletmanagerdatapb.SchemaDefinition) error { + msdw.wr.Logger().Infof("diffing the schema...") rec := &concurrency.AllErrorRecorder{} sourceShardName := fmt.Sprintf("%v/%v", msdw.shardInfo.Keyspace(), msdw.shardInfo.ShardName()) for i, destinationSchemaDefinition := range destinationSchemaDefinitions { @@ -667,10 +802,12 @@ func (msdw *MultiSplitDiffWorker) diffSchemaInformation(ctx context.Context, des tmutils.DiffSchema(destinationShardName, destinationSchemaDefinition, sourceShardName, sourceSchemaDefinition, rec) } if rec.HasErrors() { - msdw.wr.Logger().Warningf("Different schemas: %v", rec.Error().Error()) - } else { - msdw.wr.Logger().Infof("Schema match, good.") + msdw.wr.Logger().Warningf("different schemas: %v", rec.Error().Error()) + return rec.Error() } + + msdw.wr.Logger().Infof("schema match, good.") + return nil } func (msdw *MultiSplitDiffWorker) loadVSchema(ctx context.Context) (*vindexes.KeyspaceSchema, error) { @@ -678,15 +815,15 @@ func (msdw *MultiSplitDiffWorker) loadVSchema(ctx context.Context) (*vindexes.Ke kschema, err := msdw.wr.TopoServer().GetVSchema(shortCtx, msdw.keyspace) cancel() if err != nil { - return nil, fmt.Errorf("cannot load VSchema for keyspace %v: %v", msdw.keyspace, err) + return nil, vterrors.Wrapf(err, "cannot load VSchema for keyspace %v", msdw.keyspace) } if kschema == nil { - return nil, fmt.Errorf("no VSchema for keyspace %v", msdw.keyspace) + return nil, vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "no VSchema for keyspace %v", msdw.keyspace) } keyspaceSchema, err := vindexes.BuildKeyspaceSchema(kschema, msdw.keyspace) if err != nil { - return nil, fmt.Errorf("cannot build vschema for keyspace %v: %v", msdw.keyspace, err) + return nil, vterrors.Wrapf(err, "cannot build vschema for keyspace %v", msdw.keyspace) } return keyspaceSchema, nil } @@ -703,7 +840,10 @@ func (msdw *MultiSplitDiffWorker) diff(ctx context.Context) error { if err != nil { return err } - msdw.diffSchemaInformation(ctx, destinationSchemaDefinitions, sourceSchemaDefinition) + err = msdw.diffSchemaInformation(ctx, destinationSchemaDefinitions, sourceSchemaDefinition) + if err != nil { + return err + } // read the vschema if needed var keyspaceSchema *vindexes.KeyspaceSchema @@ -714,7 +854,7 @@ func (msdw *MultiSplitDiffWorker) diff(ctx context.Context) error { } } - msdw.wr.Logger().Infof("Running the diffs...") + msdw.wr.Logger().Infof("running the diffs...") tableDefinitions := sourceSchemaDefinition.TableDefinitions rec := &concurrency.AllErrorRecorder{} @@ -730,8 +870,13 @@ func (msdw *MultiSplitDiffWorker) diff(ctx context.Context) error { consumers := sync.WaitGroup{} // start as many goroutines we want parallel diffs running for i := 0; i < msdw.parallelDiffsCount; i++ { + scanners := msdw.scanners[i] + if scanners.sourceScanner == nil || scanners.destinationScanner == nil { + return vterrors.Errorf(vtrpc.Code_INTERNAL, "should not be nil [%v] OR [%v]", scanners.sourceScanner, scanners.destinationScanner) + } + consumers.Add(1) - go 
msdw.tableDiffingConsumer(ctx, &consumers, tableChan, rec, keyspaceSchema) + go msdw.tableDiffingConsumer(ctx, &consumers, tableChan, rec, keyspaceSchema, scanners.sourceScanner, scanners.destinationScanner) } // wait for all consumers to wrap up their work diff --git a/go/vt/worker/multi_split_diff_cmd.go b/go/vt/worker/multi_split_diff_cmd.go index 353e47790e..8d54df60c3 100644 --- a/go/vt/worker/multi_split_diff_cmd.go +++ b/go/vt/worker/multi_split_diff_cmd.go @@ -25,6 +25,8 @@ import ( "strings" "sync" + "vitess.io/vitess/go/vt/proto/topodata" + "golang.org/x/net/context" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/topo/topoproto" @@ -60,14 +62,22 @@ const multiSplitDiffHTML2 = `

 [multiSplitDiffHTML2 form markup omitted: this hunk renames the minHealthyRdonlyTablets input to minHealthyTablets and adds tabletType and useConsistentSnapshot form fields, matching the values read in interactiveMultiSplitDiff below.]
@@ -79,10 +89,12 @@ var multiSplitDiffTemplate = mustParseTemplate("multiSplitDiff", multiSplitDiffH var multiSplitDiffTemplate2 = mustParseTemplate("multiSplitDiff2", multiSplitDiffHTML2) func commandMultiSplitDiff(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (Worker, error) { + tabletTypeStr := subFlags.String("tablet_type", "RDONLY", "type of tablet used") excludeTables := subFlags.String("exclude_tables", "", "comma separated list of tables to exclude") - minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets before taking out one") + minHealthyTablets := subFlags.Int("min_healthy_tablets", defaultMinHealthyTablets, "minimum number of healthy tablets before taking out one") parallelDiffsCount := subFlags.Int("parallel_diffs_count", defaultParallelDiffsCount, "number of tables to diff in parallel") waitForFixedTimeRatherThanGtidSet := subFlags.Bool("wait_for_fixed_time_rather_than_gtid_set", false, "wait for 1m when syncing up the destination RDONLY tablet rather than using the GTID set. Use this when the GTID set on the RDONLY is broken. Make sure the RDONLY is not behind in replication when using this flag.") + useConsistentSnapshot := subFlags.Bool("use_consistent_snapshot", defaultUseConsistentSnapshot, "Instead of pausing replication on the source, uses transactions with consistent snapshot to have a stable view of the data.") if err := subFlags.Parse(args); err != nil { return nil, err } @@ -98,7 +110,13 @@ func commandMultiSplitDiff(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.F if *excludeTables != "" { excludeTableArray = strings.Split(*excludeTables, ",") } - return NewMultiSplitDiffWorker(wr, wi.cell, keyspace, shard, excludeTableArray, *minHealthyRdonlyTablets, *parallelDiffsCount, *waitForFixedTimeRatherThanGtidSet), nil + + tabletType, ok := topodata.TabletType_value[*tabletTypeStr] + if !ok { + return nil, fmt.Errorf("failed to find this tablet type %v", tabletTypeStr) + } + + return NewMultiSplitDiffWorker(wr, wi.cell, keyspace, shard, excludeTableArray, *minHealthyTablets, *parallelDiffsCount, *waitForFixedTimeRatherThanGtidSet, *useConsistentSnapshot, topodata.TabletType(tabletType)), nil } // shardSources returns all the shards that are SourceShards of at least one other shard. 
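One wrinkle in the new --tablet_type handling in commandMultiSplitDiff above: TabletType_value is the protobuf-generated name-to-number map, so the lookup is case-sensitive ("RDONLY" resolves, "rdonly" does not), and the error path formats the *string flag pointer rather than its value. A hedged sketch of a more forgiving parse; parseTabletType is a hypothetical helper, not part of this patch:

import (
	"fmt"
	"strings"

	"vitess.io/vitess/go/vt/proto/topodata"
)

// parseTabletType maps a user-supplied string such as "rdonly" or "REPLICA" onto the
// generated enum, normalising case and reporting the offending value on failure.
func parseTabletType(s string) (topodata.TabletType, error) {
	v, ok := topodata.TabletType_value[strings.ToUpper(s)]
	if !ok {
		return topodata.TabletType_UNKNOWN, fmt.Errorf("unknown tablet type %q", s)
	}
	return topodata.TabletType(v), nil
}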
@@ -168,7 +186,7 @@ func shardSources(ctx context.Context, wr *wrangler.Wrangler) ([]map[string]stri return result, nil } -func interactiveMultiSplitDiff(ctx context.Context, wi *Instance, wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) (Worker, *template.Template, map[string]interface{}, error) { +func interactiveMultiSplitDiff(ctx context.Context, wi *Instance, wr *wrangler.Wrangler, _ http.ResponseWriter, r *http.Request) (Worker, *template.Template, map[string]interface{}, error) { if err := r.ParseForm(); err != nil { return nil, nil, nil, fmt.Errorf("cannot parse form: %s", err) } @@ -194,7 +212,7 @@ func interactiveMultiSplitDiff(ctx context.Context, wi *Instance, wr *wrangler.W result["Keyspace"] = keyspace result["Shard"] = shard result["DefaultSourceUID"] = "0" - result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets) + result["DefaultMinHealthyTablets"] = fmt.Sprintf("%v", defaultMinHealthyTablets) result["DefaultParallelDiffsCount"] = fmt.Sprintf("%v", defaultParallelDiffsCount) return nil, multiSplitDiffTemplate2, result, nil } @@ -205,21 +223,26 @@ func interactiveMultiSplitDiff(ctx context.Context, wi *Instance, wr *wrangler.W if excludeTables != "" { excludeTableArray = strings.Split(excludeTables, ",") } - minHealthyRdonlyTabletsStr := r.FormValue("minHealthyRdonlyTablets") + minHealthyTabletsStr := r.FormValue("minHealthyTablets") parallelDiffsCountStr := r.FormValue("parallelDiffsCount") - minHealthyRdonlyTablets, err := strconv.ParseInt(minHealthyRdonlyTabletsStr, 0, 64) + minHealthyTablets, err := strconv.ParseInt(minHealthyTabletsStr, 0, 64) parallelDiffsCount, err := strconv.ParseInt(parallelDiffsCountStr, 0, 64) if err != nil { - return nil, nil, nil, fmt.Errorf("cannot parse minHealthyRdonlyTablets: %s", err) + return nil, nil, nil, fmt.Errorf("cannot parse minHealthyTablets: %s", err) } waitForFixedTimeRatherThanGtidSetStr := r.FormValue("waitForFixedTimeRatherThanGtidSet") waitForFixedTimeRatherThanGtidSet := waitForFixedTimeRatherThanGtidSetStr == "true" - if err != nil { - return nil, nil, nil, fmt.Errorf("cannot parse minHealthyRdonlyTablets: %s", err) + useConsistentSnapshotStr := r.FormValue("useConsistentSnapshot") + useConsistentSnapshot := useConsistentSnapshotStr == "true" + + tabletTypeStr := r.FormValue("tabletType") + tabletType, ok := topodata.TabletType_value[tabletTypeStr] + if !ok { + return nil, nil, nil, fmt.Errorf("cannot parse tabletType: %s", tabletTypeStr) } // start the diff job - wrk := NewMultiSplitDiffWorker(wr, wi.cell, keyspace, shard, excludeTableArray, int(minHealthyRdonlyTablets), int(parallelDiffsCount), waitForFixedTimeRatherThanGtidSet) + wrk := NewMultiSplitDiffWorker(wr, wi.cell, keyspace, shard, excludeTableArray, int(minHealthyTablets), int(parallelDiffsCount), waitForFixedTimeRatherThanGtidSet, useConsistentSnapshot, topodata.TabletType(tabletType)) return wrk, nil, nil, nil } @@ -228,4 +251,4 @@ func init() { commandMultiSplitDiff, interactiveMultiSplitDiff, "[--exclude_tables=''] ", "Diffs a rdonly destination shard against its SourceShards"}) -} +} \ No newline at end of file diff --git a/go/vt/worker/multi_split_diff_test.go b/go/vt/worker/multi_split_diff_test.go index 7811f5b1b5..90316e22a0 100644 --- a/go/vt/worker/multi_split_diff_test.go +++ b/go/vt/worker/multi_split_diff_test.go @@ -49,7 +49,7 @@ type msdDestinationTabletServer struct { shardIndex int } -func (sq *msdDestinationTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, 
bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { +func (sq *msdDestinationTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { if strings.Contains(sql, sq.excludedTable) { sq.t.Errorf("Split Diff operation on destination should skip the excluded table: %v query: %v", sq.excludedTable, sql) } @@ -111,7 +111,7 @@ type msdSourceTabletServer struct { v3 bool } -func (sq *msdSourceTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { +func (sq *msdSourceTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { if strings.Contains(sql, sq.excludedTable) { sq.t.Errorf("Split Diff operation on source should skip the excluded table: %v query: %v", sq.excludedTable, sql) } @@ -277,7 +277,7 @@ func testMultiSplitDiff(t *testing.T, v3 bool) { qs := fakes.NewStreamHealthQueryService(sourceRdonly.Target()) qs.AddDefaultHealthResponse() grpcqueryservice.Register(sourceRdonly.RPCServer, &msdSourceTabletServer{ - t: t, + t: t, StreamHealthQueryService: qs, excludedTable: excludedTable, v3: v3, @@ -288,7 +288,7 @@ func testMultiSplitDiff(t *testing.T, v3 bool) { qs := fakes.NewStreamHealthQueryService(destRdonly.Target()) qs.AddDefaultHealthResponse() grpcqueryservice.Register(destRdonly.RPCServer, &msdDestinationTabletServer{ - t: t, + t: t, StreamHealthQueryService: qs, excludedTable: excludedTable, shardIndex: 0, @@ -299,7 +299,7 @@ func testMultiSplitDiff(t *testing.T, v3 bool) { qs := fakes.NewStreamHealthQueryService(destRdonly.Target()) qs.AddDefaultHealthResponse() grpcqueryservice.Register(destRdonly.RPCServer, &msdDestinationTabletServer{ - t: t, + t: t, StreamHealthQueryService: qs, excludedTable: excludedTable, shardIndex: 1, @@ -323,7 +323,7 @@ func testMultiSplitDiff(t *testing.T, v3 bool) { // necessary for synchronizing replication. wr := wrangler.New(logutil.NewConsoleLogger(), ts, newFakeTMCTopo(ts)) if err := runCommand(t, wi, wr, args); err != nil { - t.Fatal(err) + t.Fatalf("%+v", err) } } diff --git a/go/vt/worker/restartable_result_reader.go b/go/vt/worker/restartable_result_reader.go index 73fadf0414..90a5e8c20e 100644 --- a/go/vt/worker/restartable_result_reader.go +++ b/go/vt/worker/restartable_result_reader.go @@ -53,6 +53,8 @@ type RestartableResultReader struct { chunk chunk // allowMultipleRetries is true if we are allowed to retry more than once. allowMultipleRetries bool + // if we are running inside a transaction, this will hold a non-zero value + txID int64 query string @@ -82,6 +84,15 @@ func NewRestartableResultReader(ctx context.Context, logger logutil.Logger, tp t allowMultipleRetries: allowMultipleRetries, } + err := tryToConnect(r) + if err != nil { + return nil, err + } + return r, nil +} + +func tryToConnect(r *RestartableResultReader) error { + // If the initial connection fails we retry once. // Note: The first retry will be the second attempt. 
attempt := 0 @@ -97,15 +108,35 @@ func NewRestartableResultReader(ctx context.Context, logger logutil.Logger, tp t err = fmt.Errorf("tablet=%v: %v", topoproto.TabletAliasString(r.tablet.Alias), err) goto retry } - return r, nil + return nil retry: if !retryable || attempt > 1 { - return nil, fmt.Errorf("failed to initialize tablet connection: retryable %v, %v", retryable, err) + return fmt.Errorf("failed to initialize tablet connection: retryable %v, %v", retryable, err) } statsRetryCount.Add(1) - logger.Infof("retrying after error: %v", err) + log.Infof("retrying after error: %v", err) } + +} + +// NewTransactionalRestartableResultReader does the same thing that NewRestartableResultReader does, +// but works inside of a single transaction +func NewTransactionalRestartableResultReader(ctx context.Context, logger logutil.Logger, tp tabletProvider, td *tabletmanagerdatapb.TableDefinition, chunk chunk, allowMultipleRetries bool, txID int64) (*RestartableResultReader, error) { + r := &RestartableResultReader{ + ctx: ctx, + logger: logger, + tp: tp, + td: td, + chunk: chunk, + allowMultipleRetries: allowMultipleRetries, + txID: txID, + } + err := tryToConnect(r) + if err != nil { + return nil, err + } + return r, nil } // getTablet (re)sets the tablet which is used for the streaming query. @@ -145,11 +176,21 @@ func (r *RestartableResultReader) getTablet() (bool, error) { func (r *RestartableResultReader) startStream() (bool, error) { // Start the streaming query. r.generateQuery() - stream := queryservice.ExecuteWithStreamer(r.ctx, r.conn, &querypb.Target{ - Keyspace: r.tablet.Keyspace, - Shard: r.tablet.Shard, - TabletType: r.tablet.Type, - }, r.query, make(map[string]*querypb.BindVariable), nil) + var stream sqltypes.ResultStream + + if r.txID == 0 { + stream = queryservice.ExecuteWithStreamer(r.ctx, r.conn, &querypb.Target{ + Keyspace: r.tablet.Keyspace, + Shard: r.tablet.Shard, + TabletType: r.tablet.Type, + }, r.query, make(map[string]*querypb.BindVariable), nil) + } else { + stream = queryservice.ExecuteWithTransactionalStreamer(r.ctx, r.conn, &querypb.Target{ + Keyspace: r.tablet.Keyspace, + Shard: r.tablet.Shard, + TabletType: r.tablet.Type, + }, r.query, make(map[string]*querypb.BindVariable), r.txID, nil) + } // Read the fields information. cols, err := stream.Recv() @@ -387,4 +428,4 @@ func greaterThanTupleWhereClause(columns []string, row []sqltypes.Value) []strin clauses = append(clauses, b.String()) return clauses -} +} \ No newline at end of file diff --git a/go/vt/worker/result_merger.go b/go/vt/worker/result_merger.go index 68250d06ad..c0ae2ec719 100644 --- a/go/vt/worker/result_merger.go +++ b/go/vt/worker/result_merger.go @@ -21,9 +21,9 @@ import ( "fmt" "io" - "vitess.io/vitess/go/vt/vterrors" - "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/sqltypes" @@ -41,8 +41,9 @@ const ResultSizeRows = 64 // The output stream will be sorted by ascending primary key order. // It implements the ResultReader interface. type ResultMerger struct { - inputs []ResultReader - fields []*querypb.Field + inputs []ResultReader + allInputs []ResultReader + fields []*querypb.Field // output is the buffer of merged rows. Once it's full, we'll return it in // Next() (wrapped in a sqltypes.Result). 
output [][]sqltypes.Value @@ -91,6 +92,7 @@ func NewResultMerger(inputs []ResultReader, pkFieldCount int) (*ResultMerger, er rm := &ResultMerger{ inputs: activeInputs, + allInputs: inputs, fields: fields, nextRowHeap: nextRowHeap, } @@ -177,6 +179,13 @@ func (rm *ResultMerger) Next() (*sqltypes.Result, error) { return result, nil } +// Close closes all inputs +func (rm *ResultMerger) Close(ctx context.Context) { + for _, i := range rm.allInputs { + i.Close(ctx) + } +} + func (rm *ResultMerger) deleteInput(deleteMe ResultReader) { for i, input := range rm.inputs { if input == deleteMe { diff --git a/go/vt/worker/result_merger_test.go b/go/vt/worker/result_merger_test.go index 10b37f3261..4fe9dd19c6 100644 --- a/go/vt/worker/result_merger_test.go +++ b/go/vt/worker/result_merger_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + "golang.org/x/net/context" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -53,6 +54,7 @@ type fakeResultReader struct { // currentIndex is the current index within the current range. currentIndex int rowsReturned int + closed bool } // newFakeResultReader returns a new FakeResultReader. @@ -110,6 +112,11 @@ func (f *fakeResultReader) Fields() []*querypb.Field { return f.fields } +// Close closes nothing +func (f *fakeResultReader) Close(ctx context.Context) { + f.closed = true +} + // Next returns the next fake result. It is part of the ResultReader interface. func (f *fakeResultReader) Next() (*sqltypes.Result, error) { if f.rowsReturned == f.rowsTotal { @@ -298,53 +305,63 @@ func TestResultMerger(t *testing.T) { } for _, tc := range testcases { - t.Logf("checking testcase: %v", tc.desc) - pkFieldCount := 1 - if tc.multiPk { - pkFieldCount = 2 - } - rm, err := NewResultMerger(tc.inputs, pkFieldCount) - if err != nil { - t.Fatal(err) - } - - // Consume all merged Results. - var got []*sqltypes.Result - for { - result, err := rm.Next() + t.Run(fmt.Sprintf("checking testcase: %v", tc.desc), func(inner *testing.T) { + pkFieldCount := 1 + if tc.multiPk { + pkFieldCount = 2 + } + rm, err := NewResultMerger(tc.inputs, pkFieldCount) if err != nil { - if err == io.EOF { - break - } else { - t.Fatal(err) - } + inner.Fatal(err) } - got = append(got, result) - } - if !reflect.DeepEqual(got, tc.want) { - for i := range got { - if i == len(tc.want) { - // got has more Results than want. Avoid index out of range errors. - break + // Consume all merged Results. + var got []*sqltypes.Result + for { + result, err := rm.Next() + if err != nil { + if err == io.EOF { + break + } else { + inner.Fatal(err) + } } - if got[i].RowsAffected != tc.want[i].RowsAffected { - t.Logf("deviating RowsAffected value for Result at index: %v got = %v, want = %v", i, got[i].RowsAffected, tc.want[i].RowsAffected) - } - t.Logf("deviating Rows for Result at index: %v got = %v, want = %v", i, got[i].Rows, tc.want[i].Rows) + got = append(got, result) } - if len(tc.want)-len(got) > 0 { - for i := len(got); i < len(tc.want); i++ { - t.Logf("missing Result in got: %v", tc.want[i].Rows) + + rm.Close(context.Background()) + + if !reflect.DeepEqual(got, tc.want) { + for i := range got { + if i == len(tc.want) { + // got has more Results than want. Avoid index out of range errors. 
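The new allInputs field matters because inputs shrinks as individual readers hit EOF and are removed by deleteInput, so Close has to remember the original set or exhausted readers would never release their streaming connections. Below is a minimal sketch of that contract, with a simplified reader interface standing in for ResultReader; the toyReader and merger names are illustrative only.

package main

import (
	"context"
	"fmt"
)

// reader is a simplified stand-in for the worker's ResultReader interface:
// it is consumed until exhausted and must be closed exactly once.
type reader interface {
	Close(ctx context.Context)
}

type toyReader struct {
	name   string
	closed bool
}

func (r *toyReader) Close(ctx context.Context) {
	r.closed = true
	fmt.Printf("closed %s\n", r.name)
}

// merger mimics ResultMerger's bookkeeping: active shrinks as inputs reach
// EOF, while all keeps every input so Close can release them later.
type merger struct {
	active []reader
	all    []reader
}

func newMerger(inputs []reader) *merger {
	return &merger{active: append([]reader(nil), inputs...), all: inputs}
}

// finish simulates an input reaching EOF and being dropped from the active
// set, the way deleteInput does.
func (m *merger) finish(r reader) {
	for i, in := range m.active {
		if in == r {
			m.active = append(m.active[:i], m.active[i+1:]...)
			return
		}
	}
}

// Close releases every input, including the ones that already hit EOF.
func (m *merger) Close(ctx context.Context) {
	for _, in := range m.all {
		in.Close(ctx)
	}
}

func main() {
	a, b := &toyReader{name: "shard -80"}, &toyReader{name: "shard 80-"}
	m := newMerger([]reader{a, b})
	m.finish(a) // a hit EOF and left the active set
	m.Close(context.Background())
	fmt.Println(a.closed, b.closed) // true true
}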
+ break + } + if got[i].RowsAffected != tc.want[i].RowsAffected { + inner.Logf("deviating RowsAffected value for Result at index: %v got = %v, want = %v", i, got[i].RowsAffected, tc.want[i].RowsAffected) + } + inner.Logf("deviating Rows for Result at index: %v got = %v, want = %v", i, got[i].Rows, tc.want[i].Rows) + } + if len(tc.want)-len(got) > 0 { + for i := len(got); i < len(tc.want); i++ { + inner.Logf("missing Result in got: %v", tc.want[i].Rows) + } + } + if len(got)-len(tc.want) > 0 { + for i := len(tc.want); i < len(got); i++ { + inner.Logf("unnecessary extra Result in got: %v", got[i].Rows) + } + } + inner.Fatalf("ResultMerger testcase '%v' failed. See output above for different rows.", tc.desc) + } + + for _, x := range tc.inputs { + fake := x.(*fakeResultReader) + if !fake.closed { + inner.Fatal("expected inputs to be closed by now") } } - if len(got)-len(tc.want) > 0 { - for i := len(tc.want); i < len(got); i++ { - t.Logf("unnecessary extra Result in got: %v", got[i].Rows) - } - } - t.Fatalf("ResultMerger testcase '%v' failed. See output above for different rows.", tc.desc) - } + }) } } @@ -387,6 +404,10 @@ func (m *memoryResultReader) Next() (*sqltypes.Result, error) { return result, nil } +func (m *memoryResultReader) Close(ctx context.Context) { + // intentionally blank. we have nothing we need to close +} + // benchmarkResult is a package level variable whose sole purpose is to // reference output from the Benchmark* functions below. // This was suggested by http://dave.cheney.net/2013/06/30/how-to-write-benchmarks-in-go diff --git a/go/vt/worker/result_reader.go b/go/vt/worker/result_reader.go index 51ccda4236..efc0fc1f7b 100644 --- a/go/vt/worker/result_reader.go +++ b/go/vt/worker/result_reader.go @@ -17,6 +17,8 @@ limitations under the License. package worker import ( + "context" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -39,4 +41,5 @@ type ResultReader interface { // It returns the next result on the stream. // It will return io.EOF if the stream ended. Next() (*sqltypes.Result, error) + Close(ctx context.Context) } diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index 3cc7638d4a..bead11591c 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -24,14 +24,16 @@ import ( "sync" "time" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/vterrors" "golang.org/x/net/context" "vitess.io/vitess/go/event" "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/sync2" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/throttler" @@ -64,28 +66,30 @@ var servingTypes = []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodat type SplitCloneWorker struct { StatusWorker - wr *wrangler.Wrangler - cloneType cloneType - cell string - destinationKeyspace string - shard string - online bool - offline bool + wr *wrangler.Wrangler + cloneType cloneType + cell string + destinationKeyspace string + shard string + online bool + offline bool + useConsistentSnapshot bool // verticalSplit only: List of tables which should be split out. tables []string // horizontalResharding only: List of tables which will be skipped. 
- excludeTables []string - chunkCount int - minRowsPerChunk int - sourceReaderCount int - writeQueryMaxRows int - writeQueryMaxSize int - destinationWriterCount int - minHealthyRdonlyTablets int - maxTPS int64 - maxReplicationLag int64 - cleaner *wrangler.Cleaner - tabletTracker *TabletTracker + excludeTables []string + chunkCount int + minRowsPerChunk int + sourceReaderCount int + writeQueryMaxRows int + writeQueryMaxSize int + destinationWriterCount int + minHealthyTablets int + tabletType topodatapb.TabletType + maxTPS int64 + maxReplicationLag int64 + cleaner *wrangler.Cleaner + tabletTracker *TabletTracker // populated during WorkerStateInit, read-only after that destinationKeyspaceInfo *topo.KeyspaceInfo @@ -101,6 +105,9 @@ type SplitCloneWorker struct { // populated during WorkerStateFindTargets, read-only after that sourceTablets []*topodatapb.Tablet + lastPos string // contains the GTID position for the source + transactions []int64 + // shardWatchers contains a TopologyWatcher for each source and destination // shard. It updates the list of tablets in the healthcheck if replicas are // added/removed. @@ -118,15 +125,15 @@ type SplitCloneWorker struct { // Throttlers will be added/removed during WorkerStateClone(Online|Offline). throttlers map[string]*throttler.Throttler - // offlineSourceAliases has the list of tablets (per source shard) we took + // sourceAliases has the list of tablets (per source shard) we took // offline for the WorkerStateCloneOffline phase. // Populated shortly before WorkerStateCloneOffline, read-only after that. - offlineSourceAliases []*topodatapb.TabletAlias + sourceAliases []*topodatapb.TabletAlias // formattedOfflineSourcesMu guards all fields in this group. formattedOfflineSourcesMu sync.Mutex // formattedOfflineSources is a space separated list of - // "offlineSourceAliases". It is used by the StatusAs* methods to output the + // "sourceAliases". It is used by the StatusAs* methods to output the // used source tablets during the offline clone phase. formattedOfflineSources string @@ -140,20 +147,20 @@ type SplitCloneWorker struct { } // newSplitCloneWorker returns a new worker object for the SplitClone command. 
-func newSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, online, offline bool, excludeTables []string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS, maxReplicationLag int64) (Worker, error) { - return newCloneWorker(wr, horizontalResharding, cell, keyspace, shard, online, offline, nil /* tables */, excludeTables, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyRdonlyTablets, maxTPS, maxReplicationLag) +func newSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, online, offline bool, excludeTables []string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyTablets int, tabletType topodatapb.TabletType, maxTPS, maxReplicationLag int64, useConsistentSnapshot bool) (Worker, error) { + return newCloneWorker(wr, horizontalResharding, cell, keyspace, shard, online, offline, nil /* tables */, excludeTables, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyTablets, tabletType, maxTPS, maxReplicationLag, useConsistentSnapshot) } // newVerticalSplitCloneWorker returns a new worker object for the // VerticalSplitClone command. -func newVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, online, offline bool, tables []string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS, maxReplicationLag int64) (Worker, error) { - return newCloneWorker(wr, verticalSplit, cell, keyspace, shard, online, offline, tables, nil /* excludeTables */, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyRdonlyTablets, maxTPS, maxReplicationLag) +func newVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, online, offline bool, tables []string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyTablets int, tabletType topodatapb.TabletType, maxTPS, maxReplicationLag int64, useConsistentSnapshot bool) (Worker, error) { + return newCloneWorker(wr, verticalSplit, cell, keyspace, shard, online, offline, tables, nil /* excludeTables */, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyTablets, tabletType, maxTPS, maxReplicationLag, useConsistentSnapshot) } // newCloneWorker returns a new SplitCloneWorker object which is used both by // the SplitClone and VerticalSplitClone command. // TODO(mberlin): Rename SplitCloneWorker to cloneWorker. 
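Both wrapper constructors now take a tabletType and a useConsistentSnapshot flag alongside the renamed minHealthyTablets count, and newCloneWorker in the next hunk only accepts RDONLY or REPLICA. The command layer later in this diff resolves the type from a string flag through the generated topodata.TabletType_value map; the sketch below shows that parse-and-validate shape with a local map standing in for the generated one so it stays self-contained, and its numeric values are only for the demo output.

package main

import "fmt"

// tabletTypeValue is a local stand-in for the generated
// topodata.TabletType_value map; only the types the clone worker accepts as a
// source are listed, and the numbers are illustrative.
var tabletTypeValue = map[string]int32{
	"REPLICA": 2,
	"RDONLY":  3,
}

// parseTabletType converts a --tablet_type flag value into its enum value and
// rejects anything the clone worker cannot use as a source.
func parseTabletType(flagValue string) (int32, error) {
	v, ok := tabletTypeValue[flagValue]
	if !ok {
		return 0, fmt.Errorf("invalid tablet_type: %v", flagValue)
	}
	return v, nil
}

func main() {
	for _, in := range []string{"RDONLY", "REPLICA", "MASTER"} {
		v, err := parseTabletType(in)
		fmt.Printf("%-8s -> %d, %v\n", in, v, err)
	}
}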
-func newCloneWorker(wr *wrangler.Wrangler, cloneType cloneType, cell, keyspace, shard string, online, offline bool, tables, excludeTables []string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS, maxReplicationLag int64) (Worker, error) { +func newCloneWorker(wr *wrangler.Wrangler, cloneType cloneType, cell, keyspace, shard string, online, offline bool, tables, excludeTables []string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, destinationWriterCount, minHealthyTablets int, tabletType topodatapb.TabletType, maxTPS, maxReplicationLag int64, useConsistentSnapshot bool) (Worker, error) { if cloneType != horizontalResharding && cloneType != verticalSplit { return nil, fmt.Errorf("unknown cloneType: %v This is a bug. Please report", cloneType) } @@ -183,8 +190,8 @@ func newCloneWorker(wr *wrangler.Wrangler, cloneType cloneType, cell, keyspace, if destinationWriterCount <= 0 { return nil, fmt.Errorf("destination_writer_count must be > 0: %v", destinationWriterCount) } - if minHealthyRdonlyTablets < 0 { - return nil, fmt.Errorf("min_healthy_rdonly_tablets must be >= 0: %v", minHealthyRdonlyTablets) + if minHealthyTablets < 0 { + return nil, fmt.Errorf("min_healthy_tablets must be >= 0: %v", minHealthyTablets) } if maxTPS != throttler.MaxRateModuleDisabled { wr.Logger().Infof("throttling enabled and set to a max of %v transactions/second", maxTPS) @@ -195,35 +202,39 @@ func newCloneWorker(wr *wrangler.Wrangler, cloneType cloneType, cell, keyspace, if maxReplicationLag <= 0 { return nil, fmt.Errorf("max_replication_lag must be >= 1s: %v", maxReplicationLag) } + if tabletType != topodatapb.TabletType_REPLICA && tabletType != topodatapb.TabletType_RDONLY { + return nil, fmt.Errorf("tablet_type must be RDONLY or REPLICA: %v", topodatapb.TabletType_name[int32(tabletType)]) + } scw := &SplitCloneWorker{ - StatusWorker: NewStatusWorker(), - wr: wr, - cloneType: cloneType, - cell: cell, - destinationKeyspace: keyspace, - shard: shard, - online: online, - offline: offline, - tables: tables, - excludeTables: excludeTables, - chunkCount: chunkCount, - minRowsPerChunk: minRowsPerChunk, - sourceReaderCount: sourceReaderCount, - writeQueryMaxRows: writeQueryMaxRows, - writeQueryMaxSize: writeQueryMaxSize, - destinationWriterCount: destinationWriterCount, - minHealthyRdonlyTablets: minHealthyRdonlyTablets, - maxTPS: maxTPS, - maxReplicationLag: maxReplicationLag, - cleaner: &wrangler.Cleaner{}, - tabletTracker: NewTabletTracker(), - throttlers: make(map[string]*throttler.Throttler), - - destinationDbNames: make(map[string]string), - + StatusWorker: NewStatusWorker(), + wr: wr, + cloneType: cloneType, + cell: cell, + destinationKeyspace: keyspace, + shard: shard, + online: online, + offline: offline, + tables: tables, + excludeTables: excludeTables, + chunkCount: chunkCount, + minRowsPerChunk: minRowsPerChunk, + sourceReaderCount: sourceReaderCount, + writeQueryMaxRows: writeQueryMaxRows, + writeQueryMaxSize: writeQueryMaxSize, + destinationWriterCount: destinationWriterCount, + minHealthyTablets: minHealthyTablets, + maxTPS: maxTPS, + maxReplicationLag: maxReplicationLag, + cleaner: &wrangler.Cleaner{}, + tabletTracker: NewTabletTracker(), + tabletType: tabletType, tableStatusListOnline: &tableStatusList{}, tableStatusListOffline: &tableStatusList{}, + useConsistentSnapshot: useConsistentSnapshot, + + throttlers: make(map[string]*throttler.Throttler), + destinationDbNames: 
make(map[string]string), } scw.initializeEventDescriptor() return scw, nil @@ -476,9 +487,15 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error { time.Sleep(1 * time.Second) } - // 4a: Take source tablets out of serving for an exact snapshot. - if err := scw.findOfflineSourceTablets(ctx); err != nil { - return vterrors.Wrap(err, "findSourceTablets() failed") + // 4a: Make sure the sources are producing a stable view of the data + if scw.useConsistentSnapshot { + if err := scw.findTransactionalSources(ctx); err != nil { + return vterrors.Wrap(err, "findSourceTablets() failed") + } + } else { + if err := scw.findOfflineSourceTablets(ctx); err != nil { + return vterrors.Wrap(err, "findSourceTablets() failed") + } } if err := checkDone(ctx); err != nil { return err @@ -489,6 +506,10 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error { if err := scw.clone(ctx, WorkerStateCloneOffline); err != nil { return vterrors.Wrap(err, "offline clone() failed") } + if err := scw.setUpVReplication(ctx); err != nil { + return fmt.Errorf("failed to set up replication: %v", err) + } + d := time.Since(start) if err := checkDone(ctx); err != nil { return err @@ -583,6 +604,10 @@ func (scw *SplitCloneWorker) initShardsForHorizontalResharding(ctx context.Conte scw.destinationShards = os.Left } + if scw.useConsistentSnapshot && len(scw.sourceShards) > 1 { + return fmt.Errorf("cannot use consistent snapshot against multiple source shards") + } + return nil } @@ -705,20 +730,20 @@ func (scw *SplitCloneWorker) findOfflineSourceTablets(ctx context.Context) error scw.setState(WorkerStateFindTargets) // find an appropriate tablet in the source shards - scw.offlineSourceAliases = make([]*topodatapb.TabletAlias, len(scw.sourceShards)) + scw.sourceAliases = make([]*topodatapb.TabletAlias, len(scw.sourceShards)) for i, si := range scw.sourceShards { var err error - scw.offlineSourceAliases[i], err = FindWorkerTablet(ctx, scw.wr, scw.cleaner, scw.tsc, scw.cell, si.Keyspace(), si.ShardName(), scw.minHealthyRdonlyTablets, topodatapb.TabletType_RDONLY) + scw.sourceAliases[i], err = FindWorkerTablet(ctx, scw.wr, scw.cleaner, scw.tsc, scw.cell, si.Keyspace(), si.ShardName(), scw.minHealthyTablets, scw.tabletType) if err != nil { return vterrors.Wrapf(err, "FindWorkerTablet() failed for %v/%v/%v", scw.cell, si.Keyspace(), si.ShardName()) } - scw.wr.Logger().Infof("Using tablet %v as source for %v/%v", topoproto.TabletAliasString(scw.offlineSourceAliases[i]), si.Keyspace(), si.ShardName()) + scw.wr.Logger().Infof("Using tablet %v as source for %v/%v", topoproto.TabletAliasString(scw.sourceAliases[i]), si.Keyspace(), si.ShardName()) } - scw.setFormattedOfflineSources(scw.offlineSourceAliases) + scw.setFormattedOfflineSources(scw.sourceAliases) // get the tablet info for them, and stop their replication - scw.sourceTablets = make([]*topodatapb.Tablet, len(scw.offlineSourceAliases)) - for i, alias := range scw.offlineSourceAliases { + scw.sourceTablets = make([]*topodatapb.Tablet, len(scw.sourceAliases)) + for i, alias := range scw.sourceAliases { shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) ti, err := scw.wr.TopoServer().GetTablet(shortCtx, alias) cancel() @@ -740,6 +765,44 @@ func (scw *SplitCloneWorker) findOfflineSourceTablets(ctx context.Context) error return nil } +// findTransactionalSources phase: +// - get the aliases of all the source tablets +func (scw *SplitCloneWorker) findTransactionalSources(ctx context.Context) error { + scw.setState(WorkerStateFindTargets) + + if 
len(scw.sourceShards) > 1 { + return fmt.Errorf("consistent snapshot can only be used with a single source shard") + } + var err error + + // find an appropriate tablet in the source shard + si := scw.sourceShards[0] + scw.sourceAliases = make([]*topodatapb.TabletAlias, 1) + scw.sourceAliases[0], err = FindHealthyTablet(ctx, scw.wr, scw.tsc, scw.cell, si.Keyspace(), si.ShardName(), scw.minHealthyTablets, scw.tabletType) + if err != nil { + return fmt.Errorf("FindHealthyTablet() failed for %v/%v/%v: %v", scw.cell, si.Keyspace(), si.ShardName(), err) + } + scw.wr.Logger().Infof("Using tablet %v as source for %v/%v", topoproto.TabletAliasString(scw.sourceAliases[0]), si.Keyspace(), si.ShardName()) + scw.setFormattedOfflineSources(scw.sourceAliases) + + // get the tablet info + scw.sourceTablets = make([]*topodatapb.Tablet, 1) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + ti, err := scw.wr.TopoServer().GetTablet(shortCtx, scw.sourceAliases[0]) + cancel() + if err != nil { + return fmt.Errorf("cannot read tablet %v: %v", topoproto.TabletAliasString(scw.sourceAliases[0]), err) + } + scw.sourceTablets[0] = ti.Tablet + + // stop replication and create transactions to work on + txs, gtid, err := CreateConsistentTransactions(ctx, ti, scw.wr, scw.cleaner, scw.sourceReaderCount) + scw.wr.Logger().Infof("created %v transactions", len(txs)) + scw.lastPos = gtid + scw.transactions = txs + return nil +} + // findDestinationMasters finds for each destination shard the current master. func (scw *SplitCloneWorker) findDestinationMasters(ctx context.Context) error { scw.setState(WorkerStateFindTargets) @@ -748,8 +811,9 @@ func (scw *SplitCloneWorker) findDestinationMasters(ctx context.Context) error { scw.wr.Logger().Infof("Finding a MASTER tablet for each destination shard...") for _, si := range scw.destinationShards { waitCtx, waitCancel := context.WithTimeout(ctx, *waitForHealthyTabletsTimeout) - defer waitCancel() - if err := scw.tsc.WaitForTablets(waitCtx, scw.cell, si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER); err != nil { + err := scw.tsc.WaitForTablets(waitCtx, scw.cell, si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER) + waitCancel() + if err != nil { return vterrors.Wrapf(err, "cannot find MASTER tablet for destination shard for %v/%v (in cell: %v)", si.Keyspace(), si.ShardName(), scw.cell) } masters := scw.tsc.GetHealthyTabletStats(si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER) @@ -775,18 +839,18 @@ func (scw *SplitCloneWorker) waitForTablets(ctx context.Context, shardInfos []*t var wg sync.WaitGroup rec := concurrency.AllErrorRecorder{} - if scw.minHealthyRdonlyTablets > 0 && len(shardInfos) > 0 { - scw.wr.Logger().Infof("Waiting %v for %d %s/%s RDONLY tablet(s)", timeout, scw.minHealthyRdonlyTablets, shardInfos[0].Keyspace(), shardInfos[0].ShardName()) + if scw.minHealthyTablets > 0 && len(shardInfos) > 0 { + scw.wr.Logger().Infof("Waiting %v for %d %s/%s RDONLY tablet(s)", timeout, scw.minHealthyTablets, shardInfos[0].Keyspace(), shardInfos[0].ShardName()) } for _, si := range shardInfos { wg.Add(1) go func(keyspace, shard string) { defer wg.Done() - // We wait for --min_healthy_rdonly_tablets because we will use several + // We wait for --min_healthy_tablets because we will use several // tablets per shard to spread reading the chunks of rows across as many // tablets as possible. 
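CreateConsistentTransactions itself is not shown in this hunk; from the surrounding comments it stops replication on the chosen source tablet, opens sourceReaderCount transactions that all see the same data, and records the GTID position (scw.lastPos) so filtered replication can start from it later. The sketch below only illustrates the underlying MySQL building blocks such a helper can rely on, START TRANSACTION WITH CONSISTENT SNAPSHOT plus reading gtid_executed; it uses the go-sql-driver/mysql driver with a placeholder DSN and is an assumption about the mechanism, not the worker's actual implementation.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // MySQL driver; the DSN below is a placeholder
)

func main() {
	ctx := context.Background()
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	const readers = 4 // stands in for sourceReaderCount

	conns := make([]*sql.Conn, 0, readers)
	for i := 0; i < readers; i++ {
		// Each worker thread gets its own session. A dedicated *sql.Conn is
		// needed so the open transaction stays bound to that session.
		conn, err := db.Conn(ctx)
		if err != nil {
			log.Fatal(err)
		}
		// InnoDB takes the read view immediately, so every later statement in
		// this transaction sees the data as of this point in time.
		if _, err := conn.ExecContext(ctx, "START TRANSACTION WITH CONSISTENT SNAPSHOT"); err != nil {
			log.Fatal(err)
		}
		conns = append(conns, conn)
	}

	// Record the replication position the snapshots correspond to; in the
	// worker replication on the tablet is stopped around this step so the
	// position and the snapshots line up, and the result ends up in lastPos.
	var gtid string
	if err := db.QueryRowContext(ctx, "SELECT @@GLOBAL.gtid_executed").Scan(&gtid); err != nil {
		log.Fatal(err)
	}
	log.Printf("opened %d consistent-snapshot transactions at GTID %q", len(conns), gtid)

	// A real helper would hand these sessions to the result readers and roll
	// the transactions back once the copy finishes.
	for _, conn := range conns {
		conn.ExecContext(ctx, "ROLLBACK")
		conn.Close()
	}
}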
- if _, err := waitForHealthyTablets(ctx, scw.wr, scw.tsc, scw.cell, keyspace, shard, scw.minHealthyRdonlyTablets, timeout, topodatapb.TabletType_RDONLY); err != nil { + if _, err := waitForHealthyTablets(ctx, scw.wr, scw.tsc, scw.cell, keyspace, shard, scw.minHealthyTablets, timeout, scw.tabletType); err != nil { rec.RecordError(err) } }(si.Keyspace(), si.ShardName()) @@ -795,6 +859,254 @@ func (scw *SplitCloneWorker) waitForTablets(ctx context.Context, shardInfos []*t return rec.Error() } +func (scw *SplitCloneWorker) findFirstSourceTablet(ctx context.Context, state StatusWorkerState) (*topodatapb.Tablet, error) { + if state == WorkerStateCloneOffline { + // Use the first source tablet which we took offline. + return scw.sourceTablets[0], nil + } + + // Pick any healthy serving source tablet. + si := scw.sourceShards[0] + tablets := scw.tsc.GetHealthyTabletStats(si.Keyspace(), si.ShardName(), scw.tabletType) + if len(tablets) == 0 { + // We fail fast on this problem and don't retry because at the start all tablets should be healthy. + return nil, fmt.Errorf("no healthy %v tablet in source shard (%v) available (required to find out the schema)", topodatapb.TabletType_name[int32(scw.tabletType)], topoproto.KeyspaceShardString(si.Keyspace(), si.ShardName())) + } + return tablets[0].Tablet, nil +} + +func (scw *SplitCloneWorker) getCounters(state StatusWorkerState) ([]*stats.CountersWithSingleLabel, *tableStatusList) { + switch state { + case WorkerStateCloneOnline: + return []*stats.CountersWithSingleLabel{statsOnlineInsertsCounters, statsOnlineUpdatesCounters, statsOnlineDeletesCounters, statsOnlineEqualRowsCounters}, + scw.tableStatusListOnline + case WorkerStateCloneOffline: + return []*stats.CountersWithSingleLabel{statsOfflineInsertsCounters, statsOfflineUpdatesCounters, statsOfflineDeletesCounters, statsOfflineEqualRowsCounters}, + scw.tableStatusListOffline + default: + panic("should not happen") + } +} + +func (scw *SplitCloneWorker) startExecutor(ctx context.Context, wg *sync.WaitGroup, keyspace, shard string, insertChannel chan string, threadID int, processError func(string, ...interface{})) { + defer wg.Done() + t := scw.getThrottler(keyspace, shard) + //defer t.ThreadFinished(threadID) + + executor := newExecutor(scw.wr, scw.tsc, t, keyspace, shard, threadID) + if err := executor.fetchLoop(ctx, insertChannel); err != nil { + processError("executer.FetchLoop failed: %v", err) + } +} + +func mergeOrSingle(readers []ResultReader, td *tabletmanagerdatapb.TableDefinition) (ResultReader, error) { + if len(readers) == 1 { + return readers[0], nil + } + + sourceReader, err := NewResultMerger(readers, len(td.PrimaryKeyColumns)) + if err != nil { + return nil, err + } + + return sourceReader, nil +} + +func closeReaders(ctx context.Context, readers []ResultReader) { + for _, reader := range readers { + if reader != nil { + reader.Close(ctx) + } + } +} + +func (scw *SplitCloneWorker) getSourceResultReader(ctx context.Context, td *tabletmanagerdatapb.TableDefinition, state StatusWorkerState, chunk chunk, txID int64) (ResultReader, error) { + sourceReaders := make([]ResultReader, len(scw.sourceShards)) + + for shardIndex, si := range scw.sourceShards { + var sourceResultReader ResultReader + if state == WorkerStateCloneOffline && scw.useConsistentSnapshot { + var err error + if txID < 1 { + return nil, fmt.Errorf("tried using consistent snapshot without a valid transaction") + } + tp := newShardTabletProvider(scw.tsc, scw.tabletTracker, si.Keyspace(), si.ShardName(), scw.tabletType) + 
sourceResultReader, err = NewTransactionalRestartableResultReader(ctx, scw.wr.Logger(), tp, td, chunk, false, txID) + if err != nil { + closeReaders(ctx, sourceReaders) + return nil, fmt.Errorf("NewTransactionalRestartableResultReader for source: %v failed: %v", tp.description(), err) + } + } else { + var err error + var tp tabletProvider + allowMultipleRetries := true + if state == WorkerStateCloneOffline { + tp = newSingleTabletProvider(ctx, scw.wr.TopoServer(), scw.sourceAliases[shardIndex]) + // allowMultipleRetries is false to avoid that we'll keep retrying + // on the same tablet alias for hours. This guards us against the + // situation that an offline tablet gets restarted and serves again. + // In that case we cannot use it because its replication is no + // longer stopped at the same point as we took it offline initially. + allowMultipleRetries = false + } else { + tp = newShardTabletProvider(scw.tsc, scw.tabletTracker, si.Keyspace(), si.ShardName(), scw.tabletType) + } + sourceResultReader, err = NewRestartableResultReader(ctx, scw.wr.Logger(), tp, td, chunk, allowMultipleRetries) + if err != nil { + closeReaders(ctx, sourceReaders) + return nil, fmt.Errorf("NewRestartableResultReader for source: %v failed: %v", tp.description(), err) + } + } + sourceReaders[shardIndex] = sourceResultReader + } + resultReader, err := mergeOrSingle(sourceReaders, td) + if err != nil { + closeReaders(ctx, sourceReaders) + return nil, err + } + return resultReader, err +} + +func (scw *SplitCloneWorker) getDestinationResultReader(ctx context.Context, td *tabletmanagerdatapb.TableDefinition, state StatusWorkerState, chunk chunk) (ResultReader, error) { + destReaders := make([]ResultReader, len(scw.destinationShards)) + + for shardIndex, si := range scw.destinationShards { + tp := newShardTabletProvider(scw.tsc, scw.tabletTracker, si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER) + destResultReader, err := NewRestartableResultReader(ctx, scw.wr.Logger(), tp, td, chunk, true /* allowMultipleRetries */) + if err != nil { + closeReaders(ctx, destReaders) + return nil, fmt.Errorf("NewRestartableResultReader for destination: %v failed: %v", tp.description(), err) + } + destReaders[shardIndex] = destResultReader + } + resultReader, err := mergeOrSingle(destReaders, td) + if err != nil { + closeReaders(ctx, destReaders) + return nil, err + } + return resultReader, err +} + +func (scw *SplitCloneWorker) cloneAChunk(ctx context.Context, td *tabletmanagerdatapb.TableDefinition, tableIndex int, chunk chunk, processError func(string, ...interface{}), state StatusWorkerState, tableStatusList *tableStatusList, keyResolver keyspaceIDResolver, start time.Time, insertChannels []chan string, txID int64, statsCounters []*stats.CountersWithSingleLabel) { + errPrefix := fmt.Sprintf("table=%v chunk=%v", td.Name, chunk) + + var err error + + if err := checkDone(ctx); err != nil { + processError("%v: Context expired while this thread was waiting for its turn. Context error: %v", errPrefix, err) + return + } + + tableStatusList.threadStarted(tableIndex) + defer tableStatusList.threadDone(tableIndex) + + if state == WorkerStateCloneOnline { + // Wait for enough healthy tablets (they might have become unhealthy + // and their replication lag might have increased since we started.) 
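getSourceResultReader and getDestinationResultReader size their reader slice up front and call closeReaders on every error path, so readers built before the failure are released and empty slots are skipped. A generic sketch of that cleanup-on-partial-failure pattern is below; the closer interface and fakeReader type are stand-ins, not the worker's types.

package main

import (
	"context"
	"fmt"
)

// closer is the minimal shape shared by the worker's readers for cleanup.
type closer interface {
	Close(ctx context.Context)
}

type fakeReader struct{ id int }

func (f *fakeReader) Close(ctx context.Context) { fmt.Printf("closed reader %d\n", f.id) }

// closeAll mirrors the closeReaders helper: it tolerates nil slots because
// the slice is sized up front and only partially filled when an error hits.
func closeAll(ctx context.Context, readers []closer) {
	for _, r := range readers {
		if r != nil {
			r.Close(ctx)
		}
	}
}

// buildReaders fills one slot per shard and releases everything built so far
// if any single constructor fails, so no streaming connection leaks.
func buildReaders(ctx context.Context, shards, failAt int) ([]closer, error) {
	readers := make([]closer, shards)
	for i := 0; i < shards; i++ {
		if i == failAt {
			closeAll(ctx, readers)
			return nil, fmt.Errorf("constructing reader for shard %d failed", i)
		}
		readers[i] = &fakeReader{id: i}
	}
	return readers, nil
}

func main() {
	if _, err := buildReaders(context.Background(), 3, 2); err != nil {
		fmt.Println("error:", err) // readers 0 and 1 were closed before returning
	}
}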
+ if err := scw.waitForTablets(ctx, scw.sourceShards, *retryDuration); err != nil { + processError("%v: No healthy source tablets found (gave up after %v): %v", errPrefix, time.Since(start), err) + return + } + } + + // Set up readers for the diff. There will be one reader for every + // source and destination shard. + sourceReader, err := scw.getSourceResultReader(ctx, td, state, chunk, txID) + if err != nil { + processError("%v NewResultMerger for source tablets failed: %v", errPrefix, err) + return + } + defer sourceReader.Close(ctx) + destReader, err := scw.getDestinationResultReader(ctx, td, state, chunk) + if err != nil { + processError("%v NewResultMerger for destinations tablets failed: %v", errPrefix, err) + return + } + defer destReader.Close(ctx) + dbNames := make([]string, len(scw.destinationShards)) + for i, si := range scw.destinationShards { + keyspaceAndShard := topoproto.KeyspaceShardString(si.Keyspace(), si.ShardName()) + dbNames[i] = scw.destinationDbNames[keyspaceAndShard] + } + // Compare the data and reconcile any differences. + differ, err := NewRowDiffer2(ctx, sourceReader, destReader, td, tableStatusList, tableIndex, + scw.destinationShards, keyResolver, + insertChannels, ctx.Done(), dbNames, scw.writeQueryMaxRows, scw.writeQueryMaxSize, statsCounters) + if err != nil { + processError("%v: NewRowDiffer2 failed: %v", errPrefix, err) + return + } + // Ignore the diff report because all diffs should get reconciled. + _ /* DiffReport */, err = differ.Diff() + if err != nil { + processError("%v: RowDiffer2 failed: %v", errPrefix, err) + return + } +} + +type workUnit struct { + td *tabletmanagerdatapb.TableDefinition + chunk chunk + threadID int + resolver keyspaceIDResolver +} + +func (scw *SplitCloneWorker) startCloningData(ctx context.Context, state StatusWorkerState, sourceSchemaDefinition *tabletmanagerdatapb.SchemaDefinition, + processError func(string, ...interface{}), firstSourceTablet *topodatapb.Tablet, tableStatusList *tableStatusList, + start time.Time, statsCounters []*stats.CountersWithSingleLabel, insertChannels []chan string, wg *sync.WaitGroup) error { + + workPipeline := make(chan workUnit, 10) // We'll use a small buffer so producers do not run too far ahead of consumers + queryService, err := tabletconn.GetDialer()(firstSourceTablet, true) + if err != nil { + return fmt.Errorf("failed to create queryService: %v", err) + } + defer queryService.Close(ctx) + + // Let's start the work consumers + for i := 0; i < scw.sourceReaderCount; i++ { + var txID int64 + if scw.useConsistentSnapshot && state == WorkerStateCloneOffline { + txID = scw.transactions[i] + } else { + txID = -1 + } + + wg.Add(1) + go func() { + defer wg.Done() + for work := range workPipeline { + scw.cloneAChunk(ctx, work.td, work.threadID, work.chunk, processError, state, tableStatusList, work.resolver, start, insertChannels, txID, statsCounters) + } + }() + } + + // And now let's start producing work units + for tableIndex, td := range sourceSchemaDefinition.TableDefinitions { + td = reorderColumnsPrimaryKeyFirst(td) + + keyResolver, err := scw.createKeyResolver(td) + if err != nil { + return fmt.Errorf("cannot resolve sharding keys for keyspace %v: %v", scw.destinationKeyspace, err) + } + + // TODO(mberlin): We're going to chunk *all* source shards based on the MIN + // and MAX values of the *first* source shard. Is this going to be a problem? 
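startCloningData swaps the old per-chunk goroutine plus semaphore arrangement for a bounded pipeline: sourceReaderCount consumer goroutines drain a small buffered channel of workUnit values, and the loop continuing in the next hunk produces one unit per table chunk. A minimal, self-contained sketch of that shape follows; the unit payload and the consumer body are placeholders.

package main

import (
	"fmt"
	"sync"
)

// unit is a stand-in for the worker's workUnit (table, chunk, resolver).
type unit struct {
	table string
	chunk int
}

func main() {
	const consumers = 4             // stands in for sourceReaderCount
	pipeline := make(chan unit, 10) // small buffer keeps producers only slightly ahead of consumers

	var wg sync.WaitGroup
	for i := 0; i < consumers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// range exits cleanly once the producer closes the channel.
			for w := range pipeline {
				fmt.Printf("consumer %d copying %s chunk %d\n", id, w.table, w.chunk)
			}
		}(i)
	}

	// Producer: one unit per (table, chunk), mirroring the loop over
	// TableDefinitions and generateChunks in startCloningData.
	for _, table := range []string{"t1", "t2"} {
		for chunk := 0; chunk < 3; chunk++ {
			pipeline <- unit{table: table, chunk: chunk}
		}
	}
	close(pipeline) // lets the consumers drain the buffer and return
	wg.Wait()
}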
+ chunks, err := generateChunks(ctx, scw.wr, firstSourceTablet, td, scw.chunkCount, scw.minRowsPerChunk) + if err != nil { + return fmt.Errorf("failed to split table into chunks: %v", err) + } + tableStatusList.setThreadCount(tableIndex, len(chunks)) + + for _, c := range chunks { + workPipeline <- workUnit{td: td, chunk: c, threadID: tableIndex, resolver: keyResolver} + } + } + + close(workPipeline) + + return nil +} + // copy phase: // - copy the data from source tablets to destination masters (with replication on) // Assumes that the schema has already been created on each destination tablet @@ -809,31 +1121,13 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState) statsStateDurationsNs.Set(string(state), time.Now().Sub(start).Nanoseconds()) }() - var firstSourceTablet *topodatapb.Tablet - if state == WorkerStateCloneOffline { - // Use the first source tablet which we took offline. - firstSourceTablet = scw.sourceTablets[0] - } else { - // Pick any healthy serving source tablet. - si := scw.sourceShards[0] - tablets := scw.tsc.GetHealthyTabletStats(si.Keyspace(), si.ShardName(), topodatapb.TabletType_RDONLY) - if len(tablets) == 0 { - // We fail fast on this problem and don't retry because at the start all tablets should be healthy. - return fmt.Errorf("no healthy RDONLY tablet in source shard (%v) available (required to find out the schema)", topoproto.KeyspaceShardString(si.Keyspace(), si.ShardName())) - } - firstSourceTablet = tablets[0].Tablet - } - var statsCounters []*stats.CountersWithSingleLabel - var tableStatusList *tableStatusList - switch state { - case WorkerStateCloneOnline: - statsCounters = []*stats.CountersWithSingleLabel{statsOnlineInsertsCounters, statsOnlineUpdatesCounters, statsOnlineDeletesCounters, statsOnlineEqualRowsCounters} - tableStatusList = scw.tableStatusListOnline - case WorkerStateCloneOffline: - statsCounters = []*stats.CountersWithSingleLabel{statsOfflineInsertsCounters, statsOfflineUpdatesCounters, statsOfflineDeletesCounters, statsOfflineEqualRowsCounters} - tableStatusList = scw.tableStatusListOffline + var firstSourceTablet, err = scw.findFirstSourceTablet(ctx, state) + if err != nil { + return err } + statsCounters, tableStatusList := scw.getCounters(state) + // The throttlers exist only for the duration of this clone() call. // That means a SplitClone invocation with both online and offline phases // will create throttlers for each phase. @@ -849,22 +1143,19 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState) scw.wr.Logger().Infof("Source tablet 0 has %v tables to copy", len(sourceSchemaDefinition.TableDefinitions)) tableStatusList.initialize(sourceSchemaDefinition) - // In parallel, setup the channels to send SQL data chunks to for each destination tablet: - // - // mu protects the context for cancelation, and firstError - mu := sync.Mutex{} var firstError error ctx, cancelCopy := context.WithCancel(ctx) defer cancelCopy() processError := func(format string, args ...interface{}) { - mu.Lock() + // in theory we could have two threads see firstError as null and both write to the variable + // that should not cause any problems though - canceling and logging is concurrently safe, + // and overwriting the variable will not cause any problems + scw.wr.Logger().Errorf(format, args...) if firstError == nil { - scw.wr.Logger().Errorf(format, args...) firstError = fmt.Errorf(format, args...) 
cancelCopy() } - mu.Unlock() } // NOTE: Code below this point must *not* use "return" to exit this Go routine @@ -877,6 +1168,7 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState) // races between "defer throttler.ThreadFinished()" (must be executed first) // and "defer scw.closeThrottlers()". Otherwise, vtworker will panic. + // In parallel, setup the channels to send SQL data chunks to for each destination tablet: insertChannels := make([]chan string, len(scw.destinationShards)) destinationWaitGroup := sync.WaitGroup{} for shardIndex, si := range scw.destinationShards { @@ -888,168 +1180,41 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState) for j := 0; j < scw.destinationWriterCount; j++ { destinationWaitGroup.Add(1) - go func(keyspace, shard string, insertChannel chan string, throttler *throttler.Throttler, threadID int) { - defer destinationWaitGroup.Done() - defer throttler.ThreadFinished(threadID) - - executor := newExecutor(scw.wr, scw.tsc, throttler, keyspace, shard, threadID) - if err := executor.fetchLoop(ctx, insertChannel); err != nil { - processError("executer.FetchLoop failed: %v", err) - } - }(si.Keyspace(), si.ShardName(), insertChannels[shardIndex], scw.getThrottler(si.Keyspace(), si.ShardName()), j) + go scw.startExecutor(ctx, &destinationWaitGroup, si.Keyspace(), si.ShardName(), insertChannels[shardIndex], j, processError) } } // Now for each table, read data chunks and send them to all // insertChannels - sourceWaitGroup := sync.WaitGroup{} - sema := sync2.NewSemaphore(scw.sourceReaderCount, 0) - for tableIndex, td := range sourceSchemaDefinition.TableDefinitions { - td = reorderColumnsPrimaryKeyFirst(td) + readers := sync.WaitGroup{} - keyResolver, err := scw.createKeyResolver(td) - if err != nil { - processError("cannot resolve sharding keys for keyspace %v: %v", scw.destinationKeyspace, err) - break - } - - // TODO(mberlin): We're going to chunk *all* source shards based on the MIN - // and MAX values of the *first* source shard. Is this going to be a problem? - chunks, err := generateChunks(ctx, scw.wr, firstSourceTablet, td, scw.chunkCount, scw.minRowsPerChunk) - if err != nil { - processError("failed to split table into chunks: %v", err) - break - } - tableStatusList.setThreadCount(tableIndex, len(chunks)) - - for _, c := range chunks { - sourceWaitGroup.Add(1) - go func(td *tabletmanagerdatapb.TableDefinition, tableIndex int, chunk chunk) { - defer sourceWaitGroup.Done() - errPrefix := fmt.Sprintf("table=%v chunk=%v", td.Name, chunk) - - // We need our own error per Go routine to avoid races. - var err error - - sema.Acquire() - defer sema.Release() - - if err := checkDone(ctx); err != nil { - processError("%v: Context expired while this thread was waiting for its turn. Context error: %v", errPrefix, err) - return - } - - tableStatusList.threadStarted(tableIndex) - defer tableStatusList.threadDone(tableIndex) - - if state == WorkerStateCloneOnline { - // Wait for enough healthy tablets (they might have become unhealthy - // and their replication lag might have increased since we started.) - if err := scw.waitForTablets(ctx, scw.sourceShards, *retryDuration); err != nil { - processError("%v: No healthy source tablets found (gave up after %v): %v", errPrefix, time.Since(start), err) - return - } - } - - // Set up readers for the diff. There will be one reader for every - // source and destination shard. 
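The rewritten processError above drops the mutex and accepts that two goroutines may both observe firstError as nil, relying on cancelCopy and the logger being safe for concurrent use. If only the first error should be kept without reintroducing a lock, sync.Once gives an equivalent race-detector-clean shape; the sketch below is an alternative illustration under that assumption, not what this patch does.

package main

import (
	"context"
	"fmt"
	"sync"
)

// errorSink keeps only the first reported error and cancels the shared
// context; sync.Once ensures concurrent reporters never race on the field.
type errorSink struct {
	once   sync.Once
	cancel context.CancelFunc
	first  error
}

func (s *errorSink) report(err error) {
	s.once.Do(func() {
		s.first = err
		s.cancel()
	})
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sink := &errorSink{cancel: cancel}

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Every worker reports; only the first report wins and cancels ctx.
			sink.report(fmt.Errorf("worker %d failed", id))
			<-ctx.Done() // all workers observe the cancellation
		}(i)
	}
	wg.Wait()

	// Reading first here is safe: the write inside once.Do happens before the
	// reporting goroutine's wg.Done, which happens before wg.Wait returns.
	fmt.Println("first error:", sink.first)
}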
- sourceReaders := make([]ResultReader, len(scw.sourceShards)) - destReaders := make([]ResultReader, len(scw.destinationShards)) - for shardIndex, si := range scw.sourceShards { - var tp tabletProvider - allowMultipleRetries := true - if state == WorkerStateCloneOffline { - tp = newSingleTabletProvider(ctx, scw.wr.TopoServer(), scw.offlineSourceAliases[shardIndex]) - // allowMultipleRetries is false to avoid that we'll keep retrying - // on the same tablet alias for hours. This guards us against the - // situation that an offline tablet gets restarted and serves again. - // In that case we cannot use it because its replication is no - // longer stopped at the same point as we took it offline initially. - allowMultipleRetries = false - } else { - tp = newShardTabletProvider(scw.tsc, scw.tabletTracker, si.Keyspace(), si.ShardName(), topodatapb.TabletType_RDONLY) - } - sourceResultReader, err := NewRestartableResultReader(ctx, scw.wr.Logger(), tp, td, chunk, allowMultipleRetries) - if err != nil { - processError("%v: NewRestartableResultReader for source: %v failed: %v", errPrefix, tp.description(), err) - return - } - defer sourceResultReader.Close(ctx) - sourceReaders[shardIndex] = sourceResultReader - } - - for shardIndex, si := range scw.destinationShards { - tp := newShardTabletProvider(scw.tsc, scw.tabletTracker, si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER) - destResultReader, err := NewRestartableResultReader(ctx, scw.wr.Logger(), tp, td, chunk, true /* allowMultipleRetries */) - if err != nil { - processError("%v: NewRestartableResultReader for destination: %v failed: %v", errPrefix, tp.description(), err) - return - } - defer destResultReader.Close(ctx) - destReaders[shardIndex] = destResultReader - } - - var sourceReader ResultReader - var destReader ResultReader - if len(sourceReaders) >= 2 { - sourceReader, err = NewResultMerger(sourceReaders, len(td.PrimaryKeyColumns)) - if err != nil { - processError("%v: NewResultMerger for source tablets failed: %v", errPrefix, err) - return - } - } else { - sourceReader = sourceReaders[0] - } - if len(destReaders) >= 2 { - destReader, err = NewResultMerger(destReaders, len(td.PrimaryKeyColumns)) - if err != nil { - processError("%v: NewResultMerger for destination tablets failed: %v", errPrefix, err) - return - } - } else { - destReader = destReaders[0] - } - - dbNames := make([]string, len(scw.destinationShards)) - for i, si := range scw.destinationShards { - keyspaceAndShard := topoproto.KeyspaceShardString(si.Keyspace(), si.ShardName()) - dbNames[i] = scw.destinationDbNames[keyspaceAndShard] - } - // Compare the data and reconcile any differences. - differ, err := NewRowDiffer2(ctx, sourceReader, destReader, td, tableStatusList, tableIndex, - scw.destinationShards, keyResolver, - insertChannels, ctx.Done(), dbNames, scw.writeQueryMaxRows, scw.writeQueryMaxSize, statsCounters) - if err != nil { - processError("%v: NewRowDiffer2 failed: %v", errPrefix, err) - return - } - // Ignore the diff report because all diffs should get reconciled. 
- _ /* DiffReport */, err = differ.Diff() - if err != nil { - processError("%v: RowDiffer2 failed: %v", errPrefix, err) - return - } - }(td, tableIndex, c) - } + err = scw.startCloningData(ctx, state, sourceSchemaDefinition, processError, firstSourceTablet, tableStatusList, start, statsCounters, insertChannels, &readers) + if err != nil { + return fmt.Errorf("failed to startCloningData : %v", err) } - sourceWaitGroup.Wait() + readers.Wait() for shardIndex := range scw.destinationShards { close(insertChannels[shardIndex]) } destinationWaitGroup.Wait() - if firstError != nil { - return firstError - } - if state == WorkerStateCloneOffline { - // Create and populate the vreplication table to give filtered replication - // a starting point. - queries := make([]string, 0, 4) - queries = append(queries, binlogplayer.CreateVReplicationTable()...) + return firstError +} - // get the current position from the sources - sourcePositions := make([]string, len(scw.sourceShards)) +func (scw *SplitCloneWorker) setUpVReplication(ctx context.Context) error { + wg := sync.WaitGroup{} + // Create and populate the vreplication table to give filtered replication + // a starting point. + queries := make([]string, 0, 4) + queries = append(queries, binlogplayer.CreateVReplicationTable()...) + + // get the current position from the sources + sourcePositions := make([]string, len(scw.sourceShards)) + + if scw.useConsistentSnapshot { + sourcePositions[0] = scw.lastPos + } else { for shardIndex := range scw.sourceShards { shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) status, err := scw.wr.TabletManagerClient().SlaveStatus(shortCtx, scw.sourceTablets[shardIndex]) @@ -1059,46 +1224,60 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState) } sourcePositions[shardIndex] = status.Position } + } + cancelableCtx, cancel := context.WithCancel(ctx) + rec := concurrency.AllErrorRecorder{} + handleError := func(e error) { + rec.RecordError(e) + cancel() + } - for _, si := range scw.destinationShards { - destinationWaitGroup.Add(1) - go func(keyspace, shard string, kr *topodatapb.KeyRange) { - defer destinationWaitGroup.Done() - scw.wr.Logger().Infof("Making and populating vreplication table") + for _, si := range scw.destinationShards { + wg.Add(1) + go func(keyspace, shard string, kr *topodatapb.KeyRange) { + defer wg.Done() + scw.wr.Logger().Infof("Making and populating vreplication table") - exc := newExecutor(scw.wr, scw.tsc, nil, keyspace, shard, 0) - for shardIndex, src := range scw.sourceShards { - bls := &binlogdatapb.BinlogSource{ - Keyspace: src.Keyspace(), - Shard: src.ShardName(), - } - if scw.tables == nil { - bls.KeyRange = kr - } else { - bls.Tables = scw.tables - } - // TODO(mberlin): Fill in scw.maxReplicationLag once the adapative - // throttler is enabled by default. 
- qr, err := exc.vreplicationExec(ctx, binlogplayer.CreateVReplication("SplitClone", bls, sourcePositions[shardIndex], scw.maxTPS, throttler.ReplicationLagModuleDisabled, time.Now().Unix())) - if err != nil { - processError("vreplication queries failed: %v", err) - break - } - if err := scw.wr.SourceShardAdd(ctx, keyspace, shard, uint32(qr.InsertID), src.Keyspace(), src.ShardName(), src.Shard.KeyRange, scw.tables); err != nil { - processError("could not add source shard: %v", err) - break - } + exc := newExecutor(scw.wr, scw.tsc, nil, keyspace, shard, 0) + for shardIndex, src := range scw.sourceShards { + // Check if any error occurred in any other gorouties: + select { + case <-cancelableCtx.Done(): + return // Error somewhere, terminate + default: } - // refreshState will cause the destination to become non-serving because - // it's now participating in the resharding workflow. - if err := exc.refreshState(ctx); err != nil { - processError("RefreshState failed on tablet %v/%v: %v", keyspace, shard, err) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: src.Keyspace(), + Shard: src.ShardName(), } - }(si.Keyspace(), si.ShardName(), si.KeyRange) - } - destinationWaitGroup.Wait() - } // clonePhase == offline - return firstError + if scw.tables == nil { + bls.KeyRange = kr + } else { + bls.Tables = scw.tables + } + // TODO(mberlin): Fill in scw.maxReplicationLag once the adapative throttler is enabled by default. + qr, err := exc.vreplicationExec(cancelableCtx, binlogplayer.CreateVReplication("SplitClone", bls, sourcePositions[shardIndex], scw.maxTPS, throttler.ReplicationLagModuleDisabled, time.Now().Unix())) + if err != nil { + handleError(fmt.Errorf("vreplication queries failed: %v", err)) + cancel() + return + } + if err := scw.wr.SourceShardAdd(cancelableCtx, keyspace, shard, uint32(qr.InsertID), src.Keyspace(), src.ShardName(), src.Shard.KeyRange, scw.tables); err != nil { + handleError(fmt.Errorf("could not add source shard: %v", err)) + break + } + } + // refreshState will cause the destination to become non-serving because + // it's now participating in the resharding workflow. + if err := exc.refreshState(ctx); err != nil { + handleError(fmt.Errorf("RefreshState failed on tablet %v/%v: %v", keyspace, shard, err)) + } + }(si.Keyspace(), si.ShardName(), si.KeyRange) + } + wg.Wait() + + return rec.Error() } func (scw *SplitCloneWorker) getSourceSchema(ctx context.Context, tablet *topodatapb.Tablet) (*tabletmanagerdatapb.SchemaDefinition, error) { diff --git a/go/vt/worker/split_clone_cmd.go b/go/vt/worker/split_clone_cmd.go index 9285ae2f4c..e3d1085560 100644 --- a/go/vt/worker/split_clone_cmd.go +++ b/go/vt/worker/split_clone_cmd.go @@ -25,12 +25,13 @@ import ( "strings" "sync" - "vitess.io/vitess/go/vt/vterrors" - "golang.org/x/net/context" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/wrangler" ) @@ -82,14 +83,22 @@ const splitCloneHTML2 = `

[splitCloneHTML2 form markup did not survive extraction at this hunk. Based on the handler changes below, it replaces the minHealthyRdonlyTablets input with a minHealthyTablets input and adds tabletType and useConsistentSnapshot form fields, matching the form values read by interactiveSplitClone.]
@@ -108,7 +117,9 @@ func commandSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagS writeQueryMaxRows := subFlags.Int("write_query_max_rows", defaultWriteQueryMaxRows, "maximum number of rows per write query") writeQueryMaxSize := subFlags.Int("write_query_max_size", defaultWriteQueryMaxSize, "maximum size (in bytes) per write query") destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination") - minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets in the source and destination shard at start") + tabletTypeStr := subFlags.String("tablet_type", "RDONLY", "tablet type to use (RDONLY or REPLICA)") + minHealthyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyTablets, "minimum number of healthy tablets in the source and destination shard at start") + useConsistentSnapshot := subFlags.Bool("use_consistent_snapshot", defaultUseConsistentSnapshot, "Instead of pausing replication on the source, uses transactions with consistent snapshot to have a stable view of the data.") maxTPS := subFlags.Int64("max_tps", defaultMaxTPS, "rate limit of maximum number of (write) transactions/second on the destination (unlimited by default)") maxReplicationLag := subFlags.Int64("max_replication_lag", defaultMaxReplicationLag, "if set, the adapative throttler will be enabled and automatically adjust the write rate to keep the lag below the set value in seconds (disabled by default)") if err := subFlags.Parse(args); err != nil { @@ -116,7 +127,7 @@ func commandSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagS } if subFlags.NArg() != 1 { subFlags.Usage() - return nil, fmt.Errorf("command SplitClone requires ") + return nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "command SplitClone requires ") } keyspace, shard, err := topoproto.ParseKeyspaceShard(subFlags.Arg(0)) @@ -127,7 +138,11 @@ func commandSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagS if *excludeTables != "" { excludeTableArray = strings.Split(*excludeTables, ",") } - worker, err := newSplitCloneWorker(wr, wi.cell, keyspace, shard, *online, *offline, excludeTableArray, *chunkCount, *minRowsPerChunk, *sourceReaderCount, *writeQueryMaxRows, *writeQueryMaxSize, *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS, *maxReplicationLag) + tabletType, ok := topodata.TabletType_value[*tabletTypeStr] + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "command SplitClone invalid tablet_type: %v", tabletType) + } + worker, err := newSplitCloneWorker(wr, wi.cell, keyspace, shard, *online, *offline, excludeTableArray, *chunkCount, *minRowsPerChunk, *sourceReaderCount, *writeQueryMaxRows, *writeQueryMaxSize, *destinationWriterCount, *minHealthyTablets, topodata.TabletType(tabletType), *maxTPS, *maxReplicationLag, *useConsistentSnapshot) if err != nil { return nil, vterrors.Wrap(err, "cannot create split clone worker") } @@ -173,7 +188,7 @@ func keyspacesWithOverlappingShards(ctx context.Context, wr *wrangler.Wrangler) return nil, rec.Error() } if len(result) == 0 { - return nil, fmt.Errorf("There are no keyspaces with overlapping shards") + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "there are no keyspaces with overlapping shards") } return result, nil } @@ -212,9 +227,10 @@ func interactiveSplitClone(ctx context.Context, wi *Instance, wr *wrangler.Wrang 
result["DefaultWriteQueryMaxRows"] = fmt.Sprintf("%v", defaultWriteQueryMaxRows) result["DefaultWriteQueryMaxSize"] = fmt.Sprintf("%v", defaultWriteQueryMaxSize) result["DefaultDestinationWriterCount"] = fmt.Sprintf("%v", defaultDestinationWriterCount) - result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets) + result["DefaultMinHealthyTablets"] = fmt.Sprintf("%v", defaultMinHealthyTablets) result["DefaultMaxTPS"] = fmt.Sprintf("%v", defaultMaxTPS) result["DefaultMaxReplicationLag"] = fmt.Sprintf("%v", defaultMaxReplicationLag) + result["DefaultUseConsistentSnapshot"] = fmt.Sprintf("%v", defaultUseConsistentSnapshot) return nil, splitCloneTemplate2, result, nil } @@ -257,10 +273,15 @@ func interactiveSplitClone(ctx context.Context, wi *Instance, wr *wrangler.Wrang if err != nil { return nil, nil, nil, vterrors.Wrap(err, "cannot parse destinationWriterCount") } - minHealthyRdonlyTabletsStr := r.FormValue("minHealthyRdonlyTablets") - minHealthyRdonlyTablets, err := strconv.ParseInt(minHealthyRdonlyTabletsStr, 0, 64) + minHealthyTabletsStr := r.FormValue("minHealthyTablets") + minHealthyTablets, err := strconv.ParseInt(minHealthyTabletsStr, 0, 64) if err != nil { - return nil, nil, nil, vterrors.Wrap(err, "cannot parse minHealthyRdonlyTablets") + return nil, nil, nil, vterrors.Wrap(err, "cannot parse minHealthyTablets") + } + tabletTypeStr := r.FormValue("tabletType") + tabletType, ok := topodata.TabletType_value[tabletTypeStr] + if !ok { + return nil, nil, nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "command SplitClone invalid tablet_type: %v", tabletType) } maxTPSStr := r.FormValue("maxTPS") maxTPS, err := strconv.ParseInt(maxTPSStr, 0, 64) @@ -273,8 +294,11 @@ func interactiveSplitClone(ctx context.Context, wi *Instance, wr *wrangler.Wrang return nil, nil, nil, vterrors.Wrap(err, "cannot parse maxReplicationLag") } + useConsistentSnapshotStr := r.FormValue("useConsistentSnapshot") + useConsistentSnapshot := useConsistentSnapshotStr == "true" + // start the clone job - wrk, err := newSplitCloneWorker(wr, wi.cell, keyspace, shard, online, offline, excludeTableArray, int(chunkCount), int(minRowsPerChunk), int(sourceReaderCount), int(writeQueryMaxRows), int(writeQueryMaxSize), int(destinationWriterCount), int(minHealthyRdonlyTablets), maxTPS, maxReplicationLag) + wrk, err := newSplitCloneWorker(wr, wi.cell, keyspace, shard, online, offline, excludeTableArray, int(chunkCount), int(minRowsPerChunk), int(sourceReaderCount), int(writeQueryMaxRows), int(writeQueryMaxSize), int(destinationWriterCount), int(minHealthyTablets), topodata.TabletType(tabletType), maxTPS, maxReplicationLag, useConsistentSnapshot) if err != nil { return nil, nil, nil, vterrors.Wrap(err, "cannot create worker") } @@ -286,4 +310,4 @@ func init() { commandSplitClone, interactiveSplitClone, "[--online=false] [--offline=false] [--exclude_tables=''] ", "Replicates the data and creates configuration for a horizontal split."}) -} +} \ No newline at end of file diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 911a5c8ff8..7e73330b27 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -27,7 +27,6 @@ import ( "time" "golang.org/x/net/context" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" @@ -291,6 +290,11 @@ func (tc *splitCloneTestCase) tearDown() { for _, ft := range tc.tablets { ft.StopActionLoop(tc.t) + ft.RPCServer.Stop() + ft.FakeMysqlDaemon.Close() + ft.Agent = 
nil + ft.RPCServer = nil + ft.FakeMysqlDaemon = nil } tc.leftMasterFakeDb.VerifyAllExecutedOrFail() tc.leftReplicaFakeDb.VerifyAllExecutedOrFail() @@ -342,7 +346,7 @@ func newTestQueryService(t *testing.T, target querypb.Target, shqs *fakes.Stream } } -func (sq *testQueryService) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { +func (sq *testQueryService) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { // Custom parsing of the query we expect. // Example: SELECT `id`, `msg`, `keyspace_id` FROM table1 WHERE id>=180 AND id<190 ORDER BY id min := math.MinInt32 @@ -522,7 +526,7 @@ func TestSplitCloneV2_Offline(t *testing.T) { // Run the vtworker command. if err := runCommand(t, tc.wi, tc.wi.wr, tc.defaultWorkerArgs); err != nil { - t.Fatal(err) + t.Fatalf("%+v", err) } } diff --git a/go/vt/worker/split_diff_cmd.go b/go/vt/worker/split_diff_cmd.go index 39178646f8..460c8587c9 100644 --- a/go/vt/worker/split_diff_cmd.go +++ b/go/vt/worker/split_diff_cmd.go @@ -83,7 +83,7 @@ var splitDiffTemplate2 = mustParseTemplate("splitDiff2", splitDiffHTML2) func commandSplitDiff(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (Worker, error) { sourceUID := subFlags.Int("source_uid", 0, "uid of the source shard to run the diff against") excludeTables := subFlags.String("exclude_tables", "", "comma separated list of tables to exclude") - minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets before taking out one") + minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyTablets, "minimum number of healthy RDONLY tablets before taking out one") destTabletTypeStr := subFlags.String("dest_tablet_type", defaultDestTabletType, "destination tablet type (RDONLY or REPLICA) that will be used to compare the shards") parallelDiffsCount := subFlags.Int("parallel_diffs_count", defaultParallelDiffsCount, "number of tables to diff in parallel") if err := subFlags.Parse(args); err != nil { @@ -196,7 +196,7 @@ func interactiveSplitDiff(ctx context.Context, wi *Instance, wr *wrangler.Wrangl result["Keyspace"] = keyspace result["Shard"] = shard result["DefaultSourceUID"] = "0" - result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets) + result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyTablets) result["DefaultParallelDiffsCount"] = fmt.Sprintf("%v", defaultParallelDiffsCount) return nil, splitDiffTemplate2, result, nil } diff --git a/go/vt/worker/split_diff_test.go b/go/vt/worker/split_diff_test.go index efc8f4be61..c281852f80 100644 --- a/go/vt/worker/split_diff_test.go +++ b/go/vt/worker/split_diff_test.go @@ -48,7 +48,7 @@ type destinationTabletServer struct { excludedTable string } -func (sq *destinationTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { +func (sq *destinationTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID 
int64, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { if strings.Contains(sql, sq.excludedTable) { sq.t.Errorf("Split Diff operation on destination should skip the excluded table: %v query: %v", sq.excludedTable, sql) } @@ -110,7 +110,7 @@ type sourceTabletServer struct { v3 bool } -func (sq *sourceTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { +func (sq *sourceTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { if strings.Contains(sql, sq.excludedTable) { sq.t.Errorf("Split Diff operation on source should skip the excluded table: %v query: %v", sq.excludedTable, sql) } diff --git a/go/vt/worker/vertical_split_clone_cmd.go b/go/vt/worker/vertical_split_clone_cmd.go index c2d5a0b074..f221ee5d67 100644 --- a/go/vt/worker/vertical_split_clone_cmd.go +++ b/go/vt/worker/vertical_split_clone_cmd.go @@ -25,6 +25,7 @@ import ( "strings" "sync" + "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/vterrors" "golang.org/x/net/context" @@ -106,7 +107,8 @@ func commandVerticalSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *fl writeQueryMaxRows := subFlags.Int("write_query_max_rows", defaultWriteQueryMaxRows, "maximum number of rows per write query") writeQueryMaxSize := subFlags.Int("write_query_max_size", defaultWriteQueryMaxSize, "maximum size (in bytes) per write query") destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination") - minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets before taking out one") + minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyTablets, "minimum number of healthy RDONLY tablets before taking out one") + tabletTypeStr := subFlags.String("tablet_type", "RDONLY", "tablet type to use (RDONLY or REPLICA)") maxTPS := subFlags.Int64("max_tps", defaultMaxTPS, "if non-zero, limit copy to maximum number of (write) transactions/second on the destination (unlimited by default)") maxReplicationLag := subFlags.Int64("max_replication_lag", defaultMaxReplicationLag, "if set, the adapative throttler will be enabled and automatically adjust the write rate to keep the lag below the set value in seconds (disabled by default)") if err := subFlags.Parse(args); err != nil { @@ -127,7 +129,12 @@ func commandVerticalSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *fl if *tables != "" { tableArray = strings.Split(*tables, ",") } - worker, err := newVerticalSplitCloneWorker(wr, wi.cell, keyspace, shard, *online, *offline, tableArray, *chunkCount, *minRowsPerChunk, *sourceReaderCount, *writeQueryMaxRows, *writeQueryMaxSize, *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS, *maxReplicationLag) + tabletType, ok := topodata.TabletType_value[*tabletTypeStr] + if !ok { + return nil, fmt.Errorf("command VerticalSplitClone invalid tablet_type: %v", *tabletTypeStr) + } + + worker, err := newVerticalSplitCloneWorker(wr, wi.cell, keyspace, shard, *online, *offline, tableArray, *chunkCount, *minRowsPerChunk, *sourceReaderCount, *writeQueryMaxRows, 
*writeQueryMaxSize, *destinationWriterCount, *minHealthyRdonlyTablets, topodata.TabletType(tabletType), *maxTPS, *maxReplicationLag, /*useConsistentSnapshot*/false) if err != nil { return nil, vterrors.Wrap(err, "cannot create worker") } @@ -208,7 +215,7 @@ func interactiveVerticalSplitClone(ctx context.Context, wi *Instance, wr *wrangl result["DefaultWriteQueryMaxRows"] = fmt.Sprintf("%v", defaultWriteQueryMaxRows) result["DefaultWriteQueryMaxSize"] = fmt.Sprintf("%v", defaultWriteQueryMaxSize) result["DefaultDestinationWriterCount"] = fmt.Sprintf("%v", defaultDestinationWriterCount) - result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets) + result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyTablets) result["DefaultMaxTPS"] = fmt.Sprintf("%v", defaultMaxTPS) result["DefaultMaxReplicationLag"] = fmt.Sprintf("%v", defaultMaxReplicationLag) return nil, verticalSplitCloneTemplate2, result, nil @@ -265,6 +272,12 @@ func interactiveVerticalSplitClone(ctx context.Context, wi *Instance, wr *wrangl if err != nil { return nil, nil, nil, vterrors.Wrap(err, "cannot parse maxReplicationLag") } + tabletTypeStr := r.FormValue("tabletType") + tabletType, ok := topodata.TabletType_value[tabletTypeStr] + if !ok { + return nil, nil, nil, fmt.Errorf("cannot parse tabletType: %v", tabletTypeStr) + } + // Figure out the shard shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) @@ -282,7 +295,10 @@ func interactiveVerticalSplitClone(ctx context.Context, wi *Instance, wr *wrangl } // start the clone job - wrk, err := newVerticalSplitCloneWorker(wr, wi.cell, keyspace, shard, online, offline, tableArray, int(chunkCount), int(minRowsPerChunk), int(sourceReaderCount), int(writeQueryMaxRows), int(writeQueryMaxSize), int(destinationWriterCount), int(minHealthyRdonlyTablets), maxTPS, maxReplicationLag) + wrk, err := newVerticalSplitCloneWorker(wr, wi.cell, keyspace, shard, online, offline, tableArray, int(chunkCount), + int(minRowsPerChunk), int(sourceReaderCount), int(writeQueryMaxRows), int(writeQueryMaxSize), + int(destinationWriterCount), int(minHealthyRdonlyTablets), topodata.TabletType(tabletType), maxTPS, + maxReplicationLag, false) if err != nil { return nil, nil, nil, vterrors.Wrap(err, "cannot create worker") } diff --git a/go/vt/worker/vertical_split_diff_cmd.go b/go/vt/worker/vertical_split_diff_cmd.go index 7eebf106f5..e4cfd4d34c 100644 --- a/go/vt/worker/vertical_split_diff_cmd.go +++ b/go/vt/worker/vertical_split_diff_cmd.go @@ -75,7 +75,7 @@ var verticalSplitDiffTemplate = mustParseTemplate("verticalSplitDiff", verticalS var verticalSplitDiffTemplate2 = mustParseTemplate("verticalSplitDiff2", verticalSplitDiffHTML2) func commandVerticalSplitDiff(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (Worker, error) { - minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets before taking out one") + minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyTablets, "minimum number of healthy RDONLY tablets before taking out one") parallelDiffsCount := subFlags.Int("parallel_diffs_count", defaultParallelDiffsCount, "number of tables to diff in parallel") destTabletTypeStr := subFlags.String("dest_tablet_type", defaultDestTabletType, "destination tablet type (RDONLY or REPLICA) that will be used to compare the shards") if err := subFlags.Parse(args); err != nil { @@ -183,7 +183,7 @@ func 
interactiveVerticalSplitDiff(ctx context.Context, wi *Instance, wr *wrangle result := make(map[string]interface{}) result["Keyspace"] = keyspace result["Shard"] = shard - result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets) + result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyTablets) result["DefaultParallelDiffsCount"] = fmt.Sprintf("%v", defaultParallelDiffsCount) return nil, verticalSplitDiffTemplate2, result, nil } diff --git a/go/vt/worker/vertical_split_diff_test.go b/go/vt/worker/vertical_split_diff_test.go index c2c05a97f2..826e965700 100644 --- a/go/vt/worker/vertical_split_diff_test.go +++ b/go/vt/worker/vertical_split_diff_test.go @@ -46,7 +46,7 @@ type verticalDiffTabletServer struct { *fakes.StreamHealthQueryService } -func (sq *verticalDiffTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { +func (sq *verticalDiffTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error) error { if !strings.Contains(sql, "moving1") { sq.t.Errorf("Vertical Split Diff operation should only operate on the 'moving1' table. query: %v", sql) } diff --git a/go/vt/workflow/parallel_runner.go b/go/vt/workflow/parallel_runner.go index 847bfb8b86..da8d0e3047 100644 --- a/go/vt/workflow/parallel_runner.go +++ b/go/vt/workflow/parallel_runner.go @@ -138,7 +138,7 @@ func (p *ParallelRunner) Run() error { } select { case <-p.ctx.Done(): - // Break this run and return early. Do not try to to execute any subsequent tasks. + // Break this run and return early. Do not try to execute any subsequent tasks. log.Infof("Workflow is cancelled, remaining tasks will be aborted") return nil default: diff --git a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index 1e5cd10200..a98a467fee 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -29,14 +29,13 @@ import ( "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/discovery" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/topotools/events" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) const ( @@ -475,13 +474,13 @@ func (wr *Wrangler) replicaMigrateServedType(ctx context.Context, keyspace strin // Check and update all source shard records. 
// Enable query service if needed event.DispatchUpdate(ev, "updating shards to migrate from") - if err = wr.updateShardRecords(ctx, fromShards, cells, servedType, true); err != nil { + if err = wr.updateShardRecords(ctx, fromShards, cells, servedType, true, false); err != nil { return err } // Do the same for destination shards event.DispatchUpdate(ev, "updating shards to migrate to") - if err = wr.updateShardRecords(ctx, toShards, cells, servedType, false); err != nil { + if err = wr.updateShardRecords(ctx, toShards, cells, servedType, false, false); err != nil { return err } @@ -521,7 +520,7 @@ func (wr *Wrangler) masterMigrateServedType(ctx context.Context, keyspace string // - wait for filtered replication to catch up // - mark source shards as frozen event.DispatchUpdate(ev, "disabling query service on all source masters") - if err := wr.updateShardRecords(ctx, sourceShards, nil, topodatapb.TabletType_MASTER, true); err != nil { + if err := wr.updateShardRecords(ctx, sourceShards, nil, topodatapb.TabletType_MASTER, true, false); err != nil { wr.cancelMasterMigrateServedTypes(ctx, sourceShards) return err } @@ -553,6 +552,13 @@ func (wr *Wrangler) masterMigrateServedType(ctx context.Context, keyspace string // Always setup reverse replication. We'll start it later if reverseReplication was specified. // This will allow someone to reverse the replication later if they change their mind. if err := wr.setupReverseReplication(ctx, sourceShards, destinationShards); err != nil { + // It's safe to unfreeze if reverse replication setup fails. + wr.cancelMasterMigrateServedTypes(ctx, sourceShards) + unfreezeErr := wr.updateFrozenFlag(ctx, sourceShards, false) + if unfreezeErr != nil { + wr.Logger().Errorf("Problem recovering for failed reverse replication: %v", unfreezeErr) + } + return err } @@ -606,7 +612,7 @@ func (wr *Wrangler) masterMigrateServedType(ctx context.Context, keyspace string } func (wr *Wrangler) cancelMasterMigrateServedTypes(ctx context.Context, sourceShards []*topo.ShardInfo) { - if err := wr.updateShardRecords(ctx, sourceShards, nil, topodatapb.TabletType_MASTER, false); err != nil { + if err := wr.updateShardRecords(ctx, sourceShards, nil, topodatapb.TabletType_MASTER, false, true); err != nil { wr.Logger().Errorf2(err, "failed to re-enable source masters") return } @@ -692,9 +698,12 @@ func (wr *Wrangler) startReverseReplication(ctx context.Context, sourceShards [] } // updateShardRecords updates the shard records based on 'from' or 'to' direction. 
-func (wr *Wrangler) updateShardRecords(ctx context.Context, shards []*topo.ShardInfo, cells []string, servedType topodatapb.TabletType, isFrom bool) (err error) { +func (wr *Wrangler) updateShardRecords(ctx context.Context, shards []*topo.ShardInfo, cells []string, servedType topodatapb.TabletType, isFrom bool, clearSourceShards bool) (err error) { for i, si := range shards { shards[i], err = wr.ts.UpdateShardFields(ctx, si.Keyspace(), si.ShardName(), func(si *topo.ShardInfo) error { + if clearSourceShards { + si.SourceShards = nil + } if err := si.UpdateServedTypesMap(servedType, cells, isFrom /* remove */); err != nil { return err } diff --git a/go/vt/wrangler/testlib/migrate_served_from_test.go b/go/vt/wrangler/testlib/migrate_served_from_test.go index 6194e2150e..303e820e5b 100644 --- a/go/vt/wrangler/testlib/migrate_served_from_test.go +++ b/go/vt/wrangler/testlib/migrate_served_from_test.go @@ -111,9 +111,11 @@ func TestMigrateServedFrom(t *testing.T) { if err := destMaster.Agent.VREngine.Open(context.Background()); err != nil { t.Fatal(err) } - // select pos from _vt.vreplication - dbClient.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ + // select pos, state, message from _vt.vreplication + dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/5-456-892"), + sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(""), }}}, nil) dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) diff --git a/go/vt/wrangler/testlib/migrate_served_types_test.go b/go/vt/wrangler/testlib/migrate_served_types_test.go index 28ae7789ff..e0e8b84942 100644 --- a/go/vt/wrangler/testlib/migrate_served_types_test.go +++ b/go/vt/wrangler/testlib/migrate_served_types_test.go @@ -147,9 +147,11 @@ func TestMigrateServedTypes(t *testing.T) { if err := dest1Master.Agent.VREngine.Open(context.Background()); err != nil { t.Fatal(err) } - // select pos from _vt.vreplication - dbClient1.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ + // select pos, state, message from _vt.vreplication + dbClient1.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/5-456-892"), + sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(""), }}}, nil) dbClient1.ExpectRequest("use _vt", &sqltypes.Result{}, nil) dbClient1.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) @@ -174,9 +176,11 @@ func TestMigrateServedTypes(t *testing.T) { if err := dest2Master.Agent.VREngine.Open(context.Background()); err != nil { t.Fatal(err) } - // select pos from _vt.vreplication - dbClient2.ExpectRequest("select pos from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ + // select pos, state, message from _vt.vreplication + dbClient2.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/5-456-892"), + sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(""), }}}, nil) dbClient2.ExpectRequest("use _vt", &sqltypes.Result{}, nil) dbClient2.ExpectRequest("delete from _vt.vreplication where id = 1", &sqltypes.Result{RowsAffected: 1}, nil) diff --git 
a/helm/release.sh b/helm/release.sh index 7e0f3a1642..7c8bb639b6 100755 --- a/helm/release.sh +++ b/helm/release.sh @@ -1,6 +1,6 @@ #!/bin/bash -version_tag=1.0.3 +version_tag=1.0.6 docker pull vitess/k8s:latest docker tag vitess/k8s:latest vitess/k8s:helm-$version_tag diff --git a/helm/vitess/CHANGELOG.md b/helm/vitess/CHANGELOG.md index 992437e922..77c035acdf 100644 --- a/helm/vitess/CHANGELOG.md +++ b/helm/vitess/CHANGELOG.md @@ -1,3 +1,27 @@ +## 1.0.6 - 2019-01-20 + +### Changes +* Update Orchestrator default to 3.0.14 +* Run `pmm-admin repair` on `pmm-client` startup to recover failures on `pmm-server` +* Backups now only run on `replica` (non-master), `rdonly`, or `spare` tablet types + +## 1.0.5 - 2019-01-12 + +### Changes +* Set FailMasterPromotionIfSQLThreadNotUpToDate = true in Orchestrator config, to prevent +lagging replicas from being promoted to master and causing errant GTID problems. + +**NOTE:** You need to manually restart your Orchestrator pods for this change to take effect + +## 1.0.4 - 2019-01-01 + +### Changes +* Use the [Orchestrator API](https://github.com/github/orchestrator/blob/master/docs/using-the-web-api.md) +to call `begin-downtime` before running `PlannedReparentShard` in the `preStopHook`, to make sure that Orchestrator +doesn't try to run an external failover while Vitess is reparenting. When it is complete, it calls `end-downtime`. +Also call `forget` on the instance after calling `vtctlclient DeleteTablet`. It will be rediscovered if/when +the tablet comes back up. This eliminates most possible race conditions that could cause split brain. + ## 1.0.3 - 2018-12-20 ### Changes diff --git a/helm/vitess/Chart.yaml b/helm/vitess/Chart.yaml index b65208e7bf..c070e4b07c 100644 --- a/helm/vitess/Chart.yaml +++ b/helm/vitess/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: vitess -version: 1.0.3 +version: 1.0.6 description: Single-Chart Vitess Cluster keywords: - vitess diff --git a/helm/vitess/templates/_orchestrator-conf.tpl b/helm/vitess/templates/_orchestrator-conf.tpl index 6ae31c21ff..bdcc7d3cd6 100644 --- a/helm/vitess/templates/_orchestrator-conf.tpl +++ b/helm/vitess/templates/_orchestrator-conf.tpl @@ -40,6 +40,7 @@ data: "DiscoverByShowSlaveHosts": true, "EnableSyslog": false, "ExpiryHostnameResolvesMinutes": 60, + "FailMasterPromotionIfSQLThreadNotUpToDate": true, "FailureDetectionPeriodBlockMinutes": 60, "GraphiteAddr": "", "GraphiteConvertHostnameDotsToUnderscores": true, diff --git a/helm/vitess/templates/_orchestrator.tpl b/helm/vitess/templates/_orchestrator.tpl index d0babc41a5..45b3f65f1a 100644 --- a/helm/vitess/templates/_orchestrator.tpl +++ b/helm/vitess/templates/_orchestrator.tpl @@ -123,7 +123,7 @@ spec: value: "15999" - name: recovery-log - image: vitess/logtail:helm-1.0.3 + image: vitess/logtail:helm-1.0.6 imagePullPolicy: IfNotPresent env: - name: TAIL_FILEPATH @@ -133,7 +133,7 @@ spec: mountPath: /tmp - name: audit-log - image: vitess/logtail:helm-1.0.3 + image: vitess/logtail:helm-1.0.6 imagePullPolicy: IfNotPresent env: - name: TAIL_FILEPATH diff --git a/helm/vitess/templates/_pmm.tpl b/helm/vitess/templates/_pmm.tpl index 9ff20aa94a..b6b4bdf22e 100644 --- a/helm/vitess/templates/_pmm.tpl +++ b/helm/vitess/templates/_pmm.tpl @@ -197,6 +197,7 @@ spec: # and we want to stop/remove running services, in case pod ips have changed if pmm-admin info; then pmm-admin stop --all + pmm-admin repair pmm-admin rm --all fi @@ -218,7 +219,7 @@ spec: trap : TERM INT; sleep infinity & wait - name: pmm-client-metrics-log - image: 
vitess/logtail:helm-1.0.3 + image: vitess/logtail:helm-1.0.6 imagePullPolicy: IfNotPresent env: - name: TAIL_FILEPATH diff --git a/helm/vitess/templates/_vttablet.tpl b/helm/vitess/templates/_vttablet.tpl index 0c3e4643fc..a480acaf33 100644 --- a/helm/vitess/templates/_vttablet.tpl +++ b/helm/vitess/templates/_vttablet.tpl @@ -345,13 +345,27 @@ spec: RETRY_COUNT=0 MAX_RETRY_COUNT=100000 + hostname=$(hostname -s) # retry reparenting until [ $DONE_REPARENTING ]; do +{{ if $orc.enabled }} + # tell orchestrator to not attempt a recovery for 10 seconds while we are in the middle of reparenting + wget -q -S -O - "http://orchestrator.{{ $namespace }}/api/begin-downtime/$hostname.vttablet/3306/preStopHook/VitessPlannedReparent/10s" +{{ end }} + # reparent before shutting down /vt/bin/vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC PlannedReparentShard -keyspace_shard={{ $keyspace.name }}/{{ $shard.name }} -avoid_master=$current_alias +{{ if $orc.enabled }} + # tell orchestrator to refresh its view of this tablet + wget -q -S -O - "http://orchestrator.{{ $namespace }}/api/refresh/$hostname.vttablet/3306" + + # let orchestrator attempt recoveries now + wget -q -S -O - "http://orchestrator.{{ $namespace }}/api/end-downtime/$hostname.vttablet/3306" +{{ end }} + # if PlannedReparentShard succeeded, then don't retry if [ $? -eq 0 ]; then DONE_REPARENTING=true @@ -372,6 +386,12 @@ spec: # edge cases where there are two masters /vt/bin/vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC DeleteTablet $current_alias + +{{ if $orc.enabled }} + # tell orchestrator to forget the tablet, to prevent confusion / race conditions while the tablet restarts + wget -q -S -O - "http://orchestrator.{{ $namespace }}/api/forget/$hostname.vttablet/3306" +{{ end }} + command: ["bash"] args: - "-c" @@ -513,7 +533,7 @@ spec: {{ define "cont-logrotate" }} - name: logrotate - image: vitess/logrotate:helm-1.0.3 + image: vitess/logrotate:helm-1.0.6 imagePullPolicy: IfNotPresent volumeMounts: - name: vtdataroot @@ -527,7 +547,7 @@ spec: {{ define "cont-mysql-errorlog" }} - name: error-log - image: vitess/logtail:helm-1.0.3 + image: vitess/logtail:helm-1.0.6 imagePullPolicy: IfNotPresent env: @@ -545,7 +565,7 @@ spec: {{ define "cont-mysql-slowlog" }} - name: slow-log - image: vitess/logtail:helm-1.0.3 + image: vitess/logtail:helm-1.0.6 imagePullPolicy: IfNotPresent env: @@ -563,7 +583,7 @@ spec: {{ define "cont-mysql-generallog" }} - name: general-log - image: vitess/logtail:helm-1.0.3 + image: vitess/logtail:helm-1.0.6 imagePullPolicy: IfNotPresent env: diff --git a/helm/vitess/values.yaml b/helm/vitess/values.yaml index 03d200e2a7..c4e5563a46 100644 --- a/helm/vitess/values.yaml +++ b/helm/vitess/values.yaml @@ -177,7 +177,7 @@ etcd: # Default values for vtctld resources defined in 'topology' vtctld: serviceType: ClusterIP - vitessTag: helm-1.0.3 + vitessTag: helm-1.0.6 resources: # requests: # cpu: 100m @@ -188,7 +188,7 @@ vtctld: # Default values for vtgate resources defined in 'topology' vtgate: serviceType: ClusterIP - vitessTag: helm-1.0.3 + vitessTag: helm-1.0.6 resources: # requests: # cpu: 500m @@ -207,13 +207,13 @@ vtgate: # Default values for vtctlclient resources defined in 'topology' vtctlclient: - vitessTag: helm-1.0.3 + vitessTag: helm-1.0.6 extraFlags: {} secrets: [] # secrets are mounted under /vt/usersecrets/{secretname} # Default values for vtworker resources defined in 'jobs' vtworker: - vitessTag: helm-1.0.3 + vitessTag: helm-1.0.6 extraFlags: {} resources: # requests: @@ -224,7 +224,7 @@ 
vtworker: # Default values for vttablet resources defined in 'topology' vttablet: - vitessTag: helm-1.0.3 + vitessTag: helm-1.0.6 # valid values are # - mysql56 (for MySQL 8.0) @@ -376,7 +376,7 @@ pmm: # Default values for orchestrator resources orchestrator: enabled: false - image: vitess/orchestrator:3.0.13 + image: vitess/orchestrator:3.0.14 replicas: 3 resources: requests: diff --git a/java/grpc-client/src/main/java/io/vitess/client/grpc/GrpcClient.java b/java/grpc-client/src/main/java/io/vitess/client/grpc/GrpcClient.java index a25784df22..11b8266e02 100644 --- a/java/grpc-client/src/main/java/io/vitess/client/grpc/GrpcClient.java +++ b/java/grpc-client/src/main/java/io/vitess/client/grpc/GrpcClient.java @@ -84,24 +84,36 @@ import java.util.concurrent.TimeUnit; * GrpcClient is a gRPC-based implementation of Vitess RpcClient. */ public class GrpcClient implements RpcClient { + private static final Duration DEFAULT_TIMEOUT = Duration.standardSeconds(30); private final ManagedChannel channel; private final String channelId; private final VitessStub asyncStub; private final VitessFutureStub futureStub; + private final Duration timeout; public GrpcClient(ManagedChannel channel) { this.channel = channel; channelId = toChannelId(channel); asyncStub = VitessGrpc.newStub(channel); futureStub = VitessGrpc.newFutureStub(channel); + timeout = DEFAULT_TIMEOUT; } - public GrpcClient(ManagedChannel channel, CallCredentials credentials) { + public GrpcClient(ManagedChannel channel, Context context) { + this.channel = channel; + channelId = toChannelId(channel); + asyncStub = VitessGrpc.newStub(channel); + futureStub = VitessGrpc.newFutureStub(channel); + timeout = getContextTimeoutOrDefault(context); + } + + public GrpcClient(ManagedChannel channel, CallCredentials credentials, Context context) { this.channel = channel; channelId = toChannelId(channel); asyncStub = VitessGrpc.newStub(channel).withCallCredentials(credentials); futureStub = VitessGrpc.newFutureStub(channel).withCallCredentials(credentials); + timeout = getContextTimeoutOrDefault(context); } private String toChannelId(ManagedChannel channel) { @@ -111,7 +123,16 @@ public class GrpcClient implements RpcClient { @Override public void close() throws IOException { - channel.shutdown(); + try { + if (!channel.shutdown().awaitTermination(timeout.getStandardSeconds(), TimeUnit.SECONDS)) { + // The channel failed to shut down cleanly within the specified window + // Now we try hard shutdown + channel.shutdownNow(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } @Override @@ -330,4 +351,12 @@ public class GrpcClient implements RpcClient { channelId ); } + + private static Duration getContextTimeoutOrDefault(Context context) { + if (context.getTimeout() == null || context.getTimeout().getStandardSeconds() < 0) { + return DEFAULT_TIMEOUT; + } + + return context.getTimeout(); + } } diff --git a/java/grpc-client/src/main/java/io/vitess/client/grpc/GrpcClientFactory.java b/java/grpc-client/src/main/java/io/vitess/client/grpc/GrpcClientFactory.java index 25427c1b97..2a4b9c62f0 100644 --- a/java/grpc-client/src/main/java/io/vitess/client/grpc/GrpcClientFactory.java +++ b/java/grpc-client/src/main/java/io/vitess/client/grpc/GrpcClientFactory.java @@ -98,7 +98,7 @@ public class GrpcClientFactory implements RpcClientFactory { channel.nameResolverFactory(nameResolverFactory); } return callCredentials != null - ? new GrpcClient(channel.build(), callCredentials) : new GrpcClient(channel.build()); + ? 
new GrpcClient(channel.build(), callCredentials, ctx) : new GrpcClient(channel.build(), ctx); } /** @@ -181,7 +181,7 @@ public class GrpcClientFactory implements RpcClientFactory { return new GrpcClient( channelBuilder(target).negotiationType(NegotiationType.TLS).sslContext(sslContext) - .intercept(new RetryingInterceptor(config)).build()); + .intercept(new RetryingInterceptor(config)).build(), ctx); } /** diff --git a/java/pom.xml b/java/pom.xml index e127c984e0..d81deb143a 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -23,7 +23,7 @@ client example - grpc-client + grpc-client hadoop jdbc @@ -68,6 +68,11 @@ UTF-8 1.16.0 + + 4.1.30.Final + 2.0.17.Final + 3.6.1 3.6.1 3.0.0 @@ -131,14 +136,14 @@ io.netty netty-handler - 4.1.27.Final + ${netty.handler.version} io.netty netty-tcnative-boringssl-static - 2.0.17.Final + ${tcnative.boring.ssl.version} diff --git a/misc/git/hooks/unused b/misc/git/hooks/unused index a02b40c34b..935740d295 100755 --- a/misc/git/hooks/unused +++ b/misc/git/hooks/unused @@ -8,7 +8,7 @@ if [ -z "$GOPATH" ]; then fi if [ -z "$(which unused)" ]; then - echo "unused not found, please run: go get honnef.co/go/unused/cmd/unused" + echo "unused not found, please run: go get honnef.co/go/tools/cmd/unused" exit 1 fi diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index 252cdb05ab..a1471e44f5 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -23,6 +23,7 @@ option go_package = "vitess.io/vitess/go/vt/proto/binlogdata"; package binlogdata; +import "vtrpc.proto"; import "query.proto"; import "topodata.proto"; @@ -114,6 +115,31 @@ message StreamTablesResponse { BinlogTransaction binlog_transaction = 1; } +// Rule represents one rule. +message Rule { + // match can be a table name or a regular expression + // delineated by '/' and '/'. + string match = 1; + // filter can be an empty string or keyrange if the match + // is a regular expression. Otherwise, it must be a select + // query. + string filter = 2; +} + +// Filter represents a list of ordered rules. First match +// wins. +message Filter { + repeated Rule rules = 1; +} + +// OnDDLAction lists the possible actions for DDLs. +enum OnDDLAction { + IGNORE = 0; + STOP = 1; + EXEC = 2; + EXEC_IGNORE = 3; +} + // BinlogSource specifies the source and filter parameters for // Filtered Replication. It currently supports a keyrange // or a list of tables. @@ -132,4 +158,73 @@ message BinlogSource { // tables is set if the request is for a list of tables repeated string tables = 5; + + // filter is set if we're using the generalized representation + // for the filter. + Filter filter = 6; + + // on_ddl specifies the action to be taken when a DDL is encountered. + OnDDLAction on_ddl = 7; +} + +// VEventType enumerates the event types. +// This list is comprehensive. Many of these types +// will not be encountered in RBR mode. 
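For context, a minimal Go sketch of how the new Rule/Filter messages compose, assuming the bindings protoc-gen-go would generate for the definitions above (binlogdatapb.Rule, binlogdatapb.Filter, and the new BinlogSource fields); the keyspace, shard, and table names are hypothetical:

// Illustrative sketch only: builds a BinlogSource using the Rule semantics
// documented above (exact table match with a select query, or a regular
// expression delimited by '/' with an empty or keyrange filter).
package main

import (
	"fmt"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)

func main() {
	bls := &binlogdatapb.BinlogSource{
		Keyspace: "commerce", // hypothetical keyspace
		Shard:    "0",        // hypothetical shard
		Filter: &binlogdatapb.Filter{
			// First match wins, so the specific table rule precedes the catch-all.
			Rules: []*binlogdatapb.Rule{
				// Exact table match: the filter must be a select query.
				{Match: "customer", Filter: "select customer_id, email from customer"},
				// Regex match: the filter may be empty or a keyrange such as "-80".
				{Match: "/.*/", Filter: "-80"},
			},
		},
		// on_ddl picks the action taken when a DDL is seen on the source.
		OnDdl: binlogdatapb.OnDDLAction_EXEC_IGNORE,
	}
	fmt.Println(bls)
}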
+enum VEventType { + UNKNOWN = 0; + GTID = 1; + BEGIN = 2; + COMMIT = 3; + ROLLBACK = 4; + DDL = 5; + INSERT = 6; + REPLACE = 7; + UPDATE = 8; + DELETE = 9; + SET = 10; + OTHER = 11; + ROW = 12; + FIELD = 13; +} + +// RowChange represents one row change +message RowChange { + query.Row before = 1; + query.Row after = 2; +} + +// RowEvent represent row events for one table +message RowEvent { + string table_name = 1; + repeated RowChange row_changes = 2; +} + +message FieldEvent { + string table_name = 1; + repeated query.Field fields = 2; +} + +// VEvent represents a vstream event +message VEvent { + VEventType type = 1; + int64 timestamp = 2; + string gtid = 3; + string ddl = 4; + RowEvent row_event = 5; + FieldEvent field_event = 6; +} + +// VStreamRequest is the payload for VStream +message VStreamRequest { + vtrpc.CallerID effective_caller_id = 1; + query.VTGateCallerID immediate_caller_id = 2; + query.Target target = 3; + + string position = 4; + Filter filter = 5; +} + +// VStreamResponse is the response from VStream +message VStreamResponse { + repeated VEvent events = 1; } diff --git a/proto/query.proto b/proto/query.proto index e55c95674b..5352532537 100644 --- a/proto/query.proto +++ b/proto/query.proto @@ -288,6 +288,10 @@ message ExecuteOptions { READ_COMMITTED = 2; READ_UNCOMMITTED = 3; SERIALIZABLE = 4; + + // This is not an "official" transaction level but it will do a + // START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY + CONSISTENT_SNAPSHOT_READ_ONLY = 5; } TransactionIsolation transaction_isolation = 9; @@ -453,6 +457,7 @@ message StreamExecuteRequest { Target target = 3; BoundQuery query = 4; ExecuteOptions options = 5; + int64 transaction_id = 6; } // StreamExecuteResponse is the returned value from StreamExecute diff --git a/proto/queryservice.proto b/proto/queryservice.proto index a6a7074c9d..897cbf3f03 100644 --- a/proto/queryservice.proto +++ b/proto/queryservice.proto @@ -22,6 +22,7 @@ package queryservice; option go_package = "vitess.io/vitess/go/vt/proto/queryservice"; import "query.proto"; +import "binlogdata.proto"; // Query defines the tablet query service, implemented by vttablet. service Query { @@ -94,4 +95,7 @@ service Query { // UpdateStream asks the server to return a stream of the updates that have been applied to its database. rpc UpdateStream(query.UpdateStreamRequest) returns (stream query.UpdateStreamResponse) {}; + + // VStream streams vreplication events. + rpc VStream(binlogdata.VStreamRequest) returns (stream binlogdata.VStreamResponse) {}; } diff --git a/proto/tabletmanagerdata.proto b/proto/tabletmanagerdata.proto index 0898209b5a..7606d56840 100644 --- a/proto/tabletmanagerdata.proto +++ b/proto/tabletmanagerdata.proto @@ -213,6 +213,18 @@ message ApplySchemaResponse { SchemaDefinition after_schema = 2; } +message LockTablesRequest { +} + +message LockTablesResponse { +} + +message UnlockTablesRequest { +} + +message UnlockTablesResponse { +} + message ExecuteFetchAsDbaRequest { bytes query = 1; string db_name = 2; @@ -280,6 +292,14 @@ message StartSlaveRequest { message StartSlaveResponse { } +message StartSlaveUntilAfterRequest { + string position = 1; + int64 wait_timeout = 2; +} + +message StartSlaveUntilAfterResponse { +} + message TabletExternallyReparentedRequest { // external_id is an string value that may be provided by an external // agent for tracking purposes. 
The tablet will emit this string in diff --git a/proto/tabletmanagerservice.proto b/proto/tabletmanagerservice.proto index e43de2938e..ebc83056d8 100644 --- a/proto/tabletmanagerservice.proto +++ b/proto/tabletmanagerservice.proto @@ -68,6 +68,10 @@ service TabletManager { rpc ApplySchema(tabletmanagerdata.ApplySchemaRequest) returns (tabletmanagerdata.ApplySchemaResponse) {}; + rpc LockTables(tabletmanagerdata.LockTablesRequest) returns (tabletmanagerdata.LockTablesResponse) {}; + + rpc UnlockTables(tabletmanagerdata.UnlockTablesRequest) returns (tabletmanagerdata.UnlockTablesResponse) {}; + rpc ExecuteFetchAsDba(tabletmanagerdata.ExecuteFetchAsDbaRequest) returns (tabletmanagerdata.ExecuteFetchAsDbaResponse) {}; rpc ExecuteFetchAsAllPrivs(tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) returns (tabletmanagerdata.ExecuteFetchAsAllPrivsResponse) {}; @@ -94,6 +98,10 @@ service TabletManager { // StartSlave starts the mysql replication rpc StartSlave(tabletmanagerdata.StartSlaveRequest) returns (tabletmanagerdata.StartSlaveResponse) {}; + // StartSlaveUntilAfter starts the mysql replication until and including + // the provided position + rpc StartSlaveUntilAfter(tabletmanagerdata.StartSlaveUntilAfterRequest) returns (tabletmanagerdata.StartSlaveUntilAfterResponse) {}; + // TabletExternallyReparented tells a tablet that its underlying MySQL is // currently the master. It is only used in environments (tabletmanagerdata.such as Vitess+MoB) // in which MySQL is reparented by some agent external to Vitess, and then diff --git a/proto/vtgate.proto b/proto/vtgate.proto index 823f4f4d07..58ae2b6106 100644 --- a/proto/vtgate.proto +++ b/proto/vtgate.proto @@ -702,7 +702,7 @@ message ResolveTransactionResponse { // SplitQuery takes a "SELECT" query and generates a list of queries called // "query-parts". Each query-part consists of the original query with an // added WHERE clause that restricts the query-part to operate only on -// rows whose values in the the columns listed in the "split_column" field +// rows whose values in the columns listed in the "split_column" field of the request (see below) are in a particular range. 
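To make the query-part description above concrete, a hypothetical three-way split of SELECT * FROM users on split_column id could produce strings shaped like the ones below; the boundary values are invented for illustration and are normally chosen by the SplitQuery algorithm:

// Illustrative only: the shape of the "query-parts" described above.
package main

import "fmt"

func main() {
	// Each part adds a WHERE clause on the split column; together the
	// ranges cover the table's rows without overlap.
	queryParts := []string{
		"SELECT * FROM users WHERE id < 1000",
		"SELECT * FROM users WHERE id >= 1000 AND id < 2000",
		"SELECT * FROM users WHERE id >= 2000",
	}
	for _, q := range queryParts {
		fmt.Println(q)
	}
}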
// // It is guaranteed that the set of rows obtained from diff --git a/py/vtproto/binlogdata_pb2.py b/py/vtproto/binlogdata_pb2.py index 97cae7e7c3..e586658b98 100644 --- a/py/vtproto/binlogdata_pb2.py +++ b/py/vtproto/binlogdata_pb2.py @@ -3,16 +3,17 @@ import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() +import vtrpc_pb2 as vtrpc__pb2 import query_pb2 as query__pb2 import topodata_pb2 as topodata__pb2 @@ -21,10 +22,131 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='binlogdata.proto', package='binlogdata', syntax='proto3', - serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"\x91\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\tB)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') + serialized_options=_b('Z\'vitess.io/vitess/go/vt/proto/binlogdata'), + serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 
\x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\"\xb2\x01\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xaa\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\rB)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') , - dependencies=[query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,]) + 
dependencies=[vtrpc__pb2.DESCRIPTOR,query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,]) +_ONDDLACTION = _descriptor.EnumDescriptor( + name='OnDDLAction', + full_name='binlogdata.OnDDLAction', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='IGNORE', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='STOP', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EXEC', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EXEC_IGNORE', index=3, number=3, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=1907, + serialized_end=1969, +) +_sym_db.RegisterEnumDescriptor(_ONDDLACTION) + +OnDDLAction = enum_type_wrapper.EnumTypeWrapper(_ONDDLACTION) +_VEVENTTYPE = _descriptor.EnumDescriptor( + name='VEventType', + full_name='binlogdata.VEventType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GTID', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BEGIN', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='COMMIT', index=3, number=3, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ROLLBACK', index=4, number=4, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DDL', index=5, number=5, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INSERT', index=6, number=6, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REPLACE', index=7, number=7, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UPDATE', index=8, number=8, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DELETE', index=9, number=9, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SET', index=10, number=10, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OTHER', index=11, number=11, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ROW', index=12, number=12, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FIELD', index=13, number=13, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=1972, + serialized_end=2142, +) +_sym_db.RegisterEnumDescriptor(_VEVENTTYPE) + +VEventType = enum_type_wrapper.EnumTypeWrapper(_VEVENTTYPE) +IGNORE = 0 +STOP = 1 +EXEC = 2 +EXEC_IGNORE = 3 +UNKNOWN = 0 +GTID = 1 +BEGIN = 2 +COMMIT = 3 +ROLLBACK = 4 +DDL = 5 +INSERT = 6 +REPLACE = 7 +UPDATE = 8 +DELETE = 9 +SET = 10 +OTHER = 11 +ROW = 12 +FIELD = 13 _BINLOGTRANSACTION_STATEMENT_CATEGORY = _descriptor.EnumDescriptor( @@ -35,49 +157,49 @@ _BINLOGTRANSACTION_STATEMENT_CATEGORY = _descriptor.EnumDescriptor( values=[ _descriptor.EnumValueDescriptor( name='BL_UNRECOGNIZED', index=0, number=0, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_BEGIN', index=1, number=1, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_COMMIT', index=2, number=2, - options=None, + 
serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_ROLLBACK', index=3, number=3, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_DML_DEPRECATED', index=4, number=4, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_DDL', index=5, number=5, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_SET', index=6, number=6, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_INSERT', index=7, number=7, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_UPDATE', index=8, number=8, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BL_DELETE', index=9, number=9, - options=None, + serialized_options=None, type=None), ], containing_type=None, - options=None, - serialized_start=375, - serialized_end=544, + serialized_options=None, + serialized_start=388, + serialized_end=557, ) _sym_db.RegisterEnumDescriptor(_BINLOGTRANSACTION_STATEMENT_CATEGORY) @@ -95,35 +217,35 @@ _CHARSET = _descriptor.Descriptor( has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='conn', full_name='binlogdata.Charset.conn', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='server', full_name='binlogdata.Charset.server', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=61, - serialized_end=116, + serialized_start=74, + serialized_end=129, ) @@ -140,21 +262,21 @@ _BINLOGTRANSACTION_STATEMENT = _descriptor.Descriptor( has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='charset', full_name='binlogdata.BinlogTransaction.Statement.charset', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='sql', full_name='binlogdata.BinlogTransaction.Statement.sql', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -162,14 +284,14 @@ _BINLOGTRANSACTION_STATEMENT = _descriptor.Descriptor( enum_types=[ 
_BINLOGTRANSACTION_STATEMENT_CATEGORY, ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=242, - serialized_end=544, + serialized_start=255, + serialized_end=557, ) _BINLOGTRANSACTION = _descriptor.Descriptor( @@ -185,28 +307,28 @@ _BINLOGTRANSACTION = _descriptor.Descriptor( has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='event_token', full_name='binlogdata.BinlogTransaction.event_token', index=1, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_BINLOGTRANSACTION_STATEMENT, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=119, - serialized_end=556, + serialized_start=132, + serialized_end=569, ) @@ -223,35 +345,35 @@ _STREAMKEYRANGEREQUEST = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key_range', full_name='binlogdata.StreamKeyRangeRequest.key_range', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='charset', full_name='binlogdata.StreamKeyRangeRequest.charset', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=558, - serialized_end=676, + serialized_start=571, + serialized_end=689, ) @@ -268,21 +390,21 @@ _STREAMKEYRANGERESPONSE = _descriptor.Descriptor( has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=678, - serialized_end=761, + serialized_start=691, + serialized_end=774, ) @@ -299,35 +421,35 @@ _STREAMTABLESREQUEST = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tables', full_name='binlogdata.StreamTablesRequest.tables', index=1, number=2, 
type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='charset', full_name='binlogdata.StreamTablesRequest.charset', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=763, - serialized_end=856, + serialized_start=776, + serialized_end=869, ) @@ -344,21 +466,90 @@ _STREAMTABLESRESPONSE = _descriptor.Descriptor( has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=858, - serialized_end=939, + serialized_start=871, + serialized_end=952, +) + + +_RULE = _descriptor.Descriptor( + name='Rule', + full_name='binlogdata.Rule', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='match', full_name='binlogdata.Rule.match', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='filter', full_name='binlogdata.Rule.filter', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=954, + serialized_end=991, +) + + +_FILTER = _descriptor.Descriptor( + name='Filter', + full_name='binlogdata.Filter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='binlogdata.Filter.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=993, + serialized_end=1034, ) @@ -375,49 +566,333 @@ _BINLOGSOURCE = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( 
name='shard', full_name='binlogdata.BinlogSource.shard', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tablet_type', full_name='binlogdata.BinlogSource.tablet_type', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key_range', full_name='binlogdata.BinlogSource.key_range', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tables', full_name='binlogdata.BinlogSource.tables', index=4, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='filter', full_name='binlogdata.BinlogSource.filter', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='on_ddl', full_name='binlogdata.BinlogSource.on_ddl', index=6, + number=7, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=942, - serialized_end=1087, + serialized_start=1037, + serialized_end=1259, +) + + +_ROWCHANGE = _descriptor.Descriptor( + name='RowChange', + full_name='binlogdata.RowChange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='before', full_name='binlogdata.RowChange.before', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='after', full_name='binlogdata.RowChange.after', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1261, + serialized_end=1327, +) + + +_ROWEVENT = _descriptor.Descriptor( + name='RowEvent', + full_name='binlogdata.RowEvent', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='binlogdata.RowEvent.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='row_changes', full_name='binlogdata.RowEvent.row_changes', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1329, + serialized_end=1403, +) + + +_FIELDEVENT = _descriptor.Descriptor( + name='FieldEvent', + full_name='binlogdata.FieldEvent', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='binlogdata.FieldEvent.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='fields', full_name='binlogdata.FieldEvent.fields', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1405, + serialized_end=1467, +) + + +_VEVENT = _descriptor.Descriptor( + name='VEvent', + full_name='binlogdata.VEvent', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='binlogdata.VEvent.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='timestamp', full_name='binlogdata.VEvent.timestamp', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='gtid', full_name='binlogdata.VEvent.gtid', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='ddl', full_name='binlogdata.VEvent.ddl', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='row_event', full_name='binlogdata.VEvent.row_event', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='field_event', full_name='binlogdata.VEvent.field_event', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1470, + serialized_end=1648, +) + + +_VSTREAMREQUEST = _descriptor.Descriptor( + name='VStreamRequest', + full_name='binlogdata.VStreamRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='effective_caller_id', full_name='binlogdata.VStreamRequest.effective_caller_id', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='immediate_caller_id', full_name='binlogdata.VStreamRequest.immediate_caller_id', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='target', full_name='binlogdata.VStreamRequest.target', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='position', full_name='binlogdata.VStreamRequest.position', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='filter', full_name='binlogdata.VStreamRequest.filter', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1651, + serialized_end=1850, +) + + +_VSTREAMRESPONSE = _descriptor.Descriptor( + name='VStreamResponse', + full_name='binlogdata.VStreamResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='events', full_name='binlogdata.VStreamResponse.events', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1852, + serialized_end=1905, ) _BINLOGTRANSACTION_STATEMENT.fields_by_name['category'].enum_type = _BINLOGTRANSACTION_STATEMENT_CATEGORY @@ -431,15 +906,40 @@ _STREAMKEYRANGEREQUEST.fields_by_name['charset'].message_type = _CHARSET _STREAMKEYRANGERESPONSE.fields_by_name['binlog_transaction'].message_type = _BINLOGTRANSACTION _STREAMTABLESREQUEST.fields_by_name['charset'].message_type = _CHARSET _STREAMTABLESRESPONSE.fields_by_name['binlog_transaction'].message_type = _BINLOGTRANSACTION +_FILTER.fields_by_name['rules'].message_type = _RULE _BINLOGSOURCE.fields_by_name['tablet_type'].enum_type = topodata__pb2._TABLETTYPE _BINLOGSOURCE.fields_by_name['key_range'].message_type = topodata__pb2._KEYRANGE +_BINLOGSOURCE.fields_by_name['filter'].message_type = _FILTER +_BINLOGSOURCE.fields_by_name['on_ddl'].enum_type = _ONDDLACTION +_ROWCHANGE.fields_by_name['before'].message_type = query__pb2._ROW +_ROWCHANGE.fields_by_name['after'].message_type = query__pb2._ROW +_ROWEVENT.fields_by_name['row_changes'].message_type = _ROWCHANGE +_FIELDEVENT.fields_by_name['fields'].message_type = query__pb2._FIELD +_VEVENT.fields_by_name['type'].enum_type = _VEVENTTYPE +_VEVENT.fields_by_name['row_event'].message_type = _ROWEVENT +_VEVENT.fields_by_name['field_event'].message_type = _FIELDEVENT +_VSTREAMREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID +_VSTREAMREQUEST.fields_by_name['immediate_caller_id'].message_type = query__pb2._VTGATECALLERID +_VSTREAMREQUEST.fields_by_name['target'].message_type = query__pb2._TARGET +_VSTREAMREQUEST.fields_by_name['filter'].message_type = _FILTER +_VSTREAMRESPONSE.fields_by_name['events'].message_type = _VEVENT DESCRIPTOR.message_types_by_name['Charset'] = _CHARSET DESCRIPTOR.message_types_by_name['BinlogTransaction'] = _BINLOGTRANSACTION DESCRIPTOR.message_types_by_name['StreamKeyRangeRequest'] = _STREAMKEYRANGEREQUEST DESCRIPTOR.message_types_by_name['StreamKeyRangeResponse'] = _STREAMKEYRANGERESPONSE DESCRIPTOR.message_types_by_name['StreamTablesRequest'] = _STREAMTABLESREQUEST DESCRIPTOR.message_types_by_name['StreamTablesResponse'] = _STREAMTABLESRESPONSE +DESCRIPTOR.message_types_by_name['Rule'] = _RULE +DESCRIPTOR.message_types_by_name['Filter'] = _FILTER DESCRIPTOR.message_types_by_name['BinlogSource'] = _BINLOGSOURCE +DESCRIPTOR.message_types_by_name['RowChange'] = _ROWCHANGE +DESCRIPTOR.message_types_by_name['RowEvent'] = _ROWEVENT +DESCRIPTOR.message_types_by_name['FieldEvent'] = _FIELDEVENT +DESCRIPTOR.message_types_by_name['VEvent'] = _VEVENT +DESCRIPTOR.message_types_by_name['VStreamRequest'] = _VSTREAMREQUEST +DESCRIPTOR.message_types_by_name['VStreamResponse'] = _VSTREAMRESPONSE +DESCRIPTOR.enum_types_by_name['OnDDLAction'] = _ONDDLACTION +DESCRIPTOR.enum_types_by_name['VEventType'] = _VEVENTTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Charset = _reflection.GeneratedProtocolMessageType('Charset', (_message.Message,), dict( @@ -492,6 +992,20 @@ StreamTablesResponse = _reflection.GeneratedProtocolMessageType('StreamTablesRes )) _sym_db.RegisterMessage(StreamTablesResponse) +Rule = _reflection.GeneratedProtocolMessageType('Rule', (_message.Message,), dict( + DESCRIPTOR = _RULE, + __module__ = 'binlogdata_pb2' + # 
@@protoc_insertion_point(class_scope:binlogdata.Rule) + )) +_sym_db.RegisterMessage(Rule) + +Filter = _reflection.GeneratedProtocolMessageType('Filter', (_message.Message,), dict( + DESCRIPTOR = _FILTER, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.Filter) + )) +_sym_db.RegisterMessage(Filter) + BinlogSource = _reflection.GeneratedProtocolMessageType('BinlogSource', (_message.Message,), dict( DESCRIPTOR = _BINLOGSOURCE, __module__ = 'binlogdata_pb2' @@ -499,7 +1013,48 @@ BinlogSource = _reflection.GeneratedProtocolMessageType('BinlogSource', (_messag )) _sym_db.RegisterMessage(BinlogSource) +RowChange = _reflection.GeneratedProtocolMessageType('RowChange', (_message.Message,), dict( + DESCRIPTOR = _ROWCHANGE, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.RowChange) + )) +_sym_db.RegisterMessage(RowChange) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\'vitess.io/vitess/go/vt/proto/binlogdata')) +RowEvent = _reflection.GeneratedProtocolMessageType('RowEvent', (_message.Message,), dict( + DESCRIPTOR = _ROWEVENT, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.RowEvent) + )) +_sym_db.RegisterMessage(RowEvent) + +FieldEvent = _reflection.GeneratedProtocolMessageType('FieldEvent', (_message.Message,), dict( + DESCRIPTOR = _FIELDEVENT, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.FieldEvent) + )) +_sym_db.RegisterMessage(FieldEvent) + +VEvent = _reflection.GeneratedProtocolMessageType('VEvent', (_message.Message,), dict( + DESCRIPTOR = _VEVENT, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.VEvent) + )) +_sym_db.RegisterMessage(VEvent) + +VStreamRequest = _reflection.GeneratedProtocolMessageType('VStreamRequest', (_message.Message,), dict( + DESCRIPTOR = _VSTREAMREQUEST, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.VStreamRequest) + )) +_sym_db.RegisterMessage(VStreamRequest) + +VStreamResponse = _reflection.GeneratedProtocolMessageType('VStreamResponse', (_message.Message,), dict( + DESCRIPTOR = _VSTREAMRESPONSE, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.VStreamResponse) + )) +_sym_db.RegisterMessage(VStreamResponse) + + +DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope) diff --git a/py/vtproto/query_pb2.py b/py/vtproto/query_pb2.py index fa74a6018b..77b47ac982 100644 --- a/py/vtproto/query_pb2.py +++ b/py/vtproto/query_pb2.py @@ -22,7 +22,7 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='query.proto', package='query', syntax='proto3', - serialized_pb=_b('\n\x0bquery.proto\x12\x05query\x1a\x0etopodata.proto\x1a\x0bvtrpc.proto\"b\n\x06Target\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12\x0c\n\x04\x63\x65ll\x18\x04 \x01(\t\"2\n\x0eVTGateCallerID\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x0e\n\x06groups\x18\x02 \x03(\t\"@\n\nEventToken\x12\x11\n\ttimestamp\x18\x01 \x01(\x03\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x10\n\x08position\x18\x03 \x01(\t\"1\n\x05Value\x12\x19\n\x04type\x18\x01 \x01(\x0e\x32\x0b.query.Type\x12\r\n\x05value\x18\x02 \x01(\x0c\"V\n\x0c\x42indVariable\x12\x19\n\x04type\x18\x01 \x01(\x0e\x32\x0b.query.Type\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x1c\n\x06values\x18\x03 
\x03(\x0b\x32\x0c.query.Value\"\xa2\x01\n\nBoundQuery\x12\x0b\n\x03sql\x18\x01 \x01(\t\x12<\n\x0e\x62ind_variables\x18\x02 \x03(\x0b\x32$.query.BoundQuery.BindVariablesEntry\x1aI\n\x12\x42indVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.query.BindVariable:\x02\x38\x01\"\xe0\x04\n\x0e\x45xecuteOptions\x12\x1b\n\x13include_event_token\x18\x02 \x01(\x08\x12.\n\x13\x63ompare_event_token\x18\x03 \x01(\x0b\x32\x11.query.EventToken\x12=\n\x0fincluded_fields\x18\x04 \x01(\x0e\x32$.query.ExecuteOptions.IncludedFields\x12\x19\n\x11\x63lient_found_rows\x18\x05 \x01(\x08\x12\x30\n\x08workload\x18\x06 \x01(\x0e\x32\x1e.query.ExecuteOptions.Workload\x12\x18\n\x10sql_select_limit\x18\x08 \x01(\x03\x12I\n\x15transaction_isolation\x18\t \x01(\x0e\x32*.query.ExecuteOptions.TransactionIsolation\x12\x1d\n\x15skip_query_plan_cache\x18\n \x01(\x08\";\n\x0eIncludedFields\x12\x11\n\rTYPE_AND_NAME\x10\x00\x12\r\n\tTYPE_ONLY\x10\x01\x12\x07\n\x03\x41LL\x10\x02\"8\n\x08Workload\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x08\n\x04OLTP\x10\x01\x12\x08\n\x04OLAP\x10\x02\x12\x07\n\x03\x44\x42\x41\x10\x03\"t\n\x14TransactionIsolation\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x13\n\x0fREPEATABLE_READ\x10\x01\x12\x12\n\x0eREAD_COMMITTED\x10\x02\x12\x14\n\x10READ_UNCOMMITTED\x10\x03\x12\x10\n\x0cSERIALIZABLE\x10\x04J\x04\x08\x01\x10\x02\"\xbf\x01\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x04type\x18\x02 \x01(\x0e\x32\x0b.query.Type\x12\r\n\x05table\x18\x03 \x01(\t\x12\x11\n\torg_table\x18\x04 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x05 \x01(\t\x12\x10\n\x08org_name\x18\x06 \x01(\t\x12\x15\n\rcolumn_length\x18\x07 \x01(\r\x12\x0f\n\x07\x63harset\x18\x08 \x01(\r\x12\x10\n\x08\x64\x65\x63imals\x18\t \x01(\r\x12\r\n\x05\x66lags\x18\n \x01(\r\"&\n\x03Row\x12\x0f\n\x07lengths\x18\x01 \x03(\x12\x12\x0e\n\x06values\x18\x02 \x01(\x0c\"G\n\x0cResultExtras\x12&\n\x0b\x65vent_token\x18\x01 \x01(\x0b\x32\x11.query.EventToken\x12\x0f\n\x07\x66resher\x18\x02 \x01(\x08\"\x94\x01\n\x0bQueryResult\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x15\n\rrows_affected\x18\x02 \x01(\x04\x12\x11\n\tinsert_id\x18\x03 \x01(\x04\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12#\n\x06\x65xtras\x18\x05 \x01(\x0b\x32\x13.query.ResultExtras\"-\n\x0cQueryWarning\x12\x0c\n\x04\x63ode\x18\x01 \x01(\r\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xca\x02\n\x0bStreamEvent\x12\x30\n\nstatements\x18\x01 \x03(\x0b\x32\x1c.query.StreamEvent.Statement\x12&\n\x0b\x65vent_token\x18\x02 \x01(\x0b\x32\x11.query.EventToken\x1a\xe0\x01\n\tStatement\x12\x37\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32%.query.StreamEvent.Statement.Category\x12\x12\n\ntable_name\x18\x02 \x01(\t\x12(\n\x12primary_key_fields\x18\x03 \x03(\x0b\x32\x0c.query.Field\x12&\n\x12primary_key_values\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x0b\n\x03sql\x18\x05 \x01(\x0c\"\'\n\x08\x43\x61tegory\x12\t\n\x05\x45rror\x10\x00\x12\x07\n\x03\x44ML\x10\x01\x12\x07\n\x03\x44\x44L\x10\x02\"\xf3\x01\n\x0e\x45xecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0etransaction_id\x18\x05 \x01(\x03\x12&\n\x07options\x18\x06 \x01(\x0b\x32\x15.query.ExecuteOptions\"5\n\x0f\x45xecuteResponse\x12\"\n\x06result\x18\x01 
\x01(\x0b\x32\x12.query.QueryResult\"U\n\x0fResultWithError\x12\x1e\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0f.vtrpc.RPCError\x12\"\n\x06result\x18\x02 \x01(\x0b\x32\x12.query.QueryResult\"\x92\x02\n\x13\x45xecuteBatchRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\"\n\x07queries\x18\x04 \x03(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0e\x61s_transaction\x18\x05 \x01(\x08\x12\x16\n\x0etransaction_id\x18\x06 \x01(\x03\x12&\n\x07options\x18\x07 \x01(\x0b\x32\x15.query.ExecuteOptions\";\n\x14\x45xecuteBatchResponse\x12#\n\x07results\x18\x01 \x03(\x0b\x32\x12.query.QueryResult\"\xe1\x01\n\x14StreamExecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12&\n\x07options\x18\x05 \x01(\x0b\x32\x15.query.ExecuteOptions\";\n\x15StreamExecuteResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\xb7\x01\n\x0c\x42\x65ginRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12&\n\x07options\x18\x04 \x01(\x0b\x32\x15.query.ExecuteOptions\"\'\n\rBeginResponse\x12\x16\n\x0etransaction_id\x18\x01 \x01(\x03\"\xa8\x01\n\rCommitRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\"\x10\n\x0e\x43ommitResponse\"\xaa\x01\n\x0fRollbackRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\"\x12\n\x10RollbackResponse\"\xb7\x01\n\x0ePrepareRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x0c\n\x04\x64tid\x18\x05 \x01(\t\"\x11\n\x0fPrepareResponse\"\xa6\x01\n\x15\x43ommitPreparedRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04\x64tid\x18\x04 \x01(\t\"\x18\n\x16\x43ommitPreparedResponse\"\xc0\x01\n\x17RollbackPreparedRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x0c\n\x04\x64tid\x18\x05 \x01(\t\"\x1a\n\x18RollbackPreparedResponse\"\xce\x01\n\x18\x43reateTransactionRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 
\x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04\x64tid\x18\x04 \x01(\t\x12#\n\x0cparticipants\x18\x05 \x03(\x0b\x32\r.query.Target\"\x1b\n\x19\x43reateTransactionResponse\"\xbb\x01\n\x12StartCommitRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x0c\n\x04\x64tid\x18\x05 \x01(\t\"\x15\n\x13StartCommitResponse\"\xbb\x01\n\x12SetRollbackRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x0c\n\x04\x64tid\x18\x05 \x01(\t\"\x15\n\x13SetRollbackResponse\"\xab\x01\n\x1a\x43oncludeTransactionRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04\x64tid\x18\x04 \x01(\t\"\x1d\n\x1b\x43oncludeTransactionResponse\"\xa7\x01\n\x16ReadTransactionRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04\x64tid\x18\x04 \x01(\t\"G\n\x17ReadTransactionResponse\x12,\n\x08metadata\x18\x01 \x01(\x0b\x32\x1a.query.TransactionMetadata\"\xe0\x01\n\x13\x42\x65ginExecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12&\n\x07options\x18\x05 \x01(\x0b\x32\x15.query.ExecuteOptions\"r\n\x14\x42\x65ginExecuteResponse\x12\x1e\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0f.vtrpc.RPCError\x12\"\n\x06result\x18\x02 \x01(\x0b\x32\x12.query.QueryResult\x12\x16\n\x0etransaction_id\x18\x03 \x01(\x03\"\xff\x01\n\x18\x42\x65ginExecuteBatchRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\"\n\x07queries\x18\x04 \x03(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0e\x61s_transaction\x18\x05 \x01(\x08\x12&\n\x07options\x18\x06 \x01(\x0b\x32\x15.query.ExecuteOptions\"x\n\x19\x42\x65ginExecuteBatchResponse\x12\x1e\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0f.vtrpc.RPCError\x12#\n\x07results\x18\x02 \x03(\x0b\x32\x12.query.QueryResult\x12\x16\n\x0etransaction_id\x18\x03 \x01(\x03\"\xa5\x01\n\x14MessageStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04name\x18\x04 \x01(\t\";\n\x15MessageStreamResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\xbd\x01\n\x11MessageAckRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 
\x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x19\n\x03ids\x18\x05 \x03(\x0b\x32\x0c.query.Value\"8\n\x12MessageAckResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\xe7\x02\n\x11SplitQueryRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12\x14\n\x0csplit_column\x18\x05 \x03(\t\x12\x13\n\x0bsplit_count\x18\x06 \x01(\x03\x12\x1f\n\x17num_rows_per_query_part\x18\x08 \x01(\x03\x12\x35\n\talgorithm\x18\t \x01(\x0e\x32\".query.SplitQueryRequest.Algorithm\",\n\tAlgorithm\x12\x10\n\x0c\x45QUAL_SPLITS\x10\x00\x12\r\n\tFULL_SCAN\x10\x01\"A\n\nQuerySplit\x12 \n\x05query\x18\x01 \x01(\x0b\x32\x11.query.BoundQuery\x12\x11\n\trow_count\x18\x02 \x01(\x03\"8\n\x12SplitQueryResponse\x12\"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.query.QuerySplit\"\x15\n\x13StreamHealthRequest\"\xb6\x01\n\rRealtimeStats\x12\x14\n\x0chealth_error\x18\x01 \x01(\t\x12\x1d\n\x15seconds_behind_master\x18\x02 \x01(\r\x12\x1c\n\x14\x62inlog_players_count\x18\x03 \x01(\x05\x12\x32\n*seconds_behind_master_filtered_replication\x18\x04 \x01(\x03\x12\x11\n\tcpu_usage\x18\x05 \x01(\x01\x12\x0b\n\x03qps\x18\x06 \x01(\x01\"\x94\x01\n\x0e\x41ggregateStats\x12\x1c\n\x14healthy_tablet_count\x18\x01 \x01(\x05\x12\x1e\n\x16unhealthy_tablet_count\x18\x02 \x01(\x05\x12!\n\x19seconds_behind_master_min\x18\x03 \x01(\r\x12!\n\x19seconds_behind_master_max\x18\x04 \x01(\r\"\x81\x02\n\x14StreamHealthResponse\x12\x1d\n\x06target\x18\x01 \x01(\x0b\x32\r.query.Target\x12\x0f\n\x07serving\x18\x02 \x01(\x08\x12.\n&tablet_externally_reparented_timestamp\x18\x03 \x01(\x03\x12,\n\x0erealtime_stats\x18\x04 \x01(\x0b\x32\x14.query.RealtimeStats\x12.\n\x0f\x61ggregate_stats\x18\x06 \x01(\x0b\x32\x15.query.AggregateStats\x12+\n\x0ctablet_alias\x18\x05 \x01(\x0b\x32\x15.topodata.TabletAlias\"\xbb\x01\n\x13UpdateStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x03\"9\n\x14UpdateStreamResponse\x12!\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x12.query.StreamEvent\"\x86\x01\n\x13TransactionMetadata\x12\x0c\n\x04\x64tid\x18\x01 \x01(\t\x12&\n\x05state\x18\x02 \x01(\x0e\x32\x17.query.TransactionState\x12\x14\n\x0ctime_created\x18\x03 \x01(\x03\x12#\n\x0cparticipants\x18\x04 \x03(\x0b\x32\r.query.Target*\x92\x03\n\tMySqlFlag\x12\t\n\x05\x45MPTY\x10\x00\x12\x11\n\rNOT_NULL_FLAG\x10\x01\x12\x10\n\x0cPRI_KEY_FLAG\x10\x02\x12\x13\n\x0fUNIQUE_KEY_FLAG\x10\x04\x12\x15\n\x11MULTIPLE_KEY_FLAG\x10\x08\x12\r\n\tBLOB_FLAG\x10\x10\x12\x11\n\rUNSIGNED_FLAG\x10 \x12\x11\n\rZEROFILL_FLAG\x10@\x12\x10\n\x0b\x42INARY_FLAG\x10\x80\x01\x12\x0e\n\tENUM_FLAG\x10\x80\x02\x12\x18\n\x13\x41UTO_INCREMENT_FLAG\x10\x80\x04\x12\x13\n\x0eTIMESTAMP_FLAG\x10\x80\x08\x12\r\n\x08SET_FLAG\x10\x80\x10\x12\x1a\n\x15NO_DEFAULT_VALUE_FLAG\x10\x80 
\x12\x17\n\x12ON_UPDATE_NOW_FLAG\x10\x80@\x12\x0e\n\x08NUM_FLAG\x10\x80\x80\x02\x12\x13\n\rPART_KEY_FLAG\x10\x80\x80\x01\x12\x10\n\nGROUP_FLAG\x10\x80\x80\x02\x12\x11\n\x0bUNIQUE_FLAG\x10\x80\x80\x04\x12\x11\n\x0b\x42INCMP_FLAG\x10\x80\x80\x08\x1a\x02\x10\x01*k\n\x04\x46lag\x12\x08\n\x04NONE\x10\x00\x12\x0f\n\nISINTEGRAL\x10\x80\x02\x12\x0f\n\nISUNSIGNED\x10\x80\x04\x12\x0c\n\x07ISFLOAT\x10\x80\x08\x12\r\n\x08ISQUOTED\x10\x80\x10\x12\x0b\n\x06ISTEXT\x10\x80 \x12\r\n\x08ISBINARY\x10\x80@*\x99\x03\n\x04Type\x12\r\n\tNULL_TYPE\x10\x00\x12\t\n\x04INT8\x10\x81\x02\x12\n\n\x05UINT8\x10\x82\x06\x12\n\n\x05INT16\x10\x83\x02\x12\x0b\n\x06UINT16\x10\x84\x06\x12\n\n\x05INT24\x10\x85\x02\x12\x0b\n\x06UINT24\x10\x86\x06\x12\n\n\x05INT32\x10\x87\x02\x12\x0b\n\x06UINT32\x10\x88\x06\x12\n\n\x05INT64\x10\x89\x02\x12\x0b\n\x06UINT64\x10\x8a\x06\x12\x0c\n\x07\x46LOAT32\x10\x8b\x08\x12\x0c\n\x07\x46LOAT64\x10\x8c\x08\x12\x0e\n\tTIMESTAMP\x10\x8d\x10\x12\t\n\x04\x44\x41TE\x10\x8e\x10\x12\t\n\x04TIME\x10\x8f\x10\x12\r\n\x08\x44\x41TETIME\x10\x90\x10\x12\t\n\x04YEAR\x10\x91\x06\x12\x0b\n\x07\x44\x45\x43IMAL\x10\x12\x12\t\n\x04TEXT\x10\x93\x30\x12\t\n\x04\x42LOB\x10\x94P\x12\x0c\n\x07VARCHAR\x10\x95\x30\x12\x0e\n\tVARBINARY\x10\x96P\x12\t\n\x04\x43HAR\x10\x97\x30\x12\x0b\n\x06\x42INARY\x10\x98P\x12\x08\n\x03\x42IT\x10\x99\x10\x12\t\n\x04\x45NUM\x10\x9a\x10\x12\x08\n\x03SET\x10\x9b\x10\x12\t\n\x05TUPLE\x10\x1c\x12\r\n\x08GEOMETRY\x10\x9d\x10\x12\t\n\x04JSON\x10\x9e\x10\x12\x0e\n\nEXPRESSION\x10\x1f*F\n\x10TransactionState\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07PREPARE\x10\x01\x12\n\n\x06\x43OMMIT\x10\x02\x12\x0c\n\x08ROLLBACK\x10\x03\x42\x35\n\x0fio.vitess.protoZ\"vitess.io/vitess/go/vt/proto/queryb\x06proto3') + serialized_pb=_b('\n\x0bquery.proto\x12\x05query\x1a\x0etopodata.proto\x1a\x0bvtrpc.proto\"b\n\x06Target\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12\x0c\n\x04\x63\x65ll\x18\x04 \x01(\t\"2\n\x0eVTGateCallerID\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x0e\n\x06groups\x18\x02 \x03(\t\"@\n\nEventToken\x12\x11\n\ttimestamp\x18\x01 \x01(\x03\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x10\n\x08position\x18\x03 \x01(\t\"1\n\x05Value\x12\x19\n\x04type\x18\x01 \x01(\x0e\x32\x0b.query.Type\x12\r\n\x05value\x18\x02 \x01(\x0c\"V\n\x0c\x42indVariable\x12\x19\n\x04type\x18\x01 \x01(\x0e\x32\x0b.query.Type\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x1c\n\x06values\x18\x03 \x03(\x0b\x32\x0c.query.Value\"\xa2\x01\n\nBoundQuery\x12\x0b\n\x03sql\x18\x01 \x01(\t\x12<\n\x0e\x62ind_variables\x18\x02 \x03(\x0b\x32$.query.BoundQuery.BindVariablesEntry\x1aI\n\x12\x42indVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.query.BindVariable:\x02\x38\x01\"\x84\x05\n\x0e\x45xecuteOptions\x12\x1b\n\x13include_event_token\x18\x02 \x01(\x08\x12.\n\x13\x63ompare_event_token\x18\x03 \x01(\x0b\x32\x11.query.EventToken\x12=\n\x0fincluded_fields\x18\x04 \x01(\x0e\x32$.query.ExecuteOptions.IncludedFields\x12\x19\n\x11\x63lient_found_rows\x18\x05 \x01(\x08\x12\x30\n\x08workload\x18\x06 \x01(\x0e\x32\x1e.query.ExecuteOptions.Workload\x12\x18\n\x10sql_select_limit\x18\x08 \x01(\x03\x12I\n\x15transaction_isolation\x18\t \x01(\x0e\x32*.query.ExecuteOptions.TransactionIsolation\x12\x1d\n\x15skip_query_plan_cache\x18\n 
\x01(\x08\";\n\x0eIncludedFields\x12\x11\n\rTYPE_AND_NAME\x10\x00\x12\r\n\tTYPE_ONLY\x10\x01\x12\x07\n\x03\x41LL\x10\x02\"8\n\x08Workload\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x08\n\x04OLTP\x10\x01\x12\x08\n\x04OLAP\x10\x02\x12\x07\n\x03\x44\x42\x41\x10\x03\"\x97\x01\n\x14TransactionIsolation\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x13\n\x0fREPEATABLE_READ\x10\x01\x12\x12\n\x0eREAD_COMMITTED\x10\x02\x12\x14\n\x10READ_UNCOMMITTED\x10\x03\x12\x10\n\x0cSERIALIZABLE\x10\x04\x12!\n\x1d\x43ONSISTENT_SNAPSHOT_READ_ONLY\x10\x05J\x04\x08\x01\x10\x02\"\xbf\x01\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x04type\x18\x02 \x01(\x0e\x32\x0b.query.Type\x12\r\n\x05table\x18\x03 \x01(\t\x12\x11\n\torg_table\x18\x04 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x05 \x01(\t\x12\x10\n\x08org_name\x18\x06 \x01(\t\x12\x15\n\rcolumn_length\x18\x07 \x01(\r\x12\x0f\n\x07\x63harset\x18\x08 \x01(\r\x12\x10\n\x08\x64\x65\x63imals\x18\t \x01(\r\x12\r\n\x05\x66lags\x18\n \x01(\r\"&\n\x03Row\x12\x0f\n\x07lengths\x18\x01 \x03(\x12\x12\x0e\n\x06values\x18\x02 \x01(\x0c\"G\n\x0cResultExtras\x12&\n\x0b\x65vent_token\x18\x01 \x01(\x0b\x32\x11.query.EventToken\x12\x0f\n\x07\x66resher\x18\x02 \x01(\x08\"\x94\x01\n\x0bQueryResult\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x15\n\rrows_affected\x18\x02 \x01(\x04\x12\x11\n\tinsert_id\x18\x03 \x01(\x04\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12#\n\x06\x65xtras\x18\x05 \x01(\x0b\x32\x13.query.ResultExtras\"-\n\x0cQueryWarning\x12\x0c\n\x04\x63ode\x18\x01 \x01(\r\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xca\x02\n\x0bStreamEvent\x12\x30\n\nstatements\x18\x01 \x03(\x0b\x32\x1c.query.StreamEvent.Statement\x12&\n\x0b\x65vent_token\x18\x02 \x01(\x0b\x32\x11.query.EventToken\x1a\xe0\x01\n\tStatement\x12\x37\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32%.query.StreamEvent.Statement.Category\x12\x12\n\ntable_name\x18\x02 \x01(\t\x12(\n\x12primary_key_fields\x18\x03 \x03(\x0b\x32\x0c.query.Field\x12&\n\x12primary_key_values\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x0b\n\x03sql\x18\x05 \x01(\x0c\"\'\n\x08\x43\x61tegory\x12\t\n\x05\x45rror\x10\x00\x12\x07\n\x03\x44ML\x10\x01\x12\x07\n\x03\x44\x44L\x10\x02\"\xf3\x01\n\x0e\x45xecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0etransaction_id\x18\x05 \x01(\x03\x12&\n\x07options\x18\x06 \x01(\x0b\x32\x15.query.ExecuteOptions\"5\n\x0f\x45xecuteResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"U\n\x0fResultWithError\x12\x1e\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0f.vtrpc.RPCError\x12\"\n\x06result\x18\x02 \x01(\x0b\x32\x12.query.QueryResult\"\x92\x02\n\x13\x45xecuteBatchRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\"\n\x07queries\x18\x04 \x03(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0e\x61s_transaction\x18\x05 \x01(\x08\x12\x16\n\x0etransaction_id\x18\x06 \x01(\x03\x12&\n\x07options\x18\x07 \x01(\x0b\x32\x15.query.ExecuteOptions\";\n\x14\x45xecuteBatchResponse\x12#\n\x07results\x18\x01 \x03(\x0b\x32\x12.query.QueryResult\"\xf9\x01\n\x14StreamExecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 
\x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12&\n\x07options\x18\x05 \x01(\x0b\x32\x15.query.ExecuteOptions\x12\x16\n\x0etransaction_id\x18\x06 \x01(\x03\";\n\x15StreamExecuteResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\xb7\x01\n\x0c\x42\x65ginRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12&\n\x07options\x18\x04 \x01(\x0b\x32\x15.query.ExecuteOptions\"\'\n\rBeginResponse\x12\x16\n\x0etransaction_id\x18\x01 \x01(\x03\"\xa8\x01\n\rCommitRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\"\x10\n\x0e\x43ommitResponse\"\xaa\x01\n\x0fRollbackRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\"\x12\n\x10RollbackResponse\"\xb7\x01\n\x0ePrepareRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x0c\n\x04\x64tid\x18\x05 \x01(\t\"\x11\n\x0fPrepareResponse\"\xa6\x01\n\x15\x43ommitPreparedRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04\x64tid\x18\x04 \x01(\t\"\x18\n\x16\x43ommitPreparedResponse\"\xc0\x01\n\x17RollbackPreparedRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x0c\n\x04\x64tid\x18\x05 \x01(\t\"\x1a\n\x18RollbackPreparedResponse\"\xce\x01\n\x18\x43reateTransactionRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04\x64tid\x18\x04 \x01(\t\x12#\n\x0cparticipants\x18\x05 \x03(\x0b\x32\r.query.Target\"\x1b\n\x19\x43reateTransactionResponse\"\xbb\x01\n\x12StartCommitRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x0c\n\x04\x64tid\x18\x05 \x01(\t\"\x15\n\x13StartCommitResponse\"\xbb\x01\n\x12SetRollbackRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 
\x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x0c\n\x04\x64tid\x18\x05 \x01(\t\"\x15\n\x13SetRollbackResponse\"\xab\x01\n\x1a\x43oncludeTransactionRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04\x64tid\x18\x04 \x01(\t\"\x1d\n\x1b\x43oncludeTransactionResponse\"\xa7\x01\n\x16ReadTransactionRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04\x64tid\x18\x04 \x01(\t\"G\n\x17ReadTransactionResponse\x12,\n\x08metadata\x18\x01 \x01(\x0b\x32\x1a.query.TransactionMetadata\"\xe0\x01\n\x13\x42\x65ginExecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12&\n\x07options\x18\x05 \x01(\x0b\x32\x15.query.ExecuteOptions\"r\n\x14\x42\x65ginExecuteResponse\x12\x1e\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0f.vtrpc.RPCError\x12\"\n\x06result\x18\x02 \x01(\x0b\x32\x12.query.QueryResult\x12\x16\n\x0etransaction_id\x18\x03 \x01(\x03\"\xff\x01\n\x18\x42\x65ginExecuteBatchRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\"\n\x07queries\x18\x04 \x03(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0e\x61s_transaction\x18\x05 \x01(\x08\x12&\n\x07options\x18\x06 \x01(\x0b\x32\x15.query.ExecuteOptions\"x\n\x19\x42\x65ginExecuteBatchResponse\x12\x1e\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0f.vtrpc.RPCError\x12#\n\x07results\x18\x02 \x03(\x0b\x32\x12.query.QueryResult\x12\x16\n\x0etransaction_id\x18\x03 \x01(\x03\"\xa5\x01\n\x14MessageStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04name\x18\x04 \x01(\t\";\n\x15MessageStreamResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\xbd\x01\n\x11MessageAckRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x19\n\x03ids\x18\x05 \x03(\x0b\x32\x0c.query.Value\"8\n\x12MessageAckResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\xe7\x02\n\x11SplitQueryRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12\x14\n\x0csplit_column\x18\x05 \x03(\t\x12\x13\n\x0bsplit_count\x18\x06 \x01(\x03\x12\x1f\n\x17num_rows_per_query_part\x18\x08 \x01(\x03\x12\x35\n\talgorithm\x18\t 
\x01(\x0e\x32\".query.SplitQueryRequest.Algorithm\",\n\tAlgorithm\x12\x10\n\x0c\x45QUAL_SPLITS\x10\x00\x12\r\n\tFULL_SCAN\x10\x01\"A\n\nQuerySplit\x12 \n\x05query\x18\x01 \x01(\x0b\x32\x11.query.BoundQuery\x12\x11\n\trow_count\x18\x02 \x01(\x03\"8\n\x12SplitQueryResponse\x12\"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.query.QuerySplit\"\x15\n\x13StreamHealthRequest\"\xb6\x01\n\rRealtimeStats\x12\x14\n\x0chealth_error\x18\x01 \x01(\t\x12\x1d\n\x15seconds_behind_master\x18\x02 \x01(\r\x12\x1c\n\x14\x62inlog_players_count\x18\x03 \x01(\x05\x12\x32\n*seconds_behind_master_filtered_replication\x18\x04 \x01(\x03\x12\x11\n\tcpu_usage\x18\x05 \x01(\x01\x12\x0b\n\x03qps\x18\x06 \x01(\x01\"\x94\x01\n\x0e\x41ggregateStats\x12\x1c\n\x14healthy_tablet_count\x18\x01 \x01(\x05\x12\x1e\n\x16unhealthy_tablet_count\x18\x02 \x01(\x05\x12!\n\x19seconds_behind_master_min\x18\x03 \x01(\r\x12!\n\x19seconds_behind_master_max\x18\x04 \x01(\r\"\x81\x02\n\x14StreamHealthResponse\x12\x1d\n\x06target\x18\x01 \x01(\x0b\x32\r.query.Target\x12\x0f\n\x07serving\x18\x02 \x01(\x08\x12.\n&tablet_externally_reparented_timestamp\x18\x03 \x01(\x03\x12,\n\x0erealtime_stats\x18\x04 \x01(\x0b\x32\x14.query.RealtimeStats\x12.\n\x0f\x61ggregate_stats\x18\x06 \x01(\x0b\x32\x15.query.AggregateStats\x12+\n\x0ctablet_alias\x18\x05 \x01(\x0b\x32\x15.topodata.TabletAlias\"\xbb\x01\n\x13UpdateStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x03\"9\n\x14UpdateStreamResponse\x12!\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x12.query.StreamEvent\"\x86\x01\n\x13TransactionMetadata\x12\x0c\n\x04\x64tid\x18\x01 \x01(\t\x12&\n\x05state\x18\x02 \x01(\x0e\x32\x17.query.TransactionState\x12\x14\n\x0ctime_created\x18\x03 \x01(\x03\x12#\n\x0cparticipants\x18\x04 \x03(\x0b\x32\r.query.Target*\x92\x03\n\tMySqlFlag\x12\t\n\x05\x45MPTY\x10\x00\x12\x11\n\rNOT_NULL_FLAG\x10\x01\x12\x10\n\x0cPRI_KEY_FLAG\x10\x02\x12\x13\n\x0fUNIQUE_KEY_FLAG\x10\x04\x12\x15\n\x11MULTIPLE_KEY_FLAG\x10\x08\x12\r\n\tBLOB_FLAG\x10\x10\x12\x11\n\rUNSIGNED_FLAG\x10 \x12\x11\n\rZEROFILL_FLAG\x10@\x12\x10\n\x0b\x42INARY_FLAG\x10\x80\x01\x12\x0e\n\tENUM_FLAG\x10\x80\x02\x12\x18\n\x13\x41UTO_INCREMENT_FLAG\x10\x80\x04\x12\x13\n\x0eTIMESTAMP_FLAG\x10\x80\x08\x12\r\n\x08SET_FLAG\x10\x80\x10\x12\x1a\n\x15NO_DEFAULT_VALUE_FLAG\x10\x80 \x12\x17\n\x12ON_UPDATE_NOW_FLAG\x10\x80@\x12\x0e\n\x08NUM_FLAG\x10\x80\x80\x02\x12\x13\n\rPART_KEY_FLAG\x10\x80\x80\x01\x12\x10\n\nGROUP_FLAG\x10\x80\x80\x02\x12\x11\n\x0bUNIQUE_FLAG\x10\x80\x80\x04\x12\x11\n\x0b\x42INCMP_FLAG\x10\x80\x80\x08\x1a\x02\x10\x01*k\n\x04\x46lag\x12\x08\n\x04NONE\x10\x00\x12\x0f\n\nISINTEGRAL\x10\x80\x02\x12\x0f\n\nISUNSIGNED\x10\x80\x04\x12\x0c\n\x07ISFLOAT\x10\x80\x08\x12\r\n\x08ISQUOTED\x10\x80\x10\x12\x0b\n\x06ISTEXT\x10\x80 
\x12\r\n\x08ISBINARY\x10\x80@*\x99\x03\n\x04Type\x12\r\n\tNULL_TYPE\x10\x00\x12\t\n\x04INT8\x10\x81\x02\x12\n\n\x05UINT8\x10\x82\x06\x12\n\n\x05INT16\x10\x83\x02\x12\x0b\n\x06UINT16\x10\x84\x06\x12\n\n\x05INT24\x10\x85\x02\x12\x0b\n\x06UINT24\x10\x86\x06\x12\n\n\x05INT32\x10\x87\x02\x12\x0b\n\x06UINT32\x10\x88\x06\x12\n\n\x05INT64\x10\x89\x02\x12\x0b\n\x06UINT64\x10\x8a\x06\x12\x0c\n\x07\x46LOAT32\x10\x8b\x08\x12\x0c\n\x07\x46LOAT64\x10\x8c\x08\x12\x0e\n\tTIMESTAMP\x10\x8d\x10\x12\t\n\x04\x44\x41TE\x10\x8e\x10\x12\t\n\x04TIME\x10\x8f\x10\x12\r\n\x08\x44\x41TETIME\x10\x90\x10\x12\t\n\x04YEAR\x10\x91\x06\x12\x0b\n\x07\x44\x45\x43IMAL\x10\x12\x12\t\n\x04TEXT\x10\x93\x30\x12\t\n\x04\x42LOB\x10\x94P\x12\x0c\n\x07VARCHAR\x10\x95\x30\x12\x0e\n\tVARBINARY\x10\x96P\x12\t\n\x04\x43HAR\x10\x97\x30\x12\x0b\n\x06\x42INARY\x10\x98P\x12\x08\n\x03\x42IT\x10\x99\x10\x12\t\n\x04\x45NUM\x10\x9a\x10\x12\x08\n\x03SET\x10\x9b\x10\x12\t\n\x05TUPLE\x10\x1c\x12\r\n\x08GEOMETRY\x10\x9d\x10\x12\t\n\x04JSON\x10\x9e\x10\x12\x0e\n\nEXPRESSION\x10\x1f*F\n\x10TransactionState\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07PREPARE\x10\x01\x12\n\n\x06\x43OMMIT\x10\x02\x12\x0c\n\x08ROLLBACK\x10\x03\x42\x35\n\x0fio.vitess.protoZ\"vitess.io/vitess/go/vt/proto/queryb\x06proto3') , dependencies=[topodata__pb2.DESCRIPTOR,vtrpc__pb2.DESCRIPTOR,]) @@ -115,8 +115,8 @@ _MYSQLFLAG = _descriptor.EnumDescriptor( ], containing_type=None, options=_descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001')), - serialized_start=8076, - serialized_end=8478, + serialized_start=8136, + serialized_end=8538, ) _sym_db.RegisterEnumDescriptor(_MYSQLFLAG) @@ -158,8 +158,8 @@ _FLAG = _descriptor.EnumDescriptor( ], containing_type=None, options=None, - serialized_start=8480, - serialized_end=8587, + serialized_start=8540, + serialized_end=8647, ) _sym_db.RegisterEnumDescriptor(_FLAG) @@ -301,8 +301,8 @@ _TYPE = _descriptor.EnumDescriptor( ], containing_type=None, options=None, - serialized_start=8590, - serialized_end=8999, + serialized_start=8650, + serialized_end=9059, ) _sym_db.RegisterEnumDescriptor(_TYPE) @@ -332,8 +332,8 @@ _TRANSACTIONSTATE = _descriptor.EnumDescriptor( ], containing_type=None, options=None, - serialized_start=9001, - serialized_end=9071, + serialized_start=9061, + serialized_end=9131, ) _sym_db.RegisterEnumDescriptor(_TRANSACTIONSTATE) @@ -485,11 +485,15 @@ _EXECUTEOPTIONS_TRANSACTIONISOLATION = _descriptor.EnumDescriptor( name='SERIALIZABLE', index=4, number=4, options=None, type=None), + _descriptor.EnumValueDescriptor( + name='CONSISTENT_SNAPSHOT_READ_ONLY', index=5, number=5, + options=None, + type=None), ], containing_type=None, options=None, - serialized_start=1060, - serialized_end=1176, + serialized_start=1061, + serialized_end=1212, ) _sym_db.RegisterEnumDescriptor(_EXECUTEOPTIONS_TRANSACTIONISOLATION) @@ -514,8 +518,8 @@ _STREAMEVENT_STATEMENT_CATEGORY = _descriptor.EnumDescriptor( ], containing_type=None, options=None, - serialized_start=1981, - serialized_end=2020, + serialized_start=2017, + serialized_end=2056, ) _sym_db.RegisterEnumDescriptor(_STREAMEVENT_STATEMENT_CATEGORY) @@ -536,8 +540,8 @@ _SPLITQUERYREQUEST_ALGORITHM = _descriptor.EnumDescriptor( ], containing_type=None, options=None, - serialized_start=6899, - serialized_end=6943, + serialized_start=6959, + serialized_end=7003, ) _sym_db.RegisterEnumDescriptor(_SPLITQUERYREQUEST_ALGORITHM) @@ -914,7 +918,7 @@ _EXECUTEOPTIONS = _descriptor.Descriptor( oneofs=[ ], serialized_start=574, - serialized_end=1182, + serialized_end=1218, ) @@ -1007,8 
+1011,8 @@ _FIELD = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1185, - serialized_end=1376, + serialized_start=1221, + serialized_end=1412, ) @@ -1045,8 +1049,8 @@ _ROW = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1378, - serialized_end=1416, + serialized_start=1414, + serialized_end=1452, ) @@ -1083,8 +1087,8 @@ _RESULTEXTRAS = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1418, - serialized_end=1489, + serialized_start=1454, + serialized_end=1525, ) @@ -1142,8 +1146,8 @@ _QUERYRESULT = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1492, - serialized_end=1640, + serialized_start=1528, + serialized_end=1676, ) @@ -1180,8 +1184,8 @@ _QUERYWARNING = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1642, - serialized_end=1687, + serialized_start=1678, + serialized_end=1723, ) @@ -1240,8 +1244,8 @@ _STREAMEVENT_STATEMENT = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1796, - serialized_end=2020, + serialized_start=1832, + serialized_end=2056, ) _STREAMEVENT = _descriptor.Descriptor( @@ -1277,8 +1281,8 @@ _STREAMEVENT = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1690, - serialized_end=2020, + serialized_start=1726, + serialized_end=2056, ) @@ -1343,8 +1347,8 @@ _EXECUTEREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2023, - serialized_end=2266, + serialized_start=2059, + serialized_end=2302, ) @@ -1374,8 +1378,8 @@ _EXECUTERESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2268, - serialized_end=2321, + serialized_start=2304, + serialized_end=2357, ) @@ -1412,8 +1416,8 @@ _RESULTWITHERROR = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2323, - serialized_end=2408, + serialized_start=2359, + serialized_end=2444, ) @@ -1485,8 +1489,8 @@ _EXECUTEBATCHREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2411, - serialized_end=2685, + serialized_start=2447, + serialized_end=2721, ) @@ -1516,8 +1520,8 @@ _EXECUTEBATCHRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2687, - serialized_end=2746, + serialized_start=2723, + serialized_end=2782, ) @@ -1563,6 +1567,13 @@ _STREAMEXECUTEREQUEST = _descriptor.Descriptor( message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='transaction_id', full_name='query.StreamExecuteRequest.transaction_id', index=5, + number=6, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -1575,8 +1586,8 @@ _STREAMEXECUTEREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2749, - serialized_end=2974, + serialized_start=2785, + serialized_end=3034, ) @@ -1606,8 +1617,8 @@ _STREAMEXECUTERESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2976, - serialized_end=3035, + serialized_start=3036, + serialized_end=3095, ) @@ -1658,8 +1669,8 @@ _BEGINREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3038, - serialized_end=3221, + serialized_start=3098, + serialized_end=3281, ) @@ -1689,8 +1700,8 @@ _BEGINRESPONSE 
= _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3223, - serialized_end=3262, + serialized_start=3283, + serialized_end=3322, ) @@ -1741,8 +1752,8 @@ _COMMITREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3265, - serialized_end=3433, + serialized_start=3325, + serialized_end=3493, ) @@ -1765,8 +1776,8 @@ _COMMITRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3435, - serialized_end=3451, + serialized_start=3495, + serialized_end=3511, ) @@ -1817,8 +1828,8 @@ _ROLLBACKREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3454, - serialized_end=3624, + serialized_start=3514, + serialized_end=3684, ) @@ -1841,8 +1852,8 @@ _ROLLBACKRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3626, - serialized_end=3644, + serialized_start=3686, + serialized_end=3704, ) @@ -1900,8 +1911,8 @@ _PREPAREREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3647, - serialized_end=3830, + serialized_start=3707, + serialized_end=3890, ) @@ -1924,8 +1935,8 @@ _PREPARERESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3832, - serialized_end=3849, + serialized_start=3892, + serialized_end=3909, ) @@ -1976,8 +1987,8 @@ _COMMITPREPAREDREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3852, - serialized_end=4018, + serialized_start=3912, + serialized_end=4078, ) @@ -2000,8 +2011,8 @@ _COMMITPREPAREDRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4020, - serialized_end=4044, + serialized_start=4080, + serialized_end=4104, ) @@ -2059,8 +2070,8 @@ _ROLLBACKPREPAREDREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4047, - serialized_end=4239, + serialized_start=4107, + serialized_end=4299, ) @@ -2083,8 +2094,8 @@ _ROLLBACKPREPAREDRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4241, - serialized_end=4267, + serialized_start=4301, + serialized_end=4327, ) @@ -2142,8 +2153,8 @@ _CREATETRANSACTIONREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4270, - serialized_end=4476, + serialized_start=4330, + serialized_end=4536, ) @@ -2166,8 +2177,8 @@ _CREATETRANSACTIONRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4478, - serialized_end=4505, + serialized_start=4538, + serialized_end=4565, ) @@ -2225,8 +2236,8 @@ _STARTCOMMITREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4508, - serialized_end=4695, + serialized_start=4568, + serialized_end=4755, ) @@ -2249,8 +2260,8 @@ _STARTCOMMITRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4697, - serialized_end=4718, + serialized_start=4757, + serialized_end=4778, ) @@ -2308,8 +2319,8 @@ _SETROLLBACKREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4721, - serialized_end=4908, + serialized_start=4781, + serialized_end=4968, ) @@ -2332,8 +2343,8 @@ _SETROLLBACKRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4910, - serialized_end=4931, + serialized_start=4970, + serialized_end=4991, ) @@ -2384,8 +2395,8 @@ _CONCLUDETRANSACTIONREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4934, - serialized_end=5105, + serialized_start=4994, + 
serialized_end=5165, ) @@ -2408,8 +2419,8 @@ _CONCLUDETRANSACTIONRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5107, - serialized_end=5136, + serialized_start=5167, + serialized_end=5196, ) @@ -2460,8 +2471,8 @@ _READTRANSACTIONREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5139, - serialized_end=5306, + serialized_start=5199, + serialized_end=5366, ) @@ -2491,8 +2502,8 @@ _READTRANSACTIONRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5308, - serialized_end=5379, + serialized_start=5368, + serialized_end=5439, ) @@ -2550,8 +2561,8 @@ _BEGINEXECUTEREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5382, - serialized_end=5606, + serialized_start=5442, + serialized_end=5666, ) @@ -2595,8 +2606,8 @@ _BEGINEXECUTERESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5608, - serialized_end=5722, + serialized_start=5668, + serialized_end=5782, ) @@ -2661,8 +2672,8 @@ _BEGINEXECUTEBATCHREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5725, - serialized_end=5980, + serialized_start=5785, + serialized_end=6040, ) @@ -2706,8 +2717,8 @@ _BEGINEXECUTEBATCHRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5982, - serialized_end=6102, + serialized_start=6042, + serialized_end=6162, ) @@ -2758,8 +2769,8 @@ _MESSAGESTREAMREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=6105, - serialized_end=6270, + serialized_start=6165, + serialized_end=6330, ) @@ -2789,8 +2800,8 @@ _MESSAGESTREAMRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=6272, - serialized_end=6331, + serialized_start=6332, + serialized_end=6391, ) @@ -2848,8 +2859,8 @@ _MESSAGEACKREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=6334, - serialized_end=6523, + serialized_start=6394, + serialized_end=6583, ) @@ -2879,8 +2890,8 @@ _MESSAGEACKRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=6525, - serialized_end=6581, + serialized_start=6585, + serialized_end=6641, ) @@ -2960,8 +2971,8 @@ _SPLITQUERYREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=6584, - serialized_end=6943, + serialized_start=6644, + serialized_end=7003, ) @@ -2998,8 +3009,8 @@ _QUERYSPLIT = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=6945, - serialized_end=7010, + serialized_start=7005, + serialized_end=7070, ) @@ -3029,8 +3040,8 @@ _SPLITQUERYRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=7012, - serialized_end=7068, + serialized_start=7072, + serialized_end=7128, ) @@ -3053,8 +3064,8 @@ _STREAMHEALTHREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=7070, - serialized_end=7091, + serialized_start=7130, + serialized_end=7151, ) @@ -3119,8 +3130,8 @@ _REALTIMESTATS = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=7094, - serialized_end=7276, + serialized_start=7154, + serialized_end=7336, ) @@ -3171,8 +3182,8 @@ _AGGREGATESTATS = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=7279, - serialized_end=7427, + serialized_start=7339, + serialized_end=7487, ) @@ -3237,8 +3248,8 @@ _STREAMHEALTHRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=7430, 
- serialized_end=7687, + serialized_start=7490, + serialized_end=7747, ) @@ -3296,8 +3307,8 @@ _UPDATESTREAMREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=7690, - serialized_end=7877, + serialized_start=7750, + serialized_end=7937, ) @@ -3327,8 +3338,8 @@ _UPDATESTREAMRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=7879, - serialized_end=7936, + serialized_start=7939, + serialized_end=7996, ) @@ -3379,8 +3390,8 @@ _TRANSACTIONMETADATA = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=7939, - serialized_end=8073, + serialized_start=7999, + serialized_end=8133, ) _TARGET.fields_by_name['tablet_type'].enum_type = topodata__pb2._TABLETTYPE diff --git a/py/vtproto/queryservice_pb2.py b/py/vtproto/queryservice_pb2.py index 467e8b4dd4..3c443df129 100644 --- a/py/vtproto/queryservice_pb2.py +++ b/py/vtproto/queryservice_pb2.py @@ -7,39 +7,39 @@ from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() import query_pb2 as query__pb2 +import binlogdata_pb2 as binlogdata__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='queryservice.proto', package='queryservice', syntax='proto3', - serialized_pb=_b('\n\x12queryservice.proto\x12\x0cqueryservice\x1a\x0bquery.proto2\xa7\x0c\n\x05Query\x12:\n\x07\x45xecute\x12\x15.query.ExecuteRequest\x1a\x16.query.ExecuteResponse\"\x00\x12I\n\x0c\x45xecuteBatch\x12\x1a.query.ExecuteBatchRequest\x1a\x1b.query.ExecuteBatchResponse\"\x00\x12N\n\rStreamExecute\x12\x1b.query.StreamExecuteRequest\x1a\x1c.query.StreamExecuteResponse\"\x00\x30\x01\x12\x34\n\x05\x42\x65gin\x12\x13.query.BeginRequest\x1a\x14.query.BeginResponse\"\x00\x12\x37\n\x06\x43ommit\x12\x14.query.CommitRequest\x1a\x15.query.CommitResponse\"\x00\x12=\n\x08Rollback\x12\x16.query.RollbackRequest\x1a\x17.query.RollbackResponse\"\x00\x12:\n\x07Prepare\x12\x15.query.PrepareRequest\x1a\x16.query.PrepareResponse\"\x00\x12O\n\x0e\x43ommitPrepared\x12\x1c.query.CommitPreparedRequest\x1a\x1d.query.CommitPreparedResponse\"\x00\x12U\n\x10RollbackPrepared\x12\x1e.query.RollbackPreparedRequest\x1a\x1f.query.RollbackPreparedResponse\"\x00\x12X\n\x11\x43reateTransaction\x12\x1f.query.CreateTransactionRequest\x1a .query.CreateTransactionResponse\"\x00\x12\x46\n\x0bStartCommit\x12\x19.query.StartCommitRequest\x1a\x1a.query.StartCommitResponse\"\x00\x12\x46\n\x0bSetRollback\x12\x19.query.SetRollbackRequest\x1a\x1a.query.SetRollbackResponse\"\x00\x12^\n\x13\x43oncludeTransaction\x12!.query.ConcludeTransactionRequest\x1a\".query.ConcludeTransactionResponse\"\x00\x12R\n\x0fReadTransaction\x12\x1d.query.ReadTransactionRequest\x1a\x1e.query.ReadTransactionResponse\"\x00\x12I\n\x0c\x42\x65ginExecute\x12\x1a.query.BeginExecuteRequest\x1a\x1b.query.BeginExecuteResponse\"\x00\x12X\n\x11\x42\x65ginExecuteBatch\x12\x1f.query.BeginExecuteBatchRequest\x1a 
.query.BeginExecuteBatchResponse\"\x00\x12N\n\rMessageStream\x12\x1b.query.MessageStreamRequest\x1a\x1c.query.MessageStreamResponse\"\x00\x30\x01\x12\x43\n\nMessageAck\x12\x18.query.MessageAckRequest\x1a\x19.query.MessageAckResponse\"\x00\x12\x43\n\nSplitQuery\x12\x18.query.SplitQueryRequest\x1a\x19.query.SplitQueryResponse\"\x00\x12K\n\x0cStreamHealth\x12\x1a.query.StreamHealthRequest\x1a\x1b.query.StreamHealthResponse\"\x00\x30\x01\x12K\n\x0cUpdateStream\x12\x1a.query.UpdateStreamRequest\x1a\x1b.query.UpdateStreamResponse\"\x00\x30\x01\x42+Z)vitess.io/vitess/go/vt/proto/queryserviceb\x06proto3') + serialized_options=_b('Z)vitess.io/vitess/go/vt/proto/queryservice'), + serialized_pb=_b('\n\x12queryservice.proto\x12\x0cqueryservice\x1a\x0bquery.proto\x1a\x10\x62inlogdata.proto2\xef\x0c\n\x05Query\x12:\n\x07\x45xecute\x12\x15.query.ExecuteRequest\x1a\x16.query.ExecuteResponse\"\x00\x12I\n\x0c\x45xecuteBatch\x12\x1a.query.ExecuteBatchRequest\x1a\x1b.query.ExecuteBatchResponse\"\x00\x12N\n\rStreamExecute\x12\x1b.query.StreamExecuteRequest\x1a\x1c.query.StreamExecuteResponse\"\x00\x30\x01\x12\x34\n\x05\x42\x65gin\x12\x13.query.BeginRequest\x1a\x14.query.BeginResponse\"\x00\x12\x37\n\x06\x43ommit\x12\x14.query.CommitRequest\x1a\x15.query.CommitResponse\"\x00\x12=\n\x08Rollback\x12\x16.query.RollbackRequest\x1a\x17.query.RollbackResponse\"\x00\x12:\n\x07Prepare\x12\x15.query.PrepareRequest\x1a\x16.query.PrepareResponse\"\x00\x12O\n\x0e\x43ommitPrepared\x12\x1c.query.CommitPreparedRequest\x1a\x1d.query.CommitPreparedResponse\"\x00\x12U\n\x10RollbackPrepared\x12\x1e.query.RollbackPreparedRequest\x1a\x1f.query.RollbackPreparedResponse\"\x00\x12X\n\x11\x43reateTransaction\x12\x1f.query.CreateTransactionRequest\x1a .query.CreateTransactionResponse\"\x00\x12\x46\n\x0bStartCommit\x12\x19.query.StartCommitRequest\x1a\x1a.query.StartCommitResponse\"\x00\x12\x46\n\x0bSetRollback\x12\x19.query.SetRollbackRequest\x1a\x1a.query.SetRollbackResponse\"\x00\x12^\n\x13\x43oncludeTransaction\x12!.query.ConcludeTransactionRequest\x1a\".query.ConcludeTransactionResponse\"\x00\x12R\n\x0fReadTransaction\x12\x1d.query.ReadTransactionRequest\x1a\x1e.query.ReadTransactionResponse\"\x00\x12I\n\x0c\x42\x65ginExecute\x12\x1a.query.BeginExecuteRequest\x1a\x1b.query.BeginExecuteResponse\"\x00\x12X\n\x11\x42\x65ginExecuteBatch\x12\x1f.query.BeginExecuteBatchRequest\x1a .query.BeginExecuteBatchResponse\"\x00\x12N\n\rMessageStream\x12\x1b.query.MessageStreamRequest\x1a\x1c.query.MessageStreamResponse\"\x00\x30\x01\x12\x43\n\nMessageAck\x12\x18.query.MessageAckRequest\x1a\x19.query.MessageAckResponse\"\x00\x12\x43\n\nSplitQuery\x12\x18.query.SplitQueryRequest\x1a\x19.query.SplitQueryResponse\"\x00\x12K\n\x0cStreamHealth\x12\x1a.query.StreamHealthRequest\x1a\x1b.query.StreamHealthResponse\"\x00\x30\x01\x12K\n\x0cUpdateStream\x12\x1a.query.UpdateStreamRequest\x1a\x1b.query.UpdateStreamResponse\"\x00\x30\x01\x12\x46\n\x07VStream\x12\x1a.binlogdata.VStreamRequest\x1a\x1b.binlogdata.VStreamResponse\"\x00\x30\x01\x42+Z)vitess.io/vitess/go/vt/proto/queryserviceb\x06proto3') , - dependencies=[query__pb2.DESCRIPTOR,]) + dependencies=[query__pb2.DESCRIPTOR,binlogdata__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z)vitess.io/vitess/go/vt/proto/queryservice')) +DESCRIPTOR._options = None _QUERY = _descriptor.ServiceDescriptor( name='Query', full_name='queryservice.Query', file=DESCRIPTOR, index=0, - options=None, 
- serialized_start=50, - serialized_end=1625, + serialized_options=None, + serialized_start=68, + serialized_end=1715, methods=[ _descriptor.MethodDescriptor( name='Execute', @@ -48,7 +48,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._EXECUTEREQUEST, output_type=query__pb2._EXECUTERESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='ExecuteBatch', @@ -57,7 +57,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._EXECUTEBATCHREQUEST, output_type=query__pb2._EXECUTEBATCHRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='StreamExecute', @@ -66,7 +66,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._STREAMEXECUTEREQUEST, output_type=query__pb2._STREAMEXECUTERESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='Begin', @@ -75,7 +75,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._BEGINREQUEST, output_type=query__pb2._BEGINRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='Commit', @@ -84,7 +84,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._COMMITREQUEST, output_type=query__pb2._COMMITRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='Rollback', @@ -93,7 +93,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._ROLLBACKREQUEST, output_type=query__pb2._ROLLBACKRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='Prepare', @@ -102,7 +102,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._PREPAREREQUEST, output_type=query__pb2._PREPARERESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='CommitPrepared', @@ -111,7 +111,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._COMMITPREPAREDREQUEST, output_type=query__pb2._COMMITPREPAREDRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='RollbackPrepared', @@ -120,7 +120,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._ROLLBACKPREPAREDREQUEST, output_type=query__pb2._ROLLBACKPREPAREDRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='CreateTransaction', @@ -129,7 +129,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._CREATETRANSACTIONREQUEST, output_type=query__pb2._CREATETRANSACTIONRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='StartCommit', @@ -138,7 +138,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._STARTCOMMITREQUEST, output_type=query__pb2._STARTCOMMITRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='SetRollback', @@ -147,7 +147,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._SETROLLBACKREQUEST, output_type=query__pb2._SETROLLBACKRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='ConcludeTransaction', @@ -156,7 +156,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._CONCLUDETRANSACTIONREQUEST, 
output_type=query__pb2._CONCLUDETRANSACTIONRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='ReadTransaction', @@ -165,7 +165,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._READTRANSACTIONREQUEST, output_type=query__pb2._READTRANSACTIONRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='BeginExecute', @@ -174,7 +174,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._BEGINEXECUTEREQUEST, output_type=query__pb2._BEGINEXECUTERESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='BeginExecuteBatch', @@ -183,7 +183,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._BEGINEXECUTEBATCHREQUEST, output_type=query__pb2._BEGINEXECUTEBATCHRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='MessageStream', @@ -192,7 +192,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._MESSAGESTREAMREQUEST, output_type=query__pb2._MESSAGESTREAMRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='MessageAck', @@ -201,7 +201,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._MESSAGEACKREQUEST, output_type=query__pb2._MESSAGEACKRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='SplitQuery', @@ -210,7 +210,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._SPLITQUERYREQUEST, output_type=query__pb2._SPLITQUERYRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='StreamHealth', @@ -219,7 +219,7 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._STREAMHEALTHREQUEST, output_type=query__pb2._STREAMHEALTHRESPONSE, - options=None, + serialized_options=None, ), _descriptor.MethodDescriptor( name='UpdateStream', @@ -228,7 +228,16 @@ _QUERY = _descriptor.ServiceDescriptor( containing_service=None, input_type=query__pb2._UPDATESTREAMREQUEST, output_type=query__pb2._UPDATESTREAMRESPONSE, - options=None, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='VStream', + full_name='queryservice.Query.VStream', + index=21, + containing_service=None, + input_type=binlogdata__pb2._VSTREAMREQUEST, + output_type=binlogdata__pb2._VSTREAMRESPONSE, + serialized_options=None, ), ]) _sym_db.RegisterServiceDescriptor(_QUERY) diff --git a/py/vtproto/queryservice_pb2_grpc.py b/py/vtproto/queryservice_pb2_grpc.py index 3fc203d463..61adf9b42b 100644 --- a/py/vtproto/queryservice_pb2_grpc.py +++ b/py/vtproto/queryservice_pb2_grpc.py @@ -1,6 +1,7 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
import grpc +import binlogdata_pb2 as binlogdata__pb2 import query_pb2 as query__pb2 @@ -119,6 +120,11 @@ class QueryStub(object): request_serializer=query__pb2.UpdateStreamRequest.SerializeToString, response_deserializer=query__pb2.UpdateStreamResponse.FromString, ) + self.VStream = channel.unary_stream( + '/queryservice.Query/VStream', + request_serializer=binlogdata__pb2.VStreamRequest.SerializeToString, + response_deserializer=binlogdata__pb2.VStreamResponse.FromString, + ) class QueryServicer(object): @@ -279,6 +285,13 @@ class QueryServicer(object): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def VStream(self, request, context): + """VStream streams vreplication events. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_QueryServicer_to_server(servicer, server): rpc_method_handlers = { @@ -387,6 +400,11 @@ def add_QueryServicer_to_server(servicer, server): request_deserializer=query__pb2.UpdateStreamRequest.FromString, response_serializer=query__pb2.UpdateStreamResponse.SerializeToString, ), + 'VStream': grpc.unary_stream_rpc_method_handler( + servicer.VStream, + request_deserializer=binlogdata__pb2.VStreamRequest.FromString, + response_serializer=binlogdata__pb2.VStreamResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'queryservice.Query', rpc_method_handlers) diff --git a/py/vtproto/tabletmanagerdata_pb2.py b/py/vtproto/tabletmanagerdata_pb2.py index a728e5b07f..62f4a97b3b 100644 --- a/py/vtproto/tabletmanagerdata_pb2.py +++ b/py/vtproto/tabletmanagerdata_pb2.py @@ -23,7 +23,7 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='tabletmanagerdata.proto', package='tabletmanagerdata', syntax='proto3', - serialized_pb=_b('\n\x17tabletmanagerdata.proto\x12\x11tabletmanagerdata\x1a\x0bquery.proto\x1a\x0etopodata.proto\x1a\x15replicationdata.proto\x1a\rlogutil.proto\"\x93\x01\n\x0fTableDefinition\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06schema\x18\x02 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x03 \x03(\t\x12\x1b\n\x13primary_key_columns\x18\x04 \x03(\t\x12\x0c\n\x04type\x18\x05 \x01(\t\x12\x13\n\x0b\x64\x61ta_length\x18\x06 \x01(\x04\x12\x11\n\trow_count\x18\x07 \x01(\x04\"{\n\x10SchemaDefinition\x12\x17\n\x0f\x64\x61tabase_schema\x18\x01 \x01(\t\x12=\n\x11table_definitions\x18\x02 \x03(\x0b\x32\".tabletmanagerdata.TableDefinition\x12\x0f\n\x07version\x18\x03 \x01(\t\"\x8b\x01\n\x12SchemaChangeResult\x12:\n\rbefore_schema\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x02 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\xc1\x01\n\x0eUserPermission\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x19\n\x11password_checksum\x18\x03 \x01(\x04\x12\x45\n\nprivileges\x18\x04 \x03(\x0b\x32\x31.tabletmanagerdata.UserPermission.PrivilegesEntry\x1a\x31\n\x0fPrivilegesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xae\x01\n\x0c\x44\x62Permission\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\n\n\x02\x64\x62\x18\x02 \x01(\t\x12\x0c\n\x04user\x18\x03 \x01(\t\x12\x43\n\nprivileges\x18\x04 \x03(\x0b\x32/.tabletmanagerdata.DbPermission.PrivilegesEntry\x1a\x31\n\x0fPrivilegesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x83\x01\n\x0bPermissions\x12;\n\x10user_permissions\x18\x01 
\x03(\x0b\x32!.tabletmanagerdata.UserPermission\x12\x37\n\x0e\x64\x62_permissions\x18\x02 \x03(\x0b\x32\x1f.tabletmanagerdata.DbPermission\"\x1e\n\x0bPingRequest\x12\x0f\n\x07payload\x18\x01 \x01(\t\"\x1f\n\x0cPingResponse\x12\x0f\n\x07payload\x18\x01 \x01(\t\" \n\x0cSleepRequest\x12\x10\n\x08\x64uration\x18\x01 \x01(\x03\"\x0f\n\rSleepResponse\"\xaf\x01\n\x12\x45xecuteHookRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nparameters\x18\x02 \x03(\t\x12\x46\n\textra_env\x18\x03 \x03(\x0b\x32\x33.tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry\x1a/\n\rExtraEnvEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"J\n\x13\x45xecuteHookResponse\x12\x13\n\x0b\x65xit_status\x18\x01 \x01(\x03\x12\x0e\n\x06stdout\x18\x02 \x01(\t\x12\x0e\n\x06stderr\x18\x03 \x01(\t\"Q\n\x10GetSchemaRequest\x12\x0e\n\x06tables\x18\x01 \x03(\t\x12\x15\n\rinclude_views\x18\x02 \x01(\x08\x12\x16\n\x0e\x65xclude_tables\x18\x03 \x03(\t\"S\n\x11GetSchemaResponse\x12>\n\x11schema_definition\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\x17\n\x15GetPermissionsRequest\"M\n\x16GetPermissionsResponse\x12\x33\n\x0bpermissions\x18\x01 \x01(\x0b\x32\x1e.tabletmanagerdata.Permissions\"\x14\n\x12SetReadOnlyRequest\"\x15\n\x13SetReadOnlyResponse\"\x15\n\x13SetReadWriteRequest\"\x16\n\x14SetReadWriteResponse\">\n\x11\x43hangeTypeRequest\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\"\x14\n\x12\x43hangeTypeResponse\"\x15\n\x13RefreshStateRequest\"\x16\n\x14RefreshStateResponse\"\x17\n\x15RunHealthCheckRequest\"\x18\n\x16RunHealthCheckResponse\"+\n\x18IgnoreHealthErrorRequest\x12\x0f\n\x07pattern\x18\x01 \x01(\t\"\x1b\n\x19IgnoreHealthErrorResponse\",\n\x13ReloadSchemaRequest\x12\x15\n\rwait_position\x18\x01 \x01(\t\"\x16\n\x14ReloadSchemaResponse\")\n\x16PreflightSchemaRequest\x12\x0f\n\x07\x63hanges\x18\x01 \x03(\t\"X\n\x17PreflightSchemaResponse\x12=\n\x0e\x63hange_results\x18\x01 \x03(\x0b\x32%.tabletmanagerdata.SchemaChangeResult\"\xc2\x01\n\x12\x41pplySchemaRequest\x12\x0b\n\x03sql\x18\x01 \x01(\t\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12\x19\n\x11\x61llow_replication\x18\x03 \x01(\x08\x12:\n\rbefore_schema\x18\x04 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x05 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\x8c\x01\n\x13\x41pplySchemaResponse\x12:\n\rbefore_schema\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x02 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"|\n\x18\x45xecuteFetchAsDbaRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x10\n\x08max_rows\x18\x03 \x01(\x04\x12\x17\n\x0f\x64isable_binlogs\x18\x04 \x01(\x08\x12\x15\n\rreload_schema\x18\x05 \x01(\x08\"?\n\x19\x45xecuteFetchAsDbaResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"h\n\x1d\x45xecuteFetchAsAllPrivsRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x10\n\x08max_rows\x18\x03 \x01(\x04\x12\x15\n\rreload_schema\x18\x04 \x01(\x08\"D\n\x1e\x45xecuteFetchAsAllPrivsResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\";\n\x18\x45xecuteFetchAsAppRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x10\n\x08max_rows\x18\x02 \x01(\x04\"?\n\x19\x45xecuteFetchAsAppResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\x14\n\x12SlaveStatusRequest\">\n\x13SlaveStatusResponse\x12\'\n\x06status\x18\x01 
\x01(\x0b\x32\x17.replicationdata.Status\"\x17\n\x15MasterPositionRequest\"*\n\x16MasterPositionResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x12\n\x10StopSlaveRequest\"\x13\n\x11StopSlaveResponse\"A\n\x17StopSlaveMinimumRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x14\n\x0cwait_timeout\x18\x02 \x01(\x03\",\n\x18StopSlaveMinimumResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x13\n\x11StartSlaveRequest\"\x14\n\x12StartSlaveResponse\"8\n!TabletExternallyReparentedRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"$\n\"TabletExternallyReparentedResponse\" \n\x1eTabletExternallyElectedRequest\"!\n\x1fTabletExternallyElectedResponse\"\x12\n\x10GetSlavesRequest\"\"\n\x11GetSlavesResponse\x12\r\n\x05\x61\x64\x64rs\x18\x01 \x03(\t\"\x19\n\x17ResetReplicationRequest\"\x1a\n\x18ResetReplicationResponse\"(\n\x17VReplicationExecRequest\x12\r\n\x05query\x18\x01 \x01(\t\">\n\x18VReplicationExecResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"=\n\x1dVReplicationWaitForPosRequest\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x10\n\x08position\x18\x02 \x01(\t\" \n\x1eVReplicationWaitForPosResponse\"\x13\n\x11InitMasterRequest\"&\n\x12InitMasterResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x99\x01\n\x1ePopulateReparentJournalRequest\x12\x17\n\x0ftime_created_ns\x18\x01 \x01(\x03\x12\x13\n\x0b\x61\x63tion_name\x18\x02 \x01(\t\x12+\n\x0cmaster_alias\x18\x03 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x1c\n\x14replication_position\x18\x04 \x01(\t\"!\n\x1fPopulateReparentJournalResponse\"p\n\x10InitSlaveRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x1c\n\x14replication_position\x18\x02 \x01(\t\x12\x17\n\x0ftime_created_ns\x18\x03 \x01(\x03\"\x13\n\x11InitSlaveResponse\"\x15\n\x13\x44\x65moteMasterRequest\"(\n\x14\x44\x65moteMasterResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"3\n\x1fPromoteSlaveWhenCaughtUpRequest\x12\x10\n\x08position\x18\x01 \x01(\t\"4\n PromoteSlaveWhenCaughtUpResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x19\n\x17SlaveWasPromotedRequest\"\x1a\n\x18SlaveWasPromotedResponse\"m\n\x10SetMasterRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x17\n\x0ftime_created_ns\x18\x02 \x01(\x03\x12\x19\n\x11\x66orce_start_slave\x18\x03 \x01(\x08\"\x13\n\x11SetMasterResponse\"A\n\x18SlaveWasRestartedRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"\x1b\n\x19SlaveWasRestartedResponse\"$\n\"StopReplicationAndGetStatusRequest\"N\n#StopReplicationAndGetStatusResponse\x12\'\n\x06status\x18\x01 \x01(\x0b\x32\x17.replicationdata.Status\"\x15\n\x13PromoteSlaveRequest\"(\n\x14PromoteSlaveResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"$\n\rBackupRequest\x12\x13\n\x0b\x63oncurrency\x18\x01 \x01(\x03\"/\n\x0e\x42\x61\x63kupResponse\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.logutil.Event\"\x1a\n\x18RestoreFromBackupRequest\":\n\x19RestoreFromBackupResponse\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.logutil.EventB0Z.vitess.io/vitess/go/vt/proto/tabletmanagerdatab\x06proto3') + serialized_pb=_b('\n\x17tabletmanagerdata.proto\x12\x11tabletmanagerdata\x1a\x0bquery.proto\x1a\x0etopodata.proto\x1a\x15replicationdata.proto\x1a\rlogutil.proto\"\x93\x01\n\x0fTableDefinition\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06schema\x18\x02 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x03 \x03(\t\x12\x1b\n\x13primary_key_columns\x18\x04 \x03(\t\x12\x0c\n\x04type\x18\x05 \x01(\t\x12\x13\n\x0b\x64\x61ta_length\x18\x06 \x01(\x04\x12\x11\n\trow_count\x18\x07 
\x01(\x04\"{\n\x10SchemaDefinition\x12\x17\n\x0f\x64\x61tabase_schema\x18\x01 \x01(\t\x12=\n\x11table_definitions\x18\x02 \x03(\x0b\x32\".tabletmanagerdata.TableDefinition\x12\x0f\n\x07version\x18\x03 \x01(\t\"\x8b\x01\n\x12SchemaChangeResult\x12:\n\rbefore_schema\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x02 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\xc1\x01\n\x0eUserPermission\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x19\n\x11password_checksum\x18\x03 \x01(\x04\x12\x45\n\nprivileges\x18\x04 \x03(\x0b\x32\x31.tabletmanagerdata.UserPermission.PrivilegesEntry\x1a\x31\n\x0fPrivilegesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xae\x01\n\x0c\x44\x62Permission\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\n\n\x02\x64\x62\x18\x02 \x01(\t\x12\x0c\n\x04user\x18\x03 \x01(\t\x12\x43\n\nprivileges\x18\x04 \x03(\x0b\x32/.tabletmanagerdata.DbPermission.PrivilegesEntry\x1a\x31\n\x0fPrivilegesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x83\x01\n\x0bPermissions\x12;\n\x10user_permissions\x18\x01 \x03(\x0b\x32!.tabletmanagerdata.UserPermission\x12\x37\n\x0e\x64\x62_permissions\x18\x02 \x03(\x0b\x32\x1f.tabletmanagerdata.DbPermission\"\x1e\n\x0bPingRequest\x12\x0f\n\x07payload\x18\x01 \x01(\t\"\x1f\n\x0cPingResponse\x12\x0f\n\x07payload\x18\x01 \x01(\t\" \n\x0cSleepRequest\x12\x10\n\x08\x64uration\x18\x01 \x01(\x03\"\x0f\n\rSleepResponse\"\xaf\x01\n\x12\x45xecuteHookRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nparameters\x18\x02 \x03(\t\x12\x46\n\textra_env\x18\x03 \x03(\x0b\x32\x33.tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry\x1a/\n\rExtraEnvEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"J\n\x13\x45xecuteHookResponse\x12\x13\n\x0b\x65xit_status\x18\x01 \x01(\x03\x12\x0e\n\x06stdout\x18\x02 \x01(\t\x12\x0e\n\x06stderr\x18\x03 \x01(\t\"Q\n\x10GetSchemaRequest\x12\x0e\n\x06tables\x18\x01 \x03(\t\x12\x15\n\rinclude_views\x18\x02 \x01(\x08\x12\x16\n\x0e\x65xclude_tables\x18\x03 \x03(\t\"S\n\x11GetSchemaResponse\x12>\n\x11schema_definition\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\x17\n\x15GetPermissionsRequest\"M\n\x16GetPermissionsResponse\x12\x33\n\x0bpermissions\x18\x01 \x01(\x0b\x32\x1e.tabletmanagerdata.Permissions\"\x14\n\x12SetReadOnlyRequest\"\x15\n\x13SetReadOnlyResponse\"\x15\n\x13SetReadWriteRequest\"\x16\n\x14SetReadWriteResponse\">\n\x11\x43hangeTypeRequest\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\"\x14\n\x12\x43hangeTypeResponse\"\x15\n\x13RefreshStateRequest\"\x16\n\x14RefreshStateResponse\"\x17\n\x15RunHealthCheckRequest\"\x18\n\x16RunHealthCheckResponse\"+\n\x18IgnoreHealthErrorRequest\x12\x0f\n\x07pattern\x18\x01 \x01(\t\"\x1b\n\x19IgnoreHealthErrorResponse\",\n\x13ReloadSchemaRequest\x12\x15\n\rwait_position\x18\x01 \x01(\t\"\x16\n\x14ReloadSchemaResponse\")\n\x16PreflightSchemaRequest\x12\x0f\n\x07\x63hanges\x18\x01 \x03(\t\"X\n\x17PreflightSchemaResponse\x12=\n\x0e\x63hange_results\x18\x01 \x03(\x0b\x32%.tabletmanagerdata.SchemaChangeResult\"\xc2\x01\n\x12\x41pplySchemaRequest\x12\x0b\n\x03sql\x18\x01 \x01(\t\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12\x19\n\x11\x61llow_replication\x18\x03 \x01(\x08\x12:\n\rbefore_schema\x18\x04 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x05 
\x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\x8c\x01\n\x13\x41pplySchemaResponse\x12:\n\rbefore_schema\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x02 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\x13\n\x11LockTablesRequest\"\x14\n\x12LockTablesResponse\"\x15\n\x13UnlockTablesRequest\"\x16\n\x14UnlockTablesResponse\"|\n\x18\x45xecuteFetchAsDbaRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x10\n\x08max_rows\x18\x03 \x01(\x04\x12\x17\n\x0f\x64isable_binlogs\x18\x04 \x01(\x08\x12\x15\n\rreload_schema\x18\x05 \x01(\x08\"?\n\x19\x45xecuteFetchAsDbaResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"h\n\x1d\x45xecuteFetchAsAllPrivsRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x10\n\x08max_rows\x18\x03 \x01(\x04\x12\x15\n\rreload_schema\x18\x04 \x01(\x08\"D\n\x1e\x45xecuteFetchAsAllPrivsResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\";\n\x18\x45xecuteFetchAsAppRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x10\n\x08max_rows\x18\x02 \x01(\x04\"?\n\x19\x45xecuteFetchAsAppResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\x14\n\x12SlaveStatusRequest\">\n\x13SlaveStatusResponse\x12\'\n\x06status\x18\x01 \x01(\x0b\x32\x17.replicationdata.Status\"\x17\n\x15MasterPositionRequest\"*\n\x16MasterPositionResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x12\n\x10StopSlaveRequest\"\x13\n\x11StopSlaveResponse\"A\n\x17StopSlaveMinimumRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x14\n\x0cwait_timeout\x18\x02 \x01(\x03\",\n\x18StopSlaveMinimumResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x13\n\x11StartSlaveRequest\"\x14\n\x12StartSlaveResponse\"E\n\x1bStartSlaveUntilAfterRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x14\n\x0cwait_timeout\x18\x02 \x01(\x03\"\x1e\n\x1cStartSlaveUntilAfterResponse\"8\n!TabletExternallyReparentedRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"$\n\"TabletExternallyReparentedResponse\" \n\x1eTabletExternallyElectedRequest\"!\n\x1fTabletExternallyElectedResponse\"\x12\n\x10GetSlavesRequest\"\"\n\x11GetSlavesResponse\x12\r\n\x05\x61\x64\x64rs\x18\x01 \x03(\t\"\x19\n\x17ResetReplicationRequest\"\x1a\n\x18ResetReplicationResponse\"(\n\x17VReplicationExecRequest\x12\r\n\x05query\x18\x01 \x01(\t\">\n\x18VReplicationExecResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"=\n\x1dVReplicationWaitForPosRequest\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x10\n\x08position\x18\x02 \x01(\t\" \n\x1eVReplicationWaitForPosResponse\"\x13\n\x11InitMasterRequest\"&\n\x12InitMasterResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x99\x01\n\x1ePopulateReparentJournalRequest\x12\x17\n\x0ftime_created_ns\x18\x01 \x01(\x03\x12\x13\n\x0b\x61\x63tion_name\x18\x02 \x01(\t\x12+\n\x0cmaster_alias\x18\x03 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x1c\n\x14replication_position\x18\x04 \x01(\t\"!\n\x1fPopulateReparentJournalResponse\"p\n\x10InitSlaveRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x1c\n\x14replication_position\x18\x02 \x01(\t\x12\x17\n\x0ftime_created_ns\x18\x03 \x01(\x03\"\x13\n\x11InitSlaveResponse\"\x15\n\x13\x44\x65moteMasterRequest\"(\n\x14\x44\x65moteMasterResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"3\n\x1fPromoteSlaveWhenCaughtUpRequest\x12\x10\n\x08position\x18\x01 \x01(\t\"4\n PromoteSlaveWhenCaughtUpResponse\x12\x10\n\x08position\x18\x01 
\x01(\t\"\x19\n\x17SlaveWasPromotedRequest\"\x1a\n\x18SlaveWasPromotedResponse\"m\n\x10SetMasterRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x17\n\x0ftime_created_ns\x18\x02 \x01(\x03\x12\x19\n\x11\x66orce_start_slave\x18\x03 \x01(\x08\"\x13\n\x11SetMasterResponse\"A\n\x18SlaveWasRestartedRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"\x1b\n\x19SlaveWasRestartedResponse\"$\n\"StopReplicationAndGetStatusRequest\"N\n#StopReplicationAndGetStatusResponse\x12\'\n\x06status\x18\x01 \x01(\x0b\x32\x17.replicationdata.Status\"\x15\n\x13PromoteSlaveRequest\"(\n\x14PromoteSlaveResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"$\n\rBackupRequest\x12\x13\n\x0b\x63oncurrency\x18\x01 \x01(\x03\"/\n\x0e\x42\x61\x63kupResponse\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.logutil.Event\"\x1a\n\x18RestoreFromBackupRequest\":\n\x19RestoreFromBackupResponse\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.logutil.EventB0Z.vitess.io/vitess/go/vt/proto/tabletmanagerdatab\x06proto3') , dependencies=[query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,replicationdata__pb2.DESCRIPTOR,logutil__pb2.DESCRIPTOR,]) @@ -1293,6 +1293,102 @@ _APPLYSCHEMARESPONSE = _descriptor.Descriptor( ) +_LOCKTABLESREQUEST = _descriptor.Descriptor( + name='LockTablesRequest', + full_name='tabletmanagerdata.LockTablesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2572, + serialized_end=2591, +) + + +_LOCKTABLESRESPONSE = _descriptor.Descriptor( + name='LockTablesResponse', + full_name='tabletmanagerdata.LockTablesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2593, + serialized_end=2613, +) + + +_UNLOCKTABLESREQUEST = _descriptor.Descriptor( + name='UnlockTablesRequest', + full_name='tabletmanagerdata.UnlockTablesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2615, + serialized_end=2636, +) + + +_UNLOCKTABLESRESPONSE = _descriptor.Descriptor( + name='UnlockTablesResponse', + full_name='tabletmanagerdata.UnlockTablesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2638, + serialized_end=2660, +) + + _EXECUTEFETCHASDBAREQUEST = _descriptor.Descriptor( name='ExecuteFetchAsDbaRequest', full_name='tabletmanagerdata.ExecuteFetchAsDbaRequest', @@ -1347,8 +1443,8 @@ _EXECUTEFETCHASDBAREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2572, - serialized_end=2696, + serialized_start=2662, + serialized_end=2786, ) @@ -1378,8 +1474,8 @@ _EXECUTEFETCHASDBARESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2698, - serialized_end=2761, + serialized_start=2788, + serialized_end=2851, ) @@ -1430,8 +1526,8 @@ _EXECUTEFETCHASALLPRIVSREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ 
], - serialized_start=2763, - serialized_end=2867, + serialized_start=2853, + serialized_end=2957, ) @@ -1461,8 +1557,8 @@ _EXECUTEFETCHASALLPRIVSRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2869, - serialized_end=2937, + serialized_start=2959, + serialized_end=3027, ) @@ -1499,8 +1595,8 @@ _EXECUTEFETCHASAPPREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2939, - serialized_end=2998, + serialized_start=3029, + serialized_end=3088, ) @@ -1530,8 +1626,8 @@ _EXECUTEFETCHASAPPRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3000, - serialized_end=3063, + serialized_start=3090, + serialized_end=3153, ) @@ -1554,8 +1650,8 @@ _SLAVESTATUSREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3065, - serialized_end=3085, + serialized_start=3155, + serialized_end=3175, ) @@ -1585,8 +1681,8 @@ _SLAVESTATUSRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3087, - serialized_end=3149, + serialized_start=3177, + serialized_end=3239, ) @@ -1609,8 +1705,8 @@ _MASTERPOSITIONREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3151, - serialized_end=3174, + serialized_start=3241, + serialized_end=3264, ) @@ -1640,8 +1736,8 @@ _MASTERPOSITIONRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3176, - serialized_end=3218, + serialized_start=3266, + serialized_end=3308, ) @@ -1664,8 +1760,8 @@ _STOPSLAVEREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3220, - serialized_end=3238, + serialized_start=3310, + serialized_end=3328, ) @@ -1688,8 +1784,8 @@ _STOPSLAVERESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3240, - serialized_end=3259, + serialized_start=3330, + serialized_end=3349, ) @@ -1726,8 +1822,8 @@ _STOPSLAVEMINIMUMREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3261, - serialized_end=3326, + serialized_start=3351, + serialized_end=3416, ) @@ -1757,8 +1853,8 @@ _STOPSLAVEMINIMUMRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3328, - serialized_end=3372, + serialized_start=3418, + serialized_end=3462, ) @@ -1781,8 +1877,8 @@ _STARTSLAVEREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3374, - serialized_end=3393, + serialized_start=3464, + serialized_end=3483, ) @@ -1805,8 +1901,70 @@ _STARTSLAVERESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3395, - serialized_end=3415, + serialized_start=3485, + serialized_end=3505, +) + + +_STARTSLAVEUNTILAFTERREQUEST = _descriptor.Descriptor( + name='StartSlaveUntilAfterRequest', + full_name='tabletmanagerdata.StartSlaveUntilAfterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='position', full_name='tabletmanagerdata.StartSlaveUntilAfterRequest.position', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='wait_timeout', full_name='tabletmanagerdata.StartSlaveUntilAfterRequest.wait_timeout', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3507, + serialized_end=3576, +) + + +_STARTSLAVEUNTILAFTERRESPONSE = _descriptor.Descriptor( + name='StartSlaveUntilAfterResponse', + full_name='tabletmanagerdata.StartSlaveUntilAfterResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3578, + serialized_end=3608, ) @@ -1836,8 +1994,8 @@ _TABLETEXTERNALLYREPARENTEDREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3417, - serialized_end=3473, + serialized_start=3610, + serialized_end=3666, ) @@ -1860,8 +2018,8 @@ _TABLETEXTERNALLYREPARENTEDRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3475, - serialized_end=3511, + serialized_start=3668, + serialized_end=3704, ) @@ -1884,8 +2042,8 @@ _TABLETEXTERNALLYELECTEDREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3513, - serialized_end=3545, + serialized_start=3706, + serialized_end=3738, ) @@ -1908,8 +2066,8 @@ _TABLETEXTERNALLYELECTEDRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3547, - serialized_end=3580, + serialized_start=3740, + serialized_end=3773, ) @@ -1932,8 +2090,8 @@ _GETSLAVESREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3582, - serialized_end=3600, + serialized_start=3775, + serialized_end=3793, ) @@ -1963,8 +2121,8 @@ _GETSLAVESRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3602, - serialized_end=3636, + serialized_start=3795, + serialized_end=3829, ) @@ -1987,8 +2145,8 @@ _RESETREPLICATIONREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3638, - serialized_end=3663, + serialized_start=3831, + serialized_end=3856, ) @@ -2011,8 +2169,8 @@ _RESETREPLICATIONRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3665, - serialized_end=3691, + serialized_start=3858, + serialized_end=3884, ) @@ -2042,8 +2200,8 @@ _VREPLICATIONEXECREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3693, - serialized_end=3733, + serialized_start=3886, + serialized_end=3926, ) @@ -2073,8 +2231,8 @@ _VREPLICATIONEXECRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3735, - serialized_end=3797, + serialized_start=3928, + serialized_end=3990, ) @@ -2111,8 +2269,8 @@ _VREPLICATIONWAITFORPOSREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3799, - serialized_end=3860, + serialized_start=3992, + serialized_end=4053, ) @@ -2135,8 +2293,8 @@ _VREPLICATIONWAITFORPOSRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3862, - serialized_end=3894, + serialized_start=4055, + serialized_end=4087, ) @@ -2159,8 +2317,8 @@ _INITMASTERREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3896, - serialized_end=3915, + serialized_start=4089, + serialized_end=4108, ) @@ -2190,8 +2348,8 @@ _INITMASTERRESPONSE = _descriptor.Descriptor( 
extension_ranges=[], oneofs=[ ], - serialized_start=3917, - serialized_end=3955, + serialized_start=4110, + serialized_end=4148, ) @@ -2242,8 +2400,8 @@ _POPULATEREPARENTJOURNALREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=3958, - serialized_end=4111, + serialized_start=4151, + serialized_end=4304, ) @@ -2266,8 +2424,8 @@ _POPULATEREPARENTJOURNALRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4113, - serialized_end=4146, + serialized_start=4306, + serialized_end=4339, ) @@ -2311,8 +2469,8 @@ _INITSLAVEREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4148, - serialized_end=4260, + serialized_start=4341, + serialized_end=4453, ) @@ -2335,8 +2493,8 @@ _INITSLAVERESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4262, - serialized_end=4281, + serialized_start=4455, + serialized_end=4474, ) @@ -2359,8 +2517,8 @@ _DEMOTEMASTERREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4283, - serialized_end=4304, + serialized_start=4476, + serialized_end=4497, ) @@ -2390,8 +2548,8 @@ _DEMOTEMASTERRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4306, - serialized_end=4346, + serialized_start=4499, + serialized_end=4539, ) @@ -2421,8 +2579,8 @@ _PROMOTESLAVEWHENCAUGHTUPREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4348, - serialized_end=4399, + serialized_start=4541, + serialized_end=4592, ) @@ -2452,8 +2610,8 @@ _PROMOTESLAVEWHENCAUGHTUPRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4401, - serialized_end=4453, + serialized_start=4594, + serialized_end=4646, ) @@ -2476,8 +2634,8 @@ _SLAVEWASPROMOTEDREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4455, - serialized_end=4480, + serialized_start=4648, + serialized_end=4673, ) @@ -2500,8 +2658,8 @@ _SLAVEWASPROMOTEDRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4482, - serialized_end=4508, + serialized_start=4675, + serialized_end=4701, ) @@ -2545,8 +2703,8 @@ _SETMASTERREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4510, - serialized_end=4619, + serialized_start=4703, + serialized_end=4812, ) @@ -2569,8 +2727,8 @@ _SETMASTERRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4621, - serialized_end=4640, + serialized_start=4814, + serialized_end=4833, ) @@ -2600,8 +2758,8 @@ _SLAVEWASRESTARTEDREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4642, - serialized_end=4707, + serialized_start=4835, + serialized_end=4900, ) @@ -2624,8 +2782,8 @@ _SLAVEWASRESTARTEDRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4709, - serialized_end=4736, + serialized_start=4902, + serialized_end=4929, ) @@ -2648,8 +2806,8 @@ _STOPREPLICATIONANDGETSTATUSREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4738, - serialized_end=4774, + serialized_start=4931, + serialized_end=4967, ) @@ -2679,8 +2837,8 @@ _STOPREPLICATIONANDGETSTATUSRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4776, - serialized_end=4854, + serialized_start=4969, + serialized_end=5047, ) @@ -2703,8 +2861,8 @@ _PROMOTESLAVEREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4856, - 
serialized_end=4877, + serialized_start=5049, + serialized_end=5070, ) @@ -2734,8 +2892,8 @@ _PROMOTESLAVERESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4879, - serialized_end=4919, + serialized_start=5072, + serialized_end=5112, ) @@ -2765,8 +2923,8 @@ _BACKUPREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4921, - serialized_end=4957, + serialized_start=5114, + serialized_end=5150, ) @@ -2796,8 +2954,8 @@ _BACKUPRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=4959, - serialized_end=5006, + serialized_start=5152, + serialized_end=5199, ) @@ -2820,8 +2978,8 @@ _RESTOREFROMBACKUPREQUEST = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5008, - serialized_end=5034, + serialized_start=5201, + serialized_end=5227, ) @@ -2851,8 +3009,8 @@ _RESTOREFROMBACKUPRESPONSE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=5036, - serialized_end=5094, + serialized_start=5229, + serialized_end=5287, ) _SCHEMADEFINITION.fields_by_name['table_definitions'].message_type = _TABLEDEFINITION @@ -2920,6 +3078,10 @@ DESCRIPTOR.message_types_by_name['PreflightSchemaRequest'] = _PREFLIGHTSCHEMAREQ DESCRIPTOR.message_types_by_name['PreflightSchemaResponse'] = _PREFLIGHTSCHEMARESPONSE DESCRIPTOR.message_types_by_name['ApplySchemaRequest'] = _APPLYSCHEMAREQUEST DESCRIPTOR.message_types_by_name['ApplySchemaResponse'] = _APPLYSCHEMARESPONSE +DESCRIPTOR.message_types_by_name['LockTablesRequest'] = _LOCKTABLESREQUEST +DESCRIPTOR.message_types_by_name['LockTablesResponse'] = _LOCKTABLESRESPONSE +DESCRIPTOR.message_types_by_name['UnlockTablesRequest'] = _UNLOCKTABLESREQUEST +DESCRIPTOR.message_types_by_name['UnlockTablesResponse'] = _UNLOCKTABLESRESPONSE DESCRIPTOR.message_types_by_name['ExecuteFetchAsDbaRequest'] = _EXECUTEFETCHASDBAREQUEST DESCRIPTOR.message_types_by_name['ExecuteFetchAsDbaResponse'] = _EXECUTEFETCHASDBARESPONSE DESCRIPTOR.message_types_by_name['ExecuteFetchAsAllPrivsRequest'] = _EXECUTEFETCHASALLPRIVSREQUEST @@ -2936,6 +3098,8 @@ DESCRIPTOR.message_types_by_name['StopSlaveMinimumRequest'] = _STOPSLAVEMINIMUMR DESCRIPTOR.message_types_by_name['StopSlaveMinimumResponse'] = _STOPSLAVEMINIMUMRESPONSE DESCRIPTOR.message_types_by_name['StartSlaveRequest'] = _STARTSLAVEREQUEST DESCRIPTOR.message_types_by_name['StartSlaveResponse'] = _STARTSLAVERESPONSE +DESCRIPTOR.message_types_by_name['StartSlaveUntilAfterRequest'] = _STARTSLAVEUNTILAFTERREQUEST +DESCRIPTOR.message_types_by_name['StartSlaveUntilAfterResponse'] = _STARTSLAVEUNTILAFTERRESPONSE DESCRIPTOR.message_types_by_name['TabletExternallyReparentedRequest'] = _TABLETEXTERNALLYREPARENTEDREQUEST DESCRIPTOR.message_types_by_name['TabletExternallyReparentedResponse'] = _TABLETEXTERNALLYREPARENTEDRESPONSE DESCRIPTOR.message_types_by_name['TabletExternallyElectedRequest'] = _TABLETEXTERNALLYELECTEDREQUEST @@ -3236,6 +3400,34 @@ ApplySchemaResponse = _reflection.GeneratedProtocolMessageType('ApplySchemaRespo )) _sym_db.RegisterMessage(ApplySchemaResponse) +LockTablesRequest = _reflection.GeneratedProtocolMessageType('LockTablesRequest', (_message.Message,), dict( + DESCRIPTOR = _LOCKTABLESREQUEST, + __module__ = 'tabletmanagerdata_pb2' + # @@protoc_insertion_point(class_scope:tabletmanagerdata.LockTablesRequest) + )) +_sym_db.RegisterMessage(LockTablesRequest) + +LockTablesResponse = _reflection.GeneratedProtocolMessageType('LockTablesResponse', (_message.Message,), dict( + DESCRIPTOR = 
_LOCKTABLESRESPONSE, + __module__ = 'tabletmanagerdata_pb2' + # @@protoc_insertion_point(class_scope:tabletmanagerdata.LockTablesResponse) + )) +_sym_db.RegisterMessage(LockTablesResponse) + +UnlockTablesRequest = _reflection.GeneratedProtocolMessageType('UnlockTablesRequest', (_message.Message,), dict( + DESCRIPTOR = _UNLOCKTABLESREQUEST, + __module__ = 'tabletmanagerdata_pb2' + # @@protoc_insertion_point(class_scope:tabletmanagerdata.UnlockTablesRequest) + )) +_sym_db.RegisterMessage(UnlockTablesRequest) + +UnlockTablesResponse = _reflection.GeneratedProtocolMessageType('UnlockTablesResponse', (_message.Message,), dict( + DESCRIPTOR = _UNLOCKTABLESRESPONSE, + __module__ = 'tabletmanagerdata_pb2' + # @@protoc_insertion_point(class_scope:tabletmanagerdata.UnlockTablesResponse) + )) +_sym_db.RegisterMessage(UnlockTablesResponse) + ExecuteFetchAsDbaRequest = _reflection.GeneratedProtocolMessageType('ExecuteFetchAsDbaRequest', (_message.Message,), dict( DESCRIPTOR = _EXECUTEFETCHASDBAREQUEST, __module__ = 'tabletmanagerdata_pb2' @@ -3348,6 +3540,20 @@ StartSlaveResponse = _reflection.GeneratedProtocolMessageType('StartSlaveRespons )) _sym_db.RegisterMessage(StartSlaveResponse) +StartSlaveUntilAfterRequest = _reflection.GeneratedProtocolMessageType('StartSlaveUntilAfterRequest', (_message.Message,), dict( + DESCRIPTOR = _STARTSLAVEUNTILAFTERREQUEST, + __module__ = 'tabletmanagerdata_pb2' + # @@protoc_insertion_point(class_scope:tabletmanagerdata.StartSlaveUntilAfterRequest) + )) +_sym_db.RegisterMessage(StartSlaveUntilAfterRequest) + +StartSlaveUntilAfterResponse = _reflection.GeneratedProtocolMessageType('StartSlaveUntilAfterResponse', (_message.Message,), dict( + DESCRIPTOR = _STARTSLAVEUNTILAFTERRESPONSE, + __module__ = 'tabletmanagerdata_pb2' + # @@protoc_insertion_point(class_scope:tabletmanagerdata.StartSlaveUntilAfterResponse) + )) +_sym_db.RegisterMessage(StartSlaveUntilAfterResponse) + TabletExternallyReparentedRequest = _reflection.GeneratedProtocolMessageType('TabletExternallyReparentedRequest', (_message.Message,), dict( DESCRIPTOR = _TABLETEXTERNALLYREPARENTEDREQUEST, __module__ = 'tabletmanagerdata_pb2' diff --git a/py/vtproto/tabletmanagerservice_pb2.py b/py/vtproto/tabletmanagerservice_pb2.py index 515efe7858..33d62d43bd 100644 --- a/py/vtproto/tabletmanagerservice_pb2.py +++ b/py/vtproto/tabletmanagerservice_pb2.py @@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='tabletmanagerservice.proto', package='tabletmanagerservice', syntax='proto3', - serialized_pb=_b('\n\x1atabletmanagerservice.proto\x12\x14tabletmanagerservice\x1a\x17tabletmanagerdata.proto2\x95!\n\rTabletManager\x12I\n\x04Ping\x12\x1e.tabletmanagerdata.PingRequest\x1a\x1f.tabletmanagerdata.PingResponse\"\x00\x12L\n\x05Sleep\x12\x1f.tabletmanagerdata.SleepRequest\x1a 
.tabletmanagerdata.SleepResponse\"\x00\x12^\n\x0b\x45xecuteHook\x12%.tabletmanagerdata.ExecuteHookRequest\x1a&.tabletmanagerdata.ExecuteHookResponse\"\x00\x12X\n\tGetSchema\x12#.tabletmanagerdata.GetSchemaRequest\x1a$.tabletmanagerdata.GetSchemaResponse\"\x00\x12g\n\x0eGetPermissions\x12(.tabletmanagerdata.GetPermissionsRequest\x1a).tabletmanagerdata.GetPermissionsResponse\"\x00\x12^\n\x0bSetReadOnly\x12%.tabletmanagerdata.SetReadOnlyRequest\x1a&.tabletmanagerdata.SetReadOnlyResponse\"\x00\x12\x61\n\x0cSetReadWrite\x12&.tabletmanagerdata.SetReadWriteRequest\x1a\'.tabletmanagerdata.SetReadWriteResponse\"\x00\x12[\n\nChangeType\x12$.tabletmanagerdata.ChangeTypeRequest\x1a%.tabletmanagerdata.ChangeTypeResponse\"\x00\x12\x61\n\x0cRefreshState\x12&.tabletmanagerdata.RefreshStateRequest\x1a\'.tabletmanagerdata.RefreshStateResponse\"\x00\x12g\n\x0eRunHealthCheck\x12(.tabletmanagerdata.RunHealthCheckRequest\x1a).tabletmanagerdata.RunHealthCheckResponse\"\x00\x12p\n\x11IgnoreHealthError\x12+.tabletmanagerdata.IgnoreHealthErrorRequest\x1a,.tabletmanagerdata.IgnoreHealthErrorResponse\"\x00\x12\x61\n\x0cReloadSchema\x12&.tabletmanagerdata.ReloadSchemaRequest\x1a\'.tabletmanagerdata.ReloadSchemaResponse\"\x00\x12j\n\x0fPreflightSchema\x12).tabletmanagerdata.PreflightSchemaRequest\x1a*.tabletmanagerdata.PreflightSchemaResponse\"\x00\x12^\n\x0b\x41pplySchema\x12%.tabletmanagerdata.ApplySchemaRequest\x1a&.tabletmanagerdata.ApplySchemaResponse\"\x00\x12p\n\x11\x45xecuteFetchAsDba\x12+.tabletmanagerdata.ExecuteFetchAsDbaRequest\x1a,.tabletmanagerdata.ExecuteFetchAsDbaResponse\"\x00\x12\x7f\n\x16\x45xecuteFetchAsAllPrivs\x12\x30.tabletmanagerdata.ExecuteFetchAsAllPrivsRequest\x1a\x31.tabletmanagerdata.ExecuteFetchAsAllPrivsResponse\"\x00\x12p\n\x11\x45xecuteFetchAsApp\x12+.tabletmanagerdata.ExecuteFetchAsAppRequest\x1a,.tabletmanagerdata.ExecuteFetchAsAppResponse\"\x00\x12^\n\x0bSlaveStatus\x12%.tabletmanagerdata.SlaveStatusRequest\x1a&.tabletmanagerdata.SlaveStatusResponse\"\x00\x12g\n\x0eMasterPosition\x12(.tabletmanagerdata.MasterPositionRequest\x1a).tabletmanagerdata.MasterPositionResponse\"\x00\x12X\n\tStopSlave\x12#.tabletmanagerdata.StopSlaveRequest\x1a$.tabletmanagerdata.StopSlaveResponse\"\x00\x12m\n\x10StopSlaveMinimum\x12*.tabletmanagerdata.StopSlaveMinimumRequest\x1a+.tabletmanagerdata.StopSlaveMinimumResponse\"\x00\x12[\n\nStartSlave\x12$.tabletmanagerdata.StartSlaveRequest\x1a%.tabletmanagerdata.StartSlaveResponse\"\x00\x12\x8b\x01\n\x1aTabletExternallyReparented\x12\x34.tabletmanagerdata.TabletExternallyReparentedRequest\x1a\x35.tabletmanagerdata.TabletExternallyReparentedResponse\"\x00\x12\x82\x01\n\x17TabletExternallyElected\x12\x31.tabletmanagerdata.TabletExternallyElectedRequest\x1a\x32.tabletmanagerdata.TabletExternallyElectedResponse\"\x00\x12X\n\tGetSlaves\x12#.tabletmanagerdata.GetSlavesRequest\x1a$.tabletmanagerdata.GetSlavesResponse\"\x00\x12m\n\x10VReplicationExec\x12*.tabletmanagerdata.VReplicationExecRequest\x1a+.tabletmanagerdata.VReplicationExecResponse\"\x00\x12\x7f\n\x16VReplicationWaitForPos\x12\x30.tabletmanagerdata.VReplicationWaitForPosRequest\x1a\x31.tabletmanagerdata.VReplicationWaitForPosResponse\"\x00\x12m\n\x10ResetReplication\x12*.tabletmanagerdata.ResetReplicationRequest\x1a+.tabletmanagerdata.ResetReplicationResponse\"\x00\x12[\n\nInitMaster\x12$.tabletmanagerdata.InitMasterRequest\x1a%.tabletmanagerdata.InitMasterResponse\"\x00\x12\x82\x01\n\x17PopulateReparentJournal\x12\x31.tabletmanagerdata.PopulateReparentJournalRequest\x1a\x32.tabletmanagerdata.PopulateReparen
tJournalResponse\"\x00\x12X\n\tInitSlave\x12#.tabletmanagerdata.InitSlaveRequest\x1a$.tabletmanagerdata.InitSlaveResponse\"\x00\x12\x61\n\x0c\x44\x65moteMaster\x12&.tabletmanagerdata.DemoteMasterRequest\x1a\'.tabletmanagerdata.DemoteMasterResponse\"\x00\x12\x85\x01\n\x18PromoteSlaveWhenCaughtUp\x12\x32.tabletmanagerdata.PromoteSlaveWhenCaughtUpRequest\x1a\x33.tabletmanagerdata.PromoteSlaveWhenCaughtUpResponse\"\x00\x12m\n\x10SlaveWasPromoted\x12*.tabletmanagerdata.SlaveWasPromotedRequest\x1a+.tabletmanagerdata.SlaveWasPromotedResponse\"\x00\x12X\n\tSetMaster\x12#.tabletmanagerdata.SetMasterRequest\x1a$.tabletmanagerdata.SetMasterResponse\"\x00\x12p\n\x11SlaveWasRestarted\x12+.tabletmanagerdata.SlaveWasRestartedRequest\x1a,.tabletmanagerdata.SlaveWasRestartedResponse\"\x00\x12\x8e\x01\n\x1bStopReplicationAndGetStatus\x12\x35.tabletmanagerdata.StopReplicationAndGetStatusRequest\x1a\x36.tabletmanagerdata.StopReplicationAndGetStatusResponse\"\x00\x12\x61\n\x0cPromoteSlave\x12&.tabletmanagerdata.PromoteSlaveRequest\x1a\'.tabletmanagerdata.PromoteSlaveResponse\"\x00\x12Q\n\x06\x42\x61\x63kup\x12 .tabletmanagerdata.BackupRequest\x1a!.tabletmanagerdata.BackupResponse\"\x00\x30\x01\x12r\n\x11RestoreFromBackup\x12+.tabletmanagerdata.RestoreFromBackupRequest\x1a,.tabletmanagerdata.RestoreFromBackupResponse\"\x00\x30\x01\x42\x33Z1vitess.io/vitess/go/vt/proto/tabletmanagerserviceb\x06proto3') + serialized_pb=_b('\n\x1atabletmanagerservice.proto\x12\x14tabletmanagerservice\x1a\x17tabletmanagerdata.proto2\xd0#\n\rTabletManager\x12I\n\x04Ping\x12\x1e.tabletmanagerdata.PingRequest\x1a\x1f.tabletmanagerdata.PingResponse\"\x00\x12L\n\x05Sleep\x12\x1f.tabletmanagerdata.SleepRequest\x1a .tabletmanagerdata.SleepResponse\"\x00\x12^\n\x0b\x45xecuteHook\x12%.tabletmanagerdata.ExecuteHookRequest\x1a&.tabletmanagerdata.ExecuteHookResponse\"\x00\x12X\n\tGetSchema\x12#.tabletmanagerdata.GetSchemaRequest\x1a$.tabletmanagerdata.GetSchemaResponse\"\x00\x12g\n\x0eGetPermissions\x12(.tabletmanagerdata.GetPermissionsRequest\x1a).tabletmanagerdata.GetPermissionsResponse\"\x00\x12^\n\x0bSetReadOnly\x12%.tabletmanagerdata.SetReadOnlyRequest\x1a&.tabletmanagerdata.SetReadOnlyResponse\"\x00\x12\x61\n\x0cSetReadWrite\x12&.tabletmanagerdata.SetReadWriteRequest\x1a\'.tabletmanagerdata.SetReadWriteResponse\"\x00\x12[\n\nChangeType\x12$.tabletmanagerdata.ChangeTypeRequest\x1a%.tabletmanagerdata.ChangeTypeResponse\"\x00\x12\x61\n\x0cRefreshState\x12&.tabletmanagerdata.RefreshStateRequest\x1a\'.tabletmanagerdata.RefreshStateResponse\"\x00\x12g\n\x0eRunHealthCheck\x12(.tabletmanagerdata.RunHealthCheckRequest\x1a).tabletmanagerdata.RunHealthCheckResponse\"\x00\x12p\n\x11IgnoreHealthError\x12+.tabletmanagerdata.IgnoreHealthErrorRequest\x1a,.tabletmanagerdata.IgnoreHealthErrorResponse\"\x00\x12\x61\n\x0cReloadSchema\x12&.tabletmanagerdata.ReloadSchemaRequest\x1a\'.tabletmanagerdata.ReloadSchemaResponse\"\x00\x12j\n\x0fPreflightSchema\x12).tabletmanagerdata.PreflightSchemaRequest\x1a*.tabletmanagerdata.PreflightSchemaResponse\"\x00\x12^\n\x0b\x41pplySchema\x12%.tabletmanagerdata.ApplySchemaRequest\x1a&.tabletmanagerdata.ApplySchemaResponse\"\x00\x12[\n\nLockTables\x12$.tabletmanagerdata.LockTablesRequest\x1a%.tabletmanagerdata.LockTablesResponse\"\x00\x12\x61\n\x0cUnlockTables\x12&.tabletmanagerdata.UnlockTablesRequest\x1a\'.tabletmanagerdata.UnlockTablesResponse\"\x00\x12p\n\x11\x45xecuteFetchAsDba\x12+.tabletmanagerdata.ExecuteFetchAsDbaRequest\x1a,.tabletmanagerdata.ExecuteFetchAsDbaResponse\"\x00\x12\x7f\n\x16\x45xecuteFetchAsAllPrivs\x1
2\x30.tabletmanagerdata.ExecuteFetchAsAllPrivsRequest\x1a\x31.tabletmanagerdata.ExecuteFetchAsAllPrivsResponse\"\x00\x12p\n\x11\x45xecuteFetchAsApp\x12+.tabletmanagerdata.ExecuteFetchAsAppRequest\x1a,.tabletmanagerdata.ExecuteFetchAsAppResponse\"\x00\x12^\n\x0bSlaveStatus\x12%.tabletmanagerdata.SlaveStatusRequest\x1a&.tabletmanagerdata.SlaveStatusResponse\"\x00\x12g\n\x0eMasterPosition\x12(.tabletmanagerdata.MasterPositionRequest\x1a).tabletmanagerdata.MasterPositionResponse\"\x00\x12X\n\tStopSlave\x12#.tabletmanagerdata.StopSlaveRequest\x1a$.tabletmanagerdata.StopSlaveResponse\"\x00\x12m\n\x10StopSlaveMinimum\x12*.tabletmanagerdata.StopSlaveMinimumRequest\x1a+.tabletmanagerdata.StopSlaveMinimumResponse\"\x00\x12[\n\nStartSlave\x12$.tabletmanagerdata.StartSlaveRequest\x1a%.tabletmanagerdata.StartSlaveResponse\"\x00\x12y\n\x14StartSlaveUntilAfter\x12..tabletmanagerdata.StartSlaveUntilAfterRequest\x1a/.tabletmanagerdata.StartSlaveUntilAfterResponse\"\x00\x12\x8b\x01\n\x1aTabletExternallyReparented\x12\x34.tabletmanagerdata.TabletExternallyReparentedRequest\x1a\x35.tabletmanagerdata.TabletExternallyReparentedResponse\"\x00\x12\x82\x01\n\x17TabletExternallyElected\x12\x31.tabletmanagerdata.TabletExternallyElectedRequest\x1a\x32.tabletmanagerdata.TabletExternallyElectedResponse\"\x00\x12X\n\tGetSlaves\x12#.tabletmanagerdata.GetSlavesRequest\x1a$.tabletmanagerdata.GetSlavesResponse\"\x00\x12m\n\x10VReplicationExec\x12*.tabletmanagerdata.VReplicationExecRequest\x1a+.tabletmanagerdata.VReplicationExecResponse\"\x00\x12\x7f\n\x16VReplicationWaitForPos\x12\x30.tabletmanagerdata.VReplicationWaitForPosRequest\x1a\x31.tabletmanagerdata.VReplicationWaitForPosResponse\"\x00\x12m\n\x10ResetReplication\x12*.tabletmanagerdata.ResetReplicationRequest\x1a+.tabletmanagerdata.ResetReplicationResponse\"\x00\x12[\n\nInitMaster\x12$.tabletmanagerdata.InitMasterRequest\x1a%.tabletmanagerdata.InitMasterResponse\"\x00\x12\x82\x01\n\x17PopulateReparentJournal\x12\x31.tabletmanagerdata.PopulateReparentJournalRequest\x1a\x32.tabletmanagerdata.PopulateReparentJournalResponse\"\x00\x12X\n\tInitSlave\x12#.tabletmanagerdata.InitSlaveRequest\x1a$.tabletmanagerdata.InitSlaveResponse\"\x00\x12\x61\n\x0c\x44\x65moteMaster\x12&.tabletmanagerdata.DemoteMasterRequest\x1a\'.tabletmanagerdata.DemoteMasterResponse\"\x00\x12\x85\x01\n\x18PromoteSlaveWhenCaughtUp\x12\x32.tabletmanagerdata.PromoteSlaveWhenCaughtUpRequest\x1a\x33.tabletmanagerdata.PromoteSlaveWhenCaughtUpResponse\"\x00\x12m\n\x10SlaveWasPromoted\x12*.tabletmanagerdata.SlaveWasPromotedRequest\x1a+.tabletmanagerdata.SlaveWasPromotedResponse\"\x00\x12X\n\tSetMaster\x12#.tabletmanagerdata.SetMasterRequest\x1a$.tabletmanagerdata.SetMasterResponse\"\x00\x12p\n\x11SlaveWasRestarted\x12+.tabletmanagerdata.SlaveWasRestartedRequest\x1a,.tabletmanagerdata.SlaveWasRestartedResponse\"\x00\x12\x8e\x01\n\x1bStopReplicationAndGetStatus\x12\x35.tabletmanagerdata.StopReplicationAndGetStatusRequest\x1a\x36.tabletmanagerdata.StopReplicationAndGetStatusResponse\"\x00\x12\x61\n\x0cPromoteSlave\x12&.tabletmanagerdata.PromoteSlaveRequest\x1a\'.tabletmanagerdata.PromoteSlaveResponse\"\x00\x12Q\n\x06\x42\x61\x63kup\x12 .tabletmanagerdata.BackupRequest\x1a!.tabletmanagerdata.BackupResponse\"\x00\x30\x01\x12r\n\x11RestoreFromBackup\x12+.tabletmanagerdata.RestoreFromBackupRequest\x1a,.tabletmanagerdata.RestoreFromBackupResponse\"\x00\x30\x01\x42\x33Z1vitess.io/vitess/go/vt/proto/tabletmanagerserviceb\x06proto3') , dependencies=[tabletmanagerdata__pb2.DESCRIPTOR,]) @@ -39,7 +39,7 @@ _TABLETMANAGER = 
_descriptor.ServiceDescriptor( index=0, options=None, serialized_start=78, - serialized_end=4323, + serialized_end=4638, methods=[ _descriptor.MethodDescriptor( name='Ping', @@ -167,10 +167,28 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( output_type=tabletmanagerdata__pb2._APPLYSCHEMARESPONSE, options=None, ), + _descriptor.MethodDescriptor( + name='LockTables', + full_name='tabletmanagerservice.TabletManager.LockTables', + index=14, + containing_service=None, + input_type=tabletmanagerdata__pb2._LOCKTABLESREQUEST, + output_type=tabletmanagerdata__pb2._LOCKTABLESRESPONSE, + options=None, + ), + _descriptor.MethodDescriptor( + name='UnlockTables', + full_name='tabletmanagerservice.TabletManager.UnlockTables', + index=15, + containing_service=None, + input_type=tabletmanagerdata__pb2._UNLOCKTABLESREQUEST, + output_type=tabletmanagerdata__pb2._UNLOCKTABLESRESPONSE, + options=None, + ), _descriptor.MethodDescriptor( name='ExecuteFetchAsDba', full_name='tabletmanagerservice.TabletManager.ExecuteFetchAsDba', - index=14, + index=16, containing_service=None, input_type=tabletmanagerdata__pb2._EXECUTEFETCHASDBAREQUEST, output_type=tabletmanagerdata__pb2._EXECUTEFETCHASDBARESPONSE, @@ -179,7 +197,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='ExecuteFetchAsAllPrivs', full_name='tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs', - index=15, + index=17, containing_service=None, input_type=tabletmanagerdata__pb2._EXECUTEFETCHASALLPRIVSREQUEST, output_type=tabletmanagerdata__pb2._EXECUTEFETCHASALLPRIVSRESPONSE, @@ -188,7 +206,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='ExecuteFetchAsApp', full_name='tabletmanagerservice.TabletManager.ExecuteFetchAsApp', - index=16, + index=18, containing_service=None, input_type=tabletmanagerdata__pb2._EXECUTEFETCHASAPPREQUEST, output_type=tabletmanagerdata__pb2._EXECUTEFETCHASAPPRESPONSE, @@ -197,7 +215,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='SlaveStatus', full_name='tabletmanagerservice.TabletManager.SlaveStatus', - index=17, + index=19, containing_service=None, input_type=tabletmanagerdata__pb2._SLAVESTATUSREQUEST, output_type=tabletmanagerdata__pb2._SLAVESTATUSRESPONSE, @@ -206,7 +224,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='MasterPosition', full_name='tabletmanagerservice.TabletManager.MasterPosition', - index=18, + index=20, containing_service=None, input_type=tabletmanagerdata__pb2._MASTERPOSITIONREQUEST, output_type=tabletmanagerdata__pb2._MASTERPOSITIONRESPONSE, @@ -215,7 +233,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='StopSlave', full_name='tabletmanagerservice.TabletManager.StopSlave', - index=19, + index=21, containing_service=None, input_type=tabletmanagerdata__pb2._STOPSLAVEREQUEST, output_type=tabletmanagerdata__pb2._STOPSLAVERESPONSE, @@ -224,7 +242,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='StopSlaveMinimum', full_name='tabletmanagerservice.TabletManager.StopSlaveMinimum', - index=20, + index=22, containing_service=None, input_type=tabletmanagerdata__pb2._STOPSLAVEMINIMUMREQUEST, output_type=tabletmanagerdata__pb2._STOPSLAVEMINIMUMRESPONSE, @@ -233,16 +251,25 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='StartSlave', full_name='tabletmanagerservice.TabletManager.StartSlave', - index=21, + index=23, containing_service=None, 
input_type=tabletmanagerdata__pb2._STARTSLAVEREQUEST, output_type=tabletmanagerdata__pb2._STARTSLAVERESPONSE, options=None, ), + _descriptor.MethodDescriptor( + name='StartSlaveUntilAfter', + full_name='tabletmanagerservice.TabletManager.StartSlaveUntilAfter', + index=24, + containing_service=None, + input_type=tabletmanagerdata__pb2._STARTSLAVEUNTILAFTERREQUEST, + output_type=tabletmanagerdata__pb2._STARTSLAVEUNTILAFTERRESPONSE, + options=None, + ), _descriptor.MethodDescriptor( name='TabletExternallyReparented', full_name='tabletmanagerservice.TabletManager.TabletExternallyReparented', - index=22, + index=25, containing_service=None, input_type=tabletmanagerdata__pb2._TABLETEXTERNALLYREPARENTEDREQUEST, output_type=tabletmanagerdata__pb2._TABLETEXTERNALLYREPARENTEDRESPONSE, @@ -251,7 +278,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='TabletExternallyElected', full_name='tabletmanagerservice.TabletManager.TabletExternallyElected', - index=23, + index=26, containing_service=None, input_type=tabletmanagerdata__pb2._TABLETEXTERNALLYELECTEDREQUEST, output_type=tabletmanagerdata__pb2._TABLETEXTERNALLYELECTEDRESPONSE, @@ -260,7 +287,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='GetSlaves', full_name='tabletmanagerservice.TabletManager.GetSlaves', - index=24, + index=27, containing_service=None, input_type=tabletmanagerdata__pb2._GETSLAVESREQUEST, output_type=tabletmanagerdata__pb2._GETSLAVESRESPONSE, @@ -269,7 +296,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='VReplicationExec', full_name='tabletmanagerservice.TabletManager.VReplicationExec', - index=25, + index=28, containing_service=None, input_type=tabletmanagerdata__pb2._VREPLICATIONEXECREQUEST, output_type=tabletmanagerdata__pb2._VREPLICATIONEXECRESPONSE, @@ -278,7 +305,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='VReplicationWaitForPos', full_name='tabletmanagerservice.TabletManager.VReplicationWaitForPos', - index=26, + index=29, containing_service=None, input_type=tabletmanagerdata__pb2._VREPLICATIONWAITFORPOSREQUEST, output_type=tabletmanagerdata__pb2._VREPLICATIONWAITFORPOSRESPONSE, @@ -287,7 +314,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='ResetReplication', full_name='tabletmanagerservice.TabletManager.ResetReplication', - index=27, + index=30, containing_service=None, input_type=tabletmanagerdata__pb2._RESETREPLICATIONREQUEST, output_type=tabletmanagerdata__pb2._RESETREPLICATIONRESPONSE, @@ -296,7 +323,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='InitMaster', full_name='tabletmanagerservice.TabletManager.InitMaster', - index=28, + index=31, containing_service=None, input_type=tabletmanagerdata__pb2._INITMASTERREQUEST, output_type=tabletmanagerdata__pb2._INITMASTERRESPONSE, @@ -305,7 +332,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='PopulateReparentJournal', full_name='tabletmanagerservice.TabletManager.PopulateReparentJournal', - index=29, + index=32, containing_service=None, input_type=tabletmanagerdata__pb2._POPULATEREPARENTJOURNALREQUEST, output_type=tabletmanagerdata__pb2._POPULATEREPARENTJOURNALRESPONSE, @@ -314,7 +341,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='InitSlave', full_name='tabletmanagerservice.TabletManager.InitSlave', - index=30, + index=33, containing_service=None, 
input_type=tabletmanagerdata__pb2._INITSLAVEREQUEST, output_type=tabletmanagerdata__pb2._INITSLAVERESPONSE, @@ -323,7 +350,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='DemoteMaster', full_name='tabletmanagerservice.TabletManager.DemoteMaster', - index=31, + index=34, containing_service=None, input_type=tabletmanagerdata__pb2._DEMOTEMASTERREQUEST, output_type=tabletmanagerdata__pb2._DEMOTEMASTERRESPONSE, @@ -332,7 +359,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='PromoteSlaveWhenCaughtUp', full_name='tabletmanagerservice.TabletManager.PromoteSlaveWhenCaughtUp', - index=32, + index=35, containing_service=None, input_type=tabletmanagerdata__pb2._PROMOTESLAVEWHENCAUGHTUPREQUEST, output_type=tabletmanagerdata__pb2._PROMOTESLAVEWHENCAUGHTUPRESPONSE, @@ -341,7 +368,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='SlaveWasPromoted', full_name='tabletmanagerservice.TabletManager.SlaveWasPromoted', - index=33, + index=36, containing_service=None, input_type=tabletmanagerdata__pb2._SLAVEWASPROMOTEDREQUEST, output_type=tabletmanagerdata__pb2._SLAVEWASPROMOTEDRESPONSE, @@ -350,7 +377,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='SetMaster', full_name='tabletmanagerservice.TabletManager.SetMaster', - index=34, + index=37, containing_service=None, input_type=tabletmanagerdata__pb2._SETMASTERREQUEST, output_type=tabletmanagerdata__pb2._SETMASTERRESPONSE, @@ -359,7 +386,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='SlaveWasRestarted', full_name='tabletmanagerservice.TabletManager.SlaveWasRestarted', - index=35, + index=38, containing_service=None, input_type=tabletmanagerdata__pb2._SLAVEWASRESTARTEDREQUEST, output_type=tabletmanagerdata__pb2._SLAVEWASRESTARTEDRESPONSE, @@ -368,7 +395,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='StopReplicationAndGetStatus', full_name='tabletmanagerservice.TabletManager.StopReplicationAndGetStatus', - index=36, + index=39, containing_service=None, input_type=tabletmanagerdata__pb2._STOPREPLICATIONANDGETSTATUSREQUEST, output_type=tabletmanagerdata__pb2._STOPREPLICATIONANDGETSTATUSRESPONSE, @@ -377,7 +404,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='PromoteSlave', full_name='tabletmanagerservice.TabletManager.PromoteSlave', - index=37, + index=40, containing_service=None, input_type=tabletmanagerdata__pb2._PROMOTESLAVEREQUEST, output_type=tabletmanagerdata__pb2._PROMOTESLAVERESPONSE, @@ -386,7 +413,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='Backup', full_name='tabletmanagerservice.TabletManager.Backup', - index=38, + index=41, containing_service=None, input_type=tabletmanagerdata__pb2._BACKUPREQUEST, output_type=tabletmanagerdata__pb2._BACKUPRESPONSE, @@ -395,7 +422,7 @@ _TABLETMANAGER = _descriptor.ServiceDescriptor( _descriptor.MethodDescriptor( name='RestoreFromBackup', full_name='tabletmanagerservice.TabletManager.RestoreFromBackup', - index=39, + index=42, containing_service=None, input_type=tabletmanagerdata__pb2._RESTOREFROMBACKUPREQUEST, output_type=tabletmanagerdata__pb2._RESTOREFROMBACKUPRESPONSE, diff --git a/py/vtproto/tabletmanagerservice_pb2_grpc.py b/py/vtproto/tabletmanagerservice_pb2_grpc.py index f21854be9f..e559823327 100644 --- a/py/vtproto/tabletmanagerservice_pb2_grpc.py +++ 
b/py/vtproto/tabletmanagerservice_pb2_grpc.py @@ -87,6 +87,16 @@ class TabletManagerStub(object): request_serializer=tabletmanagerdata__pb2.ApplySchemaRequest.SerializeToString, response_deserializer=tabletmanagerdata__pb2.ApplySchemaResponse.FromString, ) + self.LockTables = channel.unary_unary( + '/tabletmanagerservice.TabletManager/LockTables', + request_serializer=tabletmanagerdata__pb2.LockTablesRequest.SerializeToString, + response_deserializer=tabletmanagerdata__pb2.LockTablesResponse.FromString, + ) + self.UnlockTables = channel.unary_unary( + '/tabletmanagerservice.TabletManager/UnlockTables', + request_serializer=tabletmanagerdata__pb2.UnlockTablesRequest.SerializeToString, + response_deserializer=tabletmanagerdata__pb2.UnlockTablesResponse.FromString, + ) self.ExecuteFetchAsDba = channel.unary_unary( '/tabletmanagerservice.TabletManager/ExecuteFetchAsDba', request_serializer=tabletmanagerdata__pb2.ExecuteFetchAsDbaRequest.SerializeToString, @@ -127,6 +137,11 @@ class TabletManagerStub(object): request_serializer=tabletmanagerdata__pb2.StartSlaveRequest.SerializeToString, response_deserializer=tabletmanagerdata__pb2.StartSlaveResponse.FromString, ) + self.StartSlaveUntilAfter = channel.unary_unary( + '/tabletmanagerservice.TabletManager/StartSlaveUntilAfter', + request_serializer=tabletmanagerdata__pb2.StartSlaveUntilAfterRequest.SerializeToString, + response_deserializer=tabletmanagerdata__pb2.StartSlaveUntilAfterResponse.FromString, + ) self.TabletExternallyReparented = channel.unary_unary( '/tabletmanagerservice.TabletManager/TabletExternallyReparented', request_serializer=tabletmanagerdata__pb2.TabletExternallyReparentedRequest.SerializeToString, @@ -327,6 +342,20 @@ class TabletManagerServicer(object): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def LockTables(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UnlockTables(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def ExecuteFetchAsDba(self, request, context): # missing associated documentation comment in .proto file pass @@ -388,6 +417,14 @@ class TabletManagerServicer(object): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def StartSlaveUntilAfter(self, request, context): + """StartSlave starts the mysql replication until and including + the provided position + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def TabletExternallyReparented(self, request, context): """TabletExternallyReparented tells a tablet that its underlying MySQL is currently the master. 
It is only used in environments (tabletmanagerdata.such as Vitess+MoB) @@ -619,6 +656,16 @@ def add_TabletManagerServicer_to_server(servicer, server): request_deserializer=tabletmanagerdata__pb2.ApplySchemaRequest.FromString, response_serializer=tabletmanagerdata__pb2.ApplySchemaResponse.SerializeToString, ), + 'LockTables': grpc.unary_unary_rpc_method_handler( + servicer.LockTables, + request_deserializer=tabletmanagerdata__pb2.LockTablesRequest.FromString, + response_serializer=tabletmanagerdata__pb2.LockTablesResponse.SerializeToString, + ), + 'UnlockTables': grpc.unary_unary_rpc_method_handler( + servicer.UnlockTables, + request_deserializer=tabletmanagerdata__pb2.UnlockTablesRequest.FromString, + response_serializer=tabletmanagerdata__pb2.UnlockTablesResponse.SerializeToString, + ), 'ExecuteFetchAsDba': grpc.unary_unary_rpc_method_handler( servicer.ExecuteFetchAsDba, request_deserializer=tabletmanagerdata__pb2.ExecuteFetchAsDbaRequest.FromString, @@ -659,6 +706,11 @@ def add_TabletManagerServicer_to_server(servicer, server): request_deserializer=tabletmanagerdata__pb2.StartSlaveRequest.FromString, response_serializer=tabletmanagerdata__pb2.StartSlaveResponse.SerializeToString, ), + 'StartSlaveUntilAfter': grpc.unary_unary_rpc_method_handler( + servicer.StartSlaveUntilAfter, + request_deserializer=tabletmanagerdata__pb2.StartSlaveUntilAfterRequest.FromString, + response_serializer=tabletmanagerdata__pb2.StartSlaveUntilAfterResponse.SerializeToString, + ), 'TabletExternallyReparented': grpc.unary_unary_rpc_method_handler( servicer.TabletExternallyReparented, request_deserializer=tabletmanagerdata__pb2.TabletExternallyReparentedRequest.FromString, diff --git a/py/vtproto/topodata_pb2.py b/py/vtproto/topodata_pb2.py index e74204fad5..b048124834 100644 --- a/py/vtproto/topodata_pb2.py +++ b/py/vtproto/topodata_pb2.py @@ -8,7 +8,6 @@ from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -20,6 +19,7 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='topodata.proto', package='topodata', syntax='proto3', + serialized_options=_b('\n\017io.vitess.protoZ%vitess.io/vitess/go/vt/proto/topodata'), serialized_pb=_b('\n\x0etopodata.proto\x12\x08topodata\"&\n\x08KeyRange\x12\r\n\x05start\x18\x01 \x01(\x0c\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x0c\"(\n\x0bTabletAlias\x12\x0c\n\x04\x63\x65ll\x18\x01 \x01(\t\x12\x0b\n\x03uid\x18\x02 \x01(\r\"\xb6\x03\n\x06Tablet\x12$\n\x05\x61lias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12/\n\x08port_map\x18\x04 \x03(\x0b\x32\x1d.topodata.Tablet.PortMapEntry\x12\x10\n\x08keyspace\x18\x05 \x01(\t\x12\r\n\x05shard\x18\x06 \x01(\t\x12%\n\tkey_range\x18\x07 \x01(\x0b\x32\x12.topodata.KeyRange\x12\"\n\x04type\x18\x08 \x01(\x0e\x32\x14.topodata.TabletType\x12\x18\n\x10\x64\x62_name_override\x18\t \x01(\t\x12(\n\x04tags\x18\n \x03(\x0b\x32\x1a.topodata.Tablet.TagsEntry\x12\x16\n\x0emysql_hostname\x18\x0c \x01(\t\x12\x12\n\nmysql_port\x18\r \x01(\x05\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x03\x10\x04J\x04\x08\x0b\x10\x0c\"\xdb\x04\n\x05Shard\x12+\n\x0cmaster_alias\x18\x01 
\x01(\x0b\x32\x15.topodata.TabletAlias\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x30\n\x0cserved_types\x18\x03 \x03(\x0b\x32\x1a.topodata.Shard.ServedType\x12\x32\n\rsource_shards\x18\x04 \x03(\x0b\x32\x1b.topodata.Shard.SourceShard\x12\r\n\x05\x63\x65lls\x18\x05 \x03(\t\x12\x36\n\x0ftablet_controls\x18\x06 \x03(\x0b\x32\x1d.topodata.Shard.TabletControl\x1a\x46\n\nServedType\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x1ar\n\x0bSourceShard\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x10\n\x08keyspace\x18\x02 \x01(\t\x12\r\n\x05shard\x18\x03 \x01(\t\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x1a\x94\x01\n\rTabletControl\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x1d\n\x15\x64isable_query_service\x18\x03 \x01(\x08\x12\x1a\n\x12\x62lacklisted_tables\x18\x04 \x03(\t\x12\x0e\n\x06\x66rozen\x18\x05 \x01(\x08\"\xf5\x01\n\x08Keyspace\x12\x1c\n\x14sharding_column_name\x18\x01 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x02 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x33\n\x0cserved_froms\x18\x04 \x03(\x0b\x32\x1d.topodata.Keyspace.ServedFrom\x1aX\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x10\n\x08keyspace\x18\x03 \x01(\tJ\x04\x08\x03\x10\x04\"w\n\x10ShardReplication\x12.\n\x05nodes\x18\x01 \x03(\x0b\x32\x1f.topodata.ShardReplication.Node\x1a\x33\n\x04Node\x12+\n\x0ctablet_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"E\n\x0eShardReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\"\x9c\x03\n\x0bSrvKeyspace\x12;\n\npartitions\x18\x01 \x03(\x0b\x32\'.topodata.SrvKeyspace.KeyspacePartition\x12\x1c\n\x14sharding_column_name\x18\x02 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x03 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x35\n\x0bserved_from\x18\x04 \x03(\x0b\x32 .topodata.SrvKeyspace.ServedFrom\x1ar\n\x11KeyspacePartition\x12)\n\x0bserved_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x32\n\x10shard_references\x18\x02 \x03(\x0b\x32\x18.topodata.ShardReference\x1aI\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x10\n\x08keyspace\x18\x02 \x01(\tJ\x04\x08\x05\x10\x06\"@\n\x08\x43\x65llInfo\x12\x16\n\x0eserver_address\x18\x01 \x01(\t\x12\x0c\n\x04root\x18\x02 \x01(\t\x12\x0e\n\x06region\x18\x03 \x01(\t*2\n\x0eKeyspaceIdType\x12\t\n\x05UNSET\x10\x00\x12\n\n\x06UINT64\x10\x01\x12\t\n\x05\x42YTES\x10\x02*\x90\x01\n\nTabletType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07REPLICA\x10\x02\x12\n\n\x06RDONLY\x10\x03\x12\t\n\x05\x42\x41TCH\x10\x03\x12\t\n\x05SPARE\x10\x04\x12\x10\n\x0c\x45XPERIMENTAL\x10\x05\x12\n\n\x06\x42\x41\x43KUP\x10\x06\x12\x0b\n\x07RESTORE\x10\x07\x12\x0b\n\x07\x44RAINED\x10\x08\x1a\x02\x10\x01\x42\x38\n\x0fio.vitess.protoZ%vitess.io/vitess/go/vt/proto/topodatab\x06proto3') ) @@ -31,19 +31,19 @@ _KEYSPACEIDTYPE = _descriptor.EnumDescriptor( values=[ _descriptor.EnumValueDescriptor( name='UNSET', index=0, number=0, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='UINT64', index=1, number=1, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BYTES', index=2, number=2, - options=None, + serialized_options=None, type=None), ], containing_type=None, - options=None, + 
serialized_options=None, serialized_start=2078, serialized_end=2128, ) @@ -58,47 +58,47 @@ _TABLETTYPE = _descriptor.EnumDescriptor( values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN', index=0, number=0, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='MASTER', index=1, number=1, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='REPLICA', index=2, number=2, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='RDONLY', index=3, number=3, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BATCH', index=4, number=3, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='SPARE', index=5, number=4, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='EXPERIMENTAL', index=6, number=5, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BACKUP', index=7, number=6, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='RESTORE', index=8, number=7, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='DRAINED', index=9, number=8, - options=None, + serialized_options=None, type=None), ], containing_type=None, - options=_descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001')), + serialized_options=_b('\020\001'), serialized_start=2131, serialized_end=2275, ) @@ -134,21 +134,21 @@ _KEYRANGE = _descriptor.Descriptor( has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='end', full_name='topodata.KeyRange.end', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -172,21 +172,21 @@ _TABLETALIAS = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='uid', full_name='topodata.TabletAlias.uid', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -210,21 +210,21 @@ _TABLET_PORTMAPENTRY = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='topodata.Tablet.PortMapEntry.value', 
index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + serialized_options=_b('8\001'), is_extendable=False, syntax='proto3', extension_ranges=[], @@ -247,21 +247,21 @@ _TABLET_TAGSENTRY = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='topodata.Tablet.TagsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + serialized_options=_b('8\001'), is_extendable=False, syntax='proto3', extension_ranges=[], @@ -284,84 +284,84 @@ _TABLET = _descriptor.Descriptor( has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hostname', full_name='topodata.Tablet.hostname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='port_map', full_name='topodata.Tablet.port_map', index=2, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='keyspace', full_name='topodata.Tablet.keyspace', index=3, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='shard', full_name='topodata.Tablet.shard', index=4, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key_range', full_name='topodata.Tablet.key_range', index=5, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( 
name='type', full_name='topodata.Tablet.type', index=6, number=8, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='db_name_override', full_name='topodata.Tablet.db_name_override', index=7, number=9, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='topodata.Tablet.tags', index=8, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mysql_hostname', full_name='topodata.Tablet.mysql_hostname', index=9, number=12, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mysql_port', full_name='topodata.Tablet.mysql_port', index=10, number=13, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_TABLET_PORTMAPENTRY, _TABLET_TAGSENTRY, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -385,21 +385,21 @@ _SHARD_SERVEDTYPE = _descriptor.Descriptor( has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cells', full_name='topodata.Shard.ServedType.cells', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -422,42 +422,42 @@ _SHARD_SOURCESHARD = _descriptor.Descriptor( has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='keyspace', full_name='topodata.Shard.SourceShard.keyspace', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='shard', full_name='topodata.Shard.SourceShard.shard', 
index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key_range', full_name='topodata.Shard.SourceShard.key_range', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tables', full_name='topodata.Shard.SourceShard.tables', index=4, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -480,42 +480,42 @@ _SHARD_TABLETCONTROL = _descriptor.Descriptor( has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cells', full_name='topodata.Shard.TabletControl.cells', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='disable_query_service', full_name='topodata.Shard.TabletControl.disable_query_service', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='blacklisted_tables', full_name='topodata.Shard.TabletControl.blacklisted_tables', index=3, number=4, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='frozen', full_name='topodata.Shard.TabletControl.frozen', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -538,49 +538,49 @@ _SHARD = _descriptor.Descriptor( has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key_range', full_name='topodata.Shard.key_range', index=1, number=2, type=11, cpp_type=10, label=1, 
has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='served_types', full_name='topodata.Shard.served_types', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='source_shards', full_name='topodata.Shard.source_shards', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cells', full_name='topodata.Shard.cells', index=4, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tablet_controls', full_name='topodata.Shard.tablet_controls', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_SHARD_SERVEDTYPE, _SHARD_SOURCESHARD, _SHARD_TABLETCONTROL, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -604,28 +604,28 @@ _KEYSPACE_SERVEDFROM = _descriptor.Descriptor( has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cells', full_name='topodata.Keyspace.ServedFrom.cells', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='keyspace', full_name='topodata.Keyspace.ServedFrom.keyspace', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -648,28 +648,28 @@ _KEYSPACE = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='sharding_column_type', full_name='topodata.Keyspace.sharding_column_type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, 
default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='served_froms', full_name='topodata.Keyspace.served_froms', index=2, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_KEYSPACE_SERVEDFROM, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -693,14 +693,14 @@ _SHARDREPLICATION_NODE = _descriptor.Descriptor( has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -723,14 +723,14 @@ _SHARDREPLICATION = _descriptor.Descriptor( has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_SHARDREPLICATION_NODE, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -754,21 +754,21 @@ _SHARDREFERENCE = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key_range', full_name='topodata.ShardReference.key_range', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -792,21 +792,21 @@ _SRVKEYSPACE_KEYSPACEPARTITION = _descriptor.Descriptor( has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='shard_references', full_name='topodata.SrvKeyspace.KeyspacePartition.shard_references', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -829,21 +829,21 @@ _SRVKEYSPACE_SERVEDFROM = _descriptor.Descriptor( has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, 
file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='keyspace', full_name='topodata.SrvKeyspace.ServedFrom.keyspace', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -866,35 +866,35 @@ _SRVKEYSPACE = _descriptor.Descriptor( has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='sharding_column_name', full_name='topodata.SrvKeyspace.sharding_column_name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='sharding_column_type', full_name='topodata.SrvKeyspace.sharding_column_type', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='served_from', full_name='topodata.SrvKeyspace.served_from', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_SRVKEYSPACE_KEYSPACEPARTITION, _SRVKEYSPACE_SERVEDFROM, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -918,28 +918,28 @@ _CELLINFO = _descriptor.Descriptor( has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='root', full_name='topodata.CellInfo.root', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='region', full_name='topodata.CellInfo.region', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], @@ -1132,12 +1132,8 @@ CellInfo = _reflection.GeneratedProtocolMessageType('CellInfo', (_message.Messag 
_sym_db.RegisterMessage(CellInfo) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\017io.vitess.protoZ%vitess.io/vitess/go/vt/proto/topodata')) -_TABLETTYPE.has_options = True -_TABLETTYPE._options = _descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001')) -_TABLET_PORTMAPENTRY.has_options = True -_TABLET_PORTMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -_TABLET_TAGSENTRY.has_options = True -_TABLET_TAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +DESCRIPTOR._options = None +_TABLETTYPE._options = None +_TABLET_PORTMAPENTRY._options = None +_TABLET_TAGSENTRY._options = None # @@protoc_insertion_point(module_scope) diff --git a/test/TestingStrategy.md b/test/TestingStrategy.md index 234fe9bd94..bc742ffcae 100644 --- a/test/TestingStrategy.md +++ b/test/TestingStrategy.md @@ -36,7 +36,7 @@ Due to its constant nature, this is not an appropriate framework to test cluster These tests run more complicated setups, and take a lot more resources. They are meant to test end-to-end behaviors of the Vitess ecosystem, and complement the unit tests. -For instance, we test each RPC interaction independently (client to vtgate, vtgate to vttablet, vttablet to MySQL, see previous sections). But is is also good to have an end-to-end test that validates everything works together. +For instance, we test each RPC interaction independently (client to vtgate, vtgate to vttablet, vttablet to MySQL, see previous sections). But it is also good to have an end-to-end test that validates everything works together. These tests almost always launch a topology service, a few mysqld instances, a few vttablets, a vtctld process, a few vtgates, ... They use the real production processes, and real replication. This setup is mandatory for properly testing re-sharding, cluster operations, ... They all however run on the same machine, so they might be limited by the environment. diff --git a/test/base_sharding.py b/test/base_sharding.py index 3fcc92c1fb..acc6d3a6f3 100644 --- a/test/base_sharding.py +++ b/test/base_sharding.py @@ -383,7 +383,7 @@ class BaseShardingTest(object): auto_log=True, trap_output=True) for name in names: # The max should be set and have a non-zero value. - # We test only the the first field 'target_replication_lag_sec'. + # We test only the first field 'target_replication_lag_sec'. self.assertIn('| %s | target_replication_lag_sec:12345 ' % (name), stdout) # protobuf omits fields with a zero value in the text output.
self.assertNotIn('ignore_n_slowest_replicas', stdout) diff --git a/test/cluster/keytar/requirements.txt b/test/cluster/keytar/requirements.txt index 76649d9b72..694958b30d 100644 --- a/test/cluster/keytar/requirements.txt +++ b/test/cluster/keytar/requirements.txt @@ -1,2 +1,2 @@ Flask==0.12.3 -pyyaml==3.10 +pyyaml==4.2b1 diff --git a/test/config.json b/test/config.json index 2222f06285..d629184ec8 100644 --- a/test/config.json +++ b/test/config.json @@ -90,17 +90,6 @@ "worker_test" ] }, - "initial_sharding_l2vtgate": { - "File": "initial_sharding_l2vtgate.py", - "Args": [], - "Command": [], - "Manual": false, - "Shard": 2, - "RetryMax": 0, - "Tags": [ - "worker_test" - ] - }, "legacy_resharding": { "File": "legacy_resharding.py", "Args": [], @@ -426,17 +415,6 @@ "site_test" ] }, - "vtgatev2_l2vtgate": { - "File": "vtgatev2_l2vtgate_test.py", - "Args": [], - "Command": [], - "Manual": false, - "Shard": 1, - "RetryMax": 0, - "Tags": [ - "site_test" - ] - }, "vtgatev3": { "File": "vtgatev3_test.py", "Args": [], diff --git a/test/initial_sharding.py b/test/initial_sharding.py index e6aa872825..cb4fb3428b 100755 --- a/test/initial_sharding.py +++ b/test/initial_sharding.py @@ -35,16 +35,6 @@ import environment import tablet import utils -# use_l2vtgate is set if we want to use l2vtgate processes. -# We'll set them up to have: -# l2vtgate1: covers the initial shard, and -80 -# l2vtgate2: covers 80- -use_l2vtgate = False - -# the l2vtgate processes, if applicable -l2vtgate1 = None -l2vtgate2 = None - # initial shard, covers everything shard_master = tablet.Tablet() shard_replica = tablet.Tablet() @@ -217,8 +207,6 @@ index by_msg (msg) should_be_here=False) def test_resharding(self): - global l2vtgate1, l2vtgate2 - # create the keyspace with just one shard shard_master.init_tablet( 'replica', @@ -280,33 +268,12 @@ index by_msg (msg) # We must start vtgate after tablets are up, or else wait until 1min refresh # (that is the tablet_refresh_interval parameter for discovery gateway) # we want cache_ttl at zero so we re-read the topology for every test query. - if use_l2vtgate: - l2vtgate1 = utils.VtGate() - l2vtgate1.start(extra_args=['--enable_forwarding'], tablets= - [shard_master, shard_replica, shard_rdonly1]) - l2vtgate1.wait_for_endpoints('test_keyspace.0.master', 1) - l2vtgate1.wait_for_endpoints('test_keyspace.0.replica', 1) - l2vtgate1.wait_for_endpoints('test_keyspace.0.rdonly', 1) - _, l2vtgate1_addr = l2vtgate1.rpc_endpoint() - - # Clear utils.vtgate, so it doesn't point to the previous l2vtgate1. 
- utils.vtgate = None - utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_addr,], - extra_args=['-disable_local_gateway']) - utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1, - var='L2VtgateConnections') - utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1, - var='L2VtgateConnections') - utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1, - var='L2VtgateConnections') - - else: - utils.VtGate().start(cache_ttl='0', tablets=[ - shard_master, shard_replica, shard_rdonly1]) - utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1) + utils.VtGate().start(cache_ttl='0', tablets=[ + shard_master, shard_replica, shard_rdonly1]) + utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1) # check the Map Reduce API works correctly, should use ExecuteShards, # as we're not sharded yet. @@ -391,62 +358,13 @@ index by_msg (msg) # must restart vtgate after tablets are up, or else wait until 1min refresh # we want cache_ttl at zero so we re-read the topology for every test query. utils.vtgate.kill() - if use_l2vtgate: - l2vtgate1.kill() - l2vtgate1 = utils.VtGate() - l2vtgate1.start(extra_args=['--enable_forwarding', - '-tablet_filters', - 'test_keyspace|0,test_keyspace|-80'], - tablets=[shard_master, shard_replica, shard_rdonly1, - shard_0_master, shard_0_replica, - shard_0_rdonly1]) - l2vtgate1.wait_for_endpoints('test_keyspace.0.master', 1) - l2vtgate1.wait_for_endpoints('test_keyspace.0.replica', 1) - l2vtgate1.wait_for_endpoints('test_keyspace.0.rdonly', 1) - l2vtgate1.wait_for_endpoints('test_keyspace.-80.master', 1) - l2vtgate1.wait_for_endpoints('test_keyspace.-80.replica', 1) - l2vtgate1.wait_for_endpoints('test_keyspace.-80.rdonly', 1) - l2vtgate1.verify_no_endpoint('test_keyspace.80-.master') - l2vtgate1.verify_no_endpoint('test_keyspace.80-.replica') - l2vtgate1.verify_no_endpoint('test_keyspace.80-.rdonly') - - # FIXME(alainjobart) we clear tablet_types_to_wait, as this - # l2vtgate2 doesn't serve the current test_keyspace shard, which - # is test_keyspace.0. This is not ideal, we should re-work - # which keyspace/shard a l2vtgate can wait for, as the ones - # filtered by tablet_filters. 
- l2vtgate2 = utils.VtGate() - l2vtgate2.start(extra_args=['--enable_forwarding', - '-tablet_filters', - 'test_keyspace|80-'], tablets= - [shard_1_master, shard_1_replica, shard_1_rdonly1], - tablet_types_to_wait='') - l2vtgate2.wait_for_endpoints('test_keyspace.80-.master', 1) - l2vtgate2.wait_for_endpoints('test_keyspace.80-.replica', 1) - l2vtgate2.wait_for_endpoints('test_keyspace.80-.rdonly', 1) - l2vtgate2.verify_no_endpoint('test_keyspace.0.master') - l2vtgate2.verify_no_endpoint('test_keyspace.0.replica') - l2vtgate2.verify_no_endpoint('test_keyspace.0.rdonly') - l2vtgate2.verify_no_endpoint('test_keyspace.-80.master') - l2vtgate2.verify_no_endpoint('test_keyspace.-80.replica') - l2vtgate2.verify_no_endpoint('test_keyspace.-80.rdonly') - - _, l2vtgate1_addr = l2vtgate1.rpc_endpoint() - _, l2vtgate2_addr = l2vtgate2.rpc_endpoint() - utils.vtgate = None - utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_addr, - l2vtgate2_addr,], - extra_args=['-disable_local_gateway']) - var = 'L2VtgateConnections' - - else: - utils.vtgate = None - utils.VtGate().start(cache_ttl='0', tablets=[ - shard_master, shard_replica, shard_rdonly1, - shard_0_master, shard_0_replica, shard_0_rdonly1, - shard_1_master, shard_1_replica, shard_1_rdonly1]) - var = None + utils.vtgate = None + utils.VtGate().start(cache_ttl='0', tablets=[ + shard_master, shard_replica, shard_rdonly1, + shard_0_master, shard_0_replica, shard_0_rdonly1, + shard_1_master, shard_1_replica, shard_1_rdonly1]) + var = None # Wait for the endpoints, either local or remote. utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1, var=var) @@ -626,12 +544,9 @@ index by_msg (msg) # make sure rdonly tablets are back to serving before hitting vtgate. for t in [shard_0_rdonly1, shard_1_rdonly1]: t.wait_for_vttablet_state('SERVING') - if use_l2vtgate: - l2vtgate1.wait_for_endpoints('test_keyspace.-80.rdonly', 1) - l2vtgate2.wait_for_endpoints('test_keyspace.80-.rdonly', 1) - else: - utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1) + + utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1) # check the Map Reduce API works correctly, should use ExecuteKeyRanges # on both destination shards now. diff --git a/test/initial_sharding_l2vtgate.py b/test/initial_sharding_l2vtgate.py deleted file mode 100755 index 5a2a3df68a..0000000000 --- a/test/initial_sharding_l2vtgate.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-"""Re-runs initial_sharding.py with a l2vtgate process."""
-
-import initial_sharding
-import utils
-
-if __name__ == '__main__':
-  initial_sharding.use_l2vtgate = True
-  utils.main(initial_sharding)
diff --git a/test/mysql_server_test.py b/test/mysql_server_test.py
index b723eb33ec..d65380133b 100755
--- a/test/mysql_server_test.py
+++ b/test/mysql_server_test.py
@@ -100,6 +100,9 @@ class TestMySQL(unittest.TestCase):
   """This test makes sure the MySQL server connector is correct.
   """
 
+  MYSQL_OPTION_MULTI_STATEMENTS_ON = 0
+  MYSQL_OPTION_MULTI_STATEMENTS_OFF = 1
+
   def test_mysql_connector(self):
     with open(table_acl_config, 'w') as fd:
       fd.write("""{
@@ -160,6 +163,29 @@ class TestMySQL(unittest.TestCase):
     cursor.execute('select * from vt_insert_test', {})
     cursor.close()
 
+    # Test multi-statement support. It should only work when
+    # COM_SET_OPTION has set the options to 0
+    conn.set_server_option(self.MYSQL_OPTION_MULTI_STATEMENTS_ON)
+    cursor = conn.cursor()
+    cursor.execute("select 1; select 2")
+    self.assertEquals(((1L,),), cursor.fetchall())
+    self.assertEquals(1, cursor.nextset())
+    self.assertEquals(((2L,),), cursor.fetchall())
+    self.assertEquals(None, cursor.nextset())
+    cursor.close()
+    conn.set_server_option(self.MYSQL_OPTION_MULTI_STATEMENTS_OFF)
+
+    # Multi-statement support should not work without the
+    # option enabled
+    cursor = conn.cursor()
+    try:
+      cursor.execute("select 1; select 2")
+      self.fail('Execute went through')
+    except MySQLdb.OperationalError, e:
+      s = str(e)
+      self.assertIn('syntax error', s)
+    cursor.close()
+
     # verify that queries work end-to-end with large grpc messages
     largeComment = 'L' * ((4 * 1024 * 1024) + 1)
     cursor = conn.cursor()
diff --git a/test/tablet.py b/test/tablet.py
index 930f594129..1d64bfbe75 100644
--- a/test/tablet.py
+++ b/test/tablet.py
@@ -30,8 +30,12 @@ import environment
 from mysql_flavor import mysql_flavor
 from protocols_flavor import protocols_flavor
 from topo_flavor.server import topo_server
+from urlparse import urlparse
 import utils
 
+import grpc
+from vtproto.tabletmanagerservice_pb2_grpc import TabletManagerStub
+
 # Dropping a table inexplicably produces a warning despite
 # the 'IF EXISTS' clause. Squelch these warnings.
 warnings.simplefilter('ignore')
@@ -473,6 +477,7 @@ class Tablet(object):
       args.extend(['-health_check_interval', '2s'])
       args.extend(['-enable_replication_reporter'])
       args.extend(['-degraded_threshold', '5s'])
+      args.extend(['-lock_tables_timeout', '5s'])
       args.extend(['-watch_replication_stream'])
     if enable_semi_sync:
       args.append('-enable_semi_sync')
@@ -635,10 +640,10 @@ class Tablet(object):
         break
       else:
         logging.debug(
-            ' vttablet %s in state %s != %s', self.tablet_alias, s,
+            ' vttablet %s in state: %s, expected: %s', self.tablet_alias, s,
             expected)
       timeout = utils.wait_step(
-          'waiting for %s state %s (last seen state: %s)' %
+          '%s state %s (last seen state: %s)' %
           (self.tablet_alias, expected, last_seen_state),
           timeout, sleep_time=0.1)
@@ -842,6 +847,12 @@ class Tablet(object):
       return 'localhost:%d' % self.grpc_port
     return 'localhost:%d' % self.port
 
+  def tablet_manager(self):
+    """Returns a rpc client able to talk to the TabletManager rpc server in go"""
+    addr = self.rpc_endpoint()
+    p = urlparse('http://' + addr)
+    channel = grpc.insecure_channel('%s:%s' % (p.hostname, p.port))
+    return TabletManagerStub(channel)
 
 def kill_tablets(tablets):
   for t in tablets:
@@ -854,3 +865,5 @@ def kill_tablets(tablets):
     if t.proc is not None:
       t.proc.wait()
       t.proc = None
+
+
diff --git a/test/tabletmanager.py b/test/tabletmanager.py
index 68a5bf9f64..1956cf9d24 100755
--- a/test/tabletmanager.py
+++ b/test/tabletmanager.py
@@ -840,4 +840,4 @@ class TestTabletManager(unittest.TestCase):
 
 
 if __name__ == '__main__':
-  utils.main()
+  utils.main()
\ No newline at end of file
diff --git a/test/tabletmanager2.py b/test/tabletmanager2.py
new file mode 100755
index 0000000000..e65af1f1f9
--- /dev/null
+++ b/test/tabletmanager2.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# vim: tabstop=8 expandtab shiftwidth=2 softtabstop=2
+
+import MySQLdb
+import logging
+import re
+import unittest
+import os
+import environment
+import tablet
+import utils
+import time
+from utils import TestError
+from vtproto.tabletmanagerdata_pb2 import LockTablesRequest, UnlockTablesRequest, StopSlaveRequest, \
+    MasterPositionRequest, StartSlaveUntilAfterRequest
+
+# regexp to check if the tablet status page reports healthy,
+# regardless of actual replication lag
+healthy_expr = re.compile(r'Current status: healthy')
+
+_create_vt_insert_test = '''create table vt_insert_test (
+  id bigint auto_increment,
+  msg varchar(64),
+  primary key (id)
+  ) Engine=InnoDB'''
+
+
+class TestServiceTestOfTabletManager(unittest.TestCase):
+  """This tests the locking functionality by running rpc calls against the tabletmanager,
+  in contrast to testing the tabletmanager through vtctl"""
+
+  replica = tablet.Tablet(62344)
+  master = tablet.Tablet(62044)
+
+  def setUp(self):
+    try:
+      os.makedirs(environment.tmproot)
+    except OSError:
+      # directory already exists
+      pass
+
+    try:
+      topo_flavor = environment.topo_server().flavor()
+      if topo_flavor == 'zk2':
+        # This is a one-off test to make sure our 'zk2' implementation
+        # behaves with a server that is not DNS-resolvable.
+        environment.topo_server().setup(add_bad_host=True)
+      else:
+        environment.topo_server().setup()
+
+      # start mysql instance external to the test
+      setup_procs = [
+          self.replica.init_mysql(),
+          self.master.init_mysql(),
+      ]
+      utils.Vtctld().start()
+      logging.debug(utils.vtctld_connection)
+      utils.wait_procs(setup_procs)
+
+      for t in self.master, self.replica:
+        t.create_db('vt_test_keyspace')
+
+      self.master.init_tablet('replica', 'test_keyspace', '0', start=True)
+      self.replica.init_tablet('replica', 'test_keyspace', '0', start=True)
+      utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
+                       self.master.tablet_alias])
+      self.master.mquery('vt_test_keyspace', _create_vt_insert_test)
+      for t in [self.master, self.replica]:
+        t.set_semi_sync_enabled(master=False, slave=False)
+    except Exception as e:
+      logging.exception(e)
+      self.tearDown()
+
+  def tearDown(self):
+    try:
+      for t in self.master, self.replica:
+        t.kill_vttablet()
+      tablet.Tablet.check_vttablet_count()
+      environment.topo_server().wipe()
+      for t in [self.master, self.replica]:
+        t.reset_replication()
+        t.set_semi_sync_enabled(master=False, slave=False)
+        t.clean_dbs()
+    finally:
+      utils.required_teardown()
+
+    if utils.options.skip_teardown:
+      return
+
+    teardown_procs = [
+        self.master.teardown_mysql(),
+        self.replica.teardown_mysql(),
+    ]
+    utils.wait_procs(teardown_procs, raise_on_error=False)
+
+    environment.topo_server().teardown()
+    utils.kill_sub_processes()
+    utils.remove_tmp_files()
+
+    self.replica.remove_tree()
+    self.master.remove_tree()
+
+  def _write_data_to_master(self):
+    """Write a single row to the master"""
+    self.master.mquery('vt_test_keyspace', "insert into vt_insert_test (msg) values ('test')", write=True)
+
+  def _check_data_on_replica(self, count, msg):
+    """Check that the specified tablet has the expected number of rows."""
+    timeout = 3
+    while True:
+      try:
+        result = self.replica.mquery(
+            'vt_test_keyspace', 'select count(*) from vt_insert_test')
+        if result[0][0] == count:
+          break
+      except MySQLdb.DatabaseError:
+        # ignore exceptions, we'll just timeout (the tablet creation
+        # can take some time to replicate, and we get a 'table vt_insert_test
+        # does not exist' exception in some rare cases)
+        logging.exception('exception waiting for data to replicate')
+      timeout = utils.wait_step(msg, timeout)
+
+  def test_lock_and_unlock(self):
+    """Test the lock ability by locking a replica and asserting it does not see changes"""
+    # first make sure that our writes to the master make it to the replica
+    self._write_data_to_master()
+    self._check_data_on_replica(1, "replica getting the data")
+
+    # now lock the replica
+    tablet_manager = self.replica.tablet_manager()
+    tablet_manager.LockTables(LockTablesRequest())
+
+    # make sure that writing to the master does not show up on the replica while locked
+    self._write_data_to_master()
+    with self.assertRaises(TestError):
+      self._check_data_on_replica(2, "the replica should not see these updates")
+
+    # finally, make sure that unlocking the replica leads to the previous write showing up
+    tablet_manager.UnlockTables(UnlockTablesRequest())
+    self._check_data_on_replica(2, "after unlocking the replica, we should see these updates")
+
+  def test_unlock_when_we_dont_have_a_lock(self):
+    """Unlocking when we do not have a valid lock should lead to an exception being raised"""
+    # unlock the replica
+    tablet_manager = self.replica.tablet_manager()
+    with self.assertRaises(Exception):
+      tablet_manager.UnlockTables(UnlockTablesRequest())
+
+  def test_start_slave_until_after(self):
+    """Test by writing three rows, noting the gtid after each, and then replaying them one by one"""
+    self.replica.start_vttablet()
+    self.master.start_vttablet()
+
+    # first we stop replication to the replica, so we can move forward step by step.
+    replica_tablet_manager = self.replica.tablet_manager()
+    replica_tablet_manager.StopSlave(StopSlaveRequest())
+
+    master_tablet_manager = self.master.tablet_manager()
+    self._write_data_to_master()
+    pos1 = master_tablet_manager.MasterPosition(MasterPositionRequest())
+
+    self._write_data_to_master()
+    pos2 = master_tablet_manager.MasterPosition(MasterPositionRequest())
+
+    self._write_data_to_master()
+    pos3 = master_tablet_manager.MasterPosition(MasterPositionRequest())
+
+    # Now, we'll resume stepwise position by position and make sure that we see the expected data
+    self._check_data_on_replica(0, "no data has yet reached the replica")
+
+    # timeout is given in nanoseconds. we want to wait no more than 10 seconds
+    timeout = int(10 * 1e9)
+
+    replica_tablet_manager.StartSlaveUntilAfter(
+        StartSlaveUntilAfterRequest(position=pos1.position, wait_timeout=timeout))
+    self._check_data_on_replica(1, "first row is now visible")
+
+    replica_tablet_manager.StartSlaveUntilAfter(
+        StartSlaveUntilAfterRequest(position=pos2.position, wait_timeout=timeout))
+    self._check_data_on_replica(2, "second row is now visible")
+
+    replica_tablet_manager.StartSlaveUntilAfter(
+        StartSlaveUntilAfterRequest(position=pos3.position, wait_timeout=timeout))
+    self._check_data_on_replica(3, "third row is now visible")
+
+  def test_lock_and_timeout(self):
+    """Test that the lock times out and updates can be seen even though nothing is unlocked"""
+
+    # first make sure that our writes to the master make it to the replica
+    self._write_data_to_master()
+    self._check_data_on_replica(1, "replica getting the data")
+
+    # now lock the replica
+    tablet_manager = self.replica.tablet_manager()
+    tablet_manager.LockTables(LockTablesRequest())
+
+    # make sure that writing to the master does not show up on the replica while locked
+    self._write_data_to_master()
+    with self.assertRaises(TestError):
+      self._check_data_on_replica(2, "the replica should not see these updates")
+
+    # the test sets the lock timeout to 5 seconds, so sleeping 10 should be safe
+    time.sleep(10)
+
+    self._check_data_on_replica(2, "the replica should now see these updates")
+
+    # finally, trying to unlock should clearly tell us we did not have the lock
+    with self.assertRaises(Exception):
+      tablet_manager.UnlockTables(UnlockTablesRequest())
+
+
+if __name__ == '__main__':
+  utils.main()
diff --git a/test/utils.py b/test/utils.py
index d7e65ff23d..2bae9fa3eb 100644
--- a/test/utils.py
+++ b/test/utils.py
@@ -224,10 +224,11 @@ def required_teardown():
   """Required cleanup steps that can't be skipped with --skip-teardown."""
   # We can't skip closing of gRPC connections, because the Python interpreter
   # won't let us die if any connections are left open.
-  global vtctld_connection
+  global vtctld_connection, vtctld
   if vtctld_connection:
     vtctld_connection.close()
     vtctld_connection = None
+    vtctld = None
 
 
 def kill_sub_processes():
diff --git a/test/vtgate_buffer.py b/test/vtgate_buffer.py
index 00bc7ea6c2..7710427214 100755
--- a/test/vtgate_buffer.py
+++ b/test/vtgate_buffer.py
@@ -172,7 +172,7 @@ class UpdateThread(AbstractVtgateThread):
       if self.ignore_error_func and self.ignore_error_func(e):
         logging.debug('UPDATE %d failed during COMMIT. But we cannot buffer'
-                      ' this error and and ignore it. err: %s', attempt, str(e))
+                      ' this error and ignore it. err: %s', attempt, str(e))
       else:
         self._commit_errors += 1
         if self._commit_errors > 1:
diff --git a/test/vtgatev2_l2vtgate_test.py b/test/vtgatev2_l2vtgate_test.py
deleted file mode 100755
index cf869a2701..0000000000
--- a/test/vtgatev2_l2vtgate_test.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Re-runs vtgatev2_test.py with a l2vtgate process."""
-
-import utils
-import vtgatev2_test
-
-# This test is just re-running an entire vtgatev2_test.py with a
-# l2vtgate process in the middle.
-if __name__ == '__main__':
-  vtgatev2_test.use_l2vtgate = True
-  utils.main(vtgatev2_test)
diff --git a/test/vtgatev2_test.py b/test/vtgatev2_test.py
index a95d640202..4d45f44d30 100755
--- a/test/vtgatev2_test.py
+++ b/test/vtgatev2_test.py
@@ -35,16 +35,6 @@ from vtdb import vtdb_logger
 from vtdb import vtgate_client
 from vtdb import vtgate_cursor
 
-# use_l2vtgate controls if we're adding a l2vtgate process in between
-# vtgate and the tablets.
-use_l2vtgate = False
-
-# l2vtgate is the L2VTGate object, if any
-l2vtgate = None
-
-# l2vtgate_addr is the address of the l2vtgate to send to vtgate
-l2vtgate_addr = None
-
 shard_0_master = tablet.Tablet()
 shard_0_replica1 = tablet.Tablet()
 shard_0_replica2 = tablet.Tablet()
@@ -154,8 +144,6 @@ def tearDownModule():
   logging.debug('Tearing down the servers and setup')
   if utils.vtgate:
     utils.vtgate.kill()
-  if l2vtgate:
-    l2vtgate.kill()
   tablet.kill_tablets([shard_0_master, shard_0_replica1,
                        shard_0_replica2, shard_1_master,
@@ -184,7 +172,6 @@ def tearDownModule():
 
 def setup_tablets():
   """Start up a master mysql and vttablet."""
-  global l2vtgate, l2vtgate_addr
 
   logging.debug('Setting up tablets')
   utils.run_vtctl(['CreateKeyspace', KEYSPACE_NAME])
@@ -252,46 +239,22 @@ def setup_tablets():
                            'Partitions(rdonly): -80 80-\n'
                            'Partitions(replica): -80 80-\n')
 
-  if use_l2vtgate:
-    l2vtgate = utils.VtGate()
-    l2vtgate.start(extra_args=['--enable_forwarding'], tablets=
-                   [shard_0_master, shard_0_replica1, shard_0_replica2,
-                    shard_1_master, shard_1_replica1, shard_1_replica2])
-    _, l2vtgate_addr = l2vtgate.rpc_endpoint()
-    # Clear utils.vtgate, so it doesn't point to the previous l2vtgate.
-    utils.vtgate = None
-
-    # This vgate doesn't watch any local tablets, so we disable_local_gateway.
-    utils.VtGate().start(l2vtgates=[l2vtgate_addr,],
-                         extra_args=['-disable_local_gateway'])
-
-  else:
-    utils.VtGate().start(tablets=
-                         [shard_0_master, shard_0_replica1, shard_0_replica2,
-                          shard_1_master, shard_1_replica1, shard_1_replica2])
+  utils.VtGate().start(tablets=
+                       [shard_0_master, shard_0_replica1, shard_0_replica2,
+                        shard_1_master, shard_1_replica1, shard_1_replica2])
 
   wait_for_all_tablets()
 
 
 def restart_vtgate(port):
-  if use_l2vtgate:
-    utils.VtGate(port=port).start(l2vtgates=[l2vtgate_addr,],
-                                  extra_args=['-disable_local_gateway'])
-  else:
-    utils.VtGate(port=port).start(
-        tablets=[shard_0_master, shard_0_replica1, shard_0_replica2,
-                 shard_1_master, shard_1_replica1, shard_1_replica2])
+  utils.VtGate(port=port).start(
+      tablets=[shard_0_master, shard_0_replica1, shard_0_replica2,
+               shard_1_master, shard_1_replica1, shard_1_replica2])
 
 
 def wait_for_endpoints(name, count):
-  if use_l2vtgate:
-    # Wait for the l2vtgate to have a healthy connection.
-    l2vtgate.wait_for_endpoints(name, count)
-    # Also wait for vtgate to have received the remote healthy connection.
-    utils.vtgate.wait_for_endpoints(name, count, var='L2VtgateConnections')
-  else:
-    utils.vtgate.wait_for_endpoints(name, count)
+  utils.vtgate.wait_for_endpoints(name, count)
 
 
 def wait_for_all_tablets():
@@ -411,17 +374,12 @@ class TestCoreVTGateFunctions(BaseTestCase):
       self.assertIn(kid, SHARD_KID_MAP[SHARD_NAMES[shard_index]])
 
     # Do a cross shard range query and assert all rows are fetched.
-    # Use this test to also test the vtgate vars (and l2vtgate vars if
-    # applicable) are correctly updated.
+    # Use this test to also test the vtgate vars are correctly updated.
     v = utils.vtgate.get_vars()
     key0 = 'Execute.' + KEYSPACE_NAME + '.' + SHARD_NAMES[0] + '.master'
     key1 = 'Execute.' + KEYSPACE_NAME + '.' + SHARD_NAMES[1] + '.master'
     before0 = v['VttabletCall']['Histograms'][key0]['Count']
     before1 = v['VttabletCall']['Histograms'][key1]['Count']
-    if use_l2vtgate:
-      lv = l2vtgate.get_vars()
-      lbefore0 = lv['QueryServiceCall']['Histograms'][key0]['Count']
-      lbefore1 = lv['QueryServiceCall']['Histograms'][key1]['Count']
 
     cursor = vtgate_conn.cursor(
         tablet_type='master', keyspace=KEYSPACE_NAME,
@@ -435,12 +393,6 @@ class TestCoreVTGateFunctions(BaseTestCase):
     after1 = v['VttabletCall']['Histograms'][key1]['Count']
     self.assertEqual(after0 - before0, 1)
     self.assertEqual(after1 - before1, 1)
-    if use_l2vtgate:
-      lv = l2vtgate.get_vars()
-      lafter0 = lv['QueryServiceCall']['Histograms'][key0]['Count']
-      lafter1 = lv['QueryServiceCall']['Histograms'][key1]['Count']
-      self.assertEqual(lafter0 - lbefore0, 1)
-      self.assertEqual(lafter1 - lbefore1, 1)
 
   def test_rollback(self):
     vtgate_conn = get_connection()
diff --git a/tools/check_make_parser.sh b/tools/check_make_parser.sh
index 577b881dce..139e18199a 100755
--- a/tools/check_make_parser.sh
+++ b/tools/check_make_parser.sh
@@ -16,7 +16,15 @@ if ! cd go/vt/sqlparser/ ; then
 fi
 
 mv $CUR $TMP
-goyacc -o $CUR sql.y
+output=`goyacc -o $CUR sql.y`
+
+if [ -n "$output" ]; then
+  echo "Expected empty output from goyacc, got:"
+  echo $output
+  mv $TMP $CUR
+  exit 1
+fi
+
 gofmt -w $CUR
 
 if ! diff -q $CUR $TMP > /dev/null ; then
diff --git a/vendor/vendor.json b/vendor/vendor.json
index cd518cfd8a..8fbf87b2dd 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -618,14 +618,6 @@
 			"revision": "d3a67ab21bc8a4643fa53a3633f2d951dd50c6ca",
 			"revisionTime": "2016-12-07T01:17:43Z"
 		},
-		{
-			"checksumSHA1": "fe0NspvyJjx6DhmTjIpO0zmR+kg=",
-			"path": "github.com/influxdb/influxdb/client",
-			"revision": "afde71eb1740fd763ab9450e1f700ba0e53c36d0",
-			"revisionTime": "2014-12-28T19:15:54Z",
-			"version": "=v0.8.8",
-			"versionExact": "v0.8.8"
-		},
 		{
 			"checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=",
 			"path": "github.com/jmespath/go-jmespath",
@@ -681,12 +673,52 @@
 			"revisionTime": "2016-04-24T11:30:07Z"
 		},
 		{
-			"checksumSHA1": "Oi8lZYAVzy67goiwBWBDot28s6k=",
+			"checksumSHA1": "Yi/mSgnMkG30eC61vcQWVwmxQQ8=",
 			"path": "github.com/minio/minio-go",
-			"revision": "9f282f76643244430e3d0b69dc7285628a8db8a7",
-			"revisionTime": "2016-07-19T21:19:03Z",
-			"version": "v2.0.1",
-			"versionExact": "v2.0.1"
+			"revision": "c8a261de75c1a9a9ece4dcc0c81ff6db525bcf27",
+			"revisionTime": "2019-01-31T01:53:50Z",
+			"version": "v6.0.16",
+			"versionExact": "v6.0.16"
+		},
+		{
+			"checksumSHA1": "pw8e6bgWfeEZUIMnD0Zge4d/KPo=",
+			"path": "github.com/minio/minio-go/pkg/credentials",
+			"revision": "c8a261de75c1a9a9ece4dcc0c81ff6db525bcf27",
+			"revisionTime": "2019-01-31T01:53:50Z",
+			"version": "v6.0.16",
+			"versionExact": "v6.0.16"
+		},
+		{
+			"checksumSHA1": "Md5pOKYfoKtrG7xNvs2FtiDPfDc=",
+			"path": "github.com/minio/minio-go/pkg/encrypt",
+			"revision": "c8a261de75c1a9a9ece4dcc0c81ff6db525bcf27",
+			"revisionTime": "2019-01-31T01:53:50Z",
+			"version": "v6.0.16",
+			"versionExact": "v6.0.16"
+		},
+		{
+			"checksumSHA1": "1KcTZxPRRQ0BWLt1zDVG1bSjm/4=",
+			"path": "github.com/minio/minio-go/pkg/s3signer",
+			"revision": "c8a261de75c1a9a9ece4dcc0c81ff6db525bcf27",
+			"revisionTime": "2019-01-31T01:53:50Z",
+			"version": "v6.0.16",
+			"versionExact": "v6.0.16"
+		},
+		{
+			"checksumSHA1": "7iUaZkEJdhkyAu3F07vrX8pyavI=",
"path": "github.com/minio/minio-go/pkg/s3utils", + "revision": "c8a261de75c1a9a9ece4dcc0c81ff6db525bcf27", + "revisionTime": "2019-01-31T01:53:50Z", + "version": "v6.0.16", + "versionExact": "v6.0.16" + }, + { + "checksumSHA1": "Wt8ej+rZXTdNBR9Xyw1eGo3Iq5o=", + "path": "github.com/minio/minio-go/pkg/set", + "revision": "c8a261de75c1a9a9ece4dcc0c81ff6db525bcf27", + "revisionTime": "2019-01-31T01:53:50Z", + "version": "v6.0.16", + "versionExact": "v6.0.16" }, { "checksumSHA1": "V/quM7+em2ByJbWBLOsEwnY3j/Q=", @@ -880,6 +912,18 @@ "revision": "ecda9a501e8220fae3b4b600c3db4b0ba22cfc68", "revisionTime": "2017-03-16T03:48:04Z" }, + { + "checksumSHA1": "FwW3Vv4jW0Nv7V2SZC7x/Huj5M4=", + "path": "golang.org/x/crypto/argon2", + "revision": "b8fe1690c61389d7d2a8074a507d1d40c5d30448", + "revisionTime": "2019-01-30T22:18:58Z" + }, + { + "checksumSHA1": "eaK7NuGdfEVypOnqYniZSuF2S6s=", + "path": "golang.org/x/crypto/blake2b", + "revision": "b8fe1690c61389d7d2a8074a507d1d40c5d30448", + "revisionTime": "2019-01-30T22:18:58Z" + }, { "checksumSHA1": "N5fb5y92DFIP+wUhi1rSwPp9vyk=", "path": "golang.org/x/crypto/ssh/terminal", @@ -934,6 +978,12 @@ "revision": "5f8847ae0d0e90b6a9dc8148e7ad616874625171", "revisionTime": "2017-06-23T17:10:45Z" }, + { + "checksumSHA1": "j6leSoJatxWHJGLjRxIjZ8GbaDQ=", + "path": "golang.org/x/net/publicsuffix", + "revision": "65e2d4e15006aab9813ff8769e768bbf4bb667a0", + "revisionTime": "2019-02-01T23:59:58Z" + }, { "checksumSHA1": "4vGl3N46SAJwQl/uSlQvZQvc734=", "path": "golang.org/x/net/trace", @@ -970,6 +1020,12 @@ "revision": "04e1573abc896e70388bd387a69753c378d46466", "revisionTime": "2016-07-30T22:43:56Z" }, + { + "checksumSHA1": "1CmUDjhZlyKZcbLYlWI7cRzK3fI=", + "path": "golang.org/x/sys/cpu", + "revision": "41f3e6584952bb034a481797859f6ab34b6803bd", + "revisionTime": "2019-02-04T12:38:20Z" + }, { "checksumSHA1": "QmmEQv1jLvjlVGPsWewqeNYNoyk=", "path": "golang.org/x/sys/unix", @@ -1378,6 +1434,12 @@ "revision": "4e86f4367175e39f69d9358a5f17b4dda270378d", "revisionTime": "2015-09-24T05:17:56Z" }, + { + "checksumSHA1": "8yg3QdSXVEmuHm2CgWXEMFN3K6Q=", + "path": "gopkg.in/ini.v1", + "revision": "6ed8d5f64cd79a498d1f3fab5880cc376ce41bbe", + "revisionTime": "2019-01-03T01:53:35Z" + }, { "checksumSHA1": "itYnRitfdzJjy2mZlvJ+hCJZvtY=", "path": "gopkg.in/ldap.v2", diff --git a/vitess.io/Gemfile.lock b/vitess.io/Gemfile.lock index 49a4a6e3af..dec6fbb7f7 100644 --- a/vitess.io/Gemfile.lock +++ b/vitess.io/Gemfile.lock @@ -26,7 +26,7 @@ GEM activesupport (>= 2) nokogiri (~> 1.4) i18n (0.8.6) - jekyll (3.5.2) + jekyll (3.6.3) addressable (~> 2.4) colorator (~> 1.0) jekyll-sass-converter (~> 1.0) @@ -67,7 +67,7 @@ GEM neat (2.1.0) sass (~> 3.4) thor (~> 0.19) - nokogiri (1.8.2) + nokogiri (1.8.5) mini_portile2 (~> 2.3.0) pathutil (0.14.0) forwardable-extended (~> 2.6) @@ -97,7 +97,7 @@ PLATFORMS DEPENDENCIES RedCloth (~> 4.3.2) bourbon - jekyll (= 3.5.2) + jekyll (= 3.6.3) jekyll-coffeescript (= 1.0.1) jekyll-feed (= 0.3.1) jekyll-redirect-from (= 0.11.0)