Merge branch 'master' into zklapow-checkstyle

Signed-off-by: Ze'ev Klapow <zklapow@hubspot.com>
This commit is contained in:
Ze'ev Klapow 2019-02-12 10:22:55 -05:00 коммит произвёл Ze'ev Klapow
Родитель b5973f5723 46205087f7
Коммит 7b51e94763
310 изменённых файлов: 17812 добавлений и 8993 удалений

Просмотреть файл

@ -23,6 +23,7 @@
# 3. Detection of installed MySQL and setting MYSQL_FLAVOR.
# 4. Installation of development related steps e.g. creating Git hooks.
BUILD_TESTS=${BUILD_TESTS:-1}
#
# 0. Initialization and helper methods.
@ -45,9 +46,14 @@ function fail() {
[[ "$(dirname "$0")" = "." ]] || fail "bootstrap.sh must be run from its current directory"
go version &>/dev/null || fail "Go is not installed or is not on \$PATH"
[[ "$(go version 2>&1)" =~ go1\.[1-9][1-9] ]] || fail "Go is not version 1.11+"
# Set up the proper GOPATH for go get below.
source ./dev.env
if [ "$BUILD_TESTS" == 1 ] ; then
source ./dev.env
else
source ./build.env
fi
# Create main directories.
mkdir -p "$VTROOT/dist"
@ -55,15 +61,21 @@ mkdir -p "$VTROOT/bin"
mkdir -p "$VTROOT/lib"
mkdir -p "$VTROOT/vthook"
# Set up required soft links.
# TODO(mberlin): Which of these can be deleted?
ln -snf "$VTTOP/config" "$VTROOT/config"
ln -snf "$VTTOP/data" "$VTROOT/data"
ln -snf "$VTTOP/py" "$VTROOT/py-vtdb"
ln -snf "$VTTOP/go/vt/zkctl/zksrv.sh" "$VTROOT/bin/zksrv.sh"
ln -snf "$VTTOP/test/vthook-test.sh" "$VTROOT/vthook/test.sh"
ln -snf "$VTTOP/test/vthook-test_backup_error" "$VTROOT/vthook/test_backup_error"
ln -snf "$VTTOP/test/vthook-test_backup_transform" "$VTROOT/vthook/test_backup_transform"
if [ "$BUILD_TESTS" == 1 ] ; then
# Set up required soft links.
# TODO(mberlin): Which of these can be deleted?
ln -snf "$VTTOP/config" "$VTROOT/config"
ln -snf "$VTTOP/data" "$VTROOT/data"
ln -snf "$VTTOP/py" "$VTROOT/py-vtdb"
ln -snf "$VTTOP/go/vt/zkctl/zksrv.sh" "$VTROOT/bin/zksrv.sh"
ln -snf "$VTTOP/test/vthook-test.sh" "$VTROOT/vthook/test.sh"
ln -snf "$VTTOP/test/vthook-test_backup_error" "$VTROOT/vthook/test_backup_error"
ln -snf "$VTTOP/test/vthook-test_backup_transform" "$VTROOT/vthook/test_backup_transform"
else
ln -snf "$VTTOP/config" "$VTROOT/config"
ln -snf "$VTTOP/data" "$VTROOT/data"
ln -snf "$VTTOP/go/vt/zkctl/zksrv.sh" "$VTROOT/bin/zksrv.sh"
fi
# install_dep is a helper function to generalize the download and installation of dependencies.
#
@ -136,8 +148,10 @@ function install_grpc() {
grpcio_ver=$version
$PIP install --upgrade grpcio=="$grpcio_ver" grpcio-tools=="$grpcio_ver"
}
install_dep "gRPC" "1.16.0" "$VTROOT/dist/grpc" install_grpc
if [ "$BUILD_TESTS" == 1 ] ; then
install_dep "gRPC" "1.16.0" "$VTROOT/dist/grpc" install_grpc
fi
# Install protoc.
function install_protoc() {
@ -225,8 +239,9 @@ function install_pymock() {
popd >/dev/null
}
pymock_version=1.0.1
install_dep "py-mock" "$pymock_version" "$VTROOT/dist/py-mock-$pymock_version" install_pymock
if [ "$BUILD_TESTS" == 1 ] ; then
install_dep "py-mock" "$pymock_version" "$VTROOT/dist/py-mock-$pymock_version" install_pymock
fi
# Download Selenium (necessary to run test/vtctld_web_test.py).
function install_selenium() {
@ -239,7 +254,9 @@ function install_selenium() {
# instead of go/dist/selenium/lib/python3.5/site-packages and then can't find module 'pip._vendor.requests'
PYTHONPATH='' $PIP install selenium
}
install_dep "Selenium" "latest" "$VTROOT/dist/selenium" install_selenium
if [ "$BUILD_TESTS" == 1 ] ; then
install_dep "Selenium" "latest" "$VTROOT/dist/selenium" install_selenium
fi
# Download chromedriver (necessary to run test/vtctld_web_test.py).
@ -247,11 +264,13 @@ function install_chromedriver() {
local version="$1"
local dist="$2"
curl -sL "http://chromedriver.storage.googleapis.com/$version/chromedriver_linux64.zip" > chromedriver_linux64.zip
curl -sL "https://chromedriver.storage.googleapis.com/$version/chromedriver_linux64.zip" > chromedriver_linux64.zip
unzip -o -q chromedriver_linux64.zip -d "$dist"
rm chromedriver_linux64.zip
}
install_dep "chromedriver" "2.44" "$VTROOT/dist/chromedriver" install_chromedriver
if [ "$BUILD_TESTS" == 1 ] ; then
install_dep "chromedriver" "2.44" "$VTROOT/dist/chromedriver" install_chromedriver
fi
#
@ -300,47 +319,52 @@ govendor sync || fail "Failed to download/update dependencies with govendor. Ple
# find mysql and prepare to use libmysqlclient
if [ -z "$MYSQL_FLAVOR" ]; then
export MYSQL_FLAVOR=MySQL56
echo "MYSQL_FLAVOR environment variable not set. Using default: $MYSQL_FLAVOR"
if [ "$BUILD_TESTS" == 1 ] ; then
if [ -z "$MYSQL_FLAVOR" ]; then
export MYSQL_FLAVOR=MySQL56
echo "MYSQL_FLAVOR environment variable not set. Using default: $MYSQL_FLAVOR"
fi
case "$MYSQL_FLAVOR" in
"MySQL56")
myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)"
[[ "$myversion" =~ Distrib\ 5\.[67] || "$myversion" =~ Ver\ 8\. ]] || fail "Couldn't find MySQL 5.6+ in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location."
echo "Found MySQL 5.6+ installation in $VT_MYSQL_ROOT."
;;
"MariaDB")
myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)"
[[ "$myversion" =~ MariaDB ]] || fail "Couldn't find MariaDB in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location."
echo "Found MariaDB installation in $VT_MYSQL_ROOT."
;;
*)
fail "Unsupported MYSQL_FLAVOR $MYSQL_FLAVOR"
;;
esac
# save the flavor that was used in bootstrap, so it can be restored
# every time dev.env is sourced.
echo "$MYSQL_FLAVOR" > "$VTROOT/dist/MYSQL_FLAVOR"
fi
case "$MYSQL_FLAVOR" in
"MySQL56")
myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)"
[[ "$myversion" =~ Distrib\ 5\.[67] || "$myversion" =~ Ver\ 8\. ]] || fail "Couldn't find MySQL 5.6+ in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location."
echo "Found MySQL 5.6+ installation in $VT_MYSQL_ROOT."
;;
"MariaDB")
myversion="$("$VT_MYSQL_ROOT/bin/mysql" --version)"
[[ "$myversion" =~ MariaDB ]] || fail "Couldn't find MariaDB in $VT_MYSQL_ROOT. Set VT_MYSQL_ROOT to override search location."
echo "Found MariaDB installation in $VT_MYSQL_ROOT."
;;
*)
fail "Unsupported MYSQL_FLAVOR $MYSQL_FLAVOR"
;;
esac
# save the flavor that was used in bootstrap, so it can be restored
# every time dev.env is sourced.
echo "$MYSQL_FLAVOR" > "$VTROOT/dist/MYSQL_FLAVOR"
#
# 4. Installation of development related steps e.g. creating Git hooks.
#
# Create the Git hooks.
echo "creating git hooks"
mkdir -p "$VTTOP/.git/hooks"
ln -sf "$VTTOP/misc/git/pre-commit" "$VTTOP/.git/hooks/pre-commit"
ln -sf "$VTTOP/misc/git/prepare-commit-msg.bugnumber" "$VTTOP/.git/hooks/prepare-commit-msg"
ln -sf "$VTTOP/misc/git/commit-msg" "$VTTOP/.git/hooks/commit-msg"
(cd "$VTTOP" && git config core.hooksPath "$VTTOP/.git/hooks")
if [ "$BUILD_TESTS" == 1 ] ; then
# Create the Git hooks.
echo "creating git hooks"
mkdir -p "$VTTOP/.git/hooks"
ln -sf "$VTTOP/misc/git/pre-commit" "$VTTOP/.git/hooks/pre-commit"
ln -sf "$VTTOP/misc/git/prepare-commit-msg.bugnumber" "$VTTOP/.git/hooks/prepare-commit-msg"
ln -sf "$VTTOP/misc/git/commit-msg" "$VTTOP/.git/hooks/commit-msg"
(cd "$VTTOP" && git config core.hooksPath "$VTTOP/.git/hooks")
echo
echo "bootstrap finished - run 'source dev.env' in your shell before building."
else
echo
echo "bootstrap finished - run 'source build.env' in your shell before building."
fi
echo
echo "bootstrap finished - run 'source dev.env' in your shell before building."

39
build.env Normal file
Просмотреть файл

@ -0,0 +1,39 @@
# No shebang line as this script is sourced from an external shell.
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Plese ensure dev.env is written in a way which is POSIX (bourne)
# shell compatible.
# - Some build systems like rpm require the different scriptlets used
# to build a package to be run under a POSIX shell so non-POSIX
# syntax will break that as dev.env will not be sourced by bash..
# Import prepend_path function.
dir="$(dirname "${BASH_SOURCE[0]}")"
# shellcheck source=tools/shell_functions.inc
if ! source "${dir}/tools/shell_functions.inc"; then
echo "failed to load tools/shell_functions.inc"
return 1
fi
VTTOP=$(pwd)
export VTTOP
VTROOT="${VTROOT:-${VTTOP/\/src\/vitess.io\/vitess/}}"
export VTROOT
# VTTOP sanity check
if [[ "$VTTOP" == "${VTTOP/\/src\/vitess.io\/vitess/}" ]]; then
echo "WARNING: VTTOP($VTTOP) does not contain src/vitess.io/vitess"
fi

Просмотреть файл

@ -84,3 +84,5 @@ GRANT SELECT
FLUSH PRIVILEGES;
RESET SLAVE ALL;
RESET MASTER;

Двоичные данные
data/test/cgzip_eof.gz

Двоичный файл не отображается.

16
dev.env
Просмотреть файл

@ -20,22 +20,8 @@
# to build a package to be run under a POSIX shell so non-POSIX
# syntax will break that as dev.env will not be sourced by bash..
# Import prepend_path function.
dir="$(dirname "${BASH_SOURCE[0]}")"
# shellcheck source=tools/shell_functions.inc
if ! source "${dir}/tools/shell_functions.inc"; then
echo "failed to load tools/shell_functions.inc"
return 1
fi
source ./build.env
VTTOP=$(pwd)
export VTTOP
VTROOT="${VTROOT:-${VTTOP/\/src\/vitess.io\/vitess/}}"
export VTROOT
# VTTOP sanity check
if [[ "$VTTOP" == "${VTTOP/\/src\/vitess.io\/vitess/}" ]]; then
echo "WARNING: VTTOP($VTTOP) does not contain src/vitess.io/vitess"
fi
export GOTOP=$VTTOP/go
export PYTOP=$VTTOP/py

Просмотреть файл

@ -20,7 +20,7 @@ If no tablet type was specified, then VTGate chooses its default, which can be o
Vitess supports different modes. In OLTP mode, the result size is typically limited to a preset number (10,000 rows by default). This limit can be adjusted based on your needs.
However, OLAP mode has no limit to the number of rows returned. In order to change to this mode, you may issue the following command command before executing your query:
However, OLAP mode has no limit to the number of rows returned. In order to change to this mode, you may issue the following command before executing your query:
```
set workload='olap'
@ -32,7 +32,7 @@ The general convention is to send OLTP queries to `REPLICA` tablet types, and OL
## Is there a list of supported/unsupported queries?
The list of unsupported constructs is currently in the form of test cases contained in this [test file](https://github.com/vitessio/vitess/blob/master/data/test/vtgate/unsupported_cases.txt). However, contrary to the test cases, there is limited support for SET, DDL and DBA constructs. This will be documented soon.
The list of unsupported constructs is currently in the form of test cases contained in this [test file](https://github.com/vitessio/vitess/blob/master/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt). However, contrary to the test cases, there is limited support for SET, DDL and DBA constructs. This will be documented soon.
## If I have a log of all queries from my app. Is there a way I can try them against vitess to see how theyll work?

Просмотреть файл

@ -168,7 +168,7 @@ In addition, Vitess requires the software and libraries listed below.
(install steps are below).
3. If Xcode is installed (with Console tools, which should be bundled automatically since the 7.1 version), all
the dev dependencies should be satisfied in this step. If no Xcode is present, it is necessery to install pkg-config.
the dev dependencies should be satisfied in this step. If no Xcode is present, it is necessary to install pkg-config.
``` sh
brew install pkg-config

Просмотреть файл

@ -132,7 +132,7 @@ on the clicked button. The approval buttons are cleared after the phase has
finished. The next phase will only starts if its previous phase has finished
successfully.
If the workflow is restored from a checkpoint, you will still see the the
If the workflow is restored from a checkpoint, you will still see the
approval button with approved message when there are running tasks under this
approval. But you don't need to approve the same tasks again for a restarted
workflow.

Просмотреть файл

@ -22,11 +22,11 @@ Scraping Vitess variables is a good way to integrate Vitess into an existing mon
Vitess also includes support for push-based metrics systems via plug-ins. Each Vitess component would need to be run with the `--emit_stats` flag.
By default, the stats_emit_period is 60s, so each component will push stats to the the selected backend every minute. This is configurable via the `--stats_emit_period` flag.
By default, the stats_emit_period is 60s, so each component will push stats to the selected backend every minute. This is configurable via the `--stats_emit_period` flag.
Vitess has preliminary plug-ins to support InfluxDB and OpenTSDB as push-based metrics backends. However, there is very limited support at this time, as InfluxDB itself is going through various API breaking changes.
Vitess has preliminary plug-ins to support OpenTSDB as a push-based metrics backend.
It should be fairly straightforward to write your own plug-in, if you want to support a different backend. The plug-in package simply needs to implement the `PushBackend` interface of the `stats` package. For an example, you can see the [InfluxDB plugin](https://github.com/vitessio/vitess/blob/master/go/stats/influxdbbackend/influxdb_backend.go).
It should be fairly straightforward to write your own plug-in, if you want to support a different backend. The plug-in package simply needs to implement the `PushBackend` interface of the `stats` package. For an example, you can see the [OpenTSDB plugin](https://github.com/vitessio/vitess/blob/master/go/stats/opentsdb/opentsdb.go).
Once youve written the backend plug-in, you also need to register the plug-in from within all the relevant Vitess binaries. An example of how to do this can be seen in [this pull request](https://github.com/vitessio/vitess/pull/469).
@ -36,7 +36,7 @@ Connecting Vitess to a push-based metrics system can be useful if youre alrea
## Monitoring with Kubernetes
The existing methods for integrating metrics are not supported in a Kubernetes environment by the Vitess team yet, but are on the roadmap for the future. However, it should be possible to get the InfluxDB backend working with Kubernetes, similar to how [Heapster for Kubernetes works](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/cluster/addons/cluster-monitoring).
The existing methods for integrating metrics are not supported in a Kubernetes environment by the Vitess team yet, but are on the roadmap for the future. However, it should be possible to get the Prometheus backend working with Kubernetes, similar to how [Heapster for Kubernetes works](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/prometheus).
In the meantime, if you run into issues or have questions, please post on our [forum](https://groups.google.com/forum/#!forum/vitess).

Просмотреть файл

@ -54,7 +54,7 @@ Setting up these components directly -- for example, writing your own topology s
* *Recommended*. Vitess has basic support for identifying or changing a master, but it doesn't aim to fully address this feature. As such, we recommend using another program, like [Orchestrator](https://github.com/github/orchestrator), to monitor the health of your servers and to change your master database when necessary. (In a sharded database, each shard has a master.)
* *Recommended*. You should have a way to monitor your database topology and set up alerts as needed. Vitess components facilitate this monitoring by exporting a lot of runtime variables, like QPS over the last few minutes, error rates, and query latency. The variables are exported in JSON format, and Vitess also supports an InfluxDB plug-in.
* *Recommended*. You should have a way to monitor your database topology and set up alerts as needed. Vitess components facilitate this monitoring by exporting a lot of runtime variables, like QPS over the last few minutes, error rates, and query latency. The variables are exported in JSON format, and Vitess also supports a Prometheus plug-in.
* *Optional*. Using the Kubernetes scripts as a base, you could run Vitess components with other configuration management systems (like Puppet) or frameworks (like Mesos or AWS images).

Просмотреть файл

@ -64,7 +64,7 @@ ValidateSchemaShard user/0
The <code>[ValidateSchemaKeyspace]({% link reference/vtctl.md %}#validateschemakeyspace)</code>
command confirms that all of the tablets in a given keyspace have
the the same schema as the master tablet on shard <code>0</code>
the same schema as the master tablet on shard <code>0</code>
in that keyspace. Thus, whereas the <code>ValidateSchemaShard</code>
command confirms the consistency of the schema on tablets within a shard
for a given keyspace, <code>ValidateSchemaKeyspace</code> confirms the

Просмотреть файл

@ -71,7 +71,7 @@ This rule is not strictly enforced. You are allowed to add these things, but at
Similar guidelines should be used when deciding to bypass Vitess to send statements directly to MySQL.
Vitess also requires you to turn on STRICT_TRANS_TABLES mode. Otherwise, it cannot accurately predict what will be written to the database.
Vitess also requires you to turn on STRICT_TRANS_TABLES or STRICT_ALL_TABLES mode. Otherwise, it cannot accurately predict what will be written to the database.
Its safe to apply backward compatible DDLs directly to MySQL. VTTablets can be configured to periodically check the schema for changes.

Просмотреть файл

@ -93,36 +93,7 @@ There are two implementations of the Gateway interface:
discovery section, one per cell) as a source of tablets, a HealthCheck module
to watch their health, and a TabletStatsCache to collect all the health
information. Based on this data, it can find the best tablet to use.
* l2VTGateGateway: It keeps a map of l2vtgate processes to send queries to. See
next section for more details.
## l2vtgate
As we started increasing the number of tablets in a cell, it became clear that a
bottleneck of the system was going to be how many tablets a single vtgate is
connecting to. Since vtgate maintains a streaming health check connection per
tablet, the number of these connections can grow to large numbers. It is common
for vtgate to watch tablets in other cells, to be able to find the master
tablet.
So l2vtgate came to exist, based on very similar concepts and interfaces:
* l2vtgate is an extra hop between a vtgate pool and tablets.
* A l2vtgate pool connects to a subset of tablets, therefore it can have a
reasonable number of streaming health connections. Externally, it exposes the
QueryService RPC interface (that has the Target for the query, keyspace /
shard / tablet type). Internally, it uses a discoveryGateway, as usual.
* vtgate connects to l2vtgate pools (using the l2VTGateGateway instead of the
discoveryGateway). It has a map of which keyspace / shard / tablet type needs
to go to wich l2vtgate pool. At this point, vtgate doesn't maintain any health
information about the tablets, it lets l2vtgate handle it.
Note l2vtgate is not an ideal solution as it is now. For instance, if there are
two cells, and the master for a shard can be in either, l2vtgate still has to
watch the tablets in both cells, to know where the master is. Ideally, we'd want
l2vtgate to be collocated with the tablets in a given cell, and not go
cross-cell.
# Extensions, work in progress
## Regions, cross-cell targeting
@ -169,31 +140,6 @@ between vtgate and l2vtgate:
This would also be a good time to merge the vtgate code that uses the VSchema
with the code that doesn't for SrvKeyspace access.
## Hybrid Gateway
It would be nice to re-organize the code a bit inside vtgate to allow for an
hybrid gateway, and get rid of l2vtgate alltogether:
* vtgate would use the discoveryGateway to watch the tablets in the current cell
(and optionally to any other cell we still want to consider local).
* vtgate would use l2vtgateGateway to watch the tablets in a different cell.
* vtgate would expose the RPC APIs currently exposed by the l2vtgate process.
So vtgate would watch the tablets in the local cell only, but also know what
healthy tablets are in the other cells, and be able to send query to them
through their vtgate. The extra hop to the other cell vtgate should be a small
latency price to pay, compared to going cross-cell already.
So queries would go one of two routes:
* client(cell1) -> vtgate(cell1) -> tablet(cell1)
* client(cell1) -> vtgate(cell1) -> vtgate(cell2) -> tablet(cell2)
If the number of tablets in a given cell is still too high for the local vtgate
pool, two or more pools can still be created, each of them knowing about a
subset of the tablets. And they would just forward queries to each others when
addressing the other tablet set.
## Config-based routing
Another possible extension would be to group all routing options for vtgate in a

Просмотреть файл

@ -2,7 +2,7 @@
This document highlights things to look after when upgrading a Vitess production installation to a newer Vitess release.
Generally speaking, upgrading Vitess is a safe and and easy process because it is explicitly designed for it. This is because in YouTube we follow the practice of releasing new versions often (usually from the tip of the Git master branch).
Generally speaking, upgrading Vitess is a safe and easy process because it is explicitly designed for it. This is because in YouTube we follow the practice of releasing new versions often (usually from the tip of the Git master branch).
## Compatibility

Просмотреть файл

@ -1311,7 +1311,7 @@ When two nodes are grouped, the current join condition becomes the root of the n
* If its a JOIN, the new property is the more restrictive of the two nodes. So, if one of them is a Route, then the new node is also a Route.
* For a LEFT JOIN, the new property is the same as the LHS node.
If the grouping conditions are not met, then the node remains a join node. In this case, we have to see if the ON clause conditions can be pushed down into the left and/or right nodes. By the fact that the current join is split into two, the ON clause cannot be be pushed as is. Instead, we use associativity rules to our benefit and merge the ON clause conditions into the WHERE clauses of the underlying nodes. The rules are the same as the ones described for a normal WHERE clause.
If the grouping conditions are not met, then the node remains a join node. In this case, we have to see if the ON clause conditions can be pushed down into the left and/or right nodes. By the fact that the current join is split into two, the ON clause cannot be pushed as is. Instead, we use associativity rules to our benefit and merge the ON clause conditions into the WHERE clauses of the underlying nodes. The rules are the same as the ones described for a normal WHERE clause.
But left joins are slightly different, because the join condition is applied *to the RHS only*. Also, the condition cannot be further pushed into other nested left joins, because they will change the meaning of the statement. For example:
@ -1491,7 +1491,7 @@ If a, b and c where in different groups, the output would be:
a b where (b.id=a.id) and (cond1(a.col, b.col))
```
The cond2 expression gets pushed into the the where clause for table c because its the right-most group thats referenced by the condition. External references will be changed to appropriate bind variables by the rewiring phase.
The cond2 expression gets pushed into the where clause for table c because its the right-most group thats referenced by the condition. External references will be changed to appropriate bind variables by the rewiring phase.
*Once VTGate acquires the ability to perform its own filters, should we stop pushing these conditions into the dependent queries and do it ourselves instead? The answer will usually be no. You almost always want to push down filters. This is because it will let the underlying database scan fewer rows, or choose better indexes. The more restrictive the query is, the better.*

Просмотреть файл

@ -251,7 +251,7 @@ When you fire up the schema editor, it should take you to the load workflow. The
The schema picks up the loaded JSON, parse it and display the various components of the schema in a page where the relationships are easily visualized. The vschema has four main components: keyspaces, tables, table classes and vindexes.
Keyspaces can be on a left navbar. Once you select the keyspaces, it will display the the rest of the three components in one column each.
Keyspaces can be on a left navbar. Once you select the keyspaces, it will display the rest of the three components in one column each.
The schema editor will sanity check the JSON file for inconsistencies and flag them using various color codes:

Просмотреть файл

@ -37,7 +37,7 @@ For `select` statements, we can follow the V3 design principles, there will be a
While analyzing the `WHERE` clause, if the primitive is a `vindexFunc`, we look for the three possible combinations listed above. Once they're matched, we can assign the corresponding opcode.
While analyizing the `SELECT` expression list, we verify that that the user has specified expressions as required by each opcode.
While analyzing the `SELECT` expression list, we verify that the user has specified expressions as required by each opcode.
Joins and subqueries will not be allowed, at least for now.

Просмотреть файл

@ -488,7 +488,7 @@ Split a query into non-overlapping sub queries
#### Request
SplitQueryRequest is the payload to SplitQuery. SplitQuery takes a "SELECT" query and generates a list of queries called "query-parts". Each query-part consists of the original query with an added WHERE clause that restricts the query-part to operate only on rows whose values in the the columns listed in the "split_column" field of the request (see below) are in a particular range. It is guaranteed that the set of rows obtained from executing each query-part on a database snapshot and merging (without deduping) the results is equal to the set of rows obtained from executing the original query on the same snapshot with the rows containing NULL values in any of the split_column's excluded. This is typically called by the MapReduce master when reading from Vitess. There it's desirable that the sets of rows returned by the query-parts have roughly the same size.
SplitQueryRequest is the payload to SplitQuery. SplitQuery takes a "SELECT" query and generates a list of queries called "query-parts". Each query-part consists of the original query with an added WHERE clause that restricts the query-part to operate only on rows whose values in the columns listed in the "split_column" field of the request (see below) are in a particular range. It is guaranteed that the set of rows obtained from executing each query-part on a database snapshot and merging (without deduping) the results is equal to the set of rows obtained from executing the original query on the same snapshot with the rows containing NULL values in any of the split_column's excluded. This is typically called by the MapReduce master when reading from Vitess. There it's desirable that the sets of rows returned by the query-parts have roughly the same size.
##### Parameters

Просмотреть файл

@ -33,7 +33,7 @@
* Should we require to add documentation for features as part of pull requests?
- Not in the short term, it could discourage contributions from new comers.
- We should make easier for new comers to add documentation (add more structure and guidance in how to add documenation).
- We should make easier for new comers to add documentation (add more structure and guidance in how to add documentation).
* We should be able to find a tech writer contractor that helps with editing / copy.
* @zmagg knows tech writers that could help. They are remote. She will be making an intro to @jitten.
* Some queries take a very long time without clear reason.

Просмотреть файл

@ -52,7 +52,7 @@ ENV PKG_CONFIG_PATH $VTROOT/lib
ENV USER vitess
# Copy files needed for bootstrap
COPY bootstrap.sh dev.env /vt/src/vitess.io/vitess/
COPY bootstrap.sh dev.env build.env /vt/src/vitess.io/vitess/
COPY config /vt/src/vitess.io/vitess/config
COPY third_party /vt/src/vitess.io/vitess/third_party
COPY tools /vt/src/vitess.io/vitess/tools

Просмотреть файл

@ -9,5 +9,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins
# Bootstrap Vitess
WORKDIR /vt/src/vitess.io/vitess
ENV MYSQL_FLAVOR MariaDB
USER vitess
RUN ./bootstrap.sh

Просмотреть файл

@ -4,19 +4,16 @@ FROM debian:stretch-slim
RUN apt-get update && \
apt-get upgrade -qq && \
apt-get install wget -qq --no-install-recommends && \
wget https://github.com/github/orchestrator/releases/download/v3.0.13/orchestrator_3.0.13_amd64.deb && \
dpkg -i orchestrator_3.0.13_amd64.deb && \
rm orchestrator_3.0.13_amd64.deb && \
apt-get install wget ca-certificates jq -qq --no-install-recommends && \
wget https://github.com/github/orchestrator/releases/download/v3.0.14/orchestrator_3.0.14_amd64.deb && \
dpkg -i orchestrator_3.0.14_amd64.deb && \
rm orchestrator_3.0.14_amd64.deb && \
apt-get purge wget -qq && \
apt-get autoremove -qq && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Copy certs to allow https calls
COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
# Copy vtctlclient to be used to notify
# Copy vtctlclient to be used to notify
COPY --from=k8s /vt/bin/vtctlclient /usr/bin/
WORKDIR /usr/local/orchestrator

Просмотреть файл

@ -13,8 +13,6 @@ COPY --from=k8s /vt/bin/vtctlclient /usr/bin/
# add vitess user/group and add permissions
RUN groupadd -r --gid 2000 vitess && \
useradd -r -g vitess --uid 1000 vitess && \
chown -R vitess:vitess /vt && \
chown -R vitess:vitess /vtdataroot
useradd -r -g vitess --uid 1000 vitess
CMD ["/usr/bin/vtctlclient"]

Просмотреть файл

@ -5,7 +5,7 @@ FROM debian:stretch-slim
# TODO: remove when https://github.com/vitessio/vitess/issues/3553 is fixed
RUN apt-get update && \
apt-get upgrade -qq && \
apt-get install mysql-client jq -qq --no-install-recommends && \
apt-get install wget mysql-client jq -qq --no-install-recommends && \
apt-get autoremove && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

Просмотреть файл

@ -162,7 +162,6 @@ esac
# Construct "cp" command to copy the source code.
#
# TODO(mberlin): Copy vendor/vendor.json file such that we can run a diff against the file on the image.
# Copy the full source tree except:
# - vendor
# That's because these directories are already part of the image.
@ -172,11 +171,19 @@ esac
# we do not move or overwrite the existing files while copying the other
# directories. Therefore, the existing files do not count as changed and will
# not be part of the new Docker layer of the cache image.
copy_src_cmd="cp -R /tmp/src/!(vendor) ."
copy_src_cmd="cp -R /tmp/src/!(vendor|bootstrap.sh) ."
# Copy the .git directory because travis/check_make_proto.sh needs a working
# Git repository.
copy_src_cmd=$(append_cmd "$copy_src_cmd" "cp -R /tmp/src/.git .")
# Copy vendor/vendor.json file if it changed
run_bootstrap_cmd="if [[ \$(diff -w vendor/vendor.json /tmp/src/vendor/vendor.json) ]]; then cp -f /tmp/src/vendor/vendor.json vendor/; sync_vendor=1; fi"
# Copy bootstrap.sh if it changed
run_bootstrap_cmd=$(append_cmd "$run_bootstrap_cmd" "if [[ \$(diff -w bootstrap.sh /tmp/src/bootstrap.sh) ]]; then cp -f /tmp/src/bootstrap.sh .; bootstrap=1; fi")
# run bootstrap.sh if necessary
run_bootstrap_cmd=$(append_cmd "$run_bootstrap_cmd" "if [[ -n \$bootstrap ]]; then ./bootstrap.sh; else if [[ -n \$sync_vendor ]]; then govendor sync; fi; fi")
copy_src_cmd=$(append_cmd "$copy_src_cmd" "$run_bootstrap_cmd")
# Construct the command we will actually run.
#
# Uncomment the next line if you need to debug "bashcmd".

Просмотреть файл

@ -19,4 +19,18 @@
host=$(minikube service vtgate-zone1 --format "{{.IP}}" | tail -n 1)
port=$(minikube service vtgate-zone1 --format "{{.Port}}" | tail -n 1)
if [ -z $port ]; then
#This checks whether K8s is running on a single node set up by kubeadm
if [ $(kubectl get nodes | grep -v NAM | wc -l) -eq 1 -o $(kubectl get nodes | grep -v NAM | grep master | wc -l ) -eq 1 ]; then
host="127.0.0.1"
port=`kubectl describe service vtgate-zone1 | grep NodePort | grep mysql | awk '{print $3}' | awk -F'/' '{print $1}'`
fi
fi
if [ -z $port ]; then
echo "Error: failed to obtain [host:port] minikube or kubectl."
exit 1;
fi
mysql -h "$host" -P "$port" $*

Просмотреть файл

@ -5,6 +5,6 @@ local machine, which may be useful for experimentation. These scripts can
also serve as a starting point for configuring Vitess into your preferred
deployment strategy or toolset.
See the [Run Vitess Locally](http://vitess.io/getting-started/local-instance/)
guide for instructions on using these scripts.
See the [Run Vitess Locally](https://vitess.io/docs/tutorials/local/)
tutorial ("Start a Vitess cluster" section) for instructions on using these scripts.

Просмотреть файл

@ -16,7 +16,7 @@ hostname=`hostname -f`
vtctld_web_port=15000
# Set up environment.
export VTTOP=$VTROOT/src/vitess.io/vitess
export VTTOP=${VTTOP-$VTROOT/src/vitess.io/vitess}
# Try to find mysqld_safe on PATH.
if [ -z "$VT_MYSQL_ROOT" ]; then

Просмотреть файл

@ -1,79 +0,0 @@
// +build cgo
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgzip
/*
#cgo CFLAGS: -Werror=implicit
#cgo pkg-config: zlib
#include "zlib.h"
*/
import "C"
import (
"hash"
"unsafe"
)
// adler32Hash holds zlib's running adler32 state. The value is kept in
// the C type so it can be passed straight back into C.adler32.
type adler32Hash struct {
	adler C.uLong
}

// NewAdler32 creates an empty buffer which has an adler32 of '1'. The go
// hash/adler32 does the same.
func NewAdler32() hash.Hash32 {
	a := &adler32Hash{}
	a.Reset()
	return a
}

// Write feeds p into the running checksum; it implements io.Writer and
// never returns an error.
func (a *adler32Hash) Write(p []byte) (n int, err error) {
	if len(p) > 0 {
		// &p[0] would panic on an empty slice, so skip the C call for it.
		a.adler = C.adler32(a.adler, (*C.Bytef)(unsafe.Pointer(&p[0])), (C.uInt)(len(p)))
	}
	return len(p), nil
}
// Sum appends the current checksum to b, most significant byte first,
// and returns the resulting slice (hash.Hash interface).
func (a *adler32Hash) Sum(b []byte) []byte {
	s := a.Sum32()
	return append(b, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// Reset re-initializes the state to the adler32 of the empty buffer;
// zlib returns the required initial value when given a nil buffer.
func (a *adler32Hash) Reset() {
	a.adler = C.adler32(0, (*C.Bytef)(unsafe.Pointer(nil)), 0)
}

// Size returns the number of bytes Sum appends (hash.Hash interface).
func (a *adler32Hash) Size() int {
	return 4
}

// BlockSize returns the hash's preferred block size (hash.Hash interface).
func (a *adler32Hash) BlockSize() int {
	return 1
}

// hash.Hash32 interface
func (a *adler32Hash) Sum32() uint32 {
	return uint32(a.adler)
}

Просмотреть файл

@ -1,276 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgzip
import (
"bytes"
"compress/gzip"
"fmt"
"hash/adler32"
"hash/crc32"
"hash/crc64"
"io"
"math/rand"
"os/exec"
"sync"
"testing"
"time"
)
// prettyTimer records a named start instant so benchmark-style sections
// of the tests can report how long they took.
type prettyTimer struct {
	name   string
	before time.Time
}

// newPrettyTimer starts a timer labeled with the given name.
func newPrettyTimer(name string) *prettyTimer {
	pt := &prettyTimer{
		name:   name,
		before: time.Now(),
	}
	return pt
}
// stopAndPrintCompress stops the timer and logs the compressed size,
// the elapsed time and the throughput of a compression run.
func (pt *prettyTimer) stopAndPrintCompress(t *testing.T, size, processed int) {
	elapsed := time.Since(pt.before)
	t.Log(pt.name + ":")
	t.Log(" size :", size)
	t.Log(" time :", elapsed.String())
	if elapsed == 0 {
		// Too fast to compute a rate; report the raw byte count instead.
		t.Log(" processed:", processed, "B")
		return
	}
	t.Logf(" speed: %.0f KB/s", float64(processed)/elapsed.Seconds()/1024.0)
}
// stopAndPrintUncompress stops the timer and logs the elapsed time and
// throughput of a decompression or checksum run.
func (pt *prettyTimer) stopAndPrintUncompress(t *testing.T, processed int) {
	elapsed := time.Since(pt.before)
	t.Log(" " + pt.name + ":")
	t.Log(" time :", elapsed.String())
	if elapsed == 0 {
		// Too fast to compute a rate; report the raw byte count instead.
		t.Log(" processed:", processed, "B")
		return
	}
	t.Logf(" speed: %.0f KB/s", float64(processed)/elapsed.Seconds()/1024.0)
}
// compareCompressedBuffer checks that both go's gzip and cgzip can
// decompress 'compressed' back into exactly 'source', timing each decoder.
func compareCompressedBuffer(t *testing.T, source []byte, compressed *bytes.Buffer) {
	// compare using go's gunzip
	toGunzip := bytes.NewBuffer(compressed.Bytes())
	gunzip, err := gzip.NewReader(toGunzip)
	if err != nil {
		t.Errorf("gzip.NewReader failed: %v", err)
	}
	uncompressed := &bytes.Buffer{}
	pt := newPrettyTimer("go unzip")
	_, err = io.Copy(uncompressed, gunzip)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	pt.stopAndPrintUncompress(t, uncompressed.Len())
	if !bytes.Equal(source, uncompressed.Bytes()) {
		t.Errorf("Bytes are not equal")
	}
	// compare using cgzip gunzip
	toGunzip = bytes.NewBuffer(compressed.Bytes())
	cgunzip, err := NewReader(toGunzip)
	if err != nil {
		t.Errorf("cgzip.NewReader failed: %v", err)
	}
	uncompressed = &bytes.Buffer{}
	pt = newPrettyTimer("cgzip unzip")
	_, err = io.Copy(uncompressed, cgunzip)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	pt.stopAndPrintUncompress(t, uncompressed.Len())
	if !bytes.Equal(source, uncompressed.Bytes()) {
		t.Errorf("Bytes are not equal")
	}
}
// testChecksums times the go and cgzip implementations of adler32 and
// crc32 over 'data' and verifies both produce identical sums. crc64 has
// no cgzip counterpart and is timed for reference only.
func testChecksums(t *testing.T, data []byte) {
	t.Log("Checksums:")
	// crc64 with go library
	goCrc64 := crc64.New(crc64.MakeTable(crc64.ECMA))
	toChecksum := bytes.NewBuffer(data)
	pt := newPrettyTimer("go crc64")
	_, err := io.Copy(goCrc64, toChecksum)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	pt.stopAndPrintUncompress(t, len(data))
	// adler32 with go library
	goAdler32 := adler32.New()
	toChecksum = bytes.NewBuffer(data)
	pt = newPrettyTimer("go adler32")
	_, err = io.Copy(goAdler32, toChecksum)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	goResult := goAdler32.Sum32()
	pt.stopAndPrintUncompress(t, len(data))
	t.Log(" sum :", goResult)
	// adler32 with cgzip library
	cgzipAdler32 := NewAdler32()
	toChecksum = bytes.NewBuffer(data)
	pt = newPrettyTimer("cgzip adler32")
	_, err = io.Copy(cgzipAdler32, toChecksum)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	cgzipResult := cgzipAdler32.Sum32()
	pt.stopAndPrintUncompress(t, len(data))
	t.Log(" sum :", cgzipResult)
	// test both results are the same
	if goResult != cgzipResult {
		t.Errorf("go and cgzip adler32 mismatch")
	}
	// crc32 with go library
	goCrc32 := crc32.New(crc32.MakeTable(crc32.IEEE))
	toChecksum = bytes.NewBuffer(data)
	pt = newPrettyTimer("go crc32")
	_, err = io.Copy(goCrc32, toChecksum)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	goResult = goCrc32.Sum32()
	pt.stopAndPrintUncompress(t, len(data))
	t.Log(" sum :", goResult)
	// crc32 with cgzip library
	cgzipCrc32 := NewCrc32()
	toChecksum = bytes.NewBuffer(data)
	pt = newPrettyTimer("cgzip crc32")
	_, err = io.Copy(cgzipCrc32, toChecksum)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	cgzipResult = cgzipCrc32.Sum32()
	pt.stopAndPrintUncompress(t, len(data))
	t.Log(" sum :", cgzipResult)
	// test both results are the same
	if goResult != cgzipResult {
		t.Errorf("go and cgzip crc32 mismatch")
	}
}
// runCompare builds a semi-random buffer of testSize bytes, compresses it
// with go's gzip, a forked 'gzip' process and cgzip at the given level,
// checks every result round-trips back to the input, and finally times
// the checksum implementations on the same data.
func runCompare(t *testing.T, testSize int, level int) {
	// create a test chunk, put semi-random bytes in there
	// (so compression actually will compress some)
	toEncode := make([]byte, testSize)
	where := 0
	for where < testSize {
		toFill := rand.Intn(16)
		filler := 0x61 + rand.Intn(24)
		for i := 0; i < toFill && where < testSize; i++ {
			toEncode[where] = byte(filler)
			where++
		}
	}
	t.Log("Original size:", len(toEncode))
	// now time a regular gzip writer to a Buffer
	compressed := &bytes.Buffer{}
	reader := bytes.NewBuffer(toEncode)
	pt := newPrettyTimer("Go gzip")
	gz, err := gzip.NewWriterLevel(compressed, level)
	if err != nil {
		// This error used to be silently overwritten by the io.Copy
		// assignment below; report it explicitly.
		t.Errorf("gzip.NewWriterLevel failed: %v", err)
	}
	_, err = io.Copy(gz, reader)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	gz.Close()
	pt.stopAndPrintCompress(t, compressed.Len(), len(toEncode))
	compareCompressedBuffer(t, toEncode, compressed)
	// now time a forked gzip
	compressed2 := &bytes.Buffer{}
	reader = bytes.NewBuffer(toEncode)
	cmd := exec.Command("gzip", fmt.Sprintf("-%v", level), "-c")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		t.Errorf("StdoutPipe failed: %v", err)
	}
	stdin, err := cmd.StdinPipe()
	if err != nil {
		t.Errorf("StdinPipe failed: %v", err)
	}
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		// Best-effort drain; failures of the child surface via cmd.Wait.
		io.Copy(compressed2, stdout)
		wg.Done()
	}()
	if err = cmd.Start(); err != nil {
		t.Errorf("Start failed: %v", err)
	}
	pt = newPrettyTimer("Forked gzip")
	_, err = io.Copy(stdin, reader)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	stdin.Close()
	wg.Wait()
	if err := cmd.Wait(); err != nil {
		t.Errorf("Wait failed: %v", err)
	}
	pt.stopAndPrintCompress(t, compressed2.Len(), len(toEncode))
	compareCompressedBuffer(t, toEncode, compressed2)
	// and time the cgo version
	compressed3 := &bytes.Buffer{}
	reader = bytes.NewBuffer(toEncode)
	pt = newPrettyTimer("cgzip")
	cgz, err := NewWriterLevel(compressed3, level)
	if err != nil {
		t.Errorf("NewWriterLevel failed: %v", err)
	}
	_, err = io.Copy(cgz, reader)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}
	if err := cgz.Flush(); err != nil {
		t.Errorf("Flush failed: %v", err)
	}
	if err := cgz.Close(); err != nil {
		t.Errorf("Close failed: %v", err)
	}
	pt.stopAndPrintCompress(t, compressed3.Len(), len(toEncode))
	compareCompressedBuffer(t, toEncode, compressed3)
	testChecksums(t, toEncode)
}
// TestCompare exercises compression level 1; use 'go test -v' and bigger
// sizes to show meaningful rates.
func TestCompare(t *testing.T) {
	size := 1 << 20
	if testing.Short() {
		size /= 10
	}
	runCompare(t, size, 1)
}
// TestCompareBest exercises the strongest compression level (9).
func TestCompareBest(t *testing.T) {
	size := 1 << 20
	if testing.Short() {
		size /= 10
	}
	runCompare(t, size, 9)
}

Просмотреть файл

@ -1,79 +0,0 @@
// +build cgo
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgzip
/*
#cgo CFLAGS: -Werror=implicit
#cgo pkg-config: zlib
#include "zlib.h"
*/
import "C"
import (
"hash"
"unsafe"
)
// crc32Hash holds zlib's running crc32 state. The value is kept in the
// C type so it can be passed straight back into C.crc32.
type crc32Hash struct {
	crc C.uLong
}

// NewCrc32 creates an empty buffer which has a crc32 of '0'. The go
// hash/crc32 does the same.
func NewCrc32() hash.Hash32 {
	c := &crc32Hash{}
	c.Reset()
	return c
}

// Write feeds p into the running checksum; it implements io.Writer and
// never returns an error.
func (a *crc32Hash) Write(p []byte) (n int, err error) {
	if len(p) > 0 {
		// &p[0] would panic on an empty slice, so skip the C call for it.
		a.crc = C.crc32(a.crc, (*C.Bytef)(unsafe.Pointer(&p[0])), (C.uInt)(len(p)))
	}
	return len(p), nil
}
// Sum appends the current checksum to b, most significant byte first,
// and returns the resulting slice (hash.Hash interface).
func (a *crc32Hash) Sum(b []byte) []byte {
	s := a.Sum32()
	return append(b, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// Reset re-initializes the state to the crc32 of the empty buffer;
// zlib returns the required initial value when given a nil buffer.
func (a *crc32Hash) Reset() {
	a.crc = C.crc32(0, (*C.Bytef)(unsafe.Pointer(nil)), 0)
}

// Size returns the number of bytes Sum appends (hash.Hash interface).
func (a *crc32Hash) Size() int {
	return 4
}

// BlockSize returns the hash's preferred block size (hash.Hash interface).
func (a *crc32Hash) BlockSize() int {
	return 1
}

// hash.Hash32 interface
func (a *crc32Hash) Sum32() uint32 {
	return uint32(a.crc)
}

Просмотреть файл

@ -1,18 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cgzip wraps the C library for gzip.
package cgzip

Просмотреть файл

@ -1,93 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgzip
import (
"io"
"io/ioutil"
"testing"
"vitess.io/vitess/go/testfiles"
)
// specialReader is a test class that will return bytes it reads from a file,
// returning EOF and data in the last chunk.
type specialReader struct {
	t        *testing.T
	contents []byte // full file contents to serve
	sent     int    // number of bytes already handed out
}

// newSpecialReader loads the named test file (resolved via
// testfiles.Locate) into memory and wraps it in a specialReader.
func newSpecialReader(t *testing.T, filename string) *specialReader {
	filename = testfiles.Locate(filename)
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		t.Fatalf("Cannot read file %v: %v", filename, err)
	}
	return &specialReader{t, b, 0}
}
// Read implements io.Reader. The final chunk is deliberately returned
// together with io.EOF (data and EOF in the same call), which is exactly
// the corner case the tests want to exercise.
func (sr *specialReader) Read(p []byte) (int, error) {
	remaining := len(sr.contents) - sr.sent
	if len(p) > remaining {
		sr.t.Logf("Sending %v bytes and EOF", remaining)
		sr.sent += copy(p, sr.contents[sr.sent:])
		return remaining, io.EOF
	}
	n := copy(p, sr.contents[sr.sent:sr.sent+len(p)])
	sr.sent += n
	sr.t.Logf("Sending %v bytes", n)
	return n, nil
}
// TestEofAndData is the main test here: if we return data and EOF,
// it needs to be fully processed.
// The file is a 55k file, that uncompresses into a 10 MB file.
// So it will be read as 32k + 22k, and decompressed into 2MB + 2MB + 1M and
// then 2MB + 2MB + 1M again. So it's a great test for corner cases.
func TestEofAndData(t *testing.T) {
	r := newSpecialReader(t, "cgzip_eof.gz")
	gz, err := NewReader(r)
	if err != nil {
		t.Fatalf("NewReader failed: %v", err)
	}
	n := 0 // total bytes decompressed so far
	dst := make([]byte, 2*1024*1024)
	for {
		nRead, err := gz.Read(dst)
		t.Logf("Got: %v %v", nRead, err)
		n += nRead
		switch err {
		case nil:
		case io.EOF:
			// 10485760 = 10 MB, the known uncompressed size of the fixture.
			if n != 10485760 {
				t.Fatalf("Read wrong number of bytes: got %v expected 10485760", n)
			}
			// test we also get 0 / EOF if we read again
			nRead, err = gz.Read(dst)
			if nRead != 0 || err != io.EOF {
				t.Fatalf("After-EOF read got %v %v", nRead, err)
			}
			return
		default:
			t.Fatalf("Unexpected error: %v", err)
		}
	}
}

Просмотреть файл

@ -1,22 +0,0 @@
// +build !cgo
// A slower, pure go alternative to cgzip to allow for cross compilation.
package cgzip
import (
"compress/gzip"
"hash/adler32"
"hash/crc32"
)
// Writer is an io.WriteCloser. Writes to a Writer are compressed.
type Writer = gzip.Writer

// Pure-Go stand-ins for the cgo-backed cgzip API, used when cgo is
// disabled (e.g. for cross compilation).
var (
	Z_BEST_SPEED   = gzip.BestSpeed
	NewWriterLevel = gzip.NewWriterLevel
	NewReader      = gzip.NewReader
	NewCrc32       = crc32.NewIEEE
	NewAdler32     = adler32.New
)

Просмотреть файл

@ -1,119 +0,0 @@
// +build cgo
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgzip
import "io"
// err starts out as nil
// we will call inflateEnd when we set err to a value:
// - whatever error is returned by the underlying reader
// - io.EOF if Close was called
type reader struct {
	r      io.Reader
	in     []byte  // input buffer, refilled from r
	strm   zstream // zlib inflate state
	err    error   // sticky error; once non-nil the zlib state has been freed
	skipIn bool    // when true, inflate leftover input before reading more from r
}

// NewReader returns a new cgzip.reader for reading gzip files with the C gzip
// library.
func NewReader(r io.Reader) (io.ReadCloser, error) {
	return NewReaderBuffer(r, DEFAULT_COMPRESSED_BUFFER_SIZE)
}

// NewReaderBuffer returns a new cgzip.reader with a given buffer size for
// reading gzip files with the C gzip library.
func NewReaderBuffer(r io.Reader, bufferSize int) (io.ReadCloser, error) {
	z := &reader{r: r, in: make([]byte, bufferSize)}
	if err := z.strm.inflateInit(); err != nil {
		return nil, err
	}
	return z, nil
}
// Read reads from the gz stream.
func (z *reader) Read(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}
	if len(p) == 0 {
		return 0, nil
	}
	// read and deflate until the output buffer is full
	z.strm.setOutBuf(p, len(p))
	for {
		// if we have no data to inflate, read more
		if !z.skipIn && z.strm.availIn() == 0 {
			var n int
			n, z.err = z.r.Read(z.in)
			// If we got data and EOF, pretend we didn't get the
			// EOF. That way we will return the right values
			// upstream. Note this will trigger another read
			// later on, that should return (0, EOF).
			if n > 0 && z.err == io.EOF {
				z.err = nil
			}
			// FIXME(alainjobart) this code is not compliant with
			// the Reader interface. We should process all the
			// data we got from the reader, and then return the
			// error, whatever it is.
			if (z.err != nil && z.err != io.EOF) || (n == 0 && z.err == io.EOF) {
				z.strm.inflateEnd()
				return 0, z.err
			}
			z.strm.setInBuf(z.in, n)
		} else {
			z.skipIn = false
		}
		// inflate some
		ret, err := z.strm.inflate(zNoFlush)
		if err != nil {
			z.err = err
			z.strm.inflateEnd()
			return 0, z.err
		}
		// if we read something, we're good
		have := len(p) - z.strm.availOut()
		if have > 0 {
			// Z_OK with a full output buffer means zlib may still
			// have pending output: on the next call, inflate again
			// before refilling the input buffer.
			z.skipIn = ret == Z_OK && z.strm.availOut() == 0
			return have, z.err
		}
	}
}
// Close closes the Reader. It does not close the underlying io.Reader.
// The first successful Close frees the inflate state and marks the
// reader done with io.EOF; repeated Closes keep returning nil.
func (z *reader) Close() error {
	switch z.err {
	case nil:
		z.strm.inflateEnd()
		z.err = io.EOF
		return nil
	case io.EOF:
		return nil
	default:
		return z.err
	}
}

Просмотреть файл

@ -1,158 +0,0 @@
// +build cgo
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgzip
import (
"fmt"
"io"
)
const (
	// Allowed flush values (mirror zlib.h's Z_* flush modes)
	Z_NO_FLUSH      = 0
	Z_PARTIAL_FLUSH = 1
	Z_SYNC_FLUSH    = 2
	Z_FULL_FLUSH    = 3
	Z_FINISH        = 4
	Z_BLOCK         = 5
	Z_TREES         = 6

	// Return codes (mirror zlib.h's status codes)
	Z_OK            = 0
	Z_STREAM_END    = 1
	Z_NEED_DICT     = 2
	Z_ERRNO         = -1
	Z_STREAM_ERROR  = -2
	Z_DATA_ERROR    = -3
	Z_MEM_ERROR     = -4
	Z_BUF_ERROR     = -5
	Z_VERSION_ERROR = -6

	// compression levels
	Z_NO_COMPRESSION      = 0
	Z_BEST_SPEED          = 1
	Z_BEST_COMPRESSION    = 9
	Z_DEFAULT_COMPRESSION = -1

	// our default buffer size
	// most go io functions use 32KB as buffer size, so 32KB
	// works well here for compressed data buffer
	DEFAULT_COMPRESSED_BUFFER_SIZE = 32 * 1024
)
// err starts out as nil
// we will call deflateEnd when we set err to a value:
// - whatever error is returned by the underlying writer
// - io.EOF if Close was called
type Writer struct {
	w    io.Writer // destination for the compressed bytes
	out  []byte    // reusable output buffer
	strm zstream   // zlib deflate state
	err  error     // sticky error; once non-nil the zlib state has been freed
}

// NewWriter returns a Writer using the default compression level and
// buffer size. The error from NewWriterLevelBuffer is discarded;
// Z_DEFAULT_COMPRESSION is expected to always be accepted by zlib.
func NewWriter(w io.Writer) *Writer {
	z, _ := NewWriterLevelBuffer(w, Z_DEFAULT_COMPRESSION, DEFAULT_COMPRESSED_BUFFER_SIZE)
	return z
}

// NewWriterLevel is like NewWriter but with a caller-chosen compression level.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
	return NewWriterLevelBuffer(w, level, DEFAULT_COMPRESSED_BUFFER_SIZE)
}

// NewWriterLevelBuffer creates a Writer with an explicit compression
// level and output buffer size; it fails if zlib rejects the level.
func NewWriterLevelBuffer(w io.Writer, level, bufferSize int) (*Writer, error) {
	z := &Writer{w: w, out: make([]byte, bufferSize)}
	if err := z.strm.deflateInit(level); err != nil {
		return nil, err
	}
	return z, nil
}
// this is the main function: it advances the write with either
// new data or something else to do, like a flush. It returns len(p);
// on an underlying write error it frees the zlib state, records the
// sticky error in z.err and returns 0.
func (z *Writer) write(p []byte, flush int) int {
	if len(p) == 0 {
		z.strm.setInBuf(nil, 0)
	} else {
		z.strm.setInBuf(p, len(p))
	}
	// we loop until we don't get a full output buffer
	// each loop completely writes the output buffer to the underlying
	// writer
	for {
		// deflate one buffer
		z.strm.setOutBuf(z.out, len(z.out))
		z.strm.deflate(flush)
		// write everything
		from := 0
		have := len(z.out) - int(z.strm.availOut())
		// NOTE: the old loop decremented 'have' (the slice end index)
		// by n after a short write, mis-slicing z.out[from:have].
		// Keep the end index fixed and advance only 'from'.
		for from < have {
			var n int
			n, z.err = z.w.Write(z.out[from:have])
			if z.err != nil {
				z.strm.deflateEnd()
				return 0
			}
			from += n
		}
		// we stop trying if we get a partial response
		if z.strm.availOut() != 0 {
			break
		}
	}
	// the library guarantees this
	if z.strm.availIn() != 0 {
		panic(fmt.Errorf("cgzip: Unexpected error (2)"))
	}
	return len(p)
}
// Write compresses p into the underlying writer (io.Writer interface).
// After a previous failure it keeps returning the sticky error.
func (z *Writer) Write(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}
	written := z.write(p, Z_NO_FLUSH)
	return written, z.err
}
// Flush pushes all pending compressed data to the underlying writer
// without finishing the gzip stream.
func (z *Writer) Flush() error {
	if z.err == nil {
		z.write(nil, Z_SYNC_FLUSH)
	}
	return z.err
}
// Calling Close does not close the wrapped io.Writer originally
// passed to NewWriterX. It finishes the gzip stream, frees the zlib
// state and marks the writer done with io.EOF.
func (z *Writer) Close() error {
	if z.err == nil {
		z.write(nil, Z_FINISH)
	}
	if z.err != nil {
		return z.err
	}
	z.strm.deflateEnd()
	z.err = io.EOF
	return nil
}

Просмотреть файл

@ -1,177 +0,0 @@
// +build cgo
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgzip
// See http://www.zlib.net/zlib_how.html for more information on this
/*
#cgo CFLAGS: -Werror=implicit
#cgo pkg-config: zlib
#include "zlib.h"
// inflateInit2 is a macro, so using a wrapper function
int zstream_inflate_init(char *strm) {
((z_stream*)strm)->zalloc = Z_NULL;
((z_stream*)strm)->zfree = Z_NULL;
((z_stream*)strm)->opaque = Z_NULL;
((z_stream*)strm)->avail_in = 0;
((z_stream*)strm)->next_in = Z_NULL;
return inflateInit2((z_stream*)strm,
16+15); // 16 makes it understand only gzip files
}
// deflateInit2 is a macro, so using a wrapper function
// using deflateInit2 instead of deflateInit to be able to specify gzip format
int zstream_deflate_init(char *strm, int level) {
((z_stream*)strm)->zalloc = Z_NULL;
((z_stream*)strm)->zfree = Z_NULL;
((z_stream*)strm)->opaque = Z_NULL;
return deflateInit2((z_stream*)strm, level, Z_DEFLATED,
16+15, // 16 makes it a gzip file, 15 is default
8, Z_DEFAULT_STRATEGY); // default values
}
unsigned int zstream_avail_in(char *strm) {
return ((z_stream*)strm)->avail_in;
}
unsigned int zstream_avail_out(char *strm) {
return ((z_stream*)strm)->avail_out;
}
char* zstream_msg(char *strm) {
return ((z_stream*)strm)->msg;
}
void zstream_set_in_buf(char *strm, void *buf, unsigned int len) {
((z_stream*)strm)->next_in = (Bytef*)buf;
((z_stream*)strm)->avail_in = len;
}
void zstream_set_out_buf(char *strm, void *buf, unsigned int len) {
((z_stream*)strm)->next_out = (Bytef*)buf;
((z_stream*)strm)->avail_out = len;
}
int zstream_inflate(char *strm, int flag) {
return inflate((z_stream*)strm, flag);
}
int zstream_deflate(char *strm, int flag) {
return deflate((z_stream*)strm, flag);
}
void zstream_inflate_end(char *strm) {
inflateEnd((z_stream*)strm);
}
void zstream_deflate_end(char *strm) {
deflateEnd((z_stream*)strm);
}
*/
import "C"
import (
"fmt"
"unsafe"
)
const (
	zNoFlush = C.Z_NO_FLUSH
)

// z_stream is a buffer that's big enough to fit a C.z_stream.
// This lets us allocate a C.z_stream within Go, while keeping the contents
// opaque to the Go GC. Otherwise, the GC would look inside and complain that
// the pointers are invalid, since they point to objects allocated by C code.
type zstream [unsafe.Sizeof(C.z_stream{})]C.char

// inflateInit prepares the stream for gzip decompression.
func (strm *zstream) inflateInit() error {
	result := C.zstream_inflate_init(&strm[0])
	if result != Z_OK {
		return fmt.Errorf("cgzip: failed to initialize inflate (%v): %v", result, strm.msg())
	}
	return nil
}

// deflateInit prepares the stream for gzip compression at the given level.
func (strm *zstream) deflateInit(level int) error {
	result := C.zstream_deflate_init(&strm[0], C.int(level))
	if result != Z_OK {
		return fmt.Errorf("cgzip: failed to initialize deflate (%v): %v", result, strm.msg())
	}
	return nil
}

// inflateEnd frees the zlib state allocated by inflateInit.
func (strm *zstream) inflateEnd() {
	C.zstream_inflate_end(&strm[0])
}

// deflateEnd frees the zlib state allocated by deflateInit.
func (strm *zstream) deflateEnd() {
	C.zstream_deflate_end(&strm[0])
}
// availIn returns how many input bytes zlib has not consumed yet.
func (strm *zstream) availIn() int {
	return int(C.zstream_avail_in(&strm[0]))
}

// availOut returns the remaining free space in the output buffer.
func (strm *zstream) availOut() int {
	return int(C.zstream_avail_out(&strm[0]))
}

// msg returns zlib's last error message for this stream.
func (strm *zstream) msg() string {
	return C.GoString(C.zstream_msg(&strm[0]))
}

// setInBuf points zlib's next_in/avail_in at buf (nil clears the input).
func (strm *zstream) setInBuf(buf []byte, size int) {
	if buf == nil {
		C.zstream_set_in_buf(&strm[0], nil, C.uint(size))
	} else {
		C.zstream_set_in_buf(&strm[0], unsafe.Pointer(&buf[0]), C.uint(size))
	}
}

// setOutBuf points zlib's next_out/avail_out at buf (nil clears the output).
func (strm *zstream) setOutBuf(buf []byte, size int) {
	if buf == nil {
		C.zstream_set_out_buf(&strm[0], nil, C.uint(size))
	} else {
		C.zstream_set_out_buf(&strm[0], unsafe.Pointer(&buf[0]), C.uint(size))
	}
}

// inflate runs one decompression step. Z_NEED_DICT is folded into
// Z_DATA_ERROR and treated as a failure here.
func (strm *zstream) inflate(flag int) (int, error) {
	ret := C.zstream_inflate(&strm[0], C.int(flag))
	switch ret {
	case Z_NEED_DICT:
		ret = Z_DATA_ERROR
		fallthrough
	case Z_DATA_ERROR, Z_MEM_ERROR:
		return int(ret), fmt.Errorf("cgzip: failed to inflate (%v): %v", ret, strm.msg())
	}
	return int(ret), nil
}

// deflate runs one compression step.
func (strm *zstream) deflate(flag int) {
	ret := C.zstream_deflate(&strm[0], C.int(flag))
	if ret == Z_STREAM_ERROR {
		// all the other error cases are normal,
		// and this should never happen
		panic(fmt.Errorf("cgzip: Unexpected error (1)"))
	}
}

Просмотреть файл

@ -1,23 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "vitess.io/vitess/go/stats/influxdbbackend"
)

Просмотреть файл

@ -1,23 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "vitess.io/vitess/go/stats/influxdbbackend"
)

Просмотреть файл

@ -1,23 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "vitess.io/vitess/go/stats/influxdbbackend"
)

Просмотреть файл

@ -1,23 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "vitess.io/vitess/go/stats/influxdbbackend"
)

Просмотреть файл

@ -1,34 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// Imports and register the gRPC queryservice server
import (
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtgate"
"vitess.io/vitess/go/vt/vttablet/grpcqueryservice"
"vitess.io/vitess/go/vt/vttablet/queryservice"
)
func init() {
vtgate.RegisterL2VTGates = append(vtgate.RegisterL2VTGates, func(qs queryservice.QueryService) {
if servenv.GRPCCheckServiceMap("queryservice") {
grpcqueryservice.Register(servenv.GRPCServer, qs)
}
})
}

Просмотреть файл

@ -1,23 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "vitess.io/vitess/go/stats/influxdbbackend"
)

Просмотреть файл

@ -1,23 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "vitess.io/vitess/go/stats/influxdbbackend"
)

Просмотреть файл

@ -1,23 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "vitess.io/vitess/go/stats/influxdbbackend"
)

Просмотреть файл

@ -1,23 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "vitess.io/vitess/go/stats/influxdbbackend"
)

Просмотреть файл

@ -72,6 +72,7 @@ type AuthServerStaticEntry struct {
Password string
UserData string
SourceHost string
Groups []string
}
// InitAuthServerStatic Handles initializing the AuthServerStatic if necessary.
@ -147,8 +148,7 @@ func (a *AuthServerStatic) installSignalHandlers() {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGHUP)
go func() {
for {
<-sigChan
for range sigChan {
a.loadConfigFromParams(*mysqlAuthServerStaticFile, "")
}
}()
@ -204,24 +204,24 @@ func (a *AuthServerStatic) ValidateHash(salt []byte, user string, authResponse [
a.mu.Unlock()
if !ok {
return &StaticUserData{""}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
}
for _, entry := range entries {
if entry.MysqlNativePassword != "" {
isPass := isPassScrambleMysqlNativePassword(authResponse, salt, entry.MysqlNativePassword)
if matchSourceHost(remoteAddr, entry.SourceHost) && isPass {
return &StaticUserData{entry.UserData}, nil
return &StaticUserData{entry.UserData, entry.Groups}, nil
}
} else {
computedAuthResponse := ScramblePassword(salt, []byte(entry.Password))
// Validate the password.
if matchSourceHost(remoteAddr, entry.SourceHost) && bytes.Compare(authResponse, computedAuthResponse) == 0 {
return &StaticUserData{entry.UserData}, nil
return &StaticUserData{entry.UserData, entry.Groups}, nil
}
}
}
return &StaticUserData{""}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
}
// Negotiate is part of the AuthServer interface.
@ -239,15 +239,15 @@ func (a *AuthServerStatic) Negotiate(c *Conn, user string, remoteAddr net.Addr)
a.mu.Unlock()
if !ok {
return &StaticUserData{""}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
}
for _, entry := range entries {
// Validate the password.
if matchSourceHost(remoteAddr, entry.SourceHost) && entry.Password == password {
return &StaticUserData{entry.UserData}, nil
return &StaticUserData{entry.UserData, entry.Groups}, nil
}
}
return &StaticUserData{""}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
}
func matchSourceHost(remoteAddr net.Addr, targetSourceHost string) bool {
@ -264,12 +264,13 @@ func matchSourceHost(remoteAddr net.Addr, targetSourceHost string) bool {
return false
}
// StaticUserData holds the username
// StaticUserData holds the username and groups
type StaticUserData struct {
value string
username string
groups []string
}
// Get returns the wrapped username
// Get returns the wrapped username and groups
func (sud *StaticUserData) Get() *querypb.VTGateCallerID {
return &querypb.VTGateCallerID{Username: sud.value}
return &querypb.VTGateCallerID{Username: sud.username, Groups: sud.groups}
}

Просмотреть файл

@ -42,17 +42,53 @@ func TestJsonConfigParser(t *testing.T) {
t.Fatalf("mysql_user config size should be equal to 1")
}
// works with new format
jsonConfig = "{\"mysql_user\":[{\"Password\":\"123\", \"UserData\":\"dummy\", \"SourceHost\": \"localhost\"}, {\"Password\": \"123\", \"UserData\": \"mysql_user_all\"}]}"
jsonConfig = `{"mysql_user":[
{"Password":"123", "UserData":"dummy", "SourceHost": "localhost"},
{"Password": "123", "UserData": "mysql_user_all"},
{"Password": "456", "UserData": "mysql_user_with_groups", "Groups": ["user_group"]}
]}`
err = parseConfig([]byte(jsonConfig), &config)
if err != nil {
t.Fatalf("should not get an error, but got: %v", err)
}
if len(config["mysql_user"]) != 2 {
t.Fatalf("mysql_user config size should be equal to 2")
if len(config["mysql_user"]) != 3 {
t.Fatalf("mysql_user config size should be equal to 3")
}
if config["mysql_user"][0].SourceHost != "localhost" {
t.Fatalf("SourceHost should be equal localhost")
t.Fatalf("SourceHost should be equal to localhost")
}
if len(config["mysql_user"][2].Groups) != 1 || config["mysql_user"][2].Groups[0] != "user_group" {
t.Fatalf("Groups should be equal to [\"user_group\"]")
}
}
// TestValidateHashGetter checks that the caller ID produced by ValidateHash
// carries both the configured username and the configured groups.
func TestValidateHashGetter(t *testing.T) {
	jsonConfig := `{"mysql_user": [{"Password": "password", "UserData": "user.name", "Groups": ["user_group"]}]}`

	auth := NewAuthServerStatic()
	auth.loadConfigFromParams("", jsonConfig)

	addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1"), Zone: ""}

	salt, err := NewSalt()
	if err != nil {
		t.Fatalf("error generating salt: %v", err)
	}
	scrambled := ScramblePassword(salt, []byte("password"))

	getter, err := auth.ValidateHash(salt, "mysql_user", scrambled, addr)
	if err != nil {
		t.Fatalf("error validating password: %v", err)
	}

	callerID := getter.Get()
	if callerID.Username != "user.name" {
		t.Fatalf("getter username incorrect, expected \"user.name\", got %v", callerID.Username)
	}
	if len(callerID.Groups) != 1 || callerID.Groups[0] != "user_group" {
		t.Fatalf("getter groups incorrect, expected [\"user_group\"], got %v", callerID.Groups)
	}
}

Просмотреть файл

@ -70,13 +70,13 @@ type BinlogEvent interface {
// RBR events.
// IsTableMapEvent returns true if this is a TABLE_MAP_EVENT.
// IsTableMap returns true if this is a TABLE_MAP_EVENT.
IsTableMap() bool
// IsWriteRowsEvent returns true if this is a WRITE_ROWS_EVENT.
// IsWriteRows returns true if this is a WRITE_ROWS_EVENT.
IsWriteRows() bool
// IsUpdateRowsEvent returns true if this is a UPDATE_ROWS_EVENT.
// IsUpdateRows returns true if this is a UPDATE_ROWS_EVENT.
IsUpdateRows() bool
// IsDeleteRowsEvent returns true if this is a DELETE_ROWS_EVENT.
// IsDeleteRows returns true if this is a DELETE_ROWS_EVENT.
IsDeleteRows() bool
// Timestamp returns the timestamp from the event header.

Просмотреть файл

@ -127,7 +127,7 @@ func NewFormatDescriptionEvent(f BinlogFormat, s *FakeBinlogStream) BinlogEvent
1 // (undocumented) checksum algorithm
data := make([]byte, length)
binary.LittleEndian.PutUint16(data[0:2], f.FormatVersion)
copy(data[2:52], []byte(f.ServerVersion))
copy(data[2:52], f.ServerVersion)
binary.LittleEndian.PutUint32(data[52:56], s.Timestamp)
data[56] = f.HeaderLength
copy(data[57:], f.HeaderSizes)
@ -197,7 +197,7 @@ func NewQueryEvent(f BinlogFormat, s *FakeBinlogStream, q Query) BinlogEvent {
data[pos+6] = byte(q.Charset.Server >> 8)
pos += 7
}
pos += copy(data[pos:pos+len(q.Database)], []byte(q.Database))
pos += copy(data[pos:pos+len(q.Database)], q.Database)
data[pos] = 0
pos++
copy(data[pos:], q.SQL)
@ -310,11 +310,11 @@ func NewTableMapEvent(f BinlogFormat, s *FakeBinlogStream, tableID uint64, tm *T
data[6] = byte(tm.Flags)
data[7] = byte(tm.Flags >> 8)
data[8] = byte(len(tm.Database))
pos := 6 + 2 + 1 + copy(data[9:], []byte(tm.Database))
pos := 6 + 2 + 1 + copy(data[9:], tm.Database)
data[pos] = 0
pos++
data[pos] = byte(len(tm.Name))
pos += 1 + copy(data[pos+1:], []byte(tm.Name))
pos += 1 + copy(data[pos+1:], tm.Name)
data[pos] = 0
pos++

Просмотреть файл

@ -853,14 +853,26 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ
max := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff))
// Length is encoded in 1 or 2 bytes.
if max > 255 {
// This code path exists due to https://bugs.mysql.com/bug.php?id=37426.
// CHAR types need to allocate 3 bytes per char. So, the length for CHAR(255)
// cannot be represented in 1 byte. This also means that this rule does not
// apply to BINARY data.
l := int(uint64(data[pos]) |
uint64(data[pos+1])<<8)
return sqltypes.MakeTrusted(querypb.Type_VARCHAR,
data[pos+2:pos+2+l]), l + 2, nil
}
l := int(data[pos])
return sqltypes.MakeTrusted(querypb.Type_VARCHAR,
data[pos+1:pos+1+l]), l + 1, nil
mdata := data[pos+1 : pos+1+l]
if sqltypes.IsBinary(styp) {
// Fixed length binaries have to be padded with zeroes
// up to the length of the field. Otherwise, equality checks
// fail against saved data. See https://github.com/vitessio/vitess/issues/3984.
ret := make([]byte, max)
copy(ret, mdata)
return sqltypes.MakeTrusted(querypb.Type_BINARY, ret), l + 1, nil
}
return sqltypes.MakeTrusted(querypb.Type_VARCHAR, mdata), l + 1, nil
case TypeGeometry:
l := 0

Просмотреть файл

@ -31,6 +31,7 @@ import (
"vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/vt/log"
querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/sqlparser"
)
const (
@ -723,65 +724,28 @@ func (c *Conn) handleNextCommand(handler Handler) error {
queryStart := time.Now()
query := c.parseComQuery(data)
c.recycleReadPacket()
fieldSent := false
// sendFinished is set if the response should just be an OK packet.
sendFinished := false
err := handler.ComQuery(c, query, func(qr *sqltypes.Result) error {
if sendFinished {
// Failsafe: Unreachable if server is well-behaved.
return io.EOF
}
if !fieldSent {
fieldSent = true
if len(qr.Fields) == 0 {
sendFinished = true
// A successful callback with no fields means that this was a
// DML or other write-only operation.
//
// We should not send any more packets after this, but make sure
// to extract the affected rows and last insert id from the result
// struct here since clients expect it.
return c.writeOKPacket(qr.RowsAffected, qr.InsertID, c.StatusFlags, handler.WarningCount(c))
var queries []string
if c.Capabilities&CapabilityClientMultiStatements != 0 {
queries, err = sqlparser.SplitStatementToPieces(query)
if err != nil {
log.Errorf("Conn %v: Error splitting query: %v", c, err)
if werr := c.writeErrorPacketFromError(err); werr != nil {
// If we can't even write the error, we're done.
log.Errorf("Conn %v: Error writing query error: %v", c, werr)
return werr
}
if err := c.writeFields(qr); err != nil {
return err
}
}
return c.writeRows(qr)
})
// If no field was sent, we expect an error.
if !fieldSent {
// This is just a failsafe. Should never happen.
if err == nil || err == io.EOF {
err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error"))
}
if werr := c.writeErrorPacketFromError(err); werr != nil {
// If we can't even write the error, we're done.
log.Errorf("Error writing query error to %s: %v", c, werr)
return werr
}
} else {
if err != nil {
// We can't send an error in the middle of a stream.
// All we can do is abort the send, which will cause a 2013.
log.Errorf("Error in the middle of a stream to %s: %v", c, err)
return err
queries = []string{query}
}
for index, sql := range queries {
more := false
if index != len(queries)-1 {
more = true
}
// Send the end packet only sendFinished is false (results were streamed).
// In this case the affectedRows and lastInsertID are always 0 since it
// was a read operation.
if !sendFinished {
if err := c.writeEndResult(false, 0, 0, handler.WarningCount(c)); err != nil {
log.Errorf("Error writing result to %s: %v", c, err)
return err
}
if err := c.execQuery(sql, handler, more); err != nil {
return err
}
}
@ -807,14 +771,16 @@ func (c *Conn) handleNextCommand(handler Handler) error {
}
}
case ComSetOption:
if operation, ok := c.parseComSetOption(data); ok {
operation, ok := c.parseComSetOption(data)
c.recycleReadPacket()
if ok {
switch operation {
case 0:
c.Capabilities |= CapabilityClientMultiStatements
case 1:
c.Capabilities &^= CapabilityClientMultiStatements
default:
log.Errorf("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data)
log.Errorf("Got unhandled packet (ComSetOption default) from client %v, returning error: %v", c.ConnectionID, data)
if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "error handling packet: %v", data); err != nil {
log.Errorf("Error writing error packet to client: %v", err)
return err
@ -825,14 +791,14 @@ func (c *Conn) handleNextCommand(handler Handler) error {
return err
}
} else {
log.Errorf("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data)
log.Errorf("Got unhandled packet (ComSetOption else) from client %v, returning error: %v", c.ConnectionID, data)
if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "error handling packet: %v", data); err != nil {
log.Errorf("Error writing error packet to client: %v", err)
return err
}
}
default:
log.Errorf("Got unhandled packet from %s, returning error: %v", c, data)
log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data)
c.recycleReadPacket()
if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "command handling not implemented yet: %v", data[0]); err != nil {
log.Errorf("Error writing error packet to %s: %s", c, err)
@ -843,6 +809,76 @@ func (c *Conn) handleNextCommand(handler Handler) error {
return nil
}
// execQuery runs a single SQL statement through the handler and writes the
// MySQL protocol response back to the client: an OK packet for field-less
// (DML) results, a streamed result set otherwise, or an error packet.
// If more is true, ServerMoreResultsExists is set on the response so the
// client knows another result set follows (multi-statement support).
func (c *Conn) execQuery(query string, handler Handler, more bool) error {
	fieldSent := false
	// sendFinished is set if the response should just be an OK packet.
	sendFinished := false
	err := handler.ComQuery(c, query, func(qr *sqltypes.Result) error {
		flag := c.StatusFlags
		if more {
			flag |= ServerMoreResultsExists
		}
		if sendFinished {
			// Failsafe: Unreachable if server is well-behaved.
			return io.EOF
		}

		if !fieldSent {
			fieldSent = true

			if len(qr.Fields) == 0 {
				sendFinished = true
				// A successful callback with no fields means that this was a
				// DML or other write-only operation.
				//
				// We should not send any more packets after this, but make sure
				// to extract the affected rows and last insert id from the result
				// struct here since clients expect it.
				return c.writeOKPacket(qr.RowsAffected, qr.InsertID, flag, handler.WarningCount(c))
			}
			if err := c.writeFields(qr); err != nil {
				return err
			}
		}

		return c.writeRows(qr)
	})

	// If no field was sent, we expect an error.
	if !fieldSent {
		// This is just a failsafe. Should never happen.
		if err == nil || err == io.EOF {
			// Fixed: the message previously read "without no results", a double negative.
			err = NewSQLErrorFromError(errors.New("unexpected: query ended without results and no error"))
		}
		if werr := c.writeErrorPacketFromError(err); werr != nil {
			// If we can't even write the error, we're done.
			log.Errorf("Error writing query error to %s: %v", c, werr)
			return werr
		}
	} else {
		if err != nil {
			// We can't send an error in the middle of a stream.
			// All we can do is abort the send, which will cause a 2013.
			log.Errorf("Error in the middle of a stream to %s: %v", c, err)
			return err
		}

		// Send the end packet only if sendFinished is false (results were streamed).
		// In this case the affectedRows and lastInsertID are always 0 since it
		// was a read operation.
		if !sendFinished {
			if err := c.writeEndResult(more, 0, 0, handler.WarningCount(c)); err != nil {
				log.Errorf("Error writing result to %s: %v", c, err)
				return err
			}
		}
	}
	return nil
}
//
// Packet parsing methods, for generic packets.
//

Просмотреть файл

@ -150,11 +150,6 @@ func doTestMultiResult(t *testing.T, disableClientDeprecateEOF bool) {
expectNoError(t, err)
defer conn.Close()
connParams.DisableClientDeprecateEOF = false
expectFlag(t, "Negotiated ClientDeprecateEOF flag", (conn.Capabilities&mysql.CapabilityClientDeprecateEOF) != 0, !disableClientDeprecateEOF)
defer conn.Close()
qr, more, err := conn.ExecuteFetchMulti("select 1 from dual; set autocommit=1; select 1 from dual", 10, true)
expectNoError(t, err)
expectFlag(t, "ExecuteMultiFetch(multi result)", more, true)

Просмотреть файл

@ -247,11 +247,6 @@ func doTestWarnings(t *testing.T, disableClientDeprecateEOF bool) {
expectNoError(t, err)
defer conn.Close()
connParams.DisableClientDeprecateEOF = false
expectFlag(t, "Negotiated ClientDeprecateEOF flag", (conn.Capabilities&mysql.CapabilityClientDeprecateEOF) != 0, !disableClientDeprecateEOF)
defer conn.Close()
result, err := conn.ExecuteFetch("create table a(id int, val int not null, primary key(id))", 0, false)
if err != nil {
t.Fatalf("create table failed: %v", err)

Просмотреть файл

@ -52,10 +52,9 @@ func testDescribeTable(t *testing.T) {
t.Fatal(err)
}
// MariaDB has '81' instead of '90' of Extra ColumnLength.
// Just try it and see if it's the only difference.
if conn.IsMariaDB() && result.Fields[5].ColumnLength == 81 {
result.Fields[5].ColumnLength = 90
// Zero-out the column lengths, because they can't be compared.
for i := range result.Fields {
result.Fields[i].ColumnLength = 0
}
if !sqltypes.FieldsEqual(result.Fields, mysql.DescribeTableFields) {

Просмотреть файл

@ -45,6 +45,10 @@ type flavor interface {
// startSlave returns the command to start the slave.
startSlaveCommand() string
// startSlaveUntilAfter will restart replication, but only allow it
// to run until `pos` is reached. After reaching pos, replication will be stopped again
startSlaveUntilAfter(pos Position) string
// stopSlave returns the command to stop the slave.
stopSlaveCommand() string
@ -146,6 +150,11 @@ func (c *Conn) StartSlaveCommand() string {
return c.flavor.startSlaveCommand()
}
// StartSlaveUntilAfterCommand returns the command to restart replication
// and run it only until the given position is reached, after which the
// slave stops again. The exact SQL is flavor-specific (MySQL vs. MariaDB).
func (c *Conn) StartSlaveUntilAfterCommand(pos Position) string {
	return c.flavor.startSlaveUntilAfter(pos)
}
// StopSlaveCommand returns the command to stop the slave.
func (c *Conn) StopSlaveCommand() string {
return c.flavor.stopSlaveCommand()

Просмотреть файл

@ -41,6 +41,10 @@ func (mariadbFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) {
return parseMariadbGTIDSet(qr.Rows[0][0].ToString())
}
// startSlaveUntilAfter returns the MariaDB statement that starts replication
// and stops it again once the given GTID position has been reached.
func (mariadbFlavor) startSlaveUntilAfter(pos Position) string {
	return fmt.Sprintf("START SLAVE UNTIL master_gtid_pos = \"%s\"", pos)
}
// startSlaveCommand returns the MariaDB command to (re)start replication.
func (mariadbFlavor) startSlaveCommand() string {
	return "START SLAVE"
}

Просмотреть файл

@ -43,6 +43,10 @@ func (mysqlFlavor) startSlaveCommand() string {
return "START SLAVE"
}
// startSlaveUntilAfter returns the MySQL statement that starts replication
// and stops it again once all transactions in the given GTID set have been
// applied.
func (mysqlFlavor) startSlaveUntilAfter(pos Position) string {
	return fmt.Sprintf("START SLAVE UNTIL SQL_AFTER_GTIDS = '%s'", pos)
}
func (mysqlFlavor) stopSlaveCommand() string {
return "STOP SLAVE"
}

Просмотреть файл

@ -34,6 +34,8 @@ import (
// DescribeTableFields contains the fields returned by a
// 'describe <table>' command. They are validated by the testDescribeTable
// test.
// Column lengths returned seem to differ between versions. So, we
// don't compare them.
var DescribeTableFields = []*querypb.Field{
{
Name: "Field",
@ -42,7 +44,7 @@ var DescribeTableFields = []*querypb.Field{
OrgTable: "COLUMNS",
Database: "information_schema",
OrgName: "COLUMN_NAME",
ColumnLength: 192,
ColumnLength: 0,
Charset: 33,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG),
},
@ -53,7 +55,7 @@ var DescribeTableFields = []*querypb.Field{
OrgTable: "COLUMNS",
Database: "information_schema",
OrgName: "COLUMN_TYPE",
ColumnLength: 589815,
ColumnLength: 0,
Charset: 33,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_BLOB_FLAG),
},
@ -64,7 +66,7 @@ var DescribeTableFields = []*querypb.Field{
OrgTable: "COLUMNS",
Database: "information_schema",
OrgName: "IS_NULLABLE",
ColumnLength: 9,
ColumnLength: 0,
Charset: 33,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG),
},
@ -75,7 +77,7 @@ var DescribeTableFields = []*querypb.Field{
OrgTable: "COLUMNS",
Database: "information_schema",
OrgName: "COLUMN_KEY",
ColumnLength: 9,
ColumnLength: 0,
Charset: 33,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG),
},
@ -86,7 +88,7 @@ var DescribeTableFields = []*querypb.Field{
OrgTable: "COLUMNS",
Database: "information_schema",
OrgName: "COLUMN_DEFAULT",
ColumnLength: 589815,
ColumnLength: 0,
Charset: 33,
Flags: uint32(querypb.MySqlFlag_BLOB_FLAG),
},
@ -97,7 +99,7 @@ var DescribeTableFields = []*querypb.Field{
OrgTable: "COLUMNS",
Database: "information_schema",
OrgName: "EXTRA",
ColumnLength: 90,
ColumnLength: 0,
Charset: 33,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG),
},

Просмотреть файл

@ -41,7 +41,7 @@ import (
var emitStats = flag.Bool("emit_stats", false, "true iff we should emit stats to push-based monitoring/stats backends")
var statsEmitPeriod = flag.Duration("stats_emit_period", time.Duration(60*time.Second), "Interval between emitting stats to all registered backends")
var statsBackend = flag.String("stats_backend", "influxdb", "The name of the registered push-based monitoring/stats backend to use")
var statsBackend = flag.String("stats_backend", "", "The name of the registered push-based monitoring/stats backend to use")
// NewVarHook is the type of a hook to export variables in a different way
type NewVarHook func(name string, v expvar.Var)

Просмотреть файл

@ -1,104 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package influxdbbackend is useful for publishing metrics to an InfluxDB backend (tested on v0.88).
// It requires a database to already have been created in InfluxDB, and then specified via the
// "--influxdb_database" flag.
//
// It's still a work in progress, as it publishes almost all stats as key-value string pairs,
// instead of better JSON representations. This limitation will hopefully be fixed after the
// release of InfluxDB v0.9, as it has better support for arbitrary metadata dicts in the
// form of tags.
package influxdbbackend
import (
"expvar"
"flag"
influxClient "github.com/influxdb/influxdb/client"
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/servenv"
)
// Connection and authentication flags for the InfluxDB endpoint that
// receives pushed stats.
var influxDBHost = flag.String("influxdb_host", "localhost:8086", "the influxdb host (with port)")
var influxDBDatabase = flag.String("influxdb_database", "vitess", "the name of the influxdb database")
var influxDBUsername = flag.String("influxdb_username", "root", "influxdb username")
var influxDBPassword = flag.String("influxdb_password", "root", "influxdb password")
// InfluxDBBackend implements stats.PushBackend
type InfluxDBBackend struct {
	client *influxClient.Client // shared client, created once in init() via servenv.OnRun
}
// init attempts to create a singleton InfluxDBBackend and register it as a PushBackend.
// If it fails to create one, this is a noop.
func init() {
	// Needs to happen in servenv.OnRun() instead of init because it requires flag parsing and logging
	servenv.OnRun(func() {
		config := &influxClient.ClientConfig{
			Host:     *influxDBHost,
			Username: *influxDBUsername,
			Password: *influxDBPassword,
			Database: *influxDBDatabase,
		}
		client, err := influxClient.NewClient(config)
		if err != nil {
			// Best-effort: stats push is optional, so a failed client only logs
			// and skips registration instead of aborting startup.
			log.Errorf("Unable to create an InfluxDB client: %v", err)
			return
		}
		stats.RegisterPushBackend("influxdb", &InfluxDBBackend{
			client: client,
		})
	})
}
// PushAll pushes all expvar stats to InfluxDB as one series batch of
// (key, value) points.
func (backend *InfluxDBBackend) PushAll() error {
	// Deliberately an empty (non-nil) slice: preserved from the original so
	// serialization of an empty batch is unchanged.
	series := []*influxClient.Series{}
	expvar.Do(func(kv expvar.KeyValue) {
		// TODO(aaijazi): This would be much better suited to InfluxDB v0.90's tags.
		// Ideally, we'd use some of the expvars as tags, and some as values.
		// However, as of 03/11/2015, InfluxDB v0.90 hasn't proven quite stable enough to use.
		s := &influxClient.Series{
			Name:    "stats",
			Columns: []string{"key", "value"},
			Points: [][]interface{}{
				{kv.Key, statToValue(kv.Value)},
			},
		}
		series = append(series, s)
	})
	return backend.client.WriteSeries(series)
}
// statToValue converts from a stats.Stat type to a JSON representable value.
// This is preferred to just calling the String() for things like numbers, so that
// InfluxDB can also represent the metrics as numbers.
// TODO(aaijazi): this needs to be extended to support better serialization of other types..
// It's probably good to do this after InfluxDB 0.9 is released, as it has has better support
// for arbitrary dict values (as tags).
func statToValue(v expvar.Var) interface{} {
	if counter, ok := v.(*stats.Counter); ok {
		return counter.Get()
	}
	if floatFn, ok := v.(stats.FloatFunc); ok {
		return floatFn()
	}
	// Fall back to the string form for everything else.
	return v.String()
}

Просмотреть файл

@ -32,6 +32,7 @@ type Batcher struct {
queue chan int
waiters AtomicInt32
nextID AtomicInt32
after func(time.Duration) <-chan time.Time
}
// NewBatcher returns a new Batcher
@ -41,6 +42,19 @@ func NewBatcher(interval time.Duration) *Batcher {
queue: make(chan int),
waiters: NewAtomicInt32(0),
nextID: NewAtomicInt32(0),
after: time.After,
}
}
// newBatcherForTest returns a Batcher for testing where time.After can
// be replaced by a fake alternative, letting tests control exactly when a
// batch interval "expires".
func newBatcherForTest(interval time.Duration, after func(time.Duration) <-chan time.Time) *Batcher {
	b := &Batcher{
		after:    after,
		interval: interval,
		nextID:   NewAtomicInt32(0),
		queue:    make(chan int),
		waiters:  NewAtomicInt32(0),
	}
	return b
}
@ -56,7 +70,7 @@ func (b *Batcher) Wait() int {
// newBatch starts a new batch
func (b *Batcher) newBatch() {
go func() {
time.Sleep(b.interval)
<-b.after(b.interval)
id := b.nextID.Add(1)

Просмотреть файл

@ -1,69 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sync2
import (
"testing"
"time"
)
// expectBatch waits for the next batch on b and reports a test error if the
// received batch ID differs from want.
func expectBatch(testcase string, b *Batcher, want int, t *testing.T) {
	if got := b.Wait(); got != want {
		t.Errorf("%s: got %d, want %d", testcase, got, want)
	}
}
// TestBatcher exercises the Batcher against the real clock, sleeping between
// waves of waiters so each wave lands in a distinct batch interval.
// NOTE(review): this depends on wall-clock sleeps (multiples of interval),
// so it can be flaky on heavily loaded machines.
func TestBatcher(t *testing.T) {
	interval := time.Duration(50 * time.Millisecond)
	b := NewBatcher(interval)

	// test single waiter
	go expectBatch("single waiter", b, 1, t)

	time.Sleep(interval * 2)

	// multiple waiters all at once
	go expectBatch("concurrent waiter", b, 2, t)
	go expectBatch("concurrent waiter", b, 2, t)
	go expectBatch("concurrent waiter", b, 2, t)

	time.Sleep(interval * 2)

	// stagger the waiters out in time but cross two intervals
	go expectBatch("staggered waiter", b, 3, t)
	time.Sleep(interval / 5)
	go expectBatch("staggered waiter", b, 3, t)
	time.Sleep(interval / 5)
	go expectBatch("staggered waiter", b, 3, t)
	time.Sleep(interval / 5)
	go expectBatch("staggered waiter", b, 3, t)
	time.Sleep(interval / 5)
	go expectBatch("staggered waiter", b, 3, t)
	time.Sleep(interval / 5)
	// second staggered wave: expected to land in the following batch
	go expectBatch("staggered waiter 2", b, 4, t)
	time.Sleep(interval / 5)
	go expectBatch("staggered waiter 2", b, 4, t)
	time.Sleep(interval / 5)
	go expectBatch("staggered waiter 2", b, 4, t)
	time.Sleep(interval / 5)
	go expectBatch("staggered waiter 2", b, 4, t)
	time.Sleep(interval / 5)
	go expectBatch("staggered waiter 2", b, 4, t)
	time.Sleep(interval / 5)
	time.Sleep(interval * 2)
}

101
go/sync2/batcher_test.go Normal file
Просмотреть файл

@ -0,0 +1,101 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sync2
import (
"testing"
"time"
)
// makeAfterFnWithLatch returns a fake alternative to time.After that blocks until
// the release function is called. The fake doesn't support having multiple concurrent
// calls to the After function, which is ok because Batcher should never do that.
func makeAfterFnWithLatch(t *testing.T) (func(time.Duration) <-chan time.Time, func()) {
latch := make(chan time.Time, 1)
afterFn := func(d time.Duration) <-chan time.Time {
return latch
}
releaseFn := func() {
select {
case latch <- time.Now():
default:
t.Errorf("Previous batch still hasn't been released")
}
}
return afterFn, releaseFn
}
// TestBatcher exercises the Batcher with a fake, latch-controlled clock so
// the test does not depend on real timing: each releaseBatch() call
// simulates one batch interval expiring.
func TestBatcher(t *testing.T) {
	interval := time.Duration(50 * time.Millisecond)

	afterFn, releaseBatch := makeAfterFnWithLatch(t)
	b := newBatcherForTest(interval, afterFn)

	// Counts how many waiter goroutines have observed their batch ID.
	waitersFinished := NewAtomicInt32(0)

	// startWaiter spawns a goroutine that waits for the next batch and
	// verifies it received the expected batch ID.
	startWaiter := func(testcase string, want int) {
		go func() {
			id := b.Wait()
			if id != want {
				t.Errorf("%s: got %d, want %d", testcase, id, want)
			}
			waitersFinished.Add(1)
		}()
	}

	// awaitVal polls until val reaches expected, failing after a bounded
	// number of 50ms polls.
	awaitVal := func(name string, val *AtomicInt32, expected int32) {
		for count := 0; val.Get() != expected; count++ {
			time.Sleep(50 * time.Millisecond)
			if count > 5 {
				t.Errorf("Timed out waiting for %s to be %v", name, expected)
				return
			}
		}
	}

	// awaitBatch waits for n waiters to register, releases the batch, and
	// verifies all n waiters ran to completion with the right batch ID.
	awaitBatch := func(name string, n int32) {
		// Wait for all the waiters to register
		awaitVal("Batcher.waiters for "+name, &b.waiters, n)
		// Release the batch and wait for the batcher to catch up.
		if waitersFinished.Get() != 0 {
			t.Errorf("Waiters finished before being released")
		}
		releaseBatch()
		awaitVal("Batcher.waiters for "+name, &b.waiters, 0)
		// Make sure the waiters actually run so they can verify their batch number.
		awaitVal("waitersFinshed for "+name, &waitersFinished, n)
		waitersFinished.Set(0)
	}

	// test single waiter
	startWaiter("single waiter", 1)
	awaitBatch("single waiter", 1)

	// multiple waiters all at once
	startWaiter("concurrent waiter", 2)
	startWaiter("concurrent waiter", 2)
	startWaiter("concurrent waiter", 2)
	awaitBatch("concurrent waiter", 3)

	startWaiter("more waiters", 3)
	startWaiter("more waiters", 3)
	startWaiter("more waiters", 3)
	startWaiter("more waiters", 3)
	startWaiter("more waiters", 3)
	awaitBatch("more waiters", 5)
}

Просмотреть файл

@ -1,64 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package testfiles locates test files within the Vitess directory tree.
// It also handles test port allocation.
package testfiles
import (
"fmt"
"os"
"path"
"path/filepath"
)
// Locate returns a file path that came from $VTROOT/data/test.
func Locate(filename string) string {
vtroot := os.Getenv("VTROOT")
if vtroot == "" {
panic(fmt.Errorf("VTROOT is not set"))
}
return path.Join(vtroot, "data", "test", filename)
}
// Glob returns all files matching a pattern in $VTROOT/data/test.
// It panics if VTROOT is unset, the test data directory is missing,
// or the pattern is malformed.
func Glob(pattern string) []string {
	root := os.Getenv("VTROOT")
	if root == "" {
		panic(fmt.Errorf("VTROOT is not set"))
	}
	dir := path.Join(root, "data", "test")
	if ok, err := exists(dir); !ok {
		panic(err)
	}
	matches, err := filepath.Glob(path.Join(dir, pattern))
	if err != nil {
		panic(err)
	}
	return matches
}
func exists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, err
}
return false, err
}

Просмотреть файл

@ -23,7 +23,7 @@ import (
)
// WaitForFilteredReplicationTask runs vtctl WaitForFilteredReplication to block until the destination master
// (i.e. the receiving side of the filtered replication) has caught up up to max_delay with the source shard.
// (i.e. the receiving side of the filtered replication) has caught up to max_delay with the source shard.
type WaitForFilteredReplicationTask struct {
}

Просмотреть файл

@ -170,7 +170,7 @@ func NewBinlogPlayerTables(dbClient DBClient, tablet *topodatapb.Tablet, tables
// If an error is encountered, it updates the vreplication state to "Error".
// If a stop position was specifed, and reached, the state is updated to "Stopped".
func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error {
if err := setVReplicationState(blp.dbClient, blp.uid, BlpRunning, ""); err != nil {
if err := SetVReplicationState(blp.dbClient, blp.uid, BlpRunning, ""); err != nil {
log.Errorf("Error writing Running state: %v", err)
}
@ -180,7 +180,7 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error {
Time: time.Now(),
Message: msg,
})
if err := setVReplicationState(blp.dbClient, blp.uid, BlpError, msg); err != nil {
if err := SetVReplicationState(blp.dbClient, blp.uid, BlpError, msg); err != nil {
log.Errorf("Error writing stop state: %v", err)
}
return err
@ -191,7 +191,7 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error {
// applyEvents returns a recordable status message on termination or an error otherwise.
func (blp *BinlogPlayer) applyEvents(ctx context.Context) error {
// Read starting values for vreplication.
pos, stopPos, maxTPS, maxReplicationLag, err := readVRSettings(blp.dbClient, blp.uid)
pos, stopPos, maxTPS, maxReplicationLag, err := ReadVRSettings(blp.dbClient, blp.uid)
if err != nil {
log.Error(err)
return err
@ -233,8 +233,8 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error {
} else {
log.Infof("BinlogPlayer client %v for keyrange '%v-%v' starting @ '%v', server: %v",
blp.uid,
hex.EncodeToString(blp.keyRange.Start),
hex.EncodeToString(blp.keyRange.End),
hex.EncodeToString(blp.keyRange.GetStart()),
hex.EncodeToString(blp.keyRange.GetEnd()),
blp.position,
blp.tablet,
)
@ -244,14 +244,14 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error {
case blp.position.Equal(blp.stopPosition):
msg := fmt.Sprintf("not starting BinlogPlayer, we're already at the desired position %v", blp.stopPosition)
log.Info(msg)
if err := setVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil {
if err := SetVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil {
log.Errorf("Error writing stop state: %v", err)
}
return nil
case blp.position.AtLeast(blp.stopPosition):
msg := fmt.Sprintf("starting point %v greater than stopping point %v", blp.position, blp.stopPosition)
log.Error(msg)
if err := setVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil {
if err := SetVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil {
log.Errorf("Error writing stop state: %v", err)
}
// Don't return an error. Otherwise, it will keep retrying.
@ -351,7 +351,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error {
if blp.position.AtLeast(blp.stopPosition) {
msg := "Reached stopping position, done playing logs"
log.Info(msg)
if err := setVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil {
if err := SetVReplicationState(blp.dbClient, blp.uid, BlpStopped, msg); err != nil {
log.Errorf("Error writing stop state: %v", err)
}
return nil
@ -447,7 +447,7 @@ func (blp *BinlogPlayer) writeRecoveryPosition(tx *binlogdatapb.BinlogTransactio
}
now := time.Now().Unix()
updateRecovery := updateVReplicationPos(blp.uid, position, now, tx.EventToken.Timestamp)
updateRecovery := GenerateUpdatePos(blp.uid, position, now, tx.EventToken.Timestamp)
qr, err := blp.exec(updateRecovery)
if err != nil {
@ -503,8 +503,8 @@ func CreateVReplicationTable() []string {
) ENGINE=InnoDB`}
}
// setVReplicationState updates the state in the _vt.vreplication table.
func setVReplicationState(dbClient DBClient, uid uint32, state, message string) error {
// SetVReplicationState updates the state in the _vt.vreplication table.
func SetVReplicationState(dbClient DBClient, uid uint32, state, message string) error {
query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state, encodeString(message), uid)
if _, err := dbClient.ExecuteFetch(query, 1); err != nil {
return fmt.Errorf("could not set state: %v: %v", query, err)
@ -512,9 +512,9 @@ func setVReplicationState(dbClient DBClient, uid uint32, state, message string)
return nil
}
// readVRSettings retrieves the throttler settings for
// ReadVRSettings retrieves the throttler settings for
// vreplication from the checkpoint table.
func readVRSettings(dbClient DBClient, uid uint32) (pos, stopPos string, maxTPS, maxReplicationLag int64, err error) {
func ReadVRSettings(dbClient DBClient, uid uint32) (pos, stopPos string, maxTPS, maxReplicationLag int64, err error) {
query := fmt.Sprintf("select pos, stop_pos, max_tps, max_replication_lag from _vt.vreplication where id=%v", uid)
qr, err := dbClient.ExecuteFetch(query, 1)
if err != nil {
@ -554,9 +554,9 @@ func CreateVReplicationStopped(workflow string, source *binlogdatapb.BinlogSourc
encodeString(workflow), encodeString(source.String()), encodeString(position), throttler.MaxRateModuleDisabled, throttler.ReplicationLagModuleDisabled, time.Now().Unix(), BlpStopped)
}
// updateVReplicationPos returns a statement to update a value in the
// GenerateUpdatePos returns a statement to update a value in the
// _vt.vreplication table.
func updateVReplicationPos(uid uint32, pos mysql.Position, timeUpdated int64, txTimestamp int64) string {
func GenerateUpdatePos(uid uint32, pos mysql.Position, timeUpdated int64, txTimestamp int64) string {
if txTimestamp != 0 {
return fmt.Sprintf(
"update _vt.vreplication set pos=%v, time_updated=%v, transaction_timestamp=%v where id=%v",
@ -601,11 +601,17 @@ func encodeString(in string) string {
}
// ReadVReplicationPos returns a statement to query the gtid for a
// given shard from the _vt.vreplication table.
// given stream from the _vt.vreplication table.
func ReadVReplicationPos(index uint32) string {
return fmt.Sprintf("select pos from _vt.vreplication where id=%v", index)
}
// ReadVReplicationStatus returns a statement to query the status fields for a
// given stream from the _vt.vreplication table.
func ReadVReplicationStatus(index uint32) string {
return fmt.Sprintf("select pos, state, message from _vt.vreplication where id=%v", index)
}
// StatsHistoryRecord is used to store a Message with timestamp
type StatsHistoryRecord struct {
Time time.Time

Просмотреть файл

@ -355,7 +355,7 @@ func TestUpdateVReplicationPos(t *testing.T) {
"set pos='MariaDB/0-1-8283', time_updated=88822 " +
"where id=78522"
got := updateVReplicationPos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0)
got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0)
if got != want {
t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want)
}
@ -367,7 +367,7 @@ func TestUpdateVReplicationTimestamp(t *testing.T) {
"set pos='MariaDB/0-2-582', time_updated=88822, transaction_timestamp=481828 " +
"where id=78522"
got := updateVReplicationPos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828)
got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828)
if got != want {
t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want)
}
@ -377,6 +377,14 @@ func TestReadVReplicationPos(t *testing.T) {
want := "select pos from _vt.vreplication where id=482821"
got := ReadVReplicationPos(482821)
if got != want {
t.Errorf("ReadVReplicationThrottlerSettings(482821) = %#v, want %#v", got, want)
t.Errorf("ReadVReplicationPos(482821) = %#v, want %#v", got, want)
}
}
func TestReadVReplicationStatus(t *testing.T) {
want := "select pos, state, message from _vt.vreplication where id=482821"
got := ReadVReplicationStatus(482821)
if got != want {
t.Errorf("ReadVReplicationStatus(482821) = %#v, want %#v", got, want)
}
}

Просмотреть файл

@ -96,7 +96,7 @@ var testBinlogTransaction = &binlogdatapb.BinlogTransaction{
},
}
// StreamKeyRange is part of the the UpdateStream interface
// StreamKeyRange is part of the UpdateStream interface
func (fake *FakeBinlogStreamer) StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset, callback func(reply *binlogdatapb.BinlogTransaction) error) error {
if fake.panics {
panic(fmt.Errorf("test-triggered panic"))
@ -162,7 +162,7 @@ var testTablesRequest = &tablesRequest{
},
}
// StreamTables is part of the the UpdateStream interface
// StreamTables is part of the UpdateStream interface
func (fake *FakeBinlogStreamer) StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset, callback func(reply *binlogdatapb.BinlogTransaction) error) error {
if fake.panics {
panic(fmt.Errorf("test-triggered panic"))
@ -214,7 +214,7 @@ func testStreamTablesPanics(t *testing.T, bpc binlogplayer.Client) {
}
}
// HandlePanic is part of the the UpdateStream interface
// HandlePanic is part of the UpdateStream interface
func (fake *FakeBinlogStreamer) HandlePanic(err *error) {
if x := recover(); x != nil {
*err = fmt.Errorf("Caught panic: %v", x)

Просмотреть файл

@ -26,7 +26,10 @@ import (
"errors"
"flag"
"io/ioutil"
"os"
"os/signal"
"sync"
"syscall"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/log"
@ -37,7 +40,7 @@ var (
dbCredentialsServer = flag.String("db-credentials-server", "file", "db credentials server type (use 'file' for the file implementation)")
// 'file' implementation flags
dbCredentialsFile = flag.String("db-credentials-file", "", "db credentials file")
dbCredentialsFile = flag.String("db-credentials-file", "", "db credentials file; send SIGHUP to reload this file")
// ErrUnknownUser is returned by credential server when the
// user doesn't exist
@ -126,4 +129,16 @@ func WithCredentials(cp *mysql.ConnParams) (*mysql.ConnParams, error) {
func init() {
AllCredentialsServers["file"] = &FileCredentialsServer{}
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGHUP)
go func() {
for range sigChan {
if fcs, ok := AllCredentialsServers["file"].(*FileCredentialsServer); ok {
fcs.mu.Lock()
fcs.dbCredentials = nil
fcs.mu.Unlock()
}
}
}()
}

Просмотреть файл

@ -17,8 +17,13 @@ limitations under the License.
package dbconfigs
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"syscall"
"testing"
"time"
"vitess.io/vitess/go/mysql"
)
@ -217,3 +222,47 @@ func TestCopy(t *testing.T) {
t.Errorf("DBConfig: %v, want %v", got, want)
}
}
func TestCredentialsFileHUP(t *testing.T) {
tmpFile, err := ioutil.TempFile("", "credentials.json")
if err != nil {
t.Fatalf("couldn't create temp file: %v", err)
}
defer os.Remove(tmpFile.Name())
*dbCredentialsFile = tmpFile.Name()
*dbCredentialsServer = "file"
oldStr := "str1"
jsonConfig := fmt.Sprintf("{\"%s\": [\"%s\"]}", oldStr, oldStr)
if err := ioutil.WriteFile(tmpFile.Name(), []byte(jsonConfig), 0600); err != nil {
t.Fatalf("couldn't write temp file: %v", err)
}
cs := GetCredentialsServer()
_, pass, err := cs.GetUserAndPassword(oldStr)
if pass != oldStr {
t.Fatalf("%s's Password should still be '%s'", oldStr, oldStr)
}
hupTest(t, tmpFile, oldStr, "str2")
hupTest(t, tmpFile, "str2", "str3") // still handling the signal
}
func hupTest(t *testing.T, tmpFile *os.File, oldStr, newStr string) {
cs := GetCredentialsServer()
jsonConfig := fmt.Sprintf("{\"%s\": [\"%s\"]}", newStr, newStr)
if err := ioutil.WriteFile(tmpFile.Name(), []byte(jsonConfig), 0600); err != nil {
t.Fatalf("couldn't overwrite temp file: %v", err)
}
_, pass, err := cs.GetUserAndPassword(oldStr)
if pass != oldStr {
t.Fatalf("%s's Password should still be '%s'", oldStr, oldStr)
}
syscall.Kill(syscall.Getpid(), syscall.SIGHUP)
time.Sleep(100 * time.Millisecond) // wait for signal handler
_, pass, err = cs.GetUserAndPassword(oldStr)
if err != ErrUnknownUser {
t.Fatalf("Should not have old %s after config reload", oldStr)
}
_, pass, err = cs.GetUserAndPassword(newStr)
if pass != newStr {
t.Fatalf("%s's Password should be '%s'", newStr, newStr)
}
}

Просмотреть файл

@ -68,7 +68,7 @@ type tabletStatsCacheEntry struct {
all map[string]*TabletStats
// healthy only has the healthy ones.
healthy []*TabletStats
// aggregates has the per-cell aggregates.
// aggregates has the per-region aggregates.
aggregates map[string]*querypb.AggregateStats
}
@ -141,7 +141,6 @@ func newTabletStatsCache(hc HealthCheck, ts *topo.Server, cell string, setListen
// upon type change.
hc.SetListener(tc, true /*sendDownEvents*/)
}
go tc.broadcastAggregateStats()
return tc
}
@ -266,18 +265,18 @@ func (tc *TabletStatsCache) StatsUpdate(ts *TabletStats) {
tc.updateAggregateMap(ts.Target.Keyspace, ts.Target.Shard, ts.Target.TabletType, e, allArray)
}
// MakeAggregateMap takes a list of TabletStats and builds a per-cell
// makeAggregateMap takes a list of TabletStats and builds a per-region
// AggregateStats map.
func MakeAggregateMap(stats []*TabletStats) map[string]*querypb.AggregateStats {
func (tc *TabletStatsCache) makeAggregateMap(stats []*TabletStats) map[string]*querypb.AggregateStats {
result := make(map[string]*querypb.AggregateStats)
for _, ts := range stats {
cell := ts.Tablet.Alias.Cell
agg, ok := result[cell]
region := tc.getRegionByCell(ts.Tablet.Alias.Cell)
agg, ok := result[region]
if !ok {
agg = &querypb.AggregateStats{
SecondsBehindMasterMin: math.MaxUint32,
}
result[cell] = agg
result[region] = agg
}
if ts.Serving && ts.LastError == nil {
@ -295,101 +294,12 @@ func MakeAggregateMap(stats []*TabletStats) map[string]*querypb.AggregateStats {
return result
}
// MakeAggregateMapDiff computes the entries that need to be broadcast
// when the map goes from oldMap to newMap.
func MakeAggregateMapDiff(keyspace, shard string, tabletType topodatapb.TabletType, ter int64, oldMap map[string]*querypb.AggregateStats, newMap map[string]*querypb.AggregateStats) []*srvtopo.TargetStatsEntry {
var result []*srvtopo.TargetStatsEntry
for cell, oldValue := range oldMap {
newValue, ok := newMap[cell]
if ok {
// We have both an old and a new value. If equal,
// skip it.
if oldValue.HealthyTabletCount == newValue.HealthyTabletCount &&
oldValue.UnhealthyTabletCount == newValue.UnhealthyTabletCount &&
oldValue.SecondsBehindMasterMin == newValue.SecondsBehindMasterMin &&
oldValue.SecondsBehindMasterMax == newValue.SecondsBehindMasterMax {
continue
}
// The new value is different, send it.
result = append(result, &srvtopo.TargetStatsEntry{
Target: &querypb.Target{
Keyspace: keyspace,
Shard: shard,
TabletType: tabletType,
Cell: cell,
},
Stats: newValue,
TabletExternallyReparentedTimestamp: ter,
})
} else {
// We only have the old value, send an empty
// record to clear it.
result = append(result, &srvtopo.TargetStatsEntry{
Target: &querypb.Target{
Keyspace: keyspace,
Shard: shard,
TabletType: tabletType,
Cell: cell,
},
})
}
}
for cell, newValue := range newMap {
if _, ok := oldMap[cell]; ok {
continue
}
// New value, no old value, just send it.
result = append(result, &srvtopo.TargetStatsEntry{
Target: &querypb.Target{
Keyspace: keyspace,
Shard: shard,
TabletType: tabletType,
Cell: cell,
},
Stats: newValue,
TabletExternallyReparentedTimestamp: ter,
})
}
return result
}
// updateAggregateMap will update the aggregate map for the
// tabletStatsCacheEntry. It may broadcast the changes too if we have listeners.
// e.mu needs to be locked.
func (tc *TabletStatsCache) updateAggregateMap(keyspace, shard string, tabletType topodatapb.TabletType, e *tabletStatsCacheEntry, stats []*TabletStats) {
// Save the new value
oldAgg := e.aggregates
newAgg := MakeAggregateMap(stats)
e.aggregates = newAgg
// And broadcast the change in the background, if we need to.
tc.mu.RLock()
if !tc.tsm.HasSubscribers() {
// Shortcut: no subscriber, we can be done.
tc.mu.RUnlock()
return
}
tc.mu.RUnlock()
var ter int64
if len(stats) > 0 {
ter = stats[0].TabletExternallyReparentedTimestamp
}
diffs := MakeAggregateMapDiff(keyspace, shard, tabletType, ter, oldAgg, newAgg)
tc.aggregatesChan <- diffs
}
// broadcastAggregateStats is called in the background to send aggregate stats
// in the right order to our subscribers.
func (tc *TabletStatsCache) broadcastAggregateStats() {
for diffs := range tc.aggregatesChan {
tc.mu.RLock()
for _, d := range diffs {
tc.tsm.Broadcast(d)
}
tc.mu.RUnlock()
}
e.aggregates = tc.makeAggregateMap(stats)
}
// GetTabletStats returns the full list of available targets.
@ -436,51 +346,6 @@ func (tc *TabletStatsCache) ResetForTesting() {
tc.entries = make(map[string]map[string]map[topodatapb.TabletType]*tabletStatsCacheEntry)
}
// Subscribe is part of the TargetStatsListener interface.
func (tc *TabletStatsCache) Subscribe() (int, []srvtopo.TargetStatsEntry, <-chan (*srvtopo.TargetStatsEntry), error) {
var allTS []srvtopo.TargetStatsEntry
// Make sure the map cannot change. Also blocks any update from
// propagating.
tc.mu.Lock()
defer tc.mu.Unlock()
for keyspace, shardMap := range tc.entries {
for shard, typeMap := range shardMap {
for tabletType, e := range typeMap {
e.mu.RLock()
var ter int64
if len(e.healthy) > 0 {
ter = e.healthy[0].TabletExternallyReparentedTimestamp
}
for cell, agg := range e.aggregates {
allTS = append(allTS, srvtopo.TargetStatsEntry{
Target: &querypb.Target{
Keyspace: keyspace,
Shard: shard,
TabletType: tabletType,
Cell: cell,
},
Stats: agg,
TabletExternallyReparentedTimestamp: ter,
})
}
e.mu.RUnlock()
}
}
}
// Now create the listener, add it to our list.
id, c := tc.tsm.Subscribe()
return id, allTS, c, nil
}
// Unsubscribe is part of the TargetStatsListener interface.
func (tc *TabletStatsCache) Unsubscribe(i int) error {
tc.mu.Lock()
defer tc.mu.Unlock()
return tc.tsm.Unsubscribe(i)
}
// GetAggregateStats is part of the TargetStatsListener interface.
func (tc *TabletStatsCache) GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, error) {
e := tc.getEntry(target.Keyspace, target.Shard, target.TabletType)
@ -498,7 +363,8 @@ func (tc *TabletStatsCache) GetAggregateStats(target *querypb.Target) (*querypb.
return agg, nil
}
}
agg, ok := e.aggregates[target.Cell]
targetRegion := tc.getRegionByCell(target.Cell)
agg, ok := e.aggregates[targetRegion]
if !ok {
return nil, topo.NewError(topo.NoNode, topotools.TargetIdent(target))
}
@ -530,4 +396,3 @@ func (tc *TabletStatsCache) GetMasterCell(keyspace, shard string) (cell string,
// Compile-time interface check.
var _ HealthCheckStatsListener = (*TabletStatsCache)(nil)
var _ srvtopo.TargetStatsListener = (*TabletStatsCache)(nil)

Просмотреть файл

@ -19,6 +19,7 @@ package key
import (
"bytes"
"encoding/hex"
"math/rand"
"strings"
"vitess.io/vitess/go/vt/vterrors"
@ -27,6 +28,9 @@ import (
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
// AnyShardPicker makes a choice on what shard to use when any shard will do. Used for testing.
var AnyShardPicker DestinationAnyShardPicker = DestinationAnyShardPickerRandomShard{}
// Destination is an interface definition for a query destination,
// within a given Keyspace / Tablet Type. It is meant to be an internal
// data structure, with multiple possible implementations.
@ -369,12 +373,25 @@ func (d DestinationKeyspaceIDs) String() string {
return buffer.String()
}
// DestinationAnyShardPicker exposes an interface that will pick an index given a number of available shards.
type DestinationAnyShardPicker interface {
// PickShard picks a shard given a number of shards
PickShard(shardCount int) int
}
// DestinationAnyShardPickerRandomShard picks a random shard.
type DestinationAnyShardPickerRandomShard struct{}
// PickShard is DestinationAnyShardPickerRandomShard's implmentation.
func (dp DestinationAnyShardPickerRandomShard) PickShard(shardCount int) int {
return rand.Intn(shardCount)
}
//
// DestinationAnyShard
//
// DestinationAnyShard is the destination for any one shard in the
// keyspace. This usually maps to the first one in the list.
// DestinationAnyShard is the destination for any one shard in the keyspace.
// It implements the Destination interface.
type DestinationAnyShard struct{}
@ -388,7 +405,7 @@ func (d DestinationAnyShard) Resolve(allShards []*topodatapb.ShardReference, add
if len(allShards) == 0 {
return vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "no shard in keyspace")
}
return addShard(allShards[0].Name)
return addShard(allShards[AnyShardPicker.PickShard(len(allShards))].Name)
}
// String is part of the Destination interface.

Просмотреть файл

@ -5,7 +5,10 @@
package log
import "github.com/golang/glog"
import (
"flag"
"github.com/golang/glog"
)
// Level is used with V() to test log verbosity.
type Level = glog.Level
@ -52,3 +55,7 @@ var (
// FatalDepth formats arguments like fmt.Print and uses depth to choose which call frame to log.
FatalDepth = glog.FatalDepth
)
func init() {
flag.Uint64Var(&glog.MaxSize, "log_rotate_max_size", glog.MaxSize, "size in bytes at which logs are rotated (glog.MaxSize)")
}

Просмотреть файл

@ -164,7 +164,7 @@ func isDbDir(p string) bool {
return true
}
// Look for at least one .frm file
// Look for at least one database file
fis, err := ioutil.ReadDir(p)
if err != nil {
return false
@ -174,6 +174,12 @@ func isDbDir(p string) bool {
return true
}
// the MyRocks engine stores data in RocksDB .sst files
// https://github.com/facebook/rocksdb/wiki/Rocksdb-BlockBasedTable-Format
if strings.HasSuffix(fi.Name(), ".sst") {
return true
}
// .frm files were removed in MySQL 8, so we need to check for two other file types
// https://dev.mysql.com/doc/refman/8.0/en/data-dictionary-file-removal.html
if strings.HasSuffix(fi.Name(), ".ibd") {
@ -820,6 +826,12 @@ func Restore(
if len(bhs) == 0 {
// There are no backups (not even broken/incomplete ones).
logger.Errorf("No backup to restore on BackupStorage for directory %v. Starting up empty.", dir)
// Since this Was an empty database make sure we start replication at the beginning
if err = mysqld.ResetReplication(ctx); err == nil {
logger.Errorf("Error reseting slave replication: %v. Continuing", err)
err = ErrNoBackup
}
if err = PopulateMetadataTables(mysqld, localMetadata); err == nil {
err = ErrNoBackup
}

Просмотреть файл

@ -39,7 +39,9 @@ func TestFindFilesToBackup(t *testing.T) {
dataDbDir := path.Join(dataDir, "vt_db")
extraDir := path.Join(dataDir, "extra_dir")
outsideDbDir := path.Join(root, "outside_db")
for _, s := range []string{innodbDataDir, innodbLogDir, dataDbDir, extraDir, outsideDbDir} {
rocksdbDir := path.Join(dataDir, ".rocksdb")
sdiOnlyDir := path.Join(dataDir, "sdi_dir")
for _, s := range []string{innodbDataDir, innodbLogDir, dataDbDir, extraDir, outsideDbDir, rocksdbDir, sdiOnlyDir} {
if err := os.MkdirAll(s, os.ModePerm); err != nil {
t.Fatalf("failed to create directory %v: %v", s, err)
}
@ -62,6 +64,12 @@ func TestFindFilesToBackup(t *testing.T) {
if err := os.Symlink(outsideDbDir, path.Join(dataDir, "vt_symlink")); err != nil {
t.Fatalf("failed to symlink vt_symlink: %v", err)
}
if err := ioutil.WriteFile(path.Join(rocksdbDir, "000011.sst"), []byte("rocksdb file"), os.ModePerm); err != nil {
t.Fatalf("failed to write file 000011.sst: %v", err)
}
if err := ioutil.WriteFile(path.Join(sdiOnlyDir, "table1.sdi"), []byte("sdi file"), os.ModePerm); err != nil {
t.Fatalf("failed to write file table1.sdi: %v", err)
}
cnf := &Mycnf{
InnodbDataHomeDir: innodbDataDir,
@ -76,6 +84,14 @@ func TestFindFilesToBackup(t *testing.T) {
sort.Sort(forTest(result))
t.Logf("findFilesToBackup returned: %v", result)
expected := []FileEntry{
{
Base: "Data",
Name: ".rocksdb/000011.sst",
},
{
Base: "Data",
Name: "sdi_dir/table1.sdi",
},
{
Base: "Data",
Name: "vt_db/db.opt",

Просмотреть файл

@ -88,7 +88,7 @@ func (bh *CephBackupHandle) AddFile(ctx context.Context, filename string, filesi
// Give PutObject() the read end of the pipe.
object := objName(bh.dir, bh.name, filename)
_, err := bh.client.PutObject(bucket, object, reader, "application/octet-stream")
_, err := bh.client.PutObjectWithContext(ctx, bucket, object, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
// Signal the writer that an error occurred, in case it's not done writing yet.
reader.CloseWithError(err)
@ -126,7 +126,7 @@ func (bh *CephBackupHandle) ReadFile(ctx context.Context, filename string) (io.R
// ceph bucket name
bucket := alterBucketName(bh.dir)
object := objName(bh.dir, bh.name, filename)
return bh.client.GetObject(bucket, object)
return bh.client.GetObjectWithContext(ctx, bucket, object, minio.GetObjectOptions{})
}
// CephBackupStorage implements BackupStorage for Ceph Cloud Storage.
@ -154,7 +154,7 @@ func (bs *CephBackupStorage) ListBackups(ctx context.Context, dir string) ([]bac
doneCh := make(chan struct{})
for object := range c.ListObjects(bucket, searchPrefix, false, doneCh) {
if object.Err != nil {
err := c.BucketExists(bucket)
_, err := c.BucketExists(bucket)
if err != nil {
return nil, nil
}
@ -190,8 +190,13 @@ func (bs *CephBackupStorage) StartBackup(ctx context.Context, dir, name string)
// ceph bucket name
bucket := alterBucketName(dir)
err = c.BucketExists(bucket)
found, err := c.BucketExists(bucket)
if err != nil {
log.Info("Error from BucketExists: %v, quitting", bucket)
return nil, errors.New("Error checking whether bucket exists: " + bucket)
}
if !found {
log.Info("Bucket: %v doesn't exist, creating new bucket with the required name", bucket)
err = c.MakeBucket(bucket, "")
if err != nil {

Просмотреть файл

@ -80,6 +80,9 @@ type FakeMysqlDaemon struct {
// If it doesn't match, SetSlavePosition will return an error.
SetSlavePositionPos mysql.Position
// StartSlaveUntilAfterPos is matched against the input
StartSlaveUntilAfterPos mysql.Position
// SetMasterInput is matched against the input of SetMaster
// (as "%v:%v"). If it doesn't match, SetMaster will return an error.
SetMasterInput string
@ -240,6 +243,17 @@ func (fmd *FakeMysqlDaemon) StartSlave(hookExtraEnv map[string]string) error {
})
}
// StartSlaveUntilAfter is part of the MysqlDaemon interface.
func (fmd *FakeMysqlDaemon) StartSlaveUntilAfter(ctx context.Context, pos mysql.Position) error {
if !reflect.DeepEqual(fmd.StartSlaveUntilAfterPos, pos) {
return fmt.Errorf("wrong pos for StartSlaveUntilAfter: expected %v got %v", fmd.SetSlavePositionPos, pos)
}
return fmd.ExecuteSuperQueryList(context.Background(), []string{
"START SLAVE UNTIL AFTER",
})
}
// StopSlave is part of the MysqlDaemon interface.
func (fmd *FakeMysqlDaemon) StopSlave(hookExtraEnv map[string]string) error {
return fmd.ExecuteSuperQueryList(context.Background(), []string{
@ -383,6 +397,16 @@ func (fmd *FakeMysqlDaemon) GetSchema(dbName string, tables, excludeTables []str
return tmutils.FilterTables(fmd.Schema, tables, excludeTables, includeViews)
}
// GetColumns is part of the MysqlDaemon interface
func (fmd *FakeMysqlDaemon) GetColumns(dbName, table string) ([]string, error) {
return []string{}, nil
}
// GetPrimaryKeyColumns is part of the MysqlDaemon interface
func (fmd *FakeMysqlDaemon) GetPrimaryKeyColumns(dbName, table string) ([]string, error) {
return []string{}, nil
}
// PreflightSchemaChange is part of the MysqlDaemon interface
func (fmd *FakeMysqlDaemon) PreflightSchemaChange(dbName string, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) {
if fmd.PreflightSchemaChangeResult == nil {

Просмотреть файл

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc.
Copyright 2018 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -17,13 +17,10 @@ limitations under the License.
package mysqlctl
import (
// "crypto/md5"
"encoding/hex"
"hash"
// "hash/crc64"
"hash/crc32"
"os"
"vitess.io/vitess/go/cgzip"
)
// Use this to simulate failures in tests
@ -36,39 +33,13 @@ func init() {
simulateFailures = statErr == nil
}
// our hasher, implemented using md5
// type hasher struct {
// hash.Hash
// }
// func newHasher() *hasher {
// return &hasher{md5.New()}
// }
// func (h *hasher) HashString() string {
// return hex.EncodeToString(h.Sum(nil))
// }
// our hasher, implemented using crc64
//type hasher struct {
// hash.Hash64
//}
//func newHasher() *hasher {
// return &hasher{crc64.New(crc64.MakeTable(crc64.ECMA))}
//}
//func (h *hasher) HashString() string {
// return hex.EncodeToString(h.Sum(nil))
//}
// our hasher, implemented using cgzip crc32
// our hasher, implemented using crc32
type hasher struct {
hash.Hash32
}
func newHasher() *hasher {
return &hasher{cgzip.NewCrc32()}
return &hasher{crc32.NewIEEE()}
}
func (h *hasher) HashString() string {

Просмотреть файл

@ -41,6 +41,7 @@ type MysqlDaemon interface {
// replication related methods
StartSlave(hookExtraEnv map[string]string) error
StartSlaveUntilAfter(ctx context.Context, pos mysql.Position) error
StopSlave(hookExtraEnv map[string]string) error
SlaveStatus() (mysql.SlaveStatus, error)
SetSemiSyncEnabled(master, slave bool) error
@ -69,6 +70,8 @@ type MysqlDaemon interface {
// Schema related methods
GetSchema(dbName string, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error)
GetColumns(dbName, table string) ([]string, error)
GetPrimaryKeyColumns(dbName, table string) ([]string, error)
PreflightSchemaChange(dbName string, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error)
ApplySchemaChange(dbName string, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error)

Просмотреть файл

@ -210,7 +210,7 @@ func (mysqld *Mysqld) startNoWait(ctx context.Context, cnf *Mycnf, mysqldArgs ..
name, err = binaryPath(dir, "mysqld_safe")
if err != nil {
// The movement to use systemd means that mysqld_safe is not always provided.
// This should not be considered an issue do do not generate a warning.
// This should not be considered an issue do not generate a warning.
log.Infof("%v: trying to launch mysqld instead", err)
name, err = binaryPath(dir, "mysqld")
// If this also fails, return an error.

Просмотреть файл

@ -83,6 +83,19 @@ func (mysqld *Mysqld) StartSlave(hookExtraEnv map[string]string) error {
return h.ExecuteOptional()
}
// StartSlaveUntilAfter starts a slave until replication has come to `targetPos`, then it stops replication
func (mysqld *Mysqld) StartSlaveUntilAfter(ctx context.Context, targetPos mysql.Position) error {
conn, err := getPoolReconnect(ctx, mysqld.dbaPool)
if err != nil {
return err
}
defer conn.Recycle()
queries := []string{conn.StartSlaveUntilAfterCommand(targetPos)}
return mysqld.executeSuperQueryListConn(ctx, conn, queries)
}
// StopSlave stops a slave.
func (mysqld *Mysqld) StopSlave(hookExtraEnv map[string]string) error {
h := hook.NewSimpleHook("preflight_stop_slave")

Просмотреть файл

@ -44,7 +44,7 @@ func (x ClusterOperationState) String() string {
return proto.EnumName(ClusterOperationState_name, int32(x))
}
func (ClusterOperationState) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{0}
return fileDescriptor_automation_4d7d55680fa173cc, []int{0}
}
type TaskState int32
@ -73,7 +73,7 @@ func (x TaskState) String() string {
return proto.EnumName(TaskState_name, int32(x))
}
func (TaskState) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{1}
return fileDescriptor_automation_4d7d55680fa173cc, []int{1}
}
type ClusterOperation struct {
@ -93,7 +93,7 @@ func (m *ClusterOperation) Reset() { *m = ClusterOperation{} }
func (m *ClusterOperation) String() string { return proto.CompactTextString(m) }
func (*ClusterOperation) ProtoMessage() {}
func (*ClusterOperation) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{0}
return fileDescriptor_automation_4d7d55680fa173cc, []int{0}
}
func (m *ClusterOperation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ClusterOperation.Unmarshal(m, b)
@ -155,7 +155,7 @@ func (m *TaskContainer) Reset() { *m = TaskContainer{} }
func (m *TaskContainer) String() string { return proto.CompactTextString(m) }
func (*TaskContainer) ProtoMessage() {}
func (*TaskContainer) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{1}
return fileDescriptor_automation_4d7d55680fa173cc, []int{1}
}
func (m *TaskContainer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TaskContainer.Unmarshal(m, b)
@ -210,7 +210,7 @@ func (m *Task) Reset() { *m = Task{} }
func (m *Task) String() string { return proto.CompactTextString(m) }
func (*Task) ProtoMessage() {}
func (*Task) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{2}
return fileDescriptor_automation_4d7d55680fa173cc, []int{2}
}
func (m *Task) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Task.Unmarshal(m, b)
@ -284,7 +284,7 @@ func (m *EnqueueClusterOperationRequest) Reset() { *m = EnqueueClusterOp
func (m *EnqueueClusterOperationRequest) String() string { return proto.CompactTextString(m) }
func (*EnqueueClusterOperationRequest) ProtoMessage() {}
func (*EnqueueClusterOperationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{3}
return fileDescriptor_automation_4d7d55680fa173cc, []int{3}
}
func (m *EnqueueClusterOperationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnqueueClusterOperationRequest.Unmarshal(m, b)
@ -329,7 +329,7 @@ func (m *EnqueueClusterOperationResponse) Reset() { *m = EnqueueClusterO
func (m *EnqueueClusterOperationResponse) String() string { return proto.CompactTextString(m) }
func (*EnqueueClusterOperationResponse) ProtoMessage() {}
func (*EnqueueClusterOperationResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{4}
return fileDescriptor_automation_4d7d55680fa173cc, []int{4}
}
func (m *EnqueueClusterOperationResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnqueueClusterOperationResponse.Unmarshal(m, b)
@ -367,7 +367,7 @@ func (m *GetClusterOperationStateRequest) Reset() { *m = GetClusterOpera
func (m *GetClusterOperationStateRequest) String() string { return proto.CompactTextString(m) }
func (*GetClusterOperationStateRequest) ProtoMessage() {}
func (*GetClusterOperationStateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{5}
return fileDescriptor_automation_4d7d55680fa173cc, []int{5}
}
func (m *GetClusterOperationStateRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetClusterOperationStateRequest.Unmarshal(m, b)
@ -405,7 +405,7 @@ func (m *GetClusterOperationStateResponse) Reset() { *m = GetClusterOper
func (m *GetClusterOperationStateResponse) String() string { return proto.CompactTextString(m) }
func (*GetClusterOperationStateResponse) ProtoMessage() {}
func (*GetClusterOperationStateResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{6}
return fileDescriptor_automation_4d7d55680fa173cc, []int{6}
}
func (m *GetClusterOperationStateResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetClusterOperationStateResponse.Unmarshal(m, b)
@ -443,7 +443,7 @@ func (m *GetClusterOperationDetailsRequest) Reset() { *m = GetClusterOpe
func (m *GetClusterOperationDetailsRequest) String() string { return proto.CompactTextString(m) }
func (*GetClusterOperationDetailsRequest) ProtoMessage() {}
func (*GetClusterOperationDetailsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{7}
return fileDescriptor_automation_4d7d55680fa173cc, []int{7}
}
func (m *GetClusterOperationDetailsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetClusterOperationDetailsRequest.Unmarshal(m, b)
@ -482,7 +482,7 @@ func (m *GetClusterOperationDetailsResponse) Reset() { *m = GetClusterOp
func (m *GetClusterOperationDetailsResponse) String() string { return proto.CompactTextString(m) }
func (*GetClusterOperationDetailsResponse) ProtoMessage() {}
func (*GetClusterOperationDetailsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_automation_7092712054bc689e, []int{8}
return fileDescriptor_automation_4d7d55680fa173cc, []int{8}
}
func (m *GetClusterOperationDetailsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetClusterOperationDetailsResponse.Unmarshal(m, b)
@ -525,9 +525,9 @@ func init() {
proto.RegisterEnum("automation.TaskState", TaskState_name, TaskState_value)
}
func init() { proto.RegisterFile("automation.proto", fileDescriptor_automation_7092712054bc689e) }
func init() { proto.RegisterFile("automation.proto", fileDescriptor_automation_4d7d55680fa173cc) }
var fileDescriptor_automation_7092712054bc689e = []byte{
var fileDescriptor_automation_4d7d55680fa173cc = []byte{
// 588 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xdd, 0x6e, 0xd3, 0x3e,
0x18, 0xc6, 0xff, 0x49, 0xdb, 0xfd, 0xe9, 0x1b, 0xb6, 0x45, 0x16, 0x9b, 0xb2, 0x89, 0xb1, 0x2c,

Просмотреть файл

@ -136,10 +136,10 @@ var _Automation_serviceDesc = grpc.ServiceDesc{
}
func init() {
proto.RegisterFile("automationservice.proto", fileDescriptor_automationservice_42ff8d484b987c6f)
proto.RegisterFile("automationservice.proto", fileDescriptor_automationservice_5369cb995212ce22)
}
var fileDescriptor_automationservice_42ff8d484b987c6f = []byte{
var fileDescriptor_automationservice_5369cb995212ce22 = []byte{
// 178 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2c, 0x2d, 0xc9,
0xcf, 0x4d, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28,

Просмотреть файл

@ -8,6 +8,7 @@ import fmt "fmt"
import math "math"
import query "vitess.io/vitess/go/vt/proto/query"
import topodata "vitess.io/vitess/go/vt/proto/topodata"
import vtrpc "vitess.io/vitess/go/vt/proto/vtrpc"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -20,6 +21,98 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// OnDDLAction lists the possible actions for DDLs.
type OnDDLAction int32
const (
OnDDLAction_IGNORE OnDDLAction = 0
OnDDLAction_STOP OnDDLAction = 1
OnDDLAction_EXEC OnDDLAction = 2
OnDDLAction_EXEC_IGNORE OnDDLAction = 3
)
var OnDDLAction_name = map[int32]string{
0: "IGNORE",
1: "STOP",
2: "EXEC",
3: "EXEC_IGNORE",
}
var OnDDLAction_value = map[string]int32{
"IGNORE": 0,
"STOP": 1,
"EXEC": 2,
"EXEC_IGNORE": 3,
}
func (x OnDDLAction) String() string {
return proto.EnumName(OnDDLAction_name, int32(x))
}
func (OnDDLAction) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{0}
}
// VEventType enumerates the event types.
// This list is comprehensive. Many of these types
// will not be encountered in RBR mode.
type VEventType int32
const (
VEventType_UNKNOWN VEventType = 0
VEventType_GTID VEventType = 1
VEventType_BEGIN VEventType = 2
VEventType_COMMIT VEventType = 3
VEventType_ROLLBACK VEventType = 4
VEventType_DDL VEventType = 5
VEventType_INSERT VEventType = 6
VEventType_REPLACE VEventType = 7
VEventType_UPDATE VEventType = 8
VEventType_DELETE VEventType = 9
VEventType_SET VEventType = 10
VEventType_OTHER VEventType = 11
VEventType_ROW VEventType = 12
VEventType_FIELD VEventType = 13
)
var VEventType_name = map[int32]string{
0: "UNKNOWN",
1: "GTID",
2: "BEGIN",
3: "COMMIT",
4: "ROLLBACK",
5: "DDL",
6: "INSERT",
7: "REPLACE",
8: "UPDATE",
9: "DELETE",
10: "SET",
11: "OTHER",
12: "ROW",
13: "FIELD",
}
var VEventType_value = map[string]int32{
"UNKNOWN": 0,
"GTID": 1,
"BEGIN": 2,
"COMMIT": 3,
"ROLLBACK": 4,
"DDL": 5,
"INSERT": 6,
"REPLACE": 7,
"UPDATE": 8,
"DELETE": 9,
"SET": 10,
"OTHER": 11,
"ROW": 12,
"FIELD": 13,
}
func (x VEventType) String() string {
return proto.EnumName(VEventType_name, int32(x))
}
func (VEventType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{1}
}
type BinlogTransaction_Statement_Category int32
const (
@ -65,7 +158,7 @@ func (x BinlogTransaction_Statement_Category) String() string {
return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x))
}
func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{1, 0, 0}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{1, 0, 0}
}
// Charset is the per-statement charset info from a QUERY_EVENT binlog entry.
@ -85,7 +178,7 @@ func (m *Charset) Reset() { *m = Charset{} }
func (m *Charset) String() string { return proto.CompactTextString(m) }
func (*Charset) ProtoMessage() {}
func (*Charset) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{0}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{0}
}
func (m *Charset) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Charset.Unmarshal(m, b)
@ -142,7 +235,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} }
func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) }
func (*BinlogTransaction) ProtoMessage() {}
func (*BinlogTransaction) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{1}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{1}
}
func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b)
@ -192,7 +285,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S
func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) }
func (*BinlogTransaction_Statement) ProtoMessage() {}
func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{1, 0}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{1, 0}
}
func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b)
@ -250,7 +343,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} }
func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) }
func (*StreamKeyRangeRequest) ProtoMessage() {}
func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{2}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{2}
}
func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b)
@ -303,7 +396,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{}
func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) }
func (*StreamKeyRangeResponse) ProtoMessage() {}
func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{3}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{3}
}
func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b)
@ -347,7 +440,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} }
func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) }
func (*StreamTablesRequest) ProtoMessage() {}
func (*StreamTablesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{4}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{4}
}
func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b)
@ -400,7 +493,7 @@ func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} }
func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) }
func (*StreamTablesResponse) ProtoMessage() {}
func (*StreamTablesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{5}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{5}
}
func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b)
@ -427,6 +520,98 @@ func (m *StreamTablesResponse) GetBinlogTransaction() *BinlogTransaction {
return nil
}
// Rule represents one rule.
type Rule struct {
// match can be a table name or a regular expression
// delineated by '/' and '/'.
Match string `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"`
// filter can be an empty string or keyrange if the match
// is a regular expression. Otherwise, it must be a select
// query.
Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Rule) Reset() { *m = Rule{} }
func (m *Rule) String() string { return proto.CompactTextString(m) }
func (*Rule) ProtoMessage() {}
func (*Rule) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{6}
}
func (m *Rule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Rule.Unmarshal(m, b)
}
func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Rule.Marshal(b, m, deterministic)
}
func (dst *Rule) XXX_Merge(src proto.Message) {
xxx_messageInfo_Rule.Merge(dst, src)
}
func (m *Rule) XXX_Size() int {
return xxx_messageInfo_Rule.Size(m)
}
func (m *Rule) XXX_DiscardUnknown() {
xxx_messageInfo_Rule.DiscardUnknown(m)
}
var xxx_messageInfo_Rule proto.InternalMessageInfo
func (m *Rule) GetMatch() string {
if m != nil {
return m.Match
}
return ""
}
func (m *Rule) GetFilter() string {
if m != nil {
return m.Filter
}
return ""
}
// Filter represents a list of ordered rules. First match
// wins.
type Filter struct {
Rules []*Rule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Filter) Reset() { *m = Filter{} }
func (m *Filter) String() string { return proto.CompactTextString(m) }
func (*Filter) ProtoMessage() {}
func (*Filter) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{7}
}
func (m *Filter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Filter.Unmarshal(m, b)
}
func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Filter.Marshal(b, m, deterministic)
}
func (dst *Filter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Filter.Merge(dst, src)
}
func (m *Filter) XXX_Size() int {
return xxx_messageInfo_Filter.Size(m)
}
func (m *Filter) XXX_DiscardUnknown() {
xxx_messageInfo_Filter.DiscardUnknown(m)
}
var xxx_messageInfo_Filter proto.InternalMessageInfo
func (m *Filter) GetRules() []*Rule {
if m != nil {
return m.Rules
}
return nil
}
// BinlogSource specifies the source and filter parameters for
// Filtered Replication. It currently supports a keyrange
// or a list of tables.
@ -440,17 +625,22 @@ type BinlogSource struct {
// key_range is set if the request is for a keyrange
KeyRange *topodata.KeyRange `protobuf:"bytes,4,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"`
// tables is set if the request is for a list of tables
Tables []string `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Tables []string `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"`
// filter is set if we're using the generalized representation
// for the filter.
Filter *Filter `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"`
// on_ddl specifies the action to be taken when a DDL is encountered.
OnDdl OnDDLAction `protobuf:"varint,7,opt,name=on_ddl,json=onDdl,proto3,enum=binlogdata.OnDDLAction" json:"on_ddl,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BinlogSource) Reset() { *m = BinlogSource{} }
func (m *BinlogSource) String() string { return proto.CompactTextString(m) }
func (*BinlogSource) ProtoMessage() {}
func (*BinlogSource) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_ac14f15f6b19a931, []int{6}
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{8}
}
func (m *BinlogSource) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogSource.Unmarshal(m, b)
@ -505,6 +695,349 @@ func (m *BinlogSource) GetTables() []string {
return nil
}
func (m *BinlogSource) GetFilter() *Filter {
if m != nil {
return m.Filter
}
return nil
}
func (m *BinlogSource) GetOnDdl() OnDDLAction {
if m != nil {
return m.OnDdl
}
return OnDDLAction_IGNORE
}
// RowChange represents one row change
type RowChange struct {
Before *query.Row `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"`
After *query.Row `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RowChange) Reset() { *m = RowChange{} }
func (m *RowChange) String() string { return proto.CompactTextString(m) }
func (*RowChange) ProtoMessage() {}
func (*RowChange) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{9}
}
func (m *RowChange) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RowChange.Unmarshal(m, b)
}
func (m *RowChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RowChange.Marshal(b, m, deterministic)
}
func (dst *RowChange) XXX_Merge(src proto.Message) {
xxx_messageInfo_RowChange.Merge(dst, src)
}
func (m *RowChange) XXX_Size() int {
return xxx_messageInfo_RowChange.Size(m)
}
func (m *RowChange) XXX_DiscardUnknown() {
xxx_messageInfo_RowChange.DiscardUnknown(m)
}
var xxx_messageInfo_RowChange proto.InternalMessageInfo
func (m *RowChange) GetBefore() *query.Row {
if m != nil {
return m.Before
}
return nil
}
func (m *RowChange) GetAfter() *query.Row {
if m != nil {
return m.After
}
return nil
}
// RowEvent represent row events for one table
type RowEvent struct {
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
RowChanges []*RowChange `protobuf:"bytes,2,rep,name=row_changes,json=rowChanges,proto3" json:"row_changes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RowEvent) Reset() { *m = RowEvent{} }
func (m *RowEvent) String() string { return proto.CompactTextString(m) }
func (*RowEvent) ProtoMessage() {}
func (*RowEvent) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{10}
}
func (m *RowEvent) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RowEvent.Unmarshal(m, b)
}
func (m *RowEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RowEvent.Marshal(b, m, deterministic)
}
func (dst *RowEvent) XXX_Merge(src proto.Message) {
xxx_messageInfo_RowEvent.Merge(dst, src)
}
func (m *RowEvent) XXX_Size() int {
return xxx_messageInfo_RowEvent.Size(m)
}
func (m *RowEvent) XXX_DiscardUnknown() {
xxx_messageInfo_RowEvent.DiscardUnknown(m)
}
var xxx_messageInfo_RowEvent proto.InternalMessageInfo
func (m *RowEvent) GetTableName() string {
if m != nil {
return m.TableName
}
return ""
}
func (m *RowEvent) GetRowChanges() []*RowChange {
if m != nil {
return m.RowChanges
}
return nil
}
type FieldEvent struct {
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
Fields []*query.Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *FieldEvent) Reset() { *m = FieldEvent{} }
func (m *FieldEvent) String() string { return proto.CompactTextString(m) }
func (*FieldEvent) ProtoMessage() {}
func (*FieldEvent) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{11}
}
func (m *FieldEvent) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FieldEvent.Unmarshal(m, b)
}
func (m *FieldEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FieldEvent.Marshal(b, m, deterministic)
}
func (dst *FieldEvent) XXX_Merge(src proto.Message) {
xxx_messageInfo_FieldEvent.Merge(dst, src)
}
func (m *FieldEvent) XXX_Size() int {
return xxx_messageInfo_FieldEvent.Size(m)
}
func (m *FieldEvent) XXX_DiscardUnknown() {
xxx_messageInfo_FieldEvent.DiscardUnknown(m)
}
var xxx_messageInfo_FieldEvent proto.InternalMessageInfo
func (m *FieldEvent) GetTableName() string {
if m != nil {
return m.TableName
}
return ""
}
func (m *FieldEvent) GetFields() []*query.Field {
if m != nil {
return m.Fields
}
return nil
}
// VEvent represents a vstream event
type VEvent struct {
Type VEventType `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"`
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Gtid string `protobuf:"bytes,3,opt,name=gtid,proto3" json:"gtid,omitempty"`
Ddl string `protobuf:"bytes,4,opt,name=ddl,proto3" json:"ddl,omitempty"`
RowEvent *RowEvent `protobuf:"bytes,5,opt,name=row_event,json=rowEvent,proto3" json:"row_event,omitempty"`
FieldEvent *FieldEvent `protobuf:"bytes,6,opt,name=field_event,json=fieldEvent,proto3" json:"field_event,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VEvent) Reset() { *m = VEvent{} }
func (m *VEvent) String() string { return proto.CompactTextString(m) }
func (*VEvent) ProtoMessage() {}
func (*VEvent) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{12}
}
func (m *VEvent) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VEvent.Unmarshal(m, b)
}
func (m *VEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_VEvent.Marshal(b, m, deterministic)
}
func (dst *VEvent) XXX_Merge(src proto.Message) {
xxx_messageInfo_VEvent.Merge(dst, src)
}
func (m *VEvent) XXX_Size() int {
return xxx_messageInfo_VEvent.Size(m)
}
func (m *VEvent) XXX_DiscardUnknown() {
xxx_messageInfo_VEvent.DiscardUnknown(m)
}
var xxx_messageInfo_VEvent proto.InternalMessageInfo
func (m *VEvent) GetType() VEventType {
if m != nil {
return m.Type
}
return VEventType_UNKNOWN
}
func (m *VEvent) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *VEvent) GetGtid() string {
if m != nil {
return m.Gtid
}
return ""
}
func (m *VEvent) GetDdl() string {
if m != nil {
return m.Ddl
}
return ""
}
func (m *VEvent) GetRowEvent() *RowEvent {
if m != nil {
return m.RowEvent
}
return nil
}
func (m *VEvent) GetFieldEvent() *FieldEvent {
if m != nil {
return m.FieldEvent
}
return nil
}
// VStreamRequest is the payload for VStream
type VStreamRequest struct {
EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"`
ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"`
Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
Position string `protobuf:"bytes,4,opt,name=position,proto3" json:"position,omitempty"`
Filter *Filter `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VStreamRequest) Reset() { *m = VStreamRequest{} }
func (m *VStreamRequest) String() string { return proto.CompactTextString(m) }
func (*VStreamRequest) ProtoMessage() {}
func (*VStreamRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{13}
}
func (m *VStreamRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VStreamRequest.Unmarshal(m, b)
}
func (m *VStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic)
}
func (dst *VStreamRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_VStreamRequest.Merge(dst, src)
}
func (m *VStreamRequest) XXX_Size() int {
return xxx_messageInfo_VStreamRequest.Size(m)
}
func (m *VStreamRequest) XXX_DiscardUnknown() {
xxx_messageInfo_VStreamRequest.DiscardUnknown(m)
}
var xxx_messageInfo_VStreamRequest proto.InternalMessageInfo
func (m *VStreamRequest) GetEffectiveCallerId() *vtrpc.CallerID {
if m != nil {
return m.EffectiveCallerId
}
return nil
}
func (m *VStreamRequest) GetImmediateCallerId() *query.VTGateCallerID {
if m != nil {
return m.ImmediateCallerId
}
return nil
}
func (m *VStreamRequest) GetTarget() *query.Target {
if m != nil {
return m.Target
}
return nil
}
func (m *VStreamRequest) GetPosition() string {
if m != nil {
return m.Position
}
return ""
}
func (m *VStreamRequest) GetFilter() *Filter {
if m != nil {
return m.Filter
}
return nil
}
// VStreamResponse is the response from VStream
type VStreamResponse struct {
Events []*VEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VStreamResponse) Reset() { *m = VStreamResponse{} }
func (m *VStreamResponse) String() string { return proto.CompactTextString(m) }
func (*VStreamResponse) ProtoMessage() {}
func (*VStreamResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_6d214635eb8c538c, []int{14}
}
func (m *VStreamResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VStreamResponse.Unmarshal(m, b)
}
func (m *VStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic)
}
func (dst *VStreamResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_VStreamResponse.Merge(dst, src)
}
func (m *VStreamResponse) XXX_Size() int {
return xxx_messageInfo_VStreamResponse.Size(m)
}
func (m *VStreamResponse) XXX_DiscardUnknown() {
xxx_messageInfo_VStreamResponse.DiscardUnknown(m)
}
var xxx_messageInfo_VStreamResponse proto.InternalMessageInfo
func (m *VStreamResponse) GetEvents() []*VEvent {
if m != nil {
return m.Events
}
return nil
}
func init() {
proto.RegisterType((*Charset)(nil), "binlogdata.Charset")
proto.RegisterType((*BinlogTransaction)(nil), "binlogdata.BinlogTransaction")
@ -513,52 +1046,96 @@ func init() {
proto.RegisterType((*StreamKeyRangeResponse)(nil), "binlogdata.StreamKeyRangeResponse")
proto.RegisterType((*StreamTablesRequest)(nil), "binlogdata.StreamTablesRequest")
proto.RegisterType((*StreamTablesResponse)(nil), "binlogdata.StreamTablesResponse")
proto.RegisterType((*Rule)(nil), "binlogdata.Rule")
proto.RegisterType((*Filter)(nil), "binlogdata.Filter")
proto.RegisterType((*BinlogSource)(nil), "binlogdata.BinlogSource")
proto.RegisterType((*RowChange)(nil), "binlogdata.RowChange")
proto.RegisterType((*RowEvent)(nil), "binlogdata.RowEvent")
proto.RegisterType((*FieldEvent)(nil), "binlogdata.FieldEvent")
proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent")
proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest")
proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse")
proto.RegisterEnum("binlogdata.OnDDLAction", OnDDLAction_name, OnDDLAction_value)
proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value)
proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, BinlogTransaction_Statement_Category_value)
}
func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_ac14f15f6b19a931) }
func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_6d214635eb8c538c) }
var fileDescriptor_binlogdata_ac14f15f6b19a931 = []byte{
// 640 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcd, 0x6e, 0xda, 0x4a,
0x14, 0xbe, 0xc6, 0x40, 0xec, 0xe3, 0xdc, 0x64, 0x98, 0xfc, 0x08, 0x21, 0x5d, 0x09, 0xb1, 0x09,
0x77, 0x71, 0xcd, 0x95, 0xab, 0x3e, 0x40, 0x8c, 0xad, 0x88, 0xc4, 0x90, 0x68, 0x70, 0x36, 0xd9,
0x58, 0xc6, 0x99, 0x12, 0x04, 0xf1, 0x38, 0x9e, 0x09, 0xaa, 0x9f, 0xa3, 0x4f, 0xd1, 0xb7, 0xe8,
0xaa, 0x6f, 0xd2, 0xf7, 0xa8, 0x3c, 0x36, 0x86, 0xa4, 0x52, 0x9b, 0x2e, 0xba, 0x3b, 0xdf, 0x99,
0xef, 0x9c, 0x39, 0xdf, 0x37, 0x47, 0x03, 0x68, 0xb6, 0x88, 0x57, 0x6c, 0x7e, 0x1f, 0x8a, 0xd0,
0x4c, 0x52, 0x26, 0x18, 0x86, 0x6d, 0xa6, 0x63, 0x3c, 0x3d, 0xd3, 0x34, 0x2b, 0x0e, 0x3a, 0x07,
0x82, 0x25, 0x6c, 0x4b, 0xec, 0x8d, 0x61, 0x6f, 0xf8, 0x10, 0xa6, 0x9c, 0x0a, 0x7c, 0x0a, 0xcd,
0x68, 0xb5, 0xa0, 0xb1, 0x68, 0x2b, 0x5d, 0xa5, 0xdf, 0x20, 0x25, 0xc2, 0x18, 0xea, 0x11, 0x8b,
0xe3, 0x76, 0x4d, 0x66, 0x65, 0x9c, 0x73, 0x39, 0x4d, 0xd7, 0x34, 0x6d, 0xab, 0x05, 0xb7, 0x40,
0xbd, 0x6f, 0x2a, 0xb4, 0x6c, 0x79, 0xb5, 0x9f, 0x86, 0x31, 0x0f, 0x23, 0xb1, 0x60, 0x31, 0xbe,
0x00, 0xe0, 0x22, 0x14, 0xf4, 0x91, 0xc6, 0x82, 0xb7, 0x95, 0xae, 0xda, 0x37, 0xac, 0x33, 0x73,
0x67, 0xe8, 0x1f, 0x4a, 0xcc, 0xe9, 0x86, 0x4f, 0x76, 0x4a, 0xb1, 0x05, 0x06, 0x5d, 0xd3, 0x58,
0x04, 0x82, 0x2d, 0x69, 0xdc, 0xae, 0x77, 0x95, 0xbe, 0x61, 0xb5, 0xcc, 0x42, 0xa0, 0x9b, 0x9f,
0xf8, 0xf9, 0x01, 0x01, 0x5a, 0xc5, 0x9d, 0xaf, 0x35, 0xd0, 0xab, 0x6e, 0xd8, 0x03, 0x2d, 0x0a,
0x05, 0x9d, 0xb3, 0x34, 0x93, 0x32, 0x0f, 0xac, 0xff, 0xdf, 0x38, 0x88, 0x39, 0x2c, 0xeb, 0x48,
0xd5, 0x01, 0xff, 0x07, 0x7b, 0x51, 0xe1, 0x9e, 0x74, 0xc7, 0xb0, 0x8e, 0x76, 0x9b, 0x95, 0xc6,
0x92, 0x0d, 0x07, 0x23, 0x50, 0xf9, 0xd3, 0x4a, 0x5a, 0xb6, 0x4f, 0xf2, 0xb0, 0xf7, 0x59, 0x01,
0x6d, 0xd3, 0x17, 0x1f, 0xc1, 0xa1, 0xed, 0x05, 0xb7, 0x13, 0xe2, 0x0e, 0xaf, 0x2f, 0x26, 0xa3,
0x3b, 0xd7, 0x41, 0x7f, 0xe1, 0x7d, 0xd0, 0x6c, 0x2f, 0xb0, 0xdd, 0x8b, 0xd1, 0x04, 0x29, 0xf8,
0x6f, 0xd0, 0x6d, 0x2f, 0x18, 0x5e, 0x8f, 0xc7, 0x23, 0x1f, 0xd5, 0xf0, 0x21, 0x18, 0xb6, 0x17,
0x90, 0x6b, 0xcf, 0xb3, 0xcf, 0x87, 0x57, 0x48, 0xc5, 0x27, 0xd0, 0xb2, 0xbd, 0xc0, 0x19, 0x7b,
0x81, 0xe3, 0xde, 0x10, 0x77, 0x78, 0xee, 0xbb, 0x0e, 0xaa, 0x63, 0x80, 0x66, 0x9e, 0x76, 0x3c,
0xd4, 0x28, 0xe3, 0xa9, 0xeb, 0xa3, 0x66, 0xd9, 0x6e, 0x34, 0x99, 0xba, 0xc4, 0x47, 0x7b, 0x25,
0xbc, 0xbd, 0x71, 0xce, 0x7d, 0x17, 0x69, 0x25, 0x74, 0x5c, 0xcf, 0xf5, 0x5d, 0xa4, 0x5f, 0xd6,
0xb5, 0x1a, 0x52, 0x2f, 0xeb, 0x9a, 0x8a, 0xea, 0xbd, 0x4f, 0x0a, 0x9c, 0x4c, 0x45, 0x4a, 0xc3,
0xc7, 0x2b, 0x9a, 0x91, 0x30, 0x9e, 0x53, 0x42, 0x9f, 0x9e, 0x29, 0x17, 0xb8, 0x03, 0x5a, 0xc2,
0xf8, 0x22, 0xf7, 0x4e, 0x1a, 0xac, 0x93, 0x0a, 0xe3, 0x01, 0xe8, 0x4b, 0x9a, 0x05, 0x69, 0xce,
0x2f, 0x0d, 0xc3, 0x66, 0xb5, 0x90, 0x55, 0x27, 0x6d, 0x59, 0x46, 0xbb, 0xfe, 0xaa, 0xbf, 0xf6,
0xb7, 0xf7, 0x01, 0x4e, 0x5f, 0x0f, 0xc5, 0x13, 0x16, 0x73, 0x8a, 0x3d, 0xc0, 0x45, 0x61, 0x20,
0xb6, 0x6f, 0x2b, 0xe7, 0x33, 0xac, 0x7f, 0x7e, 0xba, 0x00, 0xa4, 0x35, 0x7b, 0x9d, 0xea, 0x7d,
0x84, 0xa3, 0xe2, 0x1e, 0x3f, 0x9c, 0xad, 0x28, 0x7f, 0x8b, 0xf4, 0x53, 0x68, 0x0a, 0x49, 0x6e,
0xd7, 0xba, 0x6a, 0x5f, 0x27, 0x25, 0xfa, 0x5d, 0x85, 0xf7, 0x70, 0xfc, 0xf2, 0xe6, 0x3f, 0xa2,
0xef, 0x8b, 0x02, 0xfb, 0x05, 0x71, 0xca, 0x9e, 0xd3, 0x88, 0xe6, 0xca, 0x96, 0x34, 0xe3, 0x49,
0x18, 0xd1, 0x8d, 0xb2, 0x0d, 0xc6, 0xc7, 0xd0, 0xe0, 0x0f, 0x61, 0x7a, 0x2f, 0x1f, 0x54, 0x27,
0x05, 0xc0, 0xef, 0xc1, 0x90, 0x0a, 0x45, 0x20, 0xb2, 0x84, 0x4a, 0x6d, 0x07, 0xd6, 0xf1, 0xf6,
0xb1, 0xe5, 0xfc, 0xc2, 0xcf, 0x12, 0x4a, 0x40, 0x54, 0xf1, 0xcb, 0x0d, 0xa9, 0xbf, 0x61, 0x43,
0xb6, 0xbe, 0x36, 0x76, 0x7d, 0xb5, 0xff, 0xbd, 0x3b, 0x5b, 0x2f, 0x04, 0xe5, 0xdc, 0x5c, 0xb0,
0x41, 0x11, 0x0d, 0xe6, 0x6c, 0xb0, 0x16, 0x03, 0xf9, 0xef, 0x0d, 0xb6, 0x96, 0xcc, 0x9a, 0x32,
0xf3, 0xee, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x08, 0xae, 0x13, 0x46, 0x05, 0x00, 0x00,
var fileDescriptor_binlogdata_6d214635eb8c538c = []byte{
// 1184 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x5b, 0x6e, 0xdb, 0x56,
0x13, 0x8e, 0x44, 0x8a, 0x12, 0x87, 0x8e, 0x4d, 0x1f, 0x5f, 0x7e, 0xc1, 0xf8, 0x03, 0x18, 0x44,
0xdb, 0xb8, 0x06, 0x2a, 0xa7, 0xea, 0xed, 0xa9, 0x2d, 0x2c, 0x91, 0x71, 0x95, 0xd0, 0x92, 0x73,
0xcc, 0x24, 0x45, 0x5e, 0x08, 0x9a, 0x3c, 0xb2, 0x09, 0x53, 0xa4, 0x4c, 0x1e, 0xcb, 0xd5, 0x0a,
0xba, 0x80, 0xbe, 0x76, 0x03, 0xed, 0x42, 0xba, 0x92, 0x76, 0x1f, 0xc5, 0xb9, 0x90, 0x92, 0x1c,
0xa0, 0x71, 0x1f, 0xfa, 0x36, 0xf7, 0x33, 0xf3, 0xcd, 0x70, 0x86, 0x60, 0x5e, 0xc4, 0x69, 0x92,
0x5d, 0x46, 0x01, 0x0d, 0x3a, 0xd3, 0x3c, 0xa3, 0x19, 0x82, 0x85, 0x64, 0xcf, 0x98, 0xd1, 0x7c,
0x1a, 0x0a, 0xc5, 0x9e, 0x71, 0x73, 0x4b, 0xf2, 0xb9, 0x64, 0xd6, 0x69, 0x36, 0xcd, 0x16, 0x5e,
0xd6, 0x29, 0x34, 0xfb, 0x57, 0x41, 0x5e, 0x10, 0x8a, 0x76, 0x41, 0x0b, 0x93, 0x98, 0xa4, 0xb4,
0x5d, 0xdb, 0xaf, 0x1d, 0x34, 0xb0, 0xe4, 0x10, 0x02, 0x35, 0xcc, 0xd2, 0xb4, 0x5d, 0xe7, 0x52,
0x4e, 0x33, 0xdb, 0x82, 0xe4, 0x33, 0x92, 0xb7, 0x15, 0x61, 0x2b, 0x38, 0xeb, 0x2f, 0x05, 0x36,
0x7b, 0x3c, 0x0f, 0x2f, 0x0f, 0xd2, 0x22, 0x08, 0x69, 0x9c, 0xa5, 0xe8, 0x04, 0xa0, 0xa0, 0x01,
0x25, 0x13, 0x92, 0xd2, 0xa2, 0x5d, 0xdb, 0x57, 0x0e, 0x8c, 0xee, 0xd3, 0xce, 0x52, 0x05, 0xef,
0xb9, 0x74, 0xce, 0x4b, 0x7b, 0xbc, 0xe4, 0x8a, 0xba, 0x60, 0x90, 0x19, 0x49, 0xa9, 0x4f, 0xb3,
0x6b, 0x92, 0xb6, 0xd5, 0xfd, 0xda, 0x81, 0xd1, 0xdd, 0xec, 0x88, 0x02, 0x1d, 0xa6, 0xf1, 0x98,
0x02, 0x03, 0xa9, 0xe8, 0xbd, 0x3f, 0xea, 0xa0, 0x57, 0xd1, 0x90, 0x0b, 0xad, 0x30, 0xa0, 0xe4,
0x32, 0xcb, 0xe7, 0xbc, 0xcc, 0xf5, 0xee, 0xb3, 0x07, 0x26, 0xd2, 0xe9, 0x4b, 0x3f, 0x5c, 0x45,
0x40, 0x9f, 0x41, 0x33, 0x14, 0xe8, 0x71, 0x74, 0x8c, 0xee, 0xd6, 0x72, 0x30, 0x09, 0x2c, 0x2e,
0x6d, 0x90, 0x09, 0x4a, 0x71, 0x93, 0x70, 0xc8, 0xd6, 0x30, 0x23, 0xad, 0xdf, 0x6a, 0xd0, 0x2a,
0xe3, 0xa2, 0x2d, 0xd8, 0xe8, 0xb9, 0xfe, 0xeb, 0x21, 0x76, 0xfa, 0xa3, 0x93, 0xe1, 0xe0, 0x9d,
0x63, 0x9b, 0x8f, 0xd0, 0x1a, 0xb4, 0x7a, 0xae, 0xdf, 0x73, 0x4e, 0x06, 0x43, 0xb3, 0x86, 0x1e,
0x83, 0xde, 0x73, 0xfd, 0xfe, 0xe8, 0xf4, 0x74, 0xe0, 0x99, 0x75, 0xb4, 0x01, 0x46, 0xcf, 0xf5,
0xf1, 0xc8, 0x75, 0x7b, 0xc7, 0xfd, 0x97, 0xa6, 0x82, 0x76, 0x60, 0xb3, 0xe7, 0xfa, 0xf6, 0xa9,
0xeb, 0xdb, 0xce, 0x19, 0x76, 0xfa, 0xc7, 0x9e, 0x63, 0x9b, 0x2a, 0x02, 0xd0, 0x98, 0xd8, 0x76,
0xcd, 0x86, 0xa4, 0xcf, 0x1d, 0xcf, 0xd4, 0x64, 0xb8, 0xc1, 0xf0, 0xdc, 0xc1, 0x9e, 0xd9, 0x94,
0xec, 0xeb, 0x33, 0xfb, 0xd8, 0x73, 0xcc, 0x96, 0x64, 0x6d, 0xc7, 0x75, 0x3c, 0xc7, 0xd4, 0x5f,
0xa8, 0xad, 0xba, 0xa9, 0xbc, 0x50, 0x5b, 0x8a, 0xa9, 0x5a, 0xbf, 0xd4, 0x60, 0xe7, 0x9c, 0xe6,
0x24, 0x98, 0xbc, 0x24, 0x73, 0x1c, 0xa4, 0x97, 0x04, 0x93, 0x9b, 0x5b, 0x52, 0x50, 0xb4, 0x07,
0xad, 0x69, 0x56, 0xc4, 0x0c, 0x3b, 0x0e, 0xb0, 0x8e, 0x2b, 0x1e, 0x1d, 0x81, 0x7e, 0x4d, 0xe6,
0x7e, 0xce, 0xec, 0x25, 0x60, 0xa8, 0x53, 0x0d, 0x64, 0x15, 0xa9, 0x75, 0x2d, 0xa9, 0x65, 0x7c,
0x95, 0x0f, 0xe3, 0x6b, 0x8d, 0x61, 0xf7, 0x7e, 0x52, 0xc5, 0x34, 0x4b, 0x0b, 0x82, 0x5c, 0x40,
0xc2, 0xd1, 0xa7, 0x8b, 0xde, 0xf2, 0xfc, 0x8c, 0xee, 0x93, 0x7f, 0x1c, 0x00, 0xbc, 0x79, 0x71,
0x5f, 0x64, 0xfd, 0x04, 0x5b, 0xe2, 0x1d, 0x2f, 0xb8, 0x48, 0x48, 0xf1, 0x90, 0xd2, 0x77, 0x41,
0xa3, 0xdc, 0xb8, 0x5d, 0xdf, 0x57, 0x0e, 0x74, 0x2c, 0xb9, 0x7f, 0x5b, 0x61, 0x04, 0xdb, 0xab,
0x2f, 0xff, 0x27, 0xf5, 0x7d, 0x09, 0x2a, 0xbe, 0x4d, 0x08, 0xda, 0x86, 0xc6, 0x24, 0xa0, 0xe1,
0x95, 0xac, 0x46, 0x30, 0xac, 0x94, 0x71, 0x9c, 0x50, 0x92, 0xf3, 0x16, 0xea, 0x58, 0x72, 0xd6,
0x33, 0xd0, 0x9e, 0x73, 0x0a, 0x7d, 0x02, 0x8d, 0xfc, 0x96, 0xd5, 0x2a, 0x3e, 0x75, 0x73, 0x39,
0x01, 0x16, 0x18, 0x0b, 0xb5, 0xf5, 0x6b, 0x1d, 0xd6, 0x44, 0x42, 0xe7, 0xd9, 0x6d, 0x1e, 0x12,
0x86, 0xe0, 0x35, 0x99, 0x17, 0xd3, 0x20, 0x24, 0x25, 0x82, 0x25, 0xcf, 0x92, 0x29, 0xae, 0x82,
0x3c, 0x92, 0xaf, 0x0a, 0x06, 0x7d, 0x05, 0x06, 0x47, 0x92, 0xfa, 0x74, 0x3e, 0x25, 0x1c, 0xc3,
0xf5, 0xee, 0xf6, 0x62, 0xa8, 0x38, 0x4e, 0xd4, 0x9b, 0x4f, 0x09, 0x06, 0x5a, 0xd1, 0xab, 0x93,
0xa8, 0x3e, 0x60, 0x12, 0x17, 0xfd, 0x6b, 0xac, 0xf4, 0xef, 0xb0, 0x02, 0x43, 0x93, 0x51, 0x96,
0x6a, 0x15, 0x70, 0x94, 0x00, 0xa1, 0x0e, 0x68, 0x59, 0xea, 0x47, 0x51, 0xd2, 0x6e, 0xf2, 0x34,
0xff, 0xb7, 0x6c, 0x3b, 0x4a, 0x6d, 0xdb, 0x3d, 0x16, 0x2d, 0x69, 0x64, 0xa9, 0x1d, 0x25, 0xd6,
0x2b, 0xd0, 0x71, 0x76, 0xd7, 0xbf, 0xe2, 0x09, 0x58, 0xa0, 0x5d, 0x90, 0x71, 0x96, 0x13, 0xd9,
0x55, 0x90, 0x5b, 0x0f, 0x67, 0x77, 0x58, 0x6a, 0xd0, 0x3e, 0x34, 0x82, 0x71, 0xd9, 0x98, 0x55,
0x13, 0xa1, 0xb0, 0x02, 0x68, 0xe1, 0xec, 0x8e, 0x6f, 0x4a, 0xf4, 0x04, 0x04, 0x22, 0x7e, 0x1a,
0x4c, 0x4a, 0xb8, 0x75, 0x2e, 0x19, 0x06, 0x13, 0x82, 0xbe, 0x06, 0x23, 0xcf, 0xee, 0xfc, 0x90,
0x3f, 0x2f, 0xc6, 0xd6, 0xe8, 0xee, 0xac, 0xb4, 0xb2, 0x4c, 0x0e, 0x43, 0x5e, 0x92, 0x85, 0xf5,
0x0a, 0xe0, 0x79, 0x4c, 0x92, 0xe8, 0x41, 0x8f, 0x7c, 0xc4, 0xe0, 0x23, 0x49, 0x54, 0xc6, 0x5f,
0x93, 0x29, 0xf3, 0x08, 0x58, 0xea, 0xac, 0x3f, 0x6b, 0xa0, 0xbd, 0x11, 0xf1, 0x0e, 0x41, 0xe5,
0x8d, 0x16, 0xbb, 0x7b, 0x77, 0x39, 0x1d, 0x61, 0xc1, 0x5b, 0xcd, 0x6d, 0xd0, 0xff, 0x41, 0xa7,
0xf1, 0x84, 0x14, 0x34, 0x98, 0x4c, 0x39, 0x24, 0x0a, 0x5e, 0x08, 0xd8, 0x59, 0xbb, 0xa4, 0x71,
0xc4, 0x47, 0x46, 0xc7, 0x9c, 0x66, 0x0b, 0x9a, 0xb5, 0x47, 0xe5, 0x22, 0x46, 0xa2, 0xcf, 0x41,
0x67, 0x28, 0xf0, 0x7b, 0xd2, 0x6e, 0x70, 0x58, 0xb7, 0xef, 0x61, 0xc0, 0x9f, 0xc5, 0xad, 0xbc,
0xc4, 0xf5, 0x1b, 0x30, 0x78, 0xde, 0xd2, 0x49, 0xcc, 0xc5, 0xee, 0xea, 0x5c, 0x94, 0xf8, 0x60,
0x18, 0x57, 0xb4, 0xf5, 0x73, 0x1d, 0xd6, 0xdf, 0x88, 0xcf, 0xbb, 0x5c, 0x29, 0xdf, 0xc3, 0x16,
0x19, 0x8f, 0x49, 0x48, 0xe3, 0x19, 0xf1, 0xc3, 0x20, 0x49, 0x48, 0xee, 0xc7, 0x91, 0x1c, 0x81,
0x8d, 0x8e, 0x38, 0xf3, 0x7d, 0x2e, 0x1f, 0xd8, 0x78, 0xb3, 0xb2, 0x95, 0xa2, 0x08, 0x39, 0xb0,
0x15, 0x4f, 0x26, 0x24, 0x8a, 0x03, 0xba, 0x1c, 0x40, 0x0c, 0xc8, 0x8e, 0x44, 0xfb, 0x8d, 0x77,
0x12, 0x50, 0xb2, 0x08, 0x53, 0x79, 0x54, 0x61, 0x3e, 0x66, 0xe3, 0x9f, 0x5f, 0x56, 0x5b, 0xea,
0xb1, 0xf4, 0xf4, 0xb8, 0x10, 0x4b, 0xe5, 0xca, 0x06, 0x54, 0xef, 0x6d, 0xc0, 0xc5, 0x97, 0xd2,
0xf8, 0xd0, 0x97, 0x62, 0x7d, 0x0b, 0x1b, 0x15, 0x10, 0x72, 0xc3, 0x1d, 0x82, 0xc6, 0xf1, 0x2c,
0x97, 0x0a, 0x7a, 0xbf, 0xf5, 0x58, 0x5a, 0x1c, 0x7e, 0x07, 0xc6, 0xd2, 0xe7, 0xc4, 0x2e, 0xde,
0xe0, 0x64, 0x38, 0xc2, 0x8e, 0xf9, 0x08, 0xb5, 0x40, 0x3d, 0xf7, 0x46, 0x67, 0x66, 0x8d, 0x51,
0xce, 0x8f, 0x4e, 0x5f, 0x5c, 0x51, 0x46, 0xf9, 0xd2, 0x48, 0x39, 0xfc, 0xbd, 0x06, 0xb0, 0x98,
0x26, 0x64, 0x40, 0xf3, 0xf5, 0xf0, 0xe5, 0x70, 0xf4, 0x76, 0x28, 0x02, 0x9c, 0x78, 0x03, 0xdb,
0xac, 0x21, 0x1d, 0x1a, 0xe2, 0x2c, 0xd7, 0xd9, 0x0b, 0xf2, 0x26, 0x2b, 0xec, 0x60, 0x57, 0x07,
0x59, 0x45, 0x4d, 0x50, 0xaa, 0xb3, 0x2b, 0xef, 0xac, 0xc6, 0x02, 0x62, 0xe7, 0xcc, 0x3d, 0xee,
0x3b, 0x66, 0x93, 0x29, 0xaa, 0x8b, 0x0b, 0xa0, 0x95, 0xe7, 0x96, 0x79, 0xb2, 0x23, 0x0d, 0xec,
0x9d, 0x91, 0xf7, 0x83, 0x83, 0x4d, 0x83, 0xc9, 0xf0, 0xe8, 0xad, 0xb9, 0xc6, 0x64, 0xcf, 0x07,
0x8e, 0x6b, 0x9b, 0x8f, 0x7b, 0x9f, 0xbe, 0x7b, 0x3a, 0x8b, 0x29, 0x29, 0x8a, 0x4e, 0x9c, 0x1d,
0x09, 0xea, 0xe8, 0x32, 0x3b, 0x9a, 0xd1, 0x23, 0xfe, 0x87, 0x77, 0xb4, 0x80, 0xe9, 0x42, 0xe3,
0x92, 0x2f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x68, 0xbd, 0x20, 0x05, 0x3d, 0x0a, 0x00, 0x00,
}

Просмотреть файл

@ -191,9 +191,9 @@ var _UpdateStream_serviceDesc = grpc.ServiceDesc{
Metadata: "binlogservice.proto",
}
func init() { proto.RegisterFile("binlogservice.proto", fileDescriptor_binlogservice_0e1eb8b2f97a2dc1) }
func init() { proto.RegisterFile("binlogservice.proto", fileDescriptor_binlogservice_bfebf84e565603b8) }
var fileDescriptor_binlogservice_0e1eb8b2f97a2dc1 = []byte{
var fileDescriptor_binlogservice_bfebf84e565603b8 = []byte{
// 177 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xca, 0xcc, 0xcb,
0xc9, 0x4f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,

Просмотреть файл

@ -49,7 +49,7 @@ func (x Level) String() string {
return proto.EnumName(Level_name, int32(x))
}
func (Level) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_logutil_39c26af5691dd7cd, []int{0}
return fileDescriptor_logutil_1922c06158165cc5, []int{0}
}
// Time represents a time stamp in nanoseconds. In go, use logutil library
@ -66,7 +66,7 @@ func (m *Time) Reset() { *m = Time{} }
func (m *Time) String() string { return proto.CompactTextString(m) }
func (*Time) ProtoMessage() {}
func (*Time) Descriptor() ([]byte, []int) {
return fileDescriptor_logutil_39c26af5691dd7cd, []int{0}
return fileDescriptor_logutil_1922c06158165cc5, []int{0}
}
func (m *Time) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Time.Unmarshal(m, b)
@ -116,7 +116,7 @@ func (m *Event) Reset() { *m = Event{} }
func (m *Event) String() string { return proto.CompactTextString(m) }
func (*Event) ProtoMessage() {}
func (*Event) Descriptor() ([]byte, []int) {
return fileDescriptor_logutil_39c26af5691dd7cd, []int{1}
return fileDescriptor_logutil_1922c06158165cc5, []int{1}
}
func (m *Event) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Event.Unmarshal(m, b)
@ -177,9 +177,9 @@ func init() {
proto.RegisterEnum("logutil.Level", Level_name, Level_value)
}
func init() { proto.RegisterFile("logutil.proto", fileDescriptor_logutil_39c26af5691dd7cd) }
func init() { proto.RegisterFile("logutil.proto", fileDescriptor_logutil_1922c06158165cc5) }
var fileDescriptor_logutil_39c26af5691dd7cd = []byte{
var fileDescriptor_logutil_1922c06158165cc5 = []byte{
// 260 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xfb, 0x40,
0x10, 0xc5, 0xff, 0xdb, 0x64, 0xff, 0xb1, 0x13, 0x5a, 0xc2, 0xe0, 0x21, 0xc7, 0x58, 0x8a, 0x04,

Просмотреть файл

@ -34,7 +34,7 @@ func (m *StartRequest) Reset() { *m = StartRequest{} }
func (m *StartRequest) String() string { return proto.CompactTextString(m) }
func (*StartRequest) ProtoMessage() {}
func (*StartRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{0}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{0}
}
func (m *StartRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StartRequest.Unmarshal(m, b)
@ -71,7 +71,7 @@ func (m *StartResponse) Reset() { *m = StartResponse{} }
func (m *StartResponse) String() string { return proto.CompactTextString(m) }
func (*StartResponse) ProtoMessage() {}
func (*StartResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{1}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{1}
}
func (m *StartResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StartResponse.Unmarshal(m, b)
@ -102,7 +102,7 @@ func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} }
func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) }
func (*ShutdownRequest) ProtoMessage() {}
func (*ShutdownRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{2}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{2}
}
func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShutdownRequest.Unmarshal(m, b)
@ -139,7 +139,7 @@ func (m *ShutdownResponse) Reset() { *m = ShutdownResponse{} }
func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) }
func (*ShutdownResponse) ProtoMessage() {}
func (*ShutdownResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{3}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{3}
}
func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShutdownResponse.Unmarshal(m, b)
@ -169,7 +169,7 @@ func (m *RunMysqlUpgradeRequest) Reset() { *m = RunMysqlUpgradeRequest{}
func (m *RunMysqlUpgradeRequest) String() string { return proto.CompactTextString(m) }
func (*RunMysqlUpgradeRequest) ProtoMessage() {}
func (*RunMysqlUpgradeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{4}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{4}
}
func (m *RunMysqlUpgradeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RunMysqlUpgradeRequest.Unmarshal(m, b)
@ -199,7 +199,7 @@ func (m *RunMysqlUpgradeResponse) Reset() { *m = RunMysqlUpgradeResponse
func (m *RunMysqlUpgradeResponse) String() string { return proto.CompactTextString(m) }
func (*RunMysqlUpgradeResponse) ProtoMessage() {}
func (*RunMysqlUpgradeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{5}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{5}
}
func (m *RunMysqlUpgradeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RunMysqlUpgradeResponse.Unmarshal(m, b)
@ -229,7 +229,7 @@ func (m *ReinitConfigRequest) Reset() { *m = ReinitConfigRequest{} }
func (m *ReinitConfigRequest) String() string { return proto.CompactTextString(m) }
func (*ReinitConfigRequest) ProtoMessage() {}
func (*ReinitConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{6}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{6}
}
func (m *ReinitConfigRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReinitConfigRequest.Unmarshal(m, b)
@ -259,7 +259,7 @@ func (m *ReinitConfigResponse) Reset() { *m = ReinitConfigResponse{} }
func (m *ReinitConfigResponse) String() string { return proto.CompactTextString(m) }
func (*ReinitConfigResponse) ProtoMessage() {}
func (*ReinitConfigResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{7}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{7}
}
func (m *ReinitConfigResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReinitConfigResponse.Unmarshal(m, b)
@ -289,7 +289,7 @@ func (m *RefreshConfigRequest) Reset() { *m = RefreshConfigRequest{} }
func (m *RefreshConfigRequest) String() string { return proto.CompactTextString(m) }
func (*RefreshConfigRequest) ProtoMessage() {}
func (*RefreshConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{8}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{8}
}
func (m *RefreshConfigRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RefreshConfigRequest.Unmarshal(m, b)
@ -319,7 +319,7 @@ func (m *RefreshConfigResponse) Reset() { *m = RefreshConfigResponse{} }
func (m *RefreshConfigResponse) String() string { return proto.CompactTextString(m) }
func (*RefreshConfigResponse) ProtoMessage() {}
func (*RefreshConfigResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_mysqlctl_dc3430948664e7fa, []int{9}
return fileDescriptor_mysqlctl_6cf72a3618d6fe7c, []int{9}
}
func (m *RefreshConfigResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RefreshConfigResponse.Unmarshal(m, b)
@ -556,9 +556,9 @@ var _MysqlCtl_serviceDesc = grpc.ServiceDesc{
Metadata: "mysqlctl.proto",
}
func init() { proto.RegisterFile("mysqlctl.proto", fileDescriptor_mysqlctl_dc3430948664e7fa) }
func init() { proto.RegisterFile("mysqlctl.proto", fileDescriptor_mysqlctl_6cf72a3618d6fe7c) }
var fileDescriptor_mysqlctl_dc3430948664e7fa = []byte{
var fileDescriptor_mysqlctl_6cf72a3618d6fe7c = []byte{
// 339 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x4d, 0x4f, 0xfa, 0x30,
0x1c, 0xc7, 0xff, 0x84, 0xfc, 0xcd, 0xfc, 0x09, 0xce, 0x54, 0x79, 0x6a, 0xa2, 0xe0, 0x12, 0x95,

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -6,6 +6,7 @@ package queryservice // import "vitess.io/vitess/go/vt/proto/queryservice"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import binlogdata "vitess.io/vitess/go/vt/proto/binlogdata"
import query "vitess.io/vitess/go/vt/proto/query"
import (
@ -85,6 +86,8 @@ type QueryClient interface {
StreamHealth(ctx context.Context, in *query.StreamHealthRequest, opts ...grpc.CallOption) (Query_StreamHealthClient, error)
// UpdateStream asks the server to return a stream of the updates that have been applied to its database.
UpdateStream(ctx context.Context, in *query.UpdateStreamRequest, opts ...grpc.CallOption) (Query_UpdateStreamClient, error)
// VStream streams vreplication events.
VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error)
}
type queryClient struct {
@ -376,6 +379,38 @@ func (x *queryUpdateStreamClient) Recv() (*query.UpdateStreamResponse, error) {
return m, nil
}
func (c *queryClient) VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[4], "/queryservice.Query/VStream", opts...)
if err != nil {
return nil, err
}
x := &queryVStreamClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Query_VStreamClient interface {
Recv() (*binlogdata.VStreamResponse, error)
grpc.ClientStream
}
type queryVStreamClient struct {
grpc.ClientStream
}
func (x *queryVStreamClient) Recv() (*binlogdata.VStreamResponse, error) {
m := new(binlogdata.VStreamResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// QueryServer is the server API for Query service.
type QueryServer interface {
// Execute executes the specified SQL query (might be in a
@ -427,6 +462,8 @@ type QueryServer interface {
StreamHealth(*query.StreamHealthRequest, Query_StreamHealthServer) error
// UpdateStream asks the server to return a stream of the updates that have been applied to its database.
UpdateStream(*query.UpdateStreamRequest, Query_UpdateStreamServer) error
// VStream streams vreplication events.
VStream(*binlogdata.VStreamRequest, Query_VStreamServer) error
}
func RegisterQueryServer(s *grpc.Server, srv QueryServer) {
@ -823,6 +860,27 @@ func (x *queryUpdateStreamServer) Send(m *query.UpdateStreamResponse) error {
return x.ServerStream.SendMsg(m)
}
func _Query_VStream_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(binlogdata.VStreamRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(QueryServer).VStream(m, &queryVStreamServer{stream})
}
type Query_VStreamServer interface {
Send(*binlogdata.VStreamResponse) error
grpc.ServerStream
}
type queryVStreamServer struct {
grpc.ServerStream
}
func (x *queryVStreamServer) Send(m *binlogdata.VStreamResponse) error {
return x.ServerStream.SendMsg(m)
}
var _Query_serviceDesc = grpc.ServiceDesc{
ServiceName: "queryservice.Query",
HandlerType: (*QueryServer)(nil),
@ -917,45 +975,51 @@ var _Query_serviceDesc = grpc.ServiceDesc{
Handler: _Query_UpdateStream_Handler,
ServerStreams: true,
},
{
StreamName: "VStream",
Handler: _Query_VStream_Handler,
ServerStreams: true,
},
},
Metadata: "queryservice.proto",
}
func init() { proto.RegisterFile("queryservice.proto", fileDescriptor_queryservice_81e549fbfb878a8d) }
func init() { proto.RegisterFile("queryservice.proto", fileDescriptor_queryservice_17509881eb07629d) }
var fileDescriptor_queryservice_81e549fbfb878a8d = []byte{
// 519 bytes of a gzipped FileDescriptorProto
var fileDescriptor_queryservice_17509881eb07629d = []byte{
// 544 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x95, 0xdf, 0x6b, 0xd4, 0x40,
0x10, 0xc7, 0xf5, 0xa1, 0xad, 0x4c, 0xe3, 0xaf, 0xad, 0x55, 0x9b, 0xd6, 0xb6, 0xf6, 0x4d, 0x84,
0x46, 0x54, 0x10, 0x0a, 0x3e, 0xf4, 0x82, 0xa2, 0x14, 0x7f, 0xdd, 0x59, 0x10, 0x1f, 0x84, 0x6d,
0x6e, 0x38, 0x43, 0x73, 0x49, 0xba, 0xbb, 0x77, 0xe8, 0x5f, 0xe4, 0xbf, 0x29, 0x66, 0x33, 0x93,
0xdd, 0xbd, 0xc4, 0xb7, 0xce, 0xf7, 0x3b, 0xf3, 0x61, 0x6e, 0xa7, 0x33, 0x01, 0x71, 0xb5, 0x40,
0xf5, 0x5b, 0xa3, 0x5a, 0xe6, 0x19, 0x1e, 0xd7, 0xaa, 0x32, 0x95, 0x88, 0x5c, 0x2d, 0xde, 0x6c,
0x22, 0x6b, 0x3d, 0xff, 0x13, 0xc1, 0xda, 0x97, 0x7f, 0xb1, 0x38, 0x81, 0x8d, 0x37, 0xbf, 0x30,
0x5b, 0x18, 0x14, 0xdb, 0xc7, 0x36, 0xa5, 0x8d, 0xc7, 0x78, 0xb5, 0x40, 0x6d, 0xe2, 0xfb, 0xa1,
0xac, 0xeb, 0xaa, 0xd4, 0x78, 0x74, 0x4d, 0xbc, 0x87, 0xa8, 0x15, 0x47, 0xd2, 0x64, 0x3f, 0x45,
0xec, 0x67, 0x36, 0x22, 0x51, 0x76, 0x7b, 0x3d, 0x46, 0x7d, 0x84, 0x9b, 0x13, 0xa3, 0x50, 0xce,
0xa9, 0x19, 0xca, 0xf7, 0x54, 0x82, 0xed, 0xf5, 0x9b, 0x44, 0x7b, 0x76, 0x5d, 0xbc, 0x84, 0xb5,
0x11, 0xce, 0xf2, 0x52, 0x6c, 0xb5, 0xa9, 0x4d, 0x44, 0xf5, 0xf7, 0x7c, 0x91, 0xbb, 0x78, 0x05,
0xeb, 0x69, 0x35, 0x9f, 0xe7, 0x46, 0x50, 0x86, 0x0d, 0xa9, 0x6e, 0x3b, 0x50, 0xb9, 0xf0, 0x35,
0xdc, 0x18, 0x57, 0x45, 0x71, 0x21, 0xb3, 0x4b, 0x41, 0xef, 0x45, 0x02, 0x15, 0x3f, 0x58, 0xd1,
0xb9, 0xfc, 0x04, 0x36, 0x3e, 0x2b, 0xac, 0xa5, 0xea, 0x86, 0xd0, 0xc6, 0xe1, 0x10, 0x58, 0xe6,
0xda, 0x4f, 0x70, 0xcb, 0xb6, 0xd3, 0x5a, 0x53, 0xb1, 0xe7, 0x75, 0x49, 0x32, 0x91, 0x1e, 0x0d,
0xb8, 0x0c, 0x3c, 0x87, 0x3b, 0xd4, 0x22, 0x23, 0xf7, 0x83, 0xde, 0x43, 0xe8, 0xc1, 0xa0, 0xcf,
0xd8, 0x6f, 0x70, 0x37, 0x55, 0x28, 0x0d, 0x7e, 0x55, 0xb2, 0xd4, 0x32, 0x33, 0x79, 0x55, 0x0a,
0xaa, 0x5b, 0x71, 0x08, 0x7c, 0x38, 0x9c, 0xc0, 0xe4, 0xb7, 0xb0, 0x39, 0x31, 0x52, 0x99, 0x76,
0x74, 0x3b, 0xfc, 0xcf, 0xc1, 0x1a, 0xd1, 0xe2, 0x3e, 0xcb, 0xe3, 0xa0, 0xe1, 0x39, 0x32, 0xa7,
0xd3, 0x56, 0x38, 0xae, 0xc5, 0x9c, 0x1f, 0xb0, 0x95, 0x56, 0x65, 0x56, 0x2c, 0xa6, 0xde, 0x6f,
0x7d, 0xcc, 0x0f, 0xbf, 0xe2, 0x11, 0xf7, 0xe8, 0x7f, 0x29, 0xcc, 0x1f, 0xc3, 0xed, 0x31, 0xca,
0xa9, 0xcb, 0xa6, 0xa1, 0x06, 0x3a, 0x71, 0xf7, 0x87, 0x6c, 0x77, 0x95, 0x9b, 0x65, 0xa0, 0xf5,
0x8b, 0xdd, 0x0d, 0x09, 0xb6, 0x6f, 0xb7, 0xd7, 0x73, 0x07, 0xed, 0x3a, 0xf6, 0x34, 0x1c, 0xf4,
0xd4, 0x78, 0xf7, 0xe1, 0x70, 0x38, 0xc1, 0x3d, 0x12, 0x1f, 0x50, 0x6b, 0x39, 0x43, 0xbb, 0xf8,
0x7c, 0x24, 0x3c, 0x35, 0x3c, 0x12, 0x81, 0xe9, 0x1c, 0x89, 0x14, 0xa0, 0x35, 0x4f, 0xb3, 0x4b,
0xf1, 0xd0, 0xcf, 0x3f, 0xed, 0xc6, 0xbd, 0xd3, 0xe3, 0x70, 0x53, 0x29, 0xc0, 0xa4, 0x2e, 0x72,
0x63, 0xcf, 0x29, 0x41, 0x3a, 0x29, 0x84, 0xb8, 0x0e, 0x43, 0xce, 0x20, 0xb2, 0xfd, 0xbd, 0x43,
0x59, 0x98, 0xee, 0x92, 0xba, 0x62, 0xf8, 0xfc, 0xbe, 0xe7, 0xfc, 0xac, 0x33, 0x88, 0xce, 0xeb,
0xa9, 0x34, 0xf4, 0x4a, 0x04, 0x73, 0xc5, 0x10, 0xe6, 0x7b, 0x1d, 0x6c, 0xf4, 0xf4, 0xfb, 0x93,
0x65, 0x6e, 0x50, 0xeb, 0xe3, 0xbc, 0x4a, 0xec, 0x5f, 0xc9, 0xac, 0x4a, 0x96, 0x26, 0x69, 0xbe,
0x24, 0x89, 0xfb, 0x8d, 0xb9, 0x58, 0x6f, 0xb4, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x51,
0x5a, 0xbc, 0xc0, 0x8e, 0x06, 0x00, 0x00,
0x10, 0xc7, 0xf5, 0xa1, 0xad, 0x4c, 0x4f, 0xad, 0x5b, 0xab, 0x36, 0xad, 0x6d, 0xed, 0x9b, 0x08,
0x17, 0x51, 0x41, 0x28, 0xf8, 0xd0, 0x0b, 0x16, 0xa5, 0xf8, 0xeb, 0xce, 0x16, 0xf1, 0x41, 0xd8,
0x4b, 0x86, 0x33, 0x34, 0x97, 0x4d, 0x93, 0xbd, 0x43, 0xff, 0x6a, 0xff, 0x05, 0x31, 0x9b, 0x99,
0xec, 0xee, 0x25, 0xbe, 0xdd, 0x7e, 0xbf, 0x33, 0x1f, 0x26, 0x3b, 0x37, 0xb3, 0x20, 0xae, 0x17,
0x58, 0xfe, 0xae, 0xb0, 0x5c, 0xa6, 0x31, 0x0e, 0x8b, 0x52, 0x69, 0x25, 0x06, 0xb6, 0x16, 0x6c,
0xd6, 0x27, 0x63, 0x05, 0x5b, 0xd3, 0x34, 0xcf, 0xd4, 0x2c, 0x91, 0x5a, 0x1a, 0xe5, 0xc5, 0x9f,
0x01, 0xac, 0x7d, 0xf9, 0x17, 0x21, 0x4e, 0x60, 0xe3, 0xed, 0x2f, 0x8c, 0x17, 0x1a, 0xc5, 0xce,
0xd0, 0x24, 0x35, 0xe7, 0x31, 0x5e, 0x2f, 0xb0, 0xd2, 0xc1, 0x03, 0x5f, 0xae, 0x0a, 0x95, 0x57,
0x78, 0x7c, 0x43, 0xbc, 0x87, 0x41, 0x23, 0x8e, 0xa4, 0x8e, 0x7f, 0x8a, 0xc0, 0x8d, 0xac, 0x45,
0xa2, 0xec, 0x75, 0x7a, 0x8c, 0xfa, 0x08, 0xb7, 0x27, 0xba, 0x44, 0x39, 0xa7, 0x62, 0x28, 0xde,
0x51, 0x09, 0xb6, 0xdf, 0x6d, 0x12, 0xed, 0xf9, 0x4d, 0xf1, 0x0a, 0xd6, 0x46, 0x38, 0x4b, 0x73,
0xb1, 0xdd, 0x84, 0xd6, 0x27, 0xca, 0xbf, 0xef, 0x8a, 0x5c, 0xc5, 0x6b, 0x58, 0x8f, 0xd4, 0x7c,
0x9e, 0x6a, 0x41, 0x11, 0xe6, 0x48, 0x79, 0x3b, 0x9e, 0xca, 0x89, 0x6f, 0xe0, 0xd6, 0x58, 0x65,
0xd9, 0x54, 0xc6, 0x57, 0x82, 0xee, 0x8b, 0x04, 0x4a, 0x7e, 0xb8, 0xa2, 0x73, 0xfa, 0x09, 0x6c,
0x7c, 0x2e, 0xb1, 0x90, 0x65, 0xdb, 0x84, 0xe6, 0xec, 0x37, 0x81, 0x65, 0xce, 0xfd, 0x04, 0x77,
0x4c, 0x39, 0x8d, 0x95, 0x88, 0x7d, 0xa7, 0x4a, 0x92, 0x89, 0xf4, 0xb8, 0xc7, 0x65, 0xe0, 0x05,
0x6c, 0x51, 0x89, 0x8c, 0x3c, 0xf0, 0x6a, 0xf7, 0xa1, 0x87, 0xbd, 0x3e, 0x63, 0xbf, 0xc1, 0xbd,
0xa8, 0x44, 0xa9, 0xf1, 0x6b, 0x29, 0xf3, 0x4a, 0xc6, 0x3a, 0x55, 0xb9, 0xa0, 0xbc, 0x15, 0x87,
0xc0, 0x47, 0xfd, 0x01, 0x4c, 0x3e, 0x83, 0xcd, 0x89, 0x96, 0xa5, 0x6e, 0x5a, 0xb7, 0xcb, 0x7f,
0x0e, 0xd6, 0x88, 0x16, 0x74, 0x59, 0x0e, 0x07, 0x35, 0xf7, 0x91, 0x39, 0xad, 0xb6, 0xc2, 0xb1,
0x2d, 0xe6, 0xfc, 0x80, 0xed, 0x48, 0xe5, 0x71, 0xb6, 0x48, 0x9c, 0x6f, 0x7d, 0xc2, 0x17, 0xbf,
0xe2, 0x11, 0xf7, 0xf8, 0x7f, 0x21, 0xcc, 0x1f, 0xc3, 0xdd, 0x31, 0xca, 0xc4, 0x66, 0x53, 0x53,
0x3d, 0x9d, 0xb8, 0x07, 0x7d, 0xb6, 0x3d, 0xca, 0xf5, 0x30, 0xd0, 0xf8, 0x05, 0xf6, 0x84, 0x78,
0xd3, 0xb7, 0xd7, 0xe9, 0xd9, 0x8d, 0xb6, 0x1d, 0xb3, 0x1a, 0x0e, 0x3b, 0x72, 0x9c, 0xfd, 0x70,
0xd4, 0x1f, 0x60, 0x2f, 0x89, 0x0f, 0x58, 0x55, 0x72, 0x86, 0x66, 0xf0, 0x79, 0x49, 0x38, 0xaa,
0xbf, 0x24, 0x3c, 0xd3, 0x5a, 0x12, 0x11, 0x40, 0x63, 0x9e, 0xc6, 0x57, 0xe2, 0x91, 0x1b, 0x7f,
0xda, 0xb6, 0x7b, 0xb7, 0xc3, 0xe1, 0xa2, 0x22, 0x80, 0x49, 0x91, 0xa5, 0xda, 0xac, 0x53, 0x82,
0xb4, 0x92, 0x0f, 0xb1, 0x1d, 0x86, 0x9c, 0xc3, 0xc0, 0xd4, 0xf7, 0x0e, 0x65, 0xa6, 0xdb, 0x4d,
0x6a, 0x8b, 0xfe, 0xf5, 0xbb, 0x9e, 0xf5, 0x59, 0xe7, 0x30, 0xb8, 0x28, 0x12, 0xa9, 0xe9, 0x96,
0x08, 0x66, 0x8b, 0x3e, 0xcc, 0xf5, 0x2c, 0xd8, 0x19, 0x6c, 0x5c, 0x32, 0xc7, 0x7a, 0x47, 0x2e,
0x7d, 0x4e, 0x97, 0xd7, 0x72, 0x46, 0xcf, 0xbe, 0x3f, 0x5d, 0xa6, 0x1a, 0xab, 0x6a, 0x98, 0xaa,
0xd0, 0xfc, 0x0a, 0x67, 0x2a, 0x5c, 0xea, 0xb0, 0x7e, 0x91, 0x42, 0xfb, 0xf5, 0x9a, 0xae, 0xd7,
0xda, 0xcb, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x47, 0x8e, 0x80, 0xe8, 0x06, 0x00, 0x00,
}

Просмотреть файл

@ -37,7 +37,7 @@ func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) {
return fileDescriptor_replicationdata_535db925ee5677f7, []int{0}
return fileDescriptor_replicationdata_1dfa1a45cfa5e522, []int{0}
}
func (m *Status) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Status.Unmarshal(m, b)
@ -111,10 +111,10 @@ func init() {
}
func init() {
proto.RegisterFile("replicationdata.proto", fileDescriptor_replicationdata_535db925ee5677f7)
proto.RegisterFile("replicationdata.proto", fileDescriptor_replicationdata_1dfa1a45cfa5e522)
}
var fileDescriptor_replicationdata_535db925ee5677f7 = []byte{
var fileDescriptor_replicationdata_1dfa1a45cfa5e522 = []byte{
// 264 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0xc1, 0x4a, 0x03, 0x31,
0x10, 0x86, 0xd9, 0x6a, 0xd7, 0x1a, 0xd1, 0x6a, 0xb4, 0x10, 0xbc, 0xb8, 0x78, 0x5a, 0x44, 0x36,

Просмотреть файл

@ -35,7 +35,7 @@ func (m *TableGroupSpec) Reset() { *m = TableGroupSpec{} }
func (m *TableGroupSpec) String() string { return proto.CompactTextString(m) }
func (*TableGroupSpec) ProtoMessage() {}
func (*TableGroupSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_tableacl_38276fd243b9c307, []int{0}
return fileDescriptor_tableacl_82b5f1376534b35e, []int{0}
}
func (m *TableGroupSpec) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TableGroupSpec.Unmarshal(m, b)
@ -101,7 +101,7 @@ func (m *Config) Reset() { *m = Config{} }
func (m *Config) String() string { return proto.CompactTextString(m) }
func (*Config) ProtoMessage() {}
func (*Config) Descriptor() ([]byte, []int) {
return fileDescriptor_tableacl_38276fd243b9c307, []int{1}
return fileDescriptor_tableacl_82b5f1376534b35e, []int{1}
}
func (m *Config) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Config.Unmarshal(m, b)
@ -133,9 +133,9 @@ func init() {
proto.RegisterType((*Config)(nil), "tableacl.Config")
}
func init() { proto.RegisterFile("tableacl.proto", fileDescriptor_tableacl_38276fd243b9c307) }
func init() { proto.RegisterFile("tableacl.proto", fileDescriptor_tableacl_82b5f1376534b35e) }
var fileDescriptor_tableacl_38276fd243b9c307 = []byte{
var fileDescriptor_tableacl_82b5f1376534b35e = []byte{
// 232 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xc1, 0x4b, 0xc3, 0x30,
0x14, 0xc6, 0x89, 0x9d, 0xd5, 0xbd, 0xc9, 0x0e, 0x41, 0x34, 0xc7, 0x32, 0x10, 0x7b, 0x6a, 0x40,

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -56,6 +56,8 @@ type TabletManagerClient interface {
ReloadSchema(ctx context.Context, in *tabletmanagerdata.ReloadSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReloadSchemaResponse, error)
PreflightSchema(ctx context.Context, in *tabletmanagerdata.PreflightSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PreflightSchemaResponse, error)
ApplySchema(ctx context.Context, in *tabletmanagerdata.ApplySchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ApplySchemaResponse, error)
LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error)
UnlockTables(ctx context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error)
ExecuteFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsDbaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error)
ExecuteFetchAsAllPrivs(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error)
ExecuteFetchAsApp(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAppRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error)
@ -70,6 +72,9 @@ type TabletManagerClient interface {
StopSlaveMinimum(ctx context.Context, in *tabletmanagerdata.StopSlaveMinimumRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopSlaveMinimumResponse, error)
// StartSlave starts the mysql replication
StartSlave(ctx context.Context, in *tabletmanagerdata.StartSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveResponse, error)
// StartSlave starts the mysql replication until and including
// the provided position
StartSlaveUntilAfter(ctx context.Context, in *tabletmanagerdata.StartSlaveUntilAfterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error)
// TabletExternallyReparented tells a tablet that its underlying MySQL is
// currently the master. It is only used in environments (tabletmanagerdata.such as Vitess+MoB)
// in which MySQL is reparented by some agent external to Vitess, and then
@ -265,6 +270,24 @@ func (c *tabletManagerClient) ApplySchema(ctx context.Context, in *tabletmanager
return out, nil
}
func (c *tabletManagerClient) LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) {
out := new(tabletmanagerdata.LockTablesResponse)
err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/LockTables", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *tabletManagerClient) UnlockTables(ctx context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error) {
out := new(tabletmanagerdata.UnlockTablesResponse)
err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/UnlockTables", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *tabletManagerClient) ExecuteFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsDbaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) {
out := new(tabletmanagerdata.ExecuteFetchAsDbaResponse)
err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ExecuteFetchAsDba", in, out, opts...)
@ -337,6 +360,15 @@ func (c *tabletManagerClient) StartSlave(ctx context.Context, in *tabletmanagerd
return out, nil
}
func (c *tabletManagerClient) StartSlaveUntilAfter(ctx context.Context, in *tabletmanagerdata.StartSlaveUntilAfterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error) {
out := new(tabletmanagerdata.StartSlaveUntilAfterResponse)
err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StartSlaveUntilAfter", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *tabletManagerClient) TabletExternallyReparented(ctx context.Context, in *tabletmanagerdata.TabletExternallyReparentedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.TabletExternallyReparentedResponse, error) {
out := new(tabletmanagerdata.TabletExternallyReparentedResponse)
err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/TabletExternallyReparented", in, out, opts...)
@ -567,6 +599,8 @@ type TabletManagerServer interface {
ReloadSchema(context.Context, *tabletmanagerdata.ReloadSchemaRequest) (*tabletmanagerdata.ReloadSchemaResponse, error)
PreflightSchema(context.Context, *tabletmanagerdata.PreflightSchemaRequest) (*tabletmanagerdata.PreflightSchemaResponse, error)
ApplySchema(context.Context, *tabletmanagerdata.ApplySchemaRequest) (*tabletmanagerdata.ApplySchemaResponse, error)
LockTables(context.Context, *tabletmanagerdata.LockTablesRequest) (*tabletmanagerdata.LockTablesResponse, error)
UnlockTables(context.Context, *tabletmanagerdata.UnlockTablesRequest) (*tabletmanagerdata.UnlockTablesResponse, error)
ExecuteFetchAsDba(context.Context, *tabletmanagerdata.ExecuteFetchAsDbaRequest) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error)
ExecuteFetchAsAllPrivs(context.Context, *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error)
ExecuteFetchAsApp(context.Context, *tabletmanagerdata.ExecuteFetchAsAppRequest) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error)
@ -581,6 +615,9 @@ type TabletManagerServer interface {
StopSlaveMinimum(context.Context, *tabletmanagerdata.StopSlaveMinimumRequest) (*tabletmanagerdata.StopSlaveMinimumResponse, error)
// StartSlave starts the mysql replication
StartSlave(context.Context, *tabletmanagerdata.StartSlaveRequest) (*tabletmanagerdata.StartSlaveResponse, error)
// StartSlave starts the mysql replication until and including
// the provided position
StartSlaveUntilAfter(context.Context, *tabletmanagerdata.StartSlaveUntilAfterRequest) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error)
// TabletExternallyReparented tells a tablet that its underlying MySQL is
// currently the master. It is only used in environments (tabletmanagerdata.such as Vitess+MoB)
// in which MySQL is reparented by some agent external to Vitess, and then
@ -898,6 +935,42 @@ func _TabletManager_ApplySchema_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
func _TabletManager_LockTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(tabletmanagerdata.LockTablesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TabletManagerServer).LockTables(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/tabletmanagerservice.TabletManager/LockTables",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TabletManagerServer).LockTables(ctx, req.(*tabletmanagerdata.LockTablesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TabletManager_UnlockTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(tabletmanagerdata.UnlockTablesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TabletManagerServer).UnlockTables(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/tabletmanagerservice.TabletManager/UnlockTables",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TabletManagerServer).UnlockTables(ctx, req.(*tabletmanagerdata.UnlockTablesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TabletManager_ExecuteFetchAsDba_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(tabletmanagerdata.ExecuteFetchAsDbaRequest)
if err := dec(in); err != nil {
@ -1042,6 +1115,24 @@ func _TabletManager_StartSlave_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
func _TabletManager_StartSlaveUntilAfter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(tabletmanagerdata.StartSlaveUntilAfterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TabletManagerServer).StartSlaveUntilAfter(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/tabletmanagerservice.TabletManager/StartSlaveUntilAfter",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TabletManagerServer).StartSlaveUntilAfter(ctx, req.(*tabletmanagerdata.StartSlaveUntilAfterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TabletManager_TabletExternallyReparented_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(tabletmanagerdata.TabletExternallyReparentedRequest)
if err := dec(in); err != nil {
@ -1432,6 +1523,14 @@ var _TabletManager_serviceDesc = grpc.ServiceDesc{
MethodName: "ApplySchema",
Handler: _TabletManager_ApplySchema_Handler,
},
{
MethodName: "LockTables",
Handler: _TabletManager_LockTables_Handler,
},
{
MethodName: "UnlockTables",
Handler: _TabletManager_UnlockTables_Handler,
},
{
MethodName: "ExecuteFetchAsDba",
Handler: _TabletManager_ExecuteFetchAsDba_Handler,
@ -1464,6 +1563,10 @@ var _TabletManager_serviceDesc = grpc.ServiceDesc{
MethodName: "StartSlave",
Handler: _TabletManager_StartSlave_Handler,
},
{
MethodName: "StartSlaveUntilAfter",
Handler: _TabletManager_StartSlaveUntilAfter_Handler,
},
{
MethodName: "TabletExternallyReparented",
Handler: _TabletManager_TabletExternallyReparented_Handler,
@ -1545,69 +1648,73 @@ var _TabletManager_serviceDesc = grpc.ServiceDesc{
}
func init() {
proto.RegisterFile("tabletmanagerservice.proto", fileDescriptor_tabletmanagerservice_a64e2f6154f58360)
proto.RegisterFile("tabletmanagerservice.proto", fileDescriptor_tabletmanagerservice_d0dfb5502bc9cb1c)
}
var fileDescriptor_tabletmanagerservice_a64e2f6154f58360 = []byte{
// 956 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xdf, 0x8f, 0x1b, 0x35,
0x10, 0xc7, 0x89, 0x04, 0x95, 0x30, 0x3f, 0x6b, 0x21, 0x8a, 0x0e, 0x09, 0x28, 0x6d, 0xf9, 0xd1,
0xa2, 0x4b, 0xaf, 0x47, 0x79, 0x4f, 0xaf, 0x77, 0xed, 0x21, 0x4e, 0x84, 0xa4, 0x70, 0x08, 0x24,
0x24, 0x5f, 0x32, 0xcd, 0x2e, 0xb7, 0x59, 0x1b, 0xdb, 0x1b, 0xdd, 0x3d, 0x21, 0x21, 0xf1, 0x84,
0xc4, 0x1b, 0xff, 0x2f, 0xf2, 0xee, 0xda, 0x19, 0x27, 0xb3, 0x4e, 0xf2, 0x76, 0xca, 0xf7, 0x33,
0x33, 0xf6, 0x78, 0x66, 0xec, 0x5b, 0xb6, 0x67, 0xc5, 0x45, 0x01, 0x76, 0x2e, 0x4a, 0x31, 0x03,
0x6d, 0x40, 0x2f, 0xf2, 0x09, 0xec, 0x2b, 0x2d, 0xad, 0xe4, 0xef, 0x51, 0xda, 0xde, 0xad, 0xe8,
0xd7, 0xa9, 0xb0, 0xa2, 0xc1, 0x1f, 0xfd, 0x77, 0x9b, 0xbd, 0xf5, 0xa2, 0xd6, 0xce, 0x1a, 0x8d,
0x9f, 0xb2, 0x57, 0x87, 0x79, 0x39, 0xe3, 0x1f, 0xed, 0xaf, 0xdb, 0x38, 0x61, 0x04, 0x7f, 0x54,
0x60, 0xec, 0xde, 0xc7, 0x9d, 0xba, 0x51, 0xb2, 0x34, 0xf0, 0xe9, 0x2b, 0xfc, 0x3b, 0xf6, 0xda,
0xb8, 0x00, 0x50, 0x9c, 0x62, 0x6b, 0xc5, 0x3b, 0xfb, 0xa4, 0x1b, 0x08, 0xde, 0x7e, 0x63, 0x6f,
0x1c, 0x5f, 0xc1, 0xa4, 0xb2, 0xf0, 0x5c, 0xca, 0x4b, 0x7e, 0x8f, 0x30, 0x41, 0xba, 0xf7, 0xfc,
0xd9, 0x26, 0x2c, 0xf8, 0xff, 0x99, 0xbd, 0xfe, 0x0c, 0xec, 0x78, 0x92, 0xc1, 0x5c, 0xf0, 0x3b,
0x84, 0x59, 0x50, 0xbd, 0xef, 0xbb, 0x69, 0x28, 0x78, 0x9e, 0xb1, 0xb7, 0x9f, 0x81, 0x1d, 0x82,
0x9e, 0xe7, 0xc6, 0xe4, 0xb2, 0x34, 0xfc, 0x0b, 0xda, 0x12, 0x21, 0x3e, 0xc6, 0x97, 0x5b, 0x90,
0x38, 0x45, 0x63, 0xb0, 0x23, 0x10, 0xd3, 0xef, 0xcb, 0xe2, 0x9a, 0x4c, 0x11, 0xd2, 0x53, 0x29,
0x8a, 0xb0, 0xe0, 0x5f, 0xb0, 0x37, 0x5b, 0xe1, 0x5c, 0xe7, 0x16, 0x78, 0xc2, 0xb2, 0x06, 0x7c,
0x84, 0xcf, 0x37, 0x72, 0x21, 0xc4, 0xaf, 0x8c, 0x1d, 0x65, 0xa2, 0x9c, 0xc1, 0x8b, 0x6b, 0x05,
0x9c, 0xca, 0xf0, 0x52, 0xf6, 0xee, 0xef, 0x6d, 0xa0, 0xf0, 0xfa, 0x47, 0xf0, 0x52, 0x83, 0xc9,
0xc6, 0x56, 0x74, 0xac, 0x1f, 0x03, 0xa9, 0xf5, 0xc7, 0x1c, 0x3e, 0xeb, 0x51, 0x55, 0x3e, 0x07,
0x51, 0xd8, 0xec, 0x28, 0x83, 0xc9, 0x25, 0x79, 0xd6, 0x31, 0x92, 0x3a, 0xeb, 0x55, 0x32, 0x04,
0x52, 0xec, 0xe6, 0xe9, 0xac, 0x94, 0x1a, 0x1a, 0xf9, 0x58, 0x6b, 0xa9, 0xf9, 0x03, 0xc2, 0xc3,
0x1a, 0xe5, 0xc3, 0x7d, 0xb5, 0x1d, 0x1c, 0x67, 0xaf, 0x90, 0x62, 0xda, 0xf6, 0x08, 0x9d, 0xbd,
0x25, 0x90, 0xce, 0x1e, 0xe6, 0x42, 0x88, 0xdf, 0xd9, 0x3b, 0x43, 0x0d, 0x2f, 0x8b, 0x7c, 0x96,
0xf9, 0x4e, 0xa4, 0x92, 0xb2, 0xc2, 0xf8, 0x40, 0xf7, 0xb7, 0x41, 0x71, 0xb3, 0x0c, 0x94, 0x2a,
0xae, 0xdb, 0x38, 0x54, 0x11, 0x21, 0x3d, 0xd5, 0x2c, 0x11, 0x86, 0x0f, 0xa8, 0x1d, 0x34, 0x27,
0x60, 0x27, 0xd9, 0xc0, 0x3c, 0xbd, 0x10, 0xe4, 0x01, 0xad, 0x51, 0xa9, 0x03, 0x22, 0xe0, 0x10,
0xf1, 0x4f, 0xf6, 0x7e, 0x2c, 0x0f, 0x8a, 0x62, 0xa8, 0xf3, 0x85, 0xe1, 0x0f, 0x37, 0x7a, 0xf2,
0xa8, 0x8f, 0x7d, 0xb0, 0x83, 0x45, 0xf7, 0x96, 0x07, 0x4a, 0x6d, 0xb1, 0xe5, 0x81, 0x52, 0xdb,
0x6f, 0xb9, 0x86, 0xa3, 0x89, 0x57, 0x88, 0x05, 0xb8, 0x36, 0xac, 0x0c, 0x3d, 0xf1, 0x96, 0x7a,
0x72, 0xe2, 0x61, 0x0c, 0xb7, 0xf3, 0x99, 0x30, 0x16, 0xf4, 0x50, 0x9a, 0xdc, 0xe6, 0xb2, 0x24,
0xdb, 0x39, 0x46, 0x52, 0xed, 0xbc, 0x4a, 0xe2, 0xdb, 0x67, 0x6c, 0xa5, 0xaa, 0x57, 0x41, 0xde,
0x3e, 0x41, 0x4d, 0xdd, 0x3e, 0x08, 0x0a, 0x9e, 0xe7, 0xec, 0xdd, 0xf0, 0xf3, 0x59, 0x5e, 0xe6,
0xf3, 0x6a, 0xce, 0xef, 0xa7, 0x6c, 0x5b, 0xc8, 0xc7, 0x79, 0xb0, 0x15, 0x8b, 0x07, 0xf8, 0xd8,
0x0a, 0x6d, 0x9b, 0x9d, 0xd0, 0x8b, 0xf4, 0x72, 0x6a, 0x80, 0x63, 0x2a, 0x38, 0xff, 0xa7, 0xc7,
0xf6, 0x9a, 0xe7, 0xca, 0xf1, 0x95, 0x05, 0x5d, 0x8a, 0xc2, 0xdd, 0x4f, 0x4a, 0x68, 0x28, 0x2d,
0x4c, 0xf9, 0xd7, 0x84, 0x9f, 0x6e, 0xdc, 0x47, 0x7f, 0xbc, 0xa3, 0x55, 0x58, 0xcd, 0x5f, 0x3d,
0x76, 0x6b, 0x15, 0x3c, 0x2e, 0x60, 0xe2, 0x96, 0x72, 0xb0, 0x85, 0xd3, 0x96, 0xf5, 0xeb, 0x78,
0xb4, 0x8b, 0xc9, 0xea, 0xb3, 0xc5, 0x25, 0xca, 0x74, 0x3e, 0x5b, 0x6a, 0x75, 0xd3, 0xb3, 0xa5,
0x85, 0x70, 0xe1, 0xfc, 0x34, 0x02, 0x55, 0xe4, 0x13, 0xe1, 0x8a, 0xd5, 0xb5, 0x21, 0x59, 0x38,
0xab, 0x50, 0xaa, 0x70, 0xd6, 0x59, 0x3c, 0xbd, 0xb0, 0x7a, 0x2e, 0x72, 0x7b, 0x22, 0x5d, 0xab,
0x90, 0xd3, 0x8b, 0x46, 0x53, 0xd3, 0xab, 0xcb, 0x02, 0xef, 0x77, 0x04, 0xc6, 0x3d, 0x4b, 0x02,
0x47, 0xee, 0x77, 0x15, 0x4a, 0xed, 0x77, 0x9d, 0xc5, 0x8d, 0x72, 0x5a, 0xe6, 0xb6, 0x99, 0x08,
0x64, 0xa3, 0x2c, 0xe5, 0x54, 0xa3, 0x60, 0x2a, 0x2a, 0xcd, 0xa1, 0x54, 0x55, 0x51, 0xbf, 0x4e,
0x9a, 0xda, 0xfd, 0x56, 0x56, 0xae, 0x88, 0xc8, 0xd2, 0xec, 0x60, 0x53, 0xa5, 0xd9, 0x69, 0x82,
0x4b, 0xd3, 0x2d, 0xae, 0x7b, 0xa6, 0x05, 0x35, 0x55, 0x9a, 0x08, 0xc2, 0x4f, 0x91, 0xa7, 0x30,
0x97, 0x16, 0xda, 0xec, 0x51, 0x03, 0x1d, 0x03, 0xa9, 0xa7, 0x48, 0xcc, 0x85, 0x10, 0x7f, 0xf7,
0xd8, 0x07, 0x43, 0x2d, 0x9d, 0x56, 0x47, 0x3f, 0xcf, 0xa0, 0x3c, 0x12, 0xd5, 0x2c, 0xb3, 0x3f,
0x2a, 0x4e, 0xe6, 0xa3, 0x03, 0xf6, 0xb1, 0x0f, 0x77, 0xb2, 0x89, 0xc6, 0x77, 0x2d, 0x0b, 0xd3,
0xd2, 0x53, 0x7a, 0x7c, 0xaf, 0x40, 0xc9, 0xf1, 0xbd, 0xc6, 0x46, 0xf7, 0x10, 0xf8, 0xa2, 0xbc,
0x43, 0xbf, 0xdb, 0xe3, 0x9c, 0xde, 0x4d, 0x43, 0xf8, 0x71, 0xe0, 0xe3, 0x8e, 0xc0, 0xb8, 0xe9,
0x0e, 0x53, 0x9e, 0x5a, 0x5d, 0xa0, 0x52, 0x8f, 0x03, 0x02, 0x0e, 0x11, 0xff, 0xed, 0xb1, 0x0f,
0xdd, 0x4d, 0x85, 0xfa, 0x6f, 0x50, 0x4e, 0xdd, 0xa8, 0x6b, 0x5e, 0x0b, 0x8f, 0x3b, 0x6e, 0xb6,
0x0e, 0xde, 0x2f, 0xe3, 0x9b, 0x5d, 0xcd, 0x70, 0xd9, 0xe2, 0x13, 0x27, 0xcb, 0x16, 0x03, 0xa9,
0xb2, 0x8d, 0xb9, 0x10, 0xe2, 0x07, 0x76, 0xe3, 0x89, 0x98, 0x5c, 0x56, 0x8a, 0x53, 0xff, 0x53,
0x37, 0x92, 0x77, 0x7b, 0x3b, 0x41, 0x78, 0x87, 0x0f, 0x7b, 0x5c, 0xb3, 0x9b, 0x2e, 0xbb, 0x52,
0xc3, 0x89, 0x96, 0xf3, 0xd6, 0x7b, 0xc7, 0xb0, 0x8b, 0xa9, 0xd4, 0xc1, 0x11, 0xf0, 0x32, 0xe6,
0x93, 0xc3, 0x5f, 0x0e, 0x16, 0xb9, 0x05, 0x63, 0xf6, 0x73, 0xd9, 0x6f, 0xfe, 0xea, 0xcf, 0x64,
0x7f, 0x61, 0xfb, 0xf5, 0x77, 0x8b, 0x3e, 0xf5, 0x95, 0xe3, 0xe2, 0x46, 0xad, 0x1d, 0xfe, 0x1f,
0x00, 0x00, 0xff, 0xff, 0x2e, 0xeb, 0x66, 0x65, 0x20, 0x11, 0x00, 0x00,
var fileDescriptor_tabletmanagerservice_d0dfb5502bc9cb1c = []byte{
// 1012 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x5b, 0x6f, 0x1b, 0x45,
0x14, 0xc7, 0xb1, 0x04, 0x95, 0x18, 0xae, 0x1d, 0x55, 0x14, 0x05, 0x89, 0x5b, 0x5a, 0x2e, 0x2d,
0x8a, 0x9b, 0x86, 0xf2, 0xee, 0xa6, 0x49, 0x1b, 0xd4, 0x08, 0x63, 0x37, 0x04, 0x81, 0x84, 0x34,
0xb1, 0x4f, 0xbc, 0x4b, 0xd6, 0x3b, 0xc3, 0xcc, 0xac, 0x95, 0x3c, 0x21, 0x21, 0xf1, 0x84, 0xc4,
0x67, 0xe3, 0x23, 0xa1, 0xbd, 0xcc, 0xec, 0x19, 0xfb, 0xec, 0xd8, 0x7e, 0x8b, 0xfc, 0xff, 0x9d,
0xcb, 0x9c, 0x39, 0x73, 0x66, 0xb2, 0x6c, 0xc7, 0x8a, 0x8b, 0x0c, 0xec, 0x5c, 0xe4, 0x62, 0x06,
0xda, 0x80, 0x5e, 0xa4, 0x13, 0xd8, 0x53, 0x5a, 0x5a, 0xc9, 0xef, 0x50, 0xda, 0xce, 0xdd, 0xe0,
0xd7, 0xa9, 0xb0, 0xa2, 0xc6, 0x1f, 0xff, 0xb7, 0xcb, 0xde, 0x79, 0x55, 0x69, 0xa7, 0xb5, 0xc6,
0x4f, 0xd8, 0xeb, 0xc3, 0x34, 0x9f, 0xf1, 0x8f, 0xf7, 0x56, 0x6d, 0x4a, 0x61, 0x04, 0x7f, 0x14,
0x60, 0xec, 0xce, 0x27, 0x9d, 0xba, 0x51, 0x32, 0x37, 0xf0, 0xf9, 0x6b, 0xfc, 0x25, 0x7b, 0x63,
0x9c, 0x01, 0x28, 0x4e, 0xb1, 0x95, 0xe2, 0x9c, 0x7d, 0xda, 0x0d, 0x78, 0x6f, 0xbf, 0xb1, 0xb7,
0x8e, 0xae, 0x61, 0x52, 0x58, 0x78, 0x21, 0xe5, 0x15, 0xbf, 0x4f, 0x98, 0x20, 0xdd, 0x79, 0xfe,
0x62, 0x1d, 0xe6, 0xfd, 0xff, 0xcc, 0xde, 0x7c, 0x0e, 0x76, 0x3c, 0x49, 0x60, 0x2e, 0xf8, 0x2e,
0x61, 0xe6, 0x55, 0xe7, 0xfb, 0x5e, 0x1c, 0xf2, 0x9e, 0x67, 0xec, 0xdd, 0xe7, 0x60, 0x87, 0xa0,
0xe7, 0xa9, 0x31, 0xa9, 0xcc, 0x0d, 0xff, 0x8a, 0xb6, 0x44, 0x88, 0x8b, 0xf1, 0xf5, 0x06, 0x24,
0x2e, 0xd1, 0x18, 0xec, 0x08, 0xc4, 0xf4, 0x87, 0x3c, 0xbb, 0x21, 0x4b, 0x84, 0xf4, 0x58, 0x89,
0x02, 0xcc, 0xfb, 0x17, 0xec, 0xed, 0x46, 0x38, 0xd7, 0xa9, 0x05, 0x1e, 0xb1, 0xac, 0x00, 0x17,
0xe1, 0xcb, 0xb5, 0x9c, 0x0f, 0xf1, 0x2b, 0x63, 0x87, 0x89, 0xc8, 0x67, 0xf0, 0xea, 0x46, 0x01,
0xa7, 0x2a, 0xdc, 0xca, 0xce, 0xfd, 0xfd, 0x35, 0x14, 0xce, 0x7f, 0x04, 0x97, 0x1a, 0x4c, 0x32,
0xb6, 0xa2, 0x23, 0x7f, 0x0c, 0xc4, 0xf2, 0x0f, 0x39, 0xbc, 0xd7, 0xa3, 0x22, 0x7f, 0x01, 0x22,
0xb3, 0xc9, 0x61, 0x02, 0x93, 0x2b, 0x72, 0xaf, 0x43, 0x24, 0xb6, 0xd7, 0xcb, 0xa4, 0x0f, 0xa4,
0xd8, 0xed, 0x93, 0x59, 0x2e, 0x35, 0xd4, 0xf2, 0x91, 0xd6, 0x52, 0xf3, 0x87, 0x84, 0x87, 0x15,
0xca, 0x85, 0xfb, 0x66, 0x33, 0x38, 0xac, 0x5e, 0x26, 0xc5, 0xb4, 0x39, 0x23, 0x74, 0xf5, 0x5a,
0x20, 0x5e, 0x3d, 0xcc, 0xf9, 0x10, 0xbf, 0xb3, 0xf7, 0x86, 0x1a, 0x2e, 0xb3, 0x74, 0x96, 0xb8,
0x93, 0x48, 0x15, 0x65, 0x89, 0x71, 0x81, 0x1e, 0x6c, 0x82, 0xe2, 0xc3, 0x32, 0x50, 0x2a, 0xbb,
0x69, 0xe2, 0x50, 0x4d, 0x84, 0xf4, 0xd8, 0x61, 0x09, 0x30, 0xdc, 0xc9, 0x2f, 0xe5, 0xe4, 0xaa,
0x9a, 0xae, 0x86, 0xec, 0xe4, 0x56, 0x8e, 0x75, 0x32, 0xa6, 0xf0, 0x5e, 0x9c, 0xe5, 0x59, 0xeb,
0x9e, 0x4a, 0x0b, 0x03, 0xb1, 0xbd, 0x08, 0x39, 0xdc, 0x60, 0xcd, 0xa0, 0x3c, 0x06, 0x3b, 0x49,
0x06, 0xe6, 0xd9, 0x85, 0x20, 0x1b, 0x6c, 0x85, 0x8a, 0x35, 0x18, 0x01, 0xfb, 0x88, 0x7f, 0xb2,
0x0f, 0x42, 0x79, 0x90, 0x65, 0x43, 0x9d, 0x2e, 0x0c, 0x7f, 0xb4, 0xd6, 0x93, 0x43, 0x5d, 0xec,
0xfd, 0x2d, 0x2c, 0xba, 0x97, 0x3c, 0x50, 0x6a, 0x83, 0x25, 0x0f, 0x94, 0xda, 0x7c, 0xc9, 0x15,
0x1c, 0x4c, 0xec, 0x4c, 0x2c, 0xa0, 0x1c, 0x23, 0x85, 0xa1, 0x27, 0x76, 0xab, 0x47, 0x27, 0x36,
0xc6, 0xf0, 0x38, 0x3a, 0x15, 0xc6, 0x82, 0x1e, 0x4a, 0x93, 0xda, 0x54, 0xe6, 0xe4, 0x38, 0x0a,
0x91, 0xd8, 0x38, 0x5a, 0x26, 0xf1, 0xed, 0x39, 0xb6, 0x52, 0x55, 0x59, 0x90, 0xb7, 0xa7, 0x57,
0x63, 0xb7, 0x27, 0x82, 0xbc, 0xe7, 0x39, 0x7b, 0xdf, 0xff, 0x7c, 0x9a, 0xe6, 0xe9, 0xbc, 0x98,
0xf3, 0x07, 0x31, 0xdb, 0x06, 0x72, 0x71, 0x1e, 0x6e, 0xc4, 0xe2, 0x63, 0x3b, 0xb6, 0x42, 0xdb,
0x7a, 0x25, 0x74, 0x92, 0x4e, 0x8e, 0x1d, 0x5b, 0x4c, 0x79, 0xe7, 0x37, 0xec, 0x4e, 0xfb, 0xfb,
0x59, 0x6e, 0xd3, 0x6c, 0x70, 0x69, 0x41, 0xf3, 0xbd, 0xa8, 0x83, 0x16, 0x74, 0x01, 0xfb, 0x1b,
0xf3, 0x3e, 0xf4, 0x3f, 0x3d, 0xb6, 0x53, 0xbf, 0xf4, 0x8e, 0xae, 0x2d, 0xe8, 0x5c, 0x64, 0xe5,
0xd5, 0xae, 0x84, 0x86, 0xdc, 0xc2, 0x94, 0x7f, 0x4b, 0x78, 0xec, 0xc6, 0x5d, 0x1e, 0x4f, 0xb6,
0xb4, 0xf2, 0xd9, 0xfc, 0xd5, 0x63, 0x77, 0x97, 0xc1, 0xa3, 0x0c, 0x26, 0x65, 0x2a, 0xfb, 0x1b,
0x38, 0x6d, 0x58, 0x97, 0xc7, 0xe3, 0x6d, 0x4c, 0x96, 0x5f, 0x7c, 0x65, 0xc9, 0x4c, 0xe7, 0x8b,
0xaf, 0x52, 0xd7, 0xbd, 0xf8, 0x1a, 0x08, 0xf7, 0xec, 0x4f, 0x23, 0x50, 0x59, 0x3a, 0x11, 0xe5,
0x39, 0x29, 0x27, 0x00, 0xd9, 0xb3, 0xcb, 0x50, 0xac, 0x67, 0x57, 0x59, 0x3c, 0x38, 0xb1, 0x7a,
0x2e, 0x52, 0x7b, 0x2c, 0xcb, 0x53, 0x4a, 0x0e, 0x4e, 0x1a, 0x8d, 0x0d, 0xce, 0x2e, 0x0b, 0xbc,
0xde, 0x11, 0x98, 0xf2, 0x45, 0xe7, 0x39, 0x72, 0xbd, 0xcb, 0x50, 0x6c, 0xbd, 0xab, 0x2c, 0x3e,
0xa3, 0x27, 0x79, 0x6a, 0xeb, 0x61, 0x44, 0x9e, 0xd1, 0x56, 0x8e, 0x9d, 0x51, 0x4c, 0x05, 0xad,
0x39, 0x94, 0xaa, 0xc8, 0xaa, 0x87, 0x5d, 0xdd, 0xbb, 0xdf, 0xcb, 0xa2, 0x6c, 0x22, 0xb2, 0x35,
0x3b, 0xd8, 0x58, 0x6b, 0x76, 0x9a, 0xe0, 0xd6, 0x2c, 0x93, 0xeb, 0x1e, 0xa7, 0x5e, 0x8d, 0xb5,
0x26, 0x82, 0xf0, 0xcb, 0xe1, 0x19, 0xcc, 0xa5, 0x85, 0xa6, 0x7a, 0xd4, 0x5d, 0x82, 0x81, 0xd8,
0xcb, 0x21, 0xe4, 0x7c, 0x88, 0xbf, 0x7b, 0xec, 0xc3, 0xa1, 0x96, 0xa5, 0x56, 0x45, 0x3f, 0x4f,
0x20, 0x3f, 0x14, 0xc5, 0x2c, 0xb1, 0x67, 0x8a, 0x93, 0xf5, 0xe8, 0x80, 0x5d, 0xec, 0x83, 0xad,
0x6c, 0x82, 0x9b, 0xa3, 0x92, 0x85, 0x69, 0xe8, 0x29, 0x7d, 0x73, 0x2c, 0x41, 0xd1, 0x9b, 0x63,
0x85, 0x0d, 0xae, 0x40, 0x70, 0x4d, 0xb9, 0x4b, 0xff, 0xcb, 0x13, 0xd6, 0xf4, 0x5e, 0x1c, 0xc2,
0xef, 0x12, 0x17, 0x77, 0x04, 0xa6, 0x9c, 0xf3, 0x30, 0xe5, 0xb1, 0xec, 0x3c, 0x15, 0x7b, 0x97,
0x10, 0xb0, 0x8f, 0xf8, 0x6f, 0x8f, 0x7d, 0x54, 0x5e, 0x92, 0xe8, 0xfc, 0x0d, 0xf2, 0x69, 0x39,
0xea, 0xea, 0x87, 0xca, 0x93, 0x8e, 0x4b, 0xb5, 0x83, 0x77, 0x69, 0x7c, 0xb7, 0xad, 0x19, 0x6e,
0x5b, 0xbc, 0xe3, 0x64, 0xdb, 0x62, 0x20, 0xd6, 0xb6, 0x21, 0xe7, 0x43, 0xfc, 0xc8, 0x6e, 0x3d,
0x15, 0x93, 0xab, 0x42, 0x71, 0xea, 0x73, 0x44, 0x2d, 0x39, 0xb7, 0x9f, 0x45, 0x08, 0xe7, 0xf0,
0x51, 0x8f, 0x6b, 0x76, 0xbb, 0xac, 0xae, 0xd4, 0x70, 0xac, 0xe5, 0xbc, 0xf1, 0xde, 0x31, 0xec,
0x42, 0x2a, 0xb6, 0x71, 0x04, 0xdc, 0xc6, 0x7c, 0x7a, 0xf0, 0xcb, 0xfe, 0x22, 0xb5, 0x60, 0xcc,
0x5e, 0x2a, 0xfb, 0xf5, 0x5f, 0xfd, 0x99, 0xec, 0x2f, 0x6c, 0xbf, 0xfa, 0xe4, 0xd3, 0xa7, 0x3e,
0x10, 0x5d, 0xdc, 0xaa, 0xb4, 0x83, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x46, 0xe6, 0xae,
0x5b, 0x12, 0x00, 0x00,
}

Просмотреть файл

@ -29,7 +29,7 @@ func (m *MaxRatesRequest) Reset() { *m = MaxRatesRequest{} }
func (m *MaxRatesRequest) String() string { return proto.CompactTextString(m) }
func (*MaxRatesRequest) ProtoMessage() {}
func (*MaxRatesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{0}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{0}
}
func (m *MaxRatesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MaxRatesRequest.Unmarshal(m, b)
@ -63,7 +63,7 @@ func (m *MaxRatesResponse) Reset() { *m = MaxRatesResponse{} }
func (m *MaxRatesResponse) String() string { return proto.CompactTextString(m) }
func (*MaxRatesResponse) ProtoMessage() {}
func (*MaxRatesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{1}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{1}
}
func (m *MaxRatesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MaxRatesResponse.Unmarshal(m, b)
@ -102,7 +102,7 @@ func (m *SetMaxRateRequest) Reset() { *m = SetMaxRateRequest{} }
func (m *SetMaxRateRequest) String() string { return proto.CompactTextString(m) }
func (*SetMaxRateRequest) ProtoMessage() {}
func (*SetMaxRateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{2}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{2}
}
func (m *SetMaxRateRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SetMaxRateRequest.Unmarshal(m, b)
@ -142,7 +142,7 @@ func (m *SetMaxRateResponse) Reset() { *m = SetMaxRateResponse{} }
func (m *SetMaxRateResponse) String() string { return proto.CompactTextString(m) }
func (*SetMaxRateResponse) ProtoMessage() {}
func (*SetMaxRateResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{3}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{3}
}
func (m *SetMaxRateResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SetMaxRateResponse.Unmarshal(m, b)
@ -259,7 +259,7 @@ func (m *Configuration) Reset() { *m = Configuration{} }
func (m *Configuration) String() string { return proto.CompactTextString(m) }
func (*Configuration) ProtoMessage() {}
func (*Configuration) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{4}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{4}
}
func (m *Configuration) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Configuration.Unmarshal(m, b)
@ -391,7 +391,7 @@ func (m *GetConfigurationRequest) Reset() { *m = GetConfigurationRequest
func (m *GetConfigurationRequest) String() string { return proto.CompactTextString(m) }
func (*GetConfigurationRequest) ProtoMessage() {}
func (*GetConfigurationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{5}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{5}
}
func (m *GetConfigurationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetConfigurationRequest.Unmarshal(m, b)
@ -432,7 +432,7 @@ func (m *GetConfigurationResponse) Reset() { *m = GetConfigurationRespon
func (m *GetConfigurationResponse) String() string { return proto.CompactTextString(m) }
func (*GetConfigurationResponse) ProtoMessage() {}
func (*GetConfigurationResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{6}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{6}
}
func (m *GetConfigurationResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetConfigurationResponse.Unmarshal(m, b)
@ -478,7 +478,7 @@ func (m *UpdateConfigurationRequest) Reset() { *m = UpdateConfigurationR
func (m *UpdateConfigurationRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateConfigurationRequest) ProtoMessage() {}
func (*UpdateConfigurationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{7}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{7}
}
func (m *UpdateConfigurationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateConfigurationRequest.Unmarshal(m, b)
@ -532,7 +532,7 @@ func (m *UpdateConfigurationResponse) Reset() { *m = UpdateConfiguration
func (m *UpdateConfigurationResponse) String() string { return proto.CompactTextString(m) }
func (*UpdateConfigurationResponse) ProtoMessage() {}
func (*UpdateConfigurationResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{8}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{8}
}
func (m *UpdateConfigurationResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateConfigurationResponse.Unmarshal(m, b)
@ -573,7 +573,7 @@ func (m *ResetConfigurationRequest) Reset() { *m = ResetConfigurationReq
func (m *ResetConfigurationRequest) String() string { return proto.CompactTextString(m) }
func (*ResetConfigurationRequest) ProtoMessage() {}
func (*ResetConfigurationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{9}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{9}
}
func (m *ResetConfigurationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResetConfigurationRequest.Unmarshal(m, b)
@ -613,7 +613,7 @@ func (m *ResetConfigurationResponse) Reset() { *m = ResetConfigurationRe
func (m *ResetConfigurationResponse) String() string { return proto.CompactTextString(m) }
func (*ResetConfigurationResponse) ProtoMessage() {}
func (*ResetConfigurationResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_throttlerdata_7d084fd3a7704c85, []int{10}
return fileDescriptor_throttlerdata_d10a8d735853021e, []int{10}
}
func (m *ResetConfigurationResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResetConfigurationResponse.Unmarshal(m, b)
@ -656,9 +656,9 @@ func init() {
proto.RegisterType((*ResetConfigurationResponse)(nil), "throttlerdata.ResetConfigurationResponse")
}
func init() { proto.RegisterFile("throttlerdata.proto", fileDescriptor_throttlerdata_7d084fd3a7704c85) }
func init() { proto.RegisterFile("throttlerdata.proto", fileDescriptor_throttlerdata_d10a8d735853021e) }
var fileDescriptor_throttlerdata_7d084fd3a7704c85 = []byte{
var fileDescriptor_throttlerdata_d10a8d735853021e = []byte{
// 734 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5f, 0x4f, 0x03, 0x45,
0x10, 0xcf, 0x51, 0x8a, 0x30, 0xa5, 0x40, 0x17, 0x84, 0xa3, 0x18, 0x53, 0x2f, 0x31, 0x36, 0x8d,

Просмотреть файл

@ -255,10 +255,10 @@ var _Throttler_serviceDesc = grpc.ServiceDesc{
}
func init() {
proto.RegisterFile("throttlerservice.proto", fileDescriptor_throttlerservice_151ce3faa7ac0b15)
proto.RegisterFile("throttlerservice.proto", fileDescriptor_throttlerservice_8b1d9f2a5de89835)
}
var fileDescriptor_throttlerservice_151ce3faa7ac0b15 = []byte{
var fileDescriptor_throttlerservice_8b1d9f2a5de89835 = []byte{
// 241 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x3d, 0x4b, 0xc4, 0x40,
0x10, 0x86, 0x05, 0x41, 0x74, 0xaa, 0x63, 0x0f, 0x2c, 0xae, 0xf0, 0xab, 0x50, 0x4f, 0x30, 0x0b,

Просмотреть файл

@ -48,7 +48,7 @@ func (x KeyspaceIdType) String() string {
return proto.EnumName(KeyspaceIdType_name, int32(x))
}
func (KeyspaceIdType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{0}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{0}
}
// TabletType represents the type of a given tablet.
@ -117,7 +117,7 @@ func (x TabletType) String() string {
return proto.EnumName(TabletType_name, int32(x))
}
func (TabletType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{1}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{1}
}
// KeyRange describes a range of sharding keys, when range-based
@ -134,7 +134,7 @@ func (m *KeyRange) Reset() { *m = KeyRange{} }
func (m *KeyRange) String() string { return proto.CompactTextString(m) }
func (*KeyRange) ProtoMessage() {}
func (*KeyRange) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{0}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{0}
}
func (m *KeyRange) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeyRange.Unmarshal(m, b)
@ -184,7 +184,7 @@ func (m *TabletAlias) Reset() { *m = TabletAlias{} }
func (m *TabletAlias) String() string { return proto.CompactTextString(m) }
func (*TabletAlias) ProtoMessage() {}
func (*TabletAlias) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{1}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{1}
}
func (m *TabletAlias) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TabletAlias.Unmarshal(m, b)
@ -260,7 +260,7 @@ func (m *Tablet) Reset() { *m = Tablet{} }
func (m *Tablet) String() string { return proto.CompactTextString(m) }
func (*Tablet) ProtoMessage() {}
func (*Tablet) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{2}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{2}
}
func (m *Tablet) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Tablet.Unmarshal(m, b)
@ -394,7 +394,7 @@ func (m *Shard) Reset() { *m = Shard{} }
func (m *Shard) String() string { return proto.CompactTextString(m) }
func (*Shard) ProtoMessage() {}
func (*Shard) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{3}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{3}
}
func (m *Shard) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Shard.Unmarshal(m, b)
@ -469,7 +469,7 @@ func (m *Shard_ServedType) Reset() { *m = Shard_ServedType{} }
func (m *Shard_ServedType) String() string { return proto.CompactTextString(m) }
func (*Shard_ServedType) ProtoMessage() {}
func (*Shard_ServedType) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{3, 0}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{3, 0}
}
func (m *Shard_ServedType) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Shard_ServedType.Unmarshal(m, b)
@ -526,7 +526,7 @@ func (m *Shard_SourceShard) Reset() { *m = Shard_SourceShard{} }
func (m *Shard_SourceShard) String() string { return proto.CompactTextString(m) }
func (*Shard_SourceShard) ProtoMessage() {}
func (*Shard_SourceShard) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{3, 1}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{3, 1}
}
func (m *Shard_SourceShard) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Shard_SourceShard.Unmarshal(m, b)
@ -601,7 +601,7 @@ func (m *Shard_TabletControl) Reset() { *m = Shard_TabletControl{} }
func (m *Shard_TabletControl) String() string { return proto.CompactTextString(m) }
func (*Shard_TabletControl) ProtoMessage() {}
func (*Shard_TabletControl) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{3, 2}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{3, 2}
}
func (m *Shard_TabletControl) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Shard_TabletControl.Unmarshal(m, b)
@ -676,7 +676,7 @@ func (m *Keyspace) Reset() { *m = Keyspace{} }
func (m *Keyspace) String() string { return proto.CompactTextString(m) }
func (*Keyspace) ProtoMessage() {}
func (*Keyspace) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{4}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{4}
}
func (m *Keyspace) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Keyspace.Unmarshal(m, b)
@ -735,7 +735,7 @@ func (m *Keyspace_ServedFrom) Reset() { *m = Keyspace_ServedFrom{} }
func (m *Keyspace_ServedFrom) String() string { return proto.CompactTextString(m) }
func (*Keyspace_ServedFrom) ProtoMessage() {}
func (*Keyspace_ServedFrom) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{4, 0}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{4, 0}
}
func (m *Keyspace_ServedFrom) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Keyspace_ServedFrom.Unmarshal(m, b)
@ -791,7 +791,7 @@ func (m *ShardReplication) Reset() { *m = ShardReplication{} }
func (m *ShardReplication) String() string { return proto.CompactTextString(m) }
func (*ShardReplication) ProtoMessage() {}
func (*ShardReplication) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{5}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{5}
}
func (m *ShardReplication) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShardReplication.Unmarshal(m, b)
@ -830,7 +830,7 @@ func (m *ShardReplication_Node) Reset() { *m = ShardReplication_Node{} }
func (m *ShardReplication_Node) String() string { return proto.CompactTextString(m) }
func (*ShardReplication_Node) ProtoMessage() {}
func (*ShardReplication_Node) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{5, 0}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{5, 0}
}
func (m *ShardReplication_Node) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShardReplication_Node.Unmarshal(m, b)
@ -871,7 +871,7 @@ func (m *ShardReference) Reset() { *m = ShardReference{} }
func (m *ShardReference) String() string { return proto.CompactTextString(m) }
func (*ShardReference) ProtoMessage() {}
func (*ShardReference) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{6}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{6}
}
func (m *ShardReference) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShardReference.Unmarshal(m, b)
@ -922,7 +922,7 @@ func (m *SrvKeyspace) Reset() { *m = SrvKeyspace{} }
func (m *SrvKeyspace) String() string { return proto.CompactTextString(m) }
func (*SrvKeyspace) ProtoMessage() {}
func (*SrvKeyspace) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{7}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{7}
}
func (m *SrvKeyspace) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SrvKeyspace.Unmarshal(m, b)
@ -984,7 +984,7 @@ func (m *SrvKeyspace_KeyspacePartition) Reset() { *m = SrvKeyspace_Keysp
func (m *SrvKeyspace_KeyspacePartition) String() string { return proto.CompactTextString(m) }
func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {}
func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{7, 0}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{7, 0}
}
func (m *SrvKeyspace_KeyspacePartition) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Unmarshal(m, b)
@ -1034,7 +1034,7 @@ func (m *SrvKeyspace_ServedFrom) Reset() { *m = SrvKeyspace_ServedFrom{}
func (m *SrvKeyspace_ServedFrom) String() string { return proto.CompactTextString(m) }
func (*SrvKeyspace_ServedFrom) ProtoMessage() {}
func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{7, 1}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{7, 1}
}
func (m *SrvKeyspace_ServedFrom) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SrvKeyspace_ServedFrom.Unmarshal(m, b)
@ -1092,7 +1092,7 @@ func (m *CellInfo) Reset() { *m = CellInfo{} }
func (m *CellInfo) String() string { return proto.CompactTextString(m) }
func (*CellInfo) ProtoMessage() {}
func (*CellInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_topodata_23985cc74c86747c, []int{8}
return fileDescriptor_topodata_693bf5422a92a7f4, []int{8}
}
func (m *CellInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CellInfo.Unmarshal(m, b)
@ -1156,9 +1156,9 @@ func init() {
proto.RegisterEnum("topodata.TabletType", TabletType_name, TabletType_value)
}
func init() { proto.RegisterFile("topodata.proto", fileDescriptor_topodata_23985cc74c86747c) }
func init() { proto.RegisterFile("topodata.proto", fileDescriptor_topodata_693bf5422a92a7f4) }
var fileDescriptor_topodata_23985cc74c86747c = []byte{
var fileDescriptor_topodata_693bf5422a92a7f4 = []byte{
// 1162 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x6f, 0x8f, 0xda, 0x46,
0x13, 0x7f, 0x0c, 0x86, 0x33, 0x63, 0x8e, 0x38, 0xfb, 0x24, 0x95, 0xe5, 0x2a, 0x2a, 0x42, 0x8a,

Просмотреть файл

@ -34,7 +34,7 @@ func (m *Keyspace) Reset() { *m = Keyspace{} }
func (m *Keyspace) String() string { return proto.CompactTextString(m) }
func (*Keyspace) ProtoMessage() {}
func (*Keyspace) Descriptor() ([]byte, []int) {
return fileDescriptor_vschema_5ecfaf46981fe072, []int{0}
return fileDescriptor_vschema_58a865bec489dd60, []int{0}
}
func (m *Keyspace) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Keyspace.Unmarshal(m, b)
@ -99,7 +99,7 @@ func (m *Vindex) Reset() { *m = Vindex{} }
func (m *Vindex) String() string { return proto.CompactTextString(m) }
func (*Vindex) ProtoMessage() {}
func (*Vindex) Descriptor() ([]byte, []int) {
return fileDescriptor_vschema_5ecfaf46981fe072, []int{1}
return fileDescriptor_vschema_58a865bec489dd60, []int{1}
}
func (m *Vindex) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Vindex.Unmarshal(m, b)
@ -170,7 +170,7 @@ func (m *Table) Reset() { *m = Table{} }
func (m *Table) String() string { return proto.CompactTextString(m) }
func (*Table) ProtoMessage() {}
func (*Table) Descriptor() ([]byte, []int) {
return fileDescriptor_vschema_5ecfaf46981fe072, []int{2}
return fileDescriptor_vschema_58a865bec489dd60, []int{2}
}
func (m *Table) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Table.Unmarshal(m, b)
@ -249,7 +249,7 @@ func (m *ColumnVindex) Reset() { *m = ColumnVindex{} }
func (m *ColumnVindex) String() string { return proto.CompactTextString(m) }
func (*ColumnVindex) ProtoMessage() {}
func (*ColumnVindex) Descriptor() ([]byte, []int) {
return fileDescriptor_vschema_5ecfaf46981fe072, []int{3}
return fileDescriptor_vschema_58a865bec489dd60, []int{3}
}
func (m *ColumnVindex) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ColumnVindex.Unmarshal(m, b)
@ -304,7 +304,7 @@ func (m *AutoIncrement) Reset() { *m = AutoIncrement{} }
func (m *AutoIncrement) String() string { return proto.CompactTextString(m) }
func (*AutoIncrement) ProtoMessage() {}
func (*AutoIncrement) Descriptor() ([]byte, []int) {
return fileDescriptor_vschema_5ecfaf46981fe072, []int{4}
return fileDescriptor_vschema_58a865bec489dd60, []int{4}
}
func (m *AutoIncrement) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AutoIncrement.Unmarshal(m, b)
@ -351,7 +351,7 @@ func (m *Column) Reset() { *m = Column{} }
func (m *Column) String() string { return proto.CompactTextString(m) }
func (*Column) ProtoMessage() {}
func (*Column) Descriptor() ([]byte, []int) {
return fileDescriptor_vschema_5ecfaf46981fe072, []int{5}
return fileDescriptor_vschema_58a865bec489dd60, []int{5}
}
func (m *Column) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Column.Unmarshal(m, b)
@ -398,7 +398,7 @@ func (m *SrvVSchema) Reset() { *m = SrvVSchema{} }
func (m *SrvVSchema) String() string { return proto.CompactTextString(m) }
func (*SrvVSchema) ProtoMessage() {}
func (*SrvVSchema) Descriptor() ([]byte, []int) {
return fileDescriptor_vschema_5ecfaf46981fe072, []int{6}
return fileDescriptor_vschema_58a865bec489dd60, []int{6}
}
func (m *SrvVSchema) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SrvVSchema.Unmarshal(m, b)
@ -439,9 +439,9 @@ func init() {
proto.RegisterMapType((map[string]*Keyspace)(nil), "vschema.SrvVSchema.KeyspacesEntry")
}
func init() { proto.RegisterFile("vschema.proto", fileDescriptor_vschema_5ecfaf46981fe072) }
func init() { proto.RegisterFile("vschema.proto", fileDescriptor_vschema_58a865bec489dd60) }
var fileDescriptor_vschema_5ecfaf46981fe072 = []byte{
var fileDescriptor_vschema_58a865bec489dd60 = []byte{
// 562 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0x41, 0x6f, 0xd3, 0x4c,
0x10, 0x95, 0x93, 0xc6, 0x4d, 0xc6, 0x5f, 0xd2, 0x8f, 0x55, 0x29, 0xc6, 0x08, 0x35, 0xb2, 0x0a,

Просмотреть файл

@ -33,7 +33,7 @@ func (m *ExecuteVtctlCommandRequest) Reset() { *m = ExecuteVtctlCommandR
func (m *ExecuteVtctlCommandRequest) String() string { return proto.CompactTextString(m) }
func (*ExecuteVtctlCommandRequest) ProtoMessage() {}
func (*ExecuteVtctlCommandRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_vtctldata_1ba3ba7c409e0e97, []int{0}
return fileDescriptor_vtctldata_116d6c451a061272, []int{0}
}
func (m *ExecuteVtctlCommandRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExecuteVtctlCommandRequest.Unmarshal(m, b)
@ -79,7 +79,7 @@ func (m *ExecuteVtctlCommandResponse) Reset() { *m = ExecuteVtctlCommand
func (m *ExecuteVtctlCommandResponse) String() string { return proto.CompactTextString(m) }
func (*ExecuteVtctlCommandResponse) ProtoMessage() {}
func (*ExecuteVtctlCommandResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_vtctldata_1ba3ba7c409e0e97, []int{1}
return fileDescriptor_vtctldata_116d6c451a061272, []int{1}
}
func (m *ExecuteVtctlCommandResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExecuteVtctlCommandResponse.Unmarshal(m, b)
@ -111,9 +111,9 @@ func init() {
proto.RegisterType((*ExecuteVtctlCommandResponse)(nil), "vtctldata.ExecuteVtctlCommandResponse")
}
func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_vtctldata_1ba3ba7c409e0e97) }
func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_vtctldata_116d6c451a061272) }
var fileDescriptor_vtctldata_1ba3ba7c409e0e97 = []byte{
var fileDescriptor_vtctldata_116d6c451a061272 = []byte{
// 200 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0xd1, 0x4a, 0x87, 0x30,
0x14, 0x06, 0x70, 0xd6, 0xbf, 0x82, 0xff, 0x42, 0x83, 0x5d, 0x89, 0xdd, 0x88, 0x54, 0xec, 0xca,

Просмотреть файл

@ -123,9 +123,9 @@ var _Vtctl_serviceDesc = grpc.ServiceDesc{
Metadata: "vtctlservice.proto",
}
func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor_vtctlservice_a3582c3eb674ce30) }
func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor_vtctlservice_af4114a311e29c50) }
var fileDescriptor_vtctlservice_a3582c3eb674ce30 = []byte{
var fileDescriptor_vtctlservice_af4114a311e29c50 = []byte{
// 146 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e,
0xc9, 0x29, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2,

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше