Mirror of https://github.com/github/vitess-gh.git
Merge branch 'master' into tests
Commit 4f3bfbfd7b
Makefile: 10 changed lines
@@ -104,11 +104,12 @@ proto: install_protoc-gen-go
find go/vt/proto -name "*.pb.go" | xargs sed --in-place -r -e 's,import ([a-z0-9_]+) ".",import \1 "github.com/youtube/vitess/go/vt/proto/\1",g'
find proto -maxdepth 1 -name '*.proto' -print | sed 's/^proto\///' | sed 's/\.proto//' | xargs -I{} $$VTROOT/dist/grpc/usr/local/bin/protoc -Iproto proto/{}.proto --python_out=py/vtproto --grpc_out=py/vtproto --plugin=protoc-gen-grpc=$$VTROOT/dist/grpc/usr/local/bin/grpc_python_plugin

-# build a new image, create the PHP proto files, and copy them back.
-php_proto: docker_php_proto
-docker run -ti --name=vitess_php-proto vitess/php-proto bash -c 'tools/proto-gen-php.sh'
+# Generate the PHP proto files in a Docker container, and copy them back.
+php_proto:
+docker run -ti --name=vitess_php-proto -v $$PWD/proto:/in vitess/bootstrap:common bash -c 'cd $$VTTOP && mkdir -p proto && cp -R /in/* proto/ && tools/proto-gen-php.sh'
docker cp vitess_php-proto:/vt/src/github.com/youtube/vitess/php/src/descriptor.php php/src/
docker cp vitess_php-proto:/vt/src/github.com/youtube/vitess/php/src/php.php php/src/
rm -r php/src/Vitess/Proto/*
docker cp vitess_php-proto:/vt/src/github.com/youtube/vitess/php/src/Vitess/Proto/. php/src/Vitess/Proto/
docker rm vitess_php-proto

@@ -150,9 +151,6 @@ docker_etcd:
docker_publish_site:
docker build -f docker/publish-site/Dockerfile -t vitess/publish-site .

-docker_php_proto:
-docker build -f docker/php-proto/Dockerfile -t vitess/php-proto .
-
# This rule loads the working copy of the code into a bootstrap image,
# and then runs the tests inside Docker.
# Example: $ make docker_test flavor=mariadb
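With this change, `make php_proto` no longer depends on a dedicated `vitess/php-proto` image; it runs the generator inside `vitess/bootstrap:common` and copies the results back. A minimal usage sketch (assuming Docker and the bootstrap image are available locally; this is an editorial illustration, not part of the commit):

``` sh
# Regenerate the PHP proto bindings with the updated target.
make php_proto

# The docker cp steps above place the generated files here:
ls php/src/descriptor.php php/src/php.php
ls php/src/Vitess/Proto/
```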
@@ -45,7 +45,7 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a limit 5",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# cross-db

@@ -162,7 +162,7 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a limit :#maxLimit",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# c.eid

@@ -213,9 +213,9 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id in (1, 2) limit :#maxLimit",
-"OuterQuery": "select eid, id, name, foo from a where :#pk",
+"OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
"IndexUsed": "PRIMARY",
-"ColumnNumbers": [0, 1, 2, 3],
+"ColumnNumbers": [0, 1, 2, 3, 4],
"PKValues": [1,[1,2]]
}

@@ -226,9 +226,9 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = :v1 and id in (:v2, :v3) limit :#maxLimit",
-"OuterQuery": "select eid, id, name, foo from a where :#pk",
+"OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
"IndexUsed": "PRIMARY",
-"ColumnNumbers": [0, 1, 2, 3],
+"ColumnNumbers": [0, 1, 2, 3, 4],
"PKValues": [":v1",[":v2",":v3"]]
}

@@ -239,10 +239,10 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where name = 'foo' limit :#maxLimit",
-"OuterQuery": "select eid, id, name, foo from a where :#pk",
+"OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
"Subquery": "select eid, id from a use index (b_name) where name = 'foo' limit :#maxLimit",
"IndexUsed": "b_name",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# covering index

@@ -291,7 +291,7 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid + 1 = 1 limit :#maxLimit",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# complex where (non-value operand)

@@ -302,7 +302,7 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = id limit :#maxLimit",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# inequality on pk columns

@@ -324,9 +324,9 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where (eid = 1) and (id = 2) limit :#maxLimit",
-"OuterQuery": "select eid, id, name, foo from a where :#pk",
+"OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
"IndexUsed": "PRIMARY",
-"ColumnNumbers": [0, 1, 2, 3],
+"ColumnNumbers": [0, 1, 2, 3, 4],
"PKValues": [1, 2]
}

@@ -337,9 +337,9 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id = 1 limit :#maxLimit",
-"OuterQuery": "select eid, id, name, foo from a where :#pk",
+"OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
"IndexUsed": "PRIMARY",
-"ColumnNumbers": [0, 1, 2, 3],
+"ColumnNumbers": [0, 1, 2, 3, 4],
"PKValues": [1, 1]
}

@@ -518,7 +518,7 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid in (1) and id in (1, 2) limit :#maxLimit",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# double pk IN 2

@@ -529,7 +529,7 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid in (1, 2) and id in (1, 2) limit :#maxLimit",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# pk as tuple

@@ -540,7 +540,7 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where (eid, id) in ((1, 1), (2, 2)) limit :#maxLimit",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# no index match

@@ -645,7 +645,7 @@
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id = 1 order by name asc limit :#maxLimit",
-"ColumnNumbers": [0, 1, 2, 3]
+"ColumnNumbers": [0, 1, 2, 3, 4]
}

# cardinality override

@@ -683,6 +683,17 @@
"ColumnNumbers": [0, 1, 2, 3]
}

+# camel case preserved
+"select Name, id, FOO, bar from d"
+{
+"PlanID": "PASS_SELECT",
+"Reason": "WHERE",
+"TableName": "d",
+"FieldQuery": "select Name, id, FOO, bar from d where 1 != 1",
+"FullQuery": "select Name, id, FOO, bar from d limit :#maxLimit",
+"ColumnNumbers": [0, 1, 2, 3]
+}
+
# column not found
"select missing from a"
"column missing not found in table a"
@@ -25,6 +25,12 @@
"Category": 0,
"IsAuto": false,
"Default": "MA=="
+},
+{
+"Name": "CamelCase",
+"Category": 0,
+"IsAuto": false,
+"Default": "MA=="
}
],
"Indexes": [
@@ -116,7 +116,7 @@
"Vindex": "user_index",
"Values": 1,
"Table": "user",
-"Subquery": "select name, costly from user where id = 1 for update"
+"Subquery": "select Name, Costly from user where id = 1 for update"
}
}

@@ -205,7 +205,7 @@
"Name": "user",
"Sharded": true
},
-"Query": "insert into user(id, name, costly) values (:_id, :_name, :_costly)",
+"Query": "insert into user(id, Name, Costly) values (:_Id, :_Name, :_Costly)",
"Values": [
":__seq",
null,

@@ -234,7 +234,7 @@
"Name": "user",
"Sharded": true
},
-"Query": "insert into user(nonid, id, name, costly) values (2, :_id, :_name, :_costly)",
+"Query": "insert into user(nonid, Id, Name, Costly) values (2, :_Id, :_Name, :_Costly)",
"Values": [
":__seq",
null,

@@ -262,7 +262,7 @@
"Name": "user",
"Sharded": true
},
-"Query": "insert into user(nonid, name, id, costly) values (2, :_name, :_id, :_costly)",
+"Query": "insert into user(nonid, name, id, Costly) values (2, :_Name, :_Id, :_Costly)",
"Values": [
":__seq",
"foo",
@@ -700,6 +700,23 @@
}
}

+# Case preservation test
+"select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5"
+{
+"Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
+"Instructions": {
+"Opcode": "SelectEqualUnique",
+"Keyspace": {
+"Name": "user",
+"Sharded": true
+},
+"Query": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
+"FieldQuery": "select user_extra.Id from user join user_extra where 1 != 1",
+"Vindex": "user_index",
+"Values": 5
+}
+}
+
# outer and inner subquery route reference the same "uu.id" name
# but they refer to different things. The first reference is to the outermost query,
# and the second reference is to the the innermost 'from' subquery.
@@ -24,15 +24,15 @@
"user": {
"ColVindexes": [
{
-"Col": "id",
+"Col": "Id",
"Name": "user_index"
},
{
-"Col": "name",
+"Col": "Name",
"Name": "name_user_map"
},
{
-"Col": "costly",
+"Col": "Costly",
"Name": "costly_map"
}
],
@@ -326,6 +326,37 @@
}
}

+# Case preservation
+"select user.Col, user_extra.Id from user join user_extra"
+{
+"Original": "select user.Col, user_extra.Id from user join user_extra",
+"Instructions": {
+"Opcode": "Join",
+"Left": {
+"Opcode": "SelectScatter",
+"Keyspace": {
+"Name": "user",
+"Sharded": true
+},
+"Query": "select user.Col from user",
+"FieldQuery": "select user.Col from user where 1 != 1"
+},
+"Right": {
+"Opcode": "SelectScatter",
+"Keyspace": {
+"Name": "user",
+"Sharded": true
+},
+"Query": "select user_extra.Id from user_extra",
+"FieldQuery": "select user_extra.Id from user_extra where 1 != 1"
+},
+"Cols": [
+-1,
+1
+]
+}
+}
+
# syntax error
"the quick brown fox"
"syntax error at position 4 near 'the'"
@@ -395,7 +395,7 @@ lock service. ZooKeeper is included in the Vitess distribution.

1. **Start vtctld**

-The `vtctld` server provides a web interface that
+The *vtctld* server provides a web interface that
displays all of the coordination information stored in ZooKeeper.

``` sh

@@ -406,19 +406,22 @@ lock service. ZooKeeper is included in the Vitess distribution.
```

Open `http://localhost:15000` to verify that
-`vtctld` is running. There won't be any information
+*vtctld* is running. There won't be any information
there yet, but the menu should come up, which indicates that
-`vtctld` is running.
+*vtctld* is running.

-The `vtctld` server also accepts commands from the `vtctlclient` tool,
+The *vtctld* server also accepts commands from the `vtctlclient` tool,
which is used to administer the cluster. Note that the port for RPCs
(in this case `15999`) is different from the web UI port (`15000`).
These ports can be configured with command-line flags, as demonstrated
in `vtctld-up.sh`.

+For convenience, we'll use the `lvtctl.sh` script in example commands,
+to avoid having to type the *vtctld* address every time.

``` sh
# List available commands
-$ $VTROOT/bin/vtctlclient -server localhost:15999 Help
+vitess/examples/local$ ./lvtctl.sh help
```

1. **Start vttablets**
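The guide doesn't show the contents of `lvtctl.sh`; a wrapper with the described effect could look roughly like the sketch below (an assumption for illustration only, not the actual script shipped in `examples/local`):

``` sh
#!/bin/bash
# Hypothetical lvtctl.sh-style wrapper: forwards all arguments to vtctlclient
# and fills in the local vtctld RPC address so you don't have to type it.
exec $VTROOT/bin/vtctlclient -server localhost:15999 "$@"
```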
@@ -442,7 +445,7 @@ lock service. ZooKeeper is included in the Vitess distribution.
# Access tablet test-0000000102 at http://localhost:15102/debug/status
```

-After this command completes, refresh the `vtctld` web UI, and you should
+After this command completes, refresh the *vtctld* web UI, and you should
see a keyspace named `test_keyspace` with a single shard named `0`.
This is what an unsharded keyspace looks like.

@@ -455,20 +458,6 @@ lock service. ZooKeeper is included in the Vitess distribution.
status page, showing more details on its operation. Every Vitess server has
a status page served at `/debug/status` on its web port.

-1. **Initialize the new keyspace**
-
-By launching tablets assigned to a nonexistent keyspace, we've essentially
-created a new keyspace. To complete the initialization of the
-[local topology data](http://vitess.io/doc/TopologyService/#local-data),
-perform a keyspace rebuild:
-
-``` sh
-$ $VTROOT/bin/vtctlclient -server localhost:15999 RebuildKeyspaceGraph test_keyspace
-```
-
-**Note:** Many `vtctlclient` commands yield no output if
-they run successfully.
-
1. **Initialize MySQL databases**

Next, designate one of the tablets to be the initial master.

@@ -478,7 +467,7 @@ lock service. ZooKeeper is included in the Vitess distribution.
named `test_keyspace`, the MySQL database will be named `vt_test_keyspace`.

``` sh
-$ $VTROOT/bin/vtctlclient -server localhost:15999 InitShardMaster -force test_keyspace/0 test-0000000100
+vitess/examples/local$ ./lvtctl.sh InitShardMaster -force test_keyspace/0 test-100
### example output:
-# master-elect tablet test-0000000100 is not the shard master, proceeding anyway as -force was used
+# master-elect tablet test-0000000100 is not a master in the shard, proceeding anyway as -force was used

@@ -491,14 +480,14 @@ lock service. ZooKeeper is included in the Vitess distribution.
brand new shard.

After running this command, go back to the **Shard Status** page
-in the `vtctld` web interface. When you refresh the
-page, you should see that one `vttablet` is the master
+in the *vtctld* web interface. When you refresh the
+page, you should see that one *vttablet* is the master
and the other two are replicas.

You can also see this on the command line:

``` sh
-$ $VTROOT/bin/vtctlclient -server localhost:15999 ListAllTablets test
+vitess/examples/local$ ./lvtctl.sh ListAllTablets test
### example output:
# test-0000000100 test_keyspace 0 master localhost:15100 localhost:33100 []
# test-0000000101 test_keyspace 0 replica localhost:15101 localhost:33101 []

@@ -513,17 +502,18 @@ lock service. ZooKeeper is included in the Vitess distribution.

``` sh
# Make sure to run this from the examples/local dir, so it finds the file.
-vitess/examples/local$ $VTROOT/bin/vtctlclient -server localhost:15999 ApplySchema -sql "$(cat create_test_table.sql)" test_keyspace
+vitess/examples/local$ ./lvtctl.sh ApplySchema -sql "$(cat create_test_table.sql)" test_keyspace
```

The SQL to create the table is shown below:

``` sql
-CREATE TABLE test_table (
-id BIGINT AUTO_INCREMENT,
-msg VARCHAR(250),
-PRIMARY KEY(id)
-) Engine=InnoDB
+CREATE TABLE messages (
+page BIGINT(20) UNSIGNED,
+time_created_ns BIGINT(20) UNSIGNED,
+message VARCHAR(10000),
+PRIMARY KEY (page, time_created_ns)
+) ENGINE=InnoDB
```

1. **Take a backup**
@@ -536,29 +526,29 @@ lock service. ZooKeeper is included in the Vitess distribution.
also automatically restore from the latest backup and then resume replication.

``` sh
-$ $VTROOT/bin/vtctlclient -server localhost:15999 Backup test-0000000101
+vitess/examples/local$ ./lvtctl.sh Backup test-0000000102
```

After the backup completes, you can list available backups for the shard:

``` sh
-$ $VTROOT/bin/vtctlclient -server localhost:15999 ListBackups test_keyspace/0
+vitess/examples/local$ ./lvtctl.sh ListBackups test_keyspace/0
### example output:
-# 2015-10-21.042940.test-0000000104
+# 2016-05-06.072724.test-0000000102
```

**Note:** In this single-server example setup, backups are stored at
`$VTDATAROOT/backups`. In a multi-server deployment, you would usually mount
an NFS directory there. You can also change the location by setting the
-`-file_backup_storage_root` flag on `vtctld` and `vttablet`, as demonstrated
+`-file_backup_storage_root` flag on *vtctld* and *vttablet*, as demonstrated
in `vtctld-up.sh` and `vttablet-up.sh`.

1. **Start vtgate**

-Vitess uses `vtgate` to route each client query to
-the correct `vttablet`. This local example runs a
-single `vtgate` instance, though a real deployment
-would likely run multiple `vtgate` instances to share
+Vitess uses *vtgate* to route each client query to
+the correct *vttablet*. This local example runs a
+single *vtgate* instance, though a real deployment
+would likely run multiple *vtgate* instances to share
the load.

``` sh
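To make the backup-location note above concrete, here is a small, hedged check you can run after the `Backup` command (it assumes the file backup storage groups backups by keyspace and shard under the storage root, which matches the `ListBackups test_keyspace/0` call above):

``` sh
# Backups in this single-server setup land under $VTDATAROOT/backups.
ls $VTDATAROOT/backups/test_keyspace/0/
# To relocate them, pass the flag mentioned above to vtctld and vttablet, e.g.
#   -file_backup_storage_root=/mnt/nfs/vitess-backups
# as demonstrated in vtctld-up.sh and vttablet-up.sh.
```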
@@ -568,7 +558,7 @@ lock service. ZooKeeper is included in the Vitess distribution.
### Run a Client Application

The `client.py` file is a simple sample application
-that connects to `vtgate` and executes some queries.
+that connects to *vtgate* and executes some queries.
To run it, you need to either:

* Add the Vitess Python packages to your `PYTHONPATH`.

@@ -583,11 +573,25 @@ To run it, you need to either:
### example output:
# Inserting into master...
# Reading from master...
# (1L, 'V is for speed')
# Reading from replica...
# (1L, 'V is for speed')
# (5L, 1462510331910124032L, 'V is for speed')
# (15L, 1462519383758071808L, 'V is for speed')
# (42L, 1462510369213753088L, 'V is for speed')
# ...
```

There are also sample clients in the same directory for Java, PHP, and Go.
See the comments at the top of each sample file for usage instructions.

### Try Vitess resharding

Now that you have a full Vitess stack running, you may want to go on to the
[Horizontal Sharding](http://vitess.io/user-guide/horizontal-sharding.html)
guide to try out
[dynamic resharding](http://vitess.io/user-guide/sharding.html#resharding).

If so, you can skip the tear-down since the sharding guide picks up right here.
If not, continue to the clean-up steps below.

### Tear down the cluster

Each `-up.sh` script has a corresponding `-down.sh` script to stop the servers.
@@ -1,381 +1,263 @@
This step-by-step guide explains how to split an unsharded keyspace into two shards.
(An unsharded keyspace has exactly one shard.)
The examples assume that the keyspace is named `user_keyspace` and the shard is `0`.
The sharded keyspace will use the `user_keyspace_id` column as the keyspace ID.

You can use the same general instructions to reshard a sharded keyspace.
This guide walks you through the process of sharding an existing unsharded
Vitess [keyspace](http://vitess.io/overview/concepts.html#keyspace).

## Prerequisites

To complete these steps, you must have:
We begin by assuming you've completed the
[Getting Started](http://vitess.io/getting-started/local-instance.html) guide,
and have left the cluster running.

1. A running [keyspace](http://vitess.io/overview/concepts.html#keyspace).
A keyspace is a logical database that maps to one or more MySQL databases.
## Overview

1. Two or more [rdonly tablets](http://vitess.io/overview/concepts.html#tablet)
running on the source shard. You set the desired tablet type when starting
`vttablet` with the `-target_tablet_type` flag. See the
[vttablet-up.sh](https://github.com/youtube/vitess/blob/master/examples/local/vttablet-up.sh)
script for example.
The sample clients in the `examples/local` folder use the following schema:

During resharding, one of these tablets will pause its replication to ensure
a consistent snapshot of the data. For this reason, you can't do resharding
if there are only `master` and `replica` tablets, because those are reserved
for live traffic and Vitess will never take them out of service for batch
processes like resharding.
``` sql
CREATE TABLE messages (
page BIGINT(20) UNSIGNED,
time_created_ns BIGINT(20) UNSIGNED,
message VARCHAR(10000),
PRIMARY KEY (page, time_created_ns)
) ENGINE=InnoDB
```

Having at least two `rdonly` tablets ensures that data updates that occur on
the source shard during the resharding process propagate to the destination
shard. Steps 3 and 4 of the resharding process discuss this in more detail.
The idea is that each page number represents a separate guestbook in a
multi-tenant app. Each guestbook page consists of a list of messages.

We recommend that you also review the
[Range-Based Sharding](http://vitess.io/user-guide/sharding.html#range-based-sharding)
section of the *Sharding* guide.
In this guide, we'll introduce sharding by page number.
That means pages will be randomly distributed across shards,
but all records for a given page are always guaranteed to be on the same shard.
In this way, we can transparently scale the database to support arbitrary growth
in the number of pages.

## Step 1: Define your Keyspace ID on the Source Shard
## Configure sharding information

**Note:** Skip this step if your keyspace already has multiple shards.
The first step is to tell Vitess how we want to partition the data.
We do this by providing a VSchema definition as follows:

In this step, you add a column, which will serve as the
[keyspace ID](http://vitess.io/overview/concepts.html#keyspace-id), to each
table in the soon-to-be-sharded keyspace.
After the keyspace has been sharded, Vitess will use the column's value to route
each query to the proper shard.
``` json
{
"Sharded": true,
"Vindexes": {
"hash": {
"Type": "hash"
}
},
"Tables": {
"messages": {
"ColVindexes": [
{
"Col": "page",
"Name": "hash"
}
]
}
}
}
```

### Step 1.1: Add keyspace ID to each database table
This says that we want to shard the data by a hash of the `page` column.
In other words, keep each page's messages together, but spread pages around
the shards randomly.

For each table in the unsharded keyspace, run the following `alter` statement:
We can load this VSchema into Vitess like this:

``` sh
vtctlclient -server <vtctld host:port> ApplySchema \
-sql "alter table <table name> add <keyspace ID column>" \
<keyspace name>
vitess/examples/local$ ./lvtctl.sh SetKeyspaceShardingInfo test_keyspace keyspace_id uint64
vitess/examples/local$ ./lvtctl.sh ApplyVSchema -vschema "$(cat vschema.json)" test_keyspace
```

In this example, the command looks like this:
## Bring up tablets for new shards

In the unsharded example, you started tablets for a shard
named *0* in *test_keyspace*, written as *test_keyspace/0*.
Now you'll start tablets for two additional shards,
named *test_keyspace/-80* and *test_keyspace/80-*:

``` sh
vtctlclient -server <vtctld host:port> ApplySchema \
-sql "alter table <table name> add user_keyspace_id" \
user_keyspace
vitess/examples/local$ ./sharded-vttablet-up.sh
```

In the above statement, replace `user_keyspace_id` with the column name that you
want to use to store the keyspace ID value.
Also replace `user_keyspace` with the name of your keyspace.
Since the sharding key is the page number,
this will result in half the pages going to each shard,
since *0x80* is the midpoint of the
[sharding key range](http://vitess.io/user-guide/sharding.html#key-ranges-and-partitions).

### Step 1.2: Update tables to contain keyspace ID values
These new shards will run in parallel with the original shard during the
transition, but actual traffic will be served only by the original shard
until we tell it to switch over.

Backfill each row in each table with the appropriate keyspace ID value.
In this example, each `user_keyspace_id` column contains a 64-bit hash of the
user ID in that column's row. Using a hash ensures that user IDs are randomly
and evenly distributed across shards.
Check the *vtctld* web UI, or the output of `lvtctl.sh ListAllTablets test`,
to see when the tablets are ready. There should be 5 tablets in each shard.

### Step 1.3: Set keyspace ID in topology server

Tell Vitess which column value identifies the keyspace ID by running the
following command:
Once the tablets are ready, initialize replication by electing the first master
for each of the new shards:

``` sh
vtctlclient -server <vtctld host:port> \
SetKeyspaceShardingInfo <keyspace name> <keyspace ID column> <keyspace type>
vitess/examples/local$ ./lvtctl.sh InitShardMaster -force test_keyspace/-80 test-0000000200
vitess/examples/local$ ./lvtctl.sh InitShardMaster -force test_keyspace/80- test-0000000300
```

In this example, the command looks like this:
Now there should be a total of 15 tablets, with one master for each shard:

``` sh
vtctlclient -server <vtctld host:port> \
SetKeyspaceShardingInfo user_keyspace user_keyspace_id uint64
vitess/examples/local$ ./lvtctl.sh ListAllTablets test
### example output:
# test-0000000100 test_keyspace 0 master 10.64.3.4:15002 10.64.3.4:3306 []
# ...
# test-0000000200 test_keyspace -80 master 10.64.0.7:15002 10.64.0.7:3306 []
# ...
# test-0000000300 test_keyspace 80- master 10.64.0.9:15002 10.64.0.9:3306 []
# ...
```

Note that each table in the keyspace must have a column to identify the keyspace ID.
In addition, all of those columns must have the same name.
## Copy data from original shard

## Step 2: Prepare the destination shards

In this step, you create the destination shards and tablets.
At the end of this step, the destination shards will have been created,
but they will not contain any data and will not serve any traffic.

This example shows how to split an unsharded database into two destination shards.
As noted in the
[Key Ranges and Partitions](http://vitess.io/user-guide/sharding.html#key-ranges-and-partitions)
section, the value [ 0x80 ] is the middle value for sharding keys.
So, when you split this database into two shards, the
[range-based shard names](http://vitess.io/user-guide/sharding.html#shard-names-in-range-based-keyspaces)
for those shards will be:

* -80
* 80-

### Step 2.1: Create destination shards

To create the destination shards, call the `CreateShard` command.
You would have used the same command to create the source shard. Repeat the
following command for each shard you need to create:
The new tablets start out empty, so we need to copy everything from the
original shard to the two new ones, starting with the schema:

``` sh
vtctlclient -server <vtctld host:port> CreateShard <keyspace name>/<shard name>
vitess/examples/local$ ./lvtctl.sh CopySchemaShard test_keyspace/0 test_keyspace/-80
vitess/examples/local$ ./lvtctl.sh CopySchemaShard test_keyspace/0 test_keyspace/80-
```

In this example, you would run the command twice:
Next we copy the data. Since the amount of data to copy can be very large,
we use a special batch process called *vtworker* to stream the data from a
single source to multiple destinations, routing each row based on its
*keyspace_id*:

``` sh
vtctlclient -server <vtctld host:port> CreateShard user_keyspace/80-
vtctlclient -server <vtctld host:port> CreateShard user_keyspace/-80
vitess/examples/local$ ./sharded-vtworker.sh SplitClone test_keyspace/0
### example output:
# I0416 02:08:59.952805 9 instance.go:115] Starting worker...
# ...
# State: done
# Success:
# messages: copy done, copied 11 rows
```

### Step 2.2: Create destination tablets
Notice that we've only specified the source shard, *test_keyspace/0*.
The *SplitClone* process will automatically figure out which shards to use
as the destinations based on the key range that needs to be covered.
In this case, shard *0* covers the entire range, so it identifies
*-80* and *80-* as the destination shards, since they combine to cover the
same range.

Start up `mysqld` and `vttablet` for the destination shards just like you did
for the source shards, but with a different `-init_shard` argument and a
different unique tablet ID (specified via `-tablet-path`).
Next, it will pause replication on one *rdonly* (offline processing) tablet
to serve as a consistent snapshot of the data. The app can continue without
downtime, since live traffic is served by *replica* and *master* tablets,
which are unaffected. Other batch jobs will also be unaffected, since they
will be served only by the remaining, un-paused *rdonly* tablets.

The example [vttablet-up.sh](https://github.com/youtube/vitess/blob/master/examples/local/vttablet-up.sh)
script has parameters at the top named `shard` and `uid_base` that can be used
to make these modifications.
## Check filtered replication

As with the source shard, you should have two [rdonly tablets](http://vitess.io/overview/concepts.html#tablet)
on each of the destination shards. The `tablet_type` parameter at the top of
`vttablet-up.sh` can be used to set this.
Once the copy from the paused snapshot finishes, *vtworker* turns on
[filtered replication](http://vitess.io/user-guide/sharding.html#filtered-replication)
from the source shard to each destination shard. This allows the destination
shards to catch up on updates that have continued to flow in from the app since
the time of the snapshot.

### Step 2.3: Initialize replication on destination shards

Next call the `InitShardMaster` command to initialize MySQL replication in each destination shard.
You would have used the same commands to elect the master tablet on the source shard.
When the destination shards are caught up, they will continue to replicate
new updates. You can see this by looking at the contents of each shard as
you add new messages to various pages in the Guestbook app. Shard *0* will
see all the messages, while the new shards will only see messages for pages
that live on that shard.

``` sh
vtctlclient -server <vtctld host:port> \
InitShardMaster -force <keyspace name>/<shard name> <tablet alias>
# See what's on shard test_keyspace/0:
vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000100 "SELECT * FROM messages"
# See what's on shard test_keyspace/-80:
vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000200 "SELECT * FROM messages"
# See what's on shard test_keyspace/80-:
vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000300 "SELECT * FROM messages"
```

In this example, you would run these commands:
You can run the client script again to add some messages on various pages
and see how they get routed.

## Check copied data integrity

The *vtworker* batch process has another mode that will compare the source
and destination to ensure all the data is present and correct.
The following commands will run a diff for each destination shard:

``` sh
vtctlclient -server <vtctld host:port> \
InitShardMaster -force user_keyspace/-80 <tablet alias>
vtctlclient -server <vtctld host:port> \
InitShardMaster -force user_keyspace/80- <tablet alias>
vitess/examples/local$ ./sharded-vtworker.sh SplitDiff test_keyspace/-80
vitess/examples/local$ ./sharded-vtworker.sh SplitDiff test_keyspace/80-
```

## Step 3: Clone data to the destination shards
If any discrepancies are found, they will be printed.
If everything is good, you should see something like this:

In this step, you copy the database schema to each destination shard.
Then you copy the data to the destination shards. At the end of this
step, the destination tablets will be populated with data but will not
yet be serving traffic.
```
I0416 02:10:56.927313 10 split_diff.go:496] Table messages checks out (4 rows processed, 1072961 qps)
```

### Step 3.1: Copy schema to destination shards
## Switch over to new shards

Call the `CopySchemaShard` command to copy the database schema
from a rdonly tablet on the source shard to the destination shards:
Now we're ready to switch over to serving from the new shards.
The [MigrateServedTypes](http://vitess.io/reference/vtctl.html#migrateservedtypes)
command lets you do this one
[tablet type](http://vitess.io/overview/concepts.html#tablet) at a time,
and even one [cell](http://vitess.io/overview/concepts.html#cell-data-center)
at a time. The process can be rolled back at any point *until* the master is
switched over.

``` sh
vtctlclient -server <vtctld host:port> CopySchemaShard \
<keyspace>/<source shard> \
<keyspace>/<destination shard>
vitess/examples/local$ ./lvtctl.sh MigrateServedTypes test_keyspace/0 rdonly
vitess/examples/local$ ./lvtctl.sh MigrateServedTypes test_keyspace/0 replica
vitess/examples/local$ ./lvtctl.sh MigrateServedTypes test_keyspace/0 master
```

In this example, you would run these two commands:
During the *master* migration, the original shard master will first stop
accepting updates. Then the process will wait for the new shard masters to
fully catch up on filtered replication before allowing them to begin serving.
Since filtered replication has been following along with live updates, there
should only be a few seconds of master unavailability.

When the master traffic is migrated, the filtered replication will be stopped.
Data updates will be visible on the new shards, but not on the original shard.
See it for yourself: Add a message to the guestbook page and then inspect
the database content:

``` sh
vtctlclient -server <vtctld host:port> \
CopySchemaShard user_keyspace/0 user_keyspace/-80
vtctlclient -server <vtctld host:port> \
CopySchemaShard user_keyspace/0 user_keyspace/80-
# See what's on shard test_keyspace/0
# (no updates visible since we migrated away from it):
vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000100 "SELECT * FROM messages"
# See what's on shard test_keyspace/-80:
vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000200 "SELECT * FROM messages"
# See what's on shard test_keyspace/80-:
vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000300 "SELECT * FROM messages"
```

### Step 3.2: Copy data from source shard to destination shards
## Remove original shard

This step uses a `vtworker` process to copy data from the source shard
to the destination shards. The `vtworker` performs the following tasks:

1. It finds a `rdonly` tablet on the source shard and stops data
replication on the tablet. This prevents the data from changing
while it is being copied. During this time, the `rdonly` tablet's
status is changed to `worker`, and Vitess will stop routing app
traffic to it since it might not have up-to-date data.

1. It does a (concurrent) full scan of each table on the source shard.

1. It identifies the appropriate destination shard for each source row
based on the row's sharding key.

1. It streams the data to the master tablet on the correct destination shard.

The following command starts the `vtworker`:

```
vtworker -cell=<cell name> \
SplitClone -min_healthy_rdonly_endpoints=1 <keyspace name>/<source shard name>
```

For this example, run this command:

```
vtworker -cell=<cell name> \
SplitClone -min_healthy_rdonly_endpoints=1 user_keyspace/0
```

The amount of time that the worker takes to complete will depend
on the size of your dataset. When the process completes, the destination
shards contain the correct data but do not yet serve traffic.
The destination shards are also now running
[filtered replication](http://vitess.io/user-guide/sharding.html#filtered-replication).

## Step 4: Run a data diff to verify integrity

Before the destination shard starts serving data, you want to ensure that
its data is up-to-date. Remember that the source tablet would not have
received updates to any of its records while the vtworker process was
copying data to the destination shards.

### Step 4.1: Use filtered replication to catch up to source data changes

Vitess uses [filtered replication](http://vitess.io/user-guide/sharding.html#filtered-replication) to ensure that
data changes on the source shard during step 3 propagate successfully
to the destination shards. While this process happens automatically, the
time it takes to complete depends on how long step 3 took to complete and
the scope of the data changes on the source shard during that time.

You can see the filtered replication state for a destination shard by
viewing the status page of the shard's master tablet in your browser
(the vtctld web UI will link there from the tablet's **STATUS** button).
The Binlog Player table shows a **SecondsBehindMaster** column that
indicates how far the destination master is still behind the source shard.

### Step 4.2: Compare data on source and destination shards

In this step, you use another `vtworker` process to ensure that the data
on the source and destination shards is identical. The vtworker can also
catch potential problems that might have occurred during the copying
process. For example, if the sharding key changed for a particular row
during step 3 or step 4.1, the data on the source and destination shards
might not be equal.

To start the `vtworker`, run the following `SplitDiff` command:
Now that all traffic is being served from the new shards, we can remove the
original one. To do that, we use the `vttablet-down.sh` script from the
unsharded example:

``` sh
vtworker -cell=<cell name> \
SplitDiff -min_healthy_rdonly_endpoints=1 <keyspace name>/<shard name>
vitess/examples/local$ ./vttablet-down.sh
```

The commands for the two new destination shards in this example are shown
below. You need to complete this process for each destination shard.
However, you must remove one rdonly tablet from the source shard for each
diff process that is running. As such, it is recommended to run diffs
sequentially rather than in parallel.

Then we can delete the now-empty shard:

``` sh
vtworker -cell=<cell name> \
SplitDiff -min_healthy_rdonly_endpoints=1 user_keyspace/-80
vtworker -cell=<cell name> \
SplitDiff -min_healthy_rdonly_endpoints=1 user_keyspace/80-
vitess/examples/local$ ./lvtctl.sh DeleteShard -recursive test_keyspace/0
```

The vtworker performs the following tasks:
You should then see in the vtctld **Topology** page, or in the output of
`lvtctl.sh ListAllTablets test` that the tablets for shard *0* are gone.

1. It finds a health `rdonly` tablet in the source shard and a healthy
`rdonly` tablet in the destination shard.
## Tear down and clean up

1. It sets both tablets to stop serving app traffic, so data can be compared
reliably.

1. It pauses filtered replication on the destination master tablet.

1. It pauses replication on the source `rdonly` tablet at a position higher
than the destination master's filtered replication position.

1. It resumes the destination master's filtered replication.

1. It allows the destination `rdonly` tablet to catch up to the same position
as the source `rdonly` tablet and then stops replication on the
destination `rdonly` tablet.

1. It compares the schema on the source and destination `rdonly` tablets.

1. It streams data from the source and destination tablets, using the
same sharding key constraints, and verifies that the data is equal.

If the diff is successful on the first destination shard, repeat it
on the next destination shard.

## Step 5: Direct traffic to destination shards

After verifying that your destination shards contain the correct data,
you can start serving traffic from those shards.

### Step 5.1 Migrate read-only traffic

The safest process is to migrate read-only traffic first. You will migrate
write operations in the following step, after the read-only traffic is
stable. The reason for splitting the migration into two steps is that
you can reverse the migration of read-only traffic without creating data
inconsistencies. However, you cannot reverse the migration of master
traffic without creating data inconsistencies.

Use the `MigrateServedTypes` command to migrate `rdonly` and `replica` traffic.

```
vtctlclient -server <vtctld host:port> \
MigrateServedTypes <keyspace name>/<source shard name> rdonly
vtctlclient -server <vtctld host:port> \
MigrateServedTypes <keyspace name>/<source shard name> replica
```

If something goes wrong during the migration of read-only traffic,
run the same commands with the `-reverse` flag to return
read-only traffic to the source shard:

```
vtctlclient -server <vtctld host:port> \
MigrateServedTypes -reverse <keyspace name>/<source shard name> rdonly
vtctlclient -server <vtctld host:port> \
MigrateServedTypes -reverse <keyspace name>/<source shard name> replica
```

### Step 5.2 Migrate master traffic

Use the `MigrateServedTypes` command again to migrate `master`
traffic to the destination shard:

```
vtctlclient -server <vtctld host:port> \
MigrateServedTypes <keyspace name>/<source shard name> master
```

For this example, the command is:

```
vtctlclient -server <vtctld host:port> MigrateServedTypes user_keyspace/0 master
```

## Step 6: Scrap source shard

If all of the other steps were successful, you can remove the source
shard, which should no longer be in use.

### Step 6.1: Remove source shard tablets

Run the following command for each tablet in the source shard:
Since you already cleaned up the tablets from the original unsharded example by
running `./vttablet-down.sh`, that step has been replaced with
`./sharded-vttablet-down.sh` to clean up the new sharded tablets.

``` sh
vtctlclient -server <vtctld host:port> DeleteTablet -allow_master <source tablet alias>
```

### Step 6.2: Delete source shard

Run the following command:

``` sh
vtctlclient -server <vtctld host:port> \
DeleteShard <keyspace name>/<source shard name>
```

For this example, the command is:

``` sh
vtctlclient -server <vtctld host:port> DeleteShard user_keyspace/0
vitess/examples/local$ ./vtgate-down.sh
vitess/examples/local$ ./sharded-vttablet-down.sh
vitess/examples/local$ ./vtctld-down.sh
vitess/examples/local$ ./zk-down.sh
```
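As a follow-up to the hash-vindex-on-`page` setup described in this rewritten guide: after the master migration, every row for a given page lives on exactly one of the new shards. A hedged spot check, reusing the tablet aliases and the `ExecuteFetchAsDba` pattern from the steps above (page 42 is just an arbitrary example value):

``` sh
# Only one of these two queries should return rows for page 42, because the
# hash vindex maps all rows with the same page value to the same keyspace ID.
vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000200 "SELECT page, time_created_ns, message FROM messages WHERE page = 42"
vitess/examples/local$ ./lvtctl.sh ExecuteFetchAsDba test-0000000300 "SELECT page, time_created_ns, message FROM messages WHERE page = 42"
```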
@@ -12,7 +12,7 @@ have left the cluster running.

We will follow a process similar to the one in the general
[Horizontal Sharding](http://vitess.io/user-guide/horizontal-sharding.html)
-guide, except that here we'll give the exact commands you'll need to do it for
+guide, except that here we'll give the commands you'll need to do it for
the example Vitess cluster in Kubernetes.

Since Vitess makes [sharding](http://vitess.io/user-guide/sharding.html)

@@ -119,12 +119,12 @@ vitess/examples/kubernetes$ ./kvtctl.sh CopySchemaShard test_keyspace/0 test_key
```

Next we copy the data. Since the amount of data to copy can be very large,
-we use a special batch process called `vtworker` to stream the data from a
+we use a special batch process called *vtworker* to stream the data from a
single source to multiple destinations, routing each row based on its
*keyspace_id*:

``` sh
-vitess/examples/kubernetes$ ./sharded-vtworker.sh -use_v3_resharding_mode SplitClone test_keyspace/0
+vitess/examples/kubernetes$ ./sharded-vtworker.sh SplitClone test_keyspace/0
### example output:
# Creating vtworker pod in cell test...
# pods/vtworker

@@ -153,7 +153,7 @@ will be served only by the remaining, un-paused *rdonly* tablets.

## Check filtered replication

-Once the copy from the paused snapshot finishes, `vtworker` turns on
+Once the copy from the paused snapshot finishes, *vtworker* turns on
[filtered replication](http://vitess.io/user-guide/sharding.html#filtered-replication)
from the source shard to each destination shard. This allows the destination
shards to catch up on updates that have continued to flow in from the app since

@@ -178,13 +178,13 @@ Add some messages on various pages of the Guestbook to see how they get routed.

## Check copied data integrity

-The `vtworker` batch process has another mode that will compare the source
+The *vtworker* batch process has another mode that will compare the source
and destination to ensure all the data is present and correct.
The following commands will run a diff for each destination shard:

``` sh
-vitess/examples/kubernetes$ ./sharded-vtworker.sh -use_v3_resharding_mode SplitDiff test_keyspace/-80
-vitess/examples/kubernetes$ ./sharded-vtworker.sh -use_v3_resharding_mode SplitDiff test_keyspace/80-
+vitess/examples/kubernetes$ ./sharded-vtworker.sh SplitDiff test_keyspace/-80
+vitess/examples/kubernetes$ ./sharded-vtworker.sh SplitDiff test_keyspace/80-
```

If any discrepancies are found, they will be printed.
doc/VitessApi.md: 342 changed lines
@@ -89,7 +89,7 @@ ExecuteBatchKeyspaceIds executes the list of queries based on the specified keys

#### Request

-ExecuteBatchKeyspaceIdsRequest is the payload to ExecuteBatchKeyspaceId
+ExecuteBatchKeyspaceIdsRequest is the payload to ExecuteBatchKeyspaceId.

##### Parameters

@@ -99,11 +99,11 @@ ExecuteBatchKeyspaceIds executes the list of queries based on the specified keys
| <code>session</code> <br>[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. |
| <code>queries</code> <br>list <[BoundKeyspaceIdQuery](#boundkeyspaceidquery)>| BoundKeyspaceIdQuery represents a single query request for the specified list of keyspace ids. This is used in a list for ExecuteBatchKeyspaceIdsRequest. |
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
-| <code>as_transaction</code> <br>bool| |
+| <code>as_transaction</code> <br>bool| as_transaction will execute the queries in this batch in a single transaction per shard, created for this purpose. (this can be seen as adding a 'begin' before and 'commit' after the queries). Only makes sense if tablet_type is master. If set, the Session is ignored. |

#### Response

-ExecuteBatchKeyspaceIdsResponse is the returned value from ExecuteBatchKeyspaceId
+ExecuteBatchKeyspaceIdsResponse is the returned value from ExecuteBatchKeyspaceId.

##### Properties

@@ -119,7 +119,7 @@ ExecuteEntityIds executes the query based on the specified external id to keyspa

#### Request

-ExecuteEntityIdsRequest is the payload to ExecuteEntityIds
+ExecuteEntityIdsRequest is the payload to ExecuteEntityIds.

##### Parameters

@@ -128,11 +128,11 @@ ExecuteEntityIds executes the query based on the specified external id to keyspa
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
| <code>session</code> <br>[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. |
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
-| <code>keyspace</code> <br>string| |
-| <code>entity_column_name</code> <br>string| |
-| <code>entity_keyspace_ids</code> <br>list <[EntityId](#executeentityidsrequest.entityid)>| |
+| <code>keyspace</code> <br>string| keyspace to target the query to. |
+| <code>entity_column_name</code> <br>string| entity_column_name is the column name to use. |
+| <code>entity_keyspace_ids</code> <br>list <[EntityId](#executeentityidsrequest.entityid)>| entity_keyspace_ids are pairs of entity_column_name values associated with its corresponding keyspace_id. |
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
-| <code>not_in_transaction</code> <br>bool| |
+| <code>not_in_transaction</code> <br>bool| not_in_transaction is deprecated and should not be used. |

#### Messages

@@ -142,28 +142,13 @@ ExecuteEntityIds executes the query based on the specified external id to keyspa

| Name |Description |
| :-------- | :--------
-| <code>xid_type</code> <br>[Type](#executeentityidsrequest.entityid.type)| |
-| <code>xid_bytes</code> <br>bytes| |
-| <code>xid_int</code> <br>int64| |
-| <code>xid_uint</code> <br>uint64| |
-| <code>xid_float</code> <br>double| |
-| <code>keyspace_id</code> <br>bytes| |
-
-#### Enums
-
-##### ExecuteEntityIdsRequest.EntityId.Type
-
-| Name |Value |Description |
-| :-------- | :-------- | :--------
-| <code>TYPE_NULL</code> | <code>0</code> | |
-| <code>TYPE_BYTES</code> | <code>1</code> | |
-| <code>TYPE_INT</code> | <code>2</code> | |
-| <code>TYPE_UINT</code> | <code>3</code> | |
-| <code>TYPE_FLOAT</code> | <code>4</code> | |
+| <code>type</code> <br>[query.Type](#query.type)| Type defines the various supported data types in bind vars and query results. |
+| <code>value</code> <br>bytes| value is the value for the entity. Not set if type is NULL_TYPE. |
+| <code>keyspace_id</code> <br>bytes| keyspace_id is the associated keyspace_id for the entity. |

#### Response

-ExecuteEntityIdsResponse is the returned value from ExecuteEntityIds
+ExecuteEntityIdsResponse is the returned value from ExecuteEntityIds.

##### Properties

@@ -179,7 +164,7 @@ ExecuteKeyRanges executes the query based on the specified key ranges.

#### Request

-ExecuteKeyRangesRequest is the payload to ExecuteKeyRanges
+ExecuteKeyRangesRequest is the payload to ExecuteKeyRanges.

##### Parameters

@@ -188,14 +173,14 @@ ExecuteKeyRanges executes the query based on the specified key ranges.
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
| <code>session</code> <br>[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. |
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
-| <code>keyspace</code> <br>string| |
+| <code>keyspace</code> <br>string| keyspace to target the query to |
| <code>key_ranges</code> <br>list <[topodata.KeyRange](#topodata.keyrange)>| KeyRange describes a range of sharding keys, when range-based sharding is used. |
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
-| <code>not_in_transaction</code> <br>bool| |
+| <code>not_in_transaction</code> <br>bool| not_in_transaction is deprecated and should not be used. |

#### Response

-ExecuteKeyRangesResponse is the returned value from ExecuteKeyRanges
+ExecuteKeyRangesResponse is the returned value from ExecuteKeyRanges.

##### Properties

@@ -211,7 +196,7 @@ ExecuteKeyspaceIds executes the query based on the specified keyspace ids.

#### Request

-ExecuteKeyspaceIdsRequest is the payload to ExecuteKeyspaceIds
+ExecuteKeyspaceIdsRequest is the payload to ExecuteKeyspaceIds.

##### Parameters

@@ -220,14 +205,14 @@ ExecuteKeyspaceIds executes the query based on the specified keyspace ids.
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
| <code>session</code> <br>[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. |
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
-| <code>keyspace</code> <br>string| |
-| <code>keyspace_ids</code> <br>list <bytes>| |
+| <code>keyspace</code> <br>string| keyspace to target the query to. |
+| <code>keyspace_ids</code> <br>list <bytes>| keyspace_ids contains the list of keyspace_ids affected by this query. Will be used to find the shards to send the query to. |
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
-| <code>not_in_transaction</code> <br>bool| |
+| <code>not_in_transaction</code> <br>bool| not_in_transaction is deprecated and should not be used. |

#### Response

-ExecuteKeyspaceIdsResponse is the returned value from ExecuteKeyspaceIds
+ExecuteKeyspaceIdsResponse is the returned value from ExecuteKeyspaceIds.

##### Properties

@@ -243,7 +228,7 @@ StreamExecuteKeyRanges executes a streaming query based on key ranges. Use this

#### Request

-StreamExecuteKeyRangesRequest is the payload to StreamExecuteKeyRanges
+StreamExecuteKeyRangesRequest is the payload to StreamExecuteKeyRanges.

##### Parameters

@@ -251,19 +236,18 @@ StreamExecuteKeyRanges executes a streaming query based on key ranges. Use this
| :-------- | :--------
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>key_ranges</code> <br>list <[topodata.KeyRange](#topodata.keyrange)>| KeyRange describes a range of sharding keys, when range-based sharding is used. |
|
||||
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
|
||||
|
||||
#### Response
|
||||
|
||||
StreamExecuteKeyRangesResponse is the returned value from StreamExecuteKeyRanges
|
||||
StreamExecuteKeyRangesResponse is the returned value from StreamExecuteKeyRanges.
|
||||
|
||||
##### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>error</code> <br>[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. |
|
||||
| <code>result</code> <br>[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). |
|
||||
|
||||
### StreamExecuteKeyspaceIds
|
||||
|
@ -272,7 +256,7 @@ StreamExecuteKeyspaceIds executes a streaming query based on keyspace ids. Use t
|
|||
|
||||
#### Request
|
||||
|
||||
StreamExecuteKeyspaceIdsRequest is the payload to StreamExecuteKeyspaceIds
|
||||
StreamExecuteKeyspaceIdsRequest is the payload to StreamExecuteKeyspaceIds.
|
||||
|
||||
##### Parameters
|
||||
|
||||
|
@ -280,19 +264,18 @@ StreamExecuteKeyspaceIds executes a streaming query based on keyspace ids. Use t
|
|||
| :-------- | :--------
|
||||
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>keyspace_ids</code> <br>list <bytes>| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>keyspace_ids</code> <br>list <bytes>| keyspace_ids contains the list of keyspace_ids affected by this query. Will be used to find the shards to send the query to. |
|
||||
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
|
||||
|
||||
#### Response
|
||||
|
||||
StreamExecuteKeyspaceIdsResponse is the returned value from StreamExecuteKeyspaceIds
|
||||
StreamExecuteKeyspaceIdsResponse is the returned value from StreamExecuteKeyspaceIds.
|
||||
|
||||
##### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>error</code> <br>[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. |
|
||||
| <code>result</code> <br>[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). |
|
||||
|
||||
##Transactions
|
||||
|
@ -302,7 +285,7 @@ Begin a transaction.
|
|||
|
||||
#### Request
|
||||
|
||||
BeginRequest is the payload to Begin
|
||||
BeginRequest is the payload to Begin.
|
||||
|
||||
##### Parameters
|
||||
|
||||
|
@ -312,13 +295,12 @@ Begin a transaction.
|
|||
|
||||
#### Response
|
||||
|
||||
BeginResponse is the returned value from Begin
|
||||
BeginResponse is the returned value from Begin.
|
||||
|
||||
##### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>error</code> <br>[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. |
|
||||
| <code>session</code> <br>[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. |
|
||||
|
||||
### Commit
|
||||
|
@ -327,7 +309,7 @@ Commit a transaction.
|
|||
|
||||
#### Request
|
||||
|
||||
CommitRequest is the payload to Commit
|
||||
CommitRequest is the payload to Commit.
|
||||
|
||||
##### Parameters
|
||||
|
||||
|
@ -338,13 +320,12 @@ Commit a transaction.
|
|||
|
||||
#### Response
|
||||
|
||||
CommitResponse is the returned value from Commit
|
||||
CommitResponse is the returned value from Commit.
|
||||
|
||||
##### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>error</code> <br>[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. |
|
||||
|
||||
### Rollback
|
||||
|
||||
|
@ -352,7 +333,7 @@ Rollback a transaction.
|
|||
|
||||
#### Request
|
||||
|
||||
RollbackRequest is the payload to Rollback
|
||||
RollbackRequest is the payload to Rollback.
|
||||
|
||||
##### Parameters
|
||||
|
||||
|
@ -363,13 +344,12 @@ Rollback a transaction.
|
|||
|
||||
#### Response
|
||||
|
||||
RollbackResponse is the returned value from Rollback
|
||||
RollbackResponse is the returned value from Rollback.
|
||||
|
||||
##### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>error</code> <br>[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. |
|
||||
|
||||
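The session plumbing implied by these messages (Begin hands back a session cookie, every call returns an updated one, Commit or Rollback consumes it) can be sketched as below. The vtgateRPC interface is illustrative only; real clients use the stubs generated from this service, but the message flow is the same.

```go
package example

import (
	querypb "github.com/youtube/vitess/go/vt/proto/query"
	topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
	vtgatepb "github.com/youtube/vitess/go/vt/proto/vtgate"
)

// vtgateRPC is a stand-in for a generated client stub.
type vtgateRPC interface {
	Begin(*vtgatepb.BeginRequest) (*vtgatepb.BeginResponse, error)
	ExecuteKeyspaceIds(*vtgatepb.ExecuteKeyspaceIdsRequest) (*vtgatepb.ExecuteKeyspaceIdsResponse, error)
	Commit(*vtgatepb.CommitRequest) (*vtgatepb.CommitResponse, error)
}

// writeInTransaction shows the Begin -> Execute -> Commit flow, always
// replacing the session cookie with the one returned by the previous call.
func writeInTransaction(rpc vtgateRPC, query *querypb.BoundQuery, keyspace string, ksid []byte) error {
	begin, err := rpc.Begin(&vtgatepb.BeginRequest{})
	if err != nil {
		return err
	}
	session := begin.Session // opaque session cookie, invalidated on use

	exec, err := rpc.ExecuteKeyspaceIds(&vtgatepb.ExecuteKeyspaceIdsRequest{
		Session:     session,
		Query:       query,
		Keyspace:    keyspace,
		KeyspaceIds: [][]byte{ksid},
		TabletType:  topodatapb.TabletType_MASTER,
	})
	if err != nil {
		return err
	}
	session = exec.Session // pick up the updated session from the response

	// Production code would also inspect the RPCError carried in each response.
	_, err = rpc.Commit(&vtgatepb.CommitRequest{Session: session})
	return err
}
```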
##Custom Sharding
|
||||
### ExecuteBatchShards
|
||||
|
@ -388,11 +368,11 @@ ExecuteBatchShards executes the list of queries on the specified shards.
|
|||
| <code>session</code> <br>[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. |
|
||||
| <code>queries</code> <br>list <[BoundShardQuery](#boundshardquery)>| BoundShardQuery represents a single query request for the specified list of shards. This is used in a list for ExecuteBatchShardsRequest. |
|
||||
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
|
||||
| <code>as_transaction</code> <br>bool| |
|
||||
| <code>as_transaction</code> <br>bool| as_transaction will execute the queries in this batch in a single transaction per shard, created for this purpose. (this can be seen as adding a 'begin' before and 'commit' after the queries). Only makes sense if tablet_type is master. If set, the Session is ignored. |
|
||||
|
||||
#### Response
|
||||
|
||||
ExecuteBatchShardsResponse is the returned value from ExecuteBatchShards
|
||||
ExecuteBatchShardsResponse is the returned value from ExecuteBatchShards.
|
||||
|
||||
##### Properties
|
||||
|
||||
|
@ -408,7 +388,7 @@ ExecuteShards executes the query on the specified shards.
|
|||
|
||||
#### Request
|
||||
|
||||
ExecuteShardsRequest is the payload to ExecuteShards
|
||||
ExecuteShardsRequest is the payload to ExecuteShards.
|
||||
|
||||
##### Parameters
|
||||
|
||||
|
@ -417,14 +397,14 @@ ExecuteShards executes the query on the specified shards.
|
|||
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
|
||||
| <code>session</code> <br>[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. |
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>shards</code> <br>list <string>| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>shards</code> <br>list <string>| shards to target the query to. A DML can only target one shard. |
|
||||
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
|
||||
| <code>not_in_transaction</code> <br>bool| |
|
||||
| <code>not_in_transaction</code> <br>bool| not_in_transaction is deprecated and should not be used. |
|
||||
|
||||
#### Response
|
||||
|
||||
ExecuteShardsResponse is the returned value from ExecuteShards
|
||||
ExecuteShardsResponse is the returned value from ExecuteShards.
|
||||
|
||||
##### Properties
|
||||
|
||||
|
@ -440,7 +420,7 @@ StreamExecuteShards executes a streaming query based on shards. Use this method
|
|||
|
||||
#### Request
|
||||
|
||||
StreamExecuteShardsRequest is the payload to StreamExecuteShards
|
||||
StreamExecuteShardsRequest is the payload to StreamExecuteShards.
|
||||
|
||||
##### Parameters
|
||||
|
||||
|
@ -448,19 +428,18 @@ StreamExecuteShards executes a streaming query based on shards. Use this method
|
|||
| :-------- | :--------
|
||||
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>shards</code> <br>list <string>| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>shards</code> <br>list <string>| shards to target the query to. |
|
||||
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
|
||||
|
||||
#### Response
|
||||
|
||||
StreamExecuteShardsResponse is the returned value from StreamExecuteShards
|
||||
StreamExecuteShardsResponse is the returned value from StreamExecuteShards.
|
||||
|
||||
##### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>error</code> <br>[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. |
|
||||
| <code>result</code> <br>[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). |
|
||||
|
||||
##Map Reduce
|
||||
|
@ -470,27 +449,30 @@ Split a query into non-overlapping sub queries
|
|||
|
||||
#### Request
|
||||
|
||||
SplitQueryRequest is the payload to SplitQuery
|
||||
SplitQueryRequest is the payload to SplitQuery. SplitQuery takes a "SELECT" query and generates a list of queries called "query-parts". Each query-part consists of the original query with an added WHERE clause that restricts the query-part to operate only on rows whose values in the columns listed in the "split_column" field of the request (see below) are in a particular range. It is guaranteed that the set of rows obtained from executing each query-part on a database snapshot and merging (without deduping) the results is equal to the set of rows obtained from executing the original query on the same snapshot, with the rows containing NULL values in any of the split_column's excluded. This is typically called by the MapReduce master when reading from Vitess. There it's desirable that the sets of rows returned by the query-parts have roughly the same size. (A sketch of how the returned query-parts are dispatched follows the response message definitions below.)
|
||||
|
||||
##### Parameters
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>split_column</code> <br>string| |
|
||||
| <code>split_count</code> <br>int64| |
|
||||
| <code>split_column</code> <br>list <string>| Each generated query-part will be restricted to rows whose values in the columns listed in this field are in a particular range. The list of columns named here must be a prefix of the list of columns defining some index or primary key of the table referenced in 'query'. For many tables using the primary key columns (in order) is sufficient and this is the default if this field is omitted. See the comment on the 'algorithm' field for more restrictions and information. |
|
||||
| <code>split_count</code> <br>int64| You can specify either an estimate of the number of query-parts to generate or an estimate of the number of rows each query-part should return. Thus, exactly one of split_count or num_rows_per_query_part should be nonzero. The non-given parameter is calculated from the given parameter using the formula: split_count * num_rows_per_query_part = table_size, where table_size is an approximation of the number of rows in the table. Note that if "split_count" is given it is regarded as an estimate. The number of query-parts returned may differ slightly (in particular, if it's not a whole multiple of the number of vitess shards). |
|
||||
| <code>num_rows_per_query_part</code> <br>int64| |
|
||||
| <code>algorithm</code> <br>query.SplitQueryRequest.Algorithm| The algorithm to use to split the query. The split algorithm is performed on each database shard in parallel. The lists of query-parts generated by the shards are merged and returned to the caller. Two algorithms are supported: EQUAL_SPLITS If this algorithm is selected then only the first 'split_column' given is used (or the first primary key column if the 'split_column' field is empty). In the rest of this algorithm's description, we refer to this column as "the split column". The split column must have numeric type (integral or floating point). The algorithm works by taking the interval [min, max], where min and max are the minimum and maximum values of the split column in the table-shard, respectively, and partitioning it into 'split_count' sub-intervals of equal size. The added WHERE clause of each query-part restricts that part to rows whose value in the split column belongs to a particular sub-interval. This is fast, but requires that the distribution of values of the split column be uniform in [min, max] for the number of rows returned by each query part to be roughly the same. FULL_SCAN If this algorithm is used then the split_column must be the primary key columns (in order). This algorithm performs a full-scan of the table-shard referenced in 'query' to get "boundary" rows that are num_rows_per_query_part apart when the table is ordered by the columns listed in 'split_column'. It then restricts each query-part to the rows located between two successive boundary rows. This algorithm supports multiple split_column's of any type, but is slower than EQUAL_SPLITS. |
|
||||
| <code>use_split_query_v2</code> <br>bool| Whether to use the new split-query code that supports multiple split-columns and the FULL_SCAN algorithm. This is a temporary field which aids in the migration of SplitQuery to the new code (SplitQuery version 2). |
|
||||
|
||||
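As a rough illustration of the request shape for a MapReduce reader (the package aliases and the split column name are assumptions; per the field docs above, exactly one of split_count and num_rows_per_query_part is set):

```go
package example

import (
	querypb "github.com/youtube/vitess/go/vt/proto/query"
	vtgatepb "github.com/youtube/vitess/go/vt/proto/vtgate"
)

// newSplitQueryRequest asks vtgate to split the given SELECT into query-parts
// of roughly rowsPerPart rows each, splitting on the primary key prefix "id".
func newSplitQueryRequest(keyspace, sql string, rowsPerPart int64) *vtgatepb.SplitQueryRequest {
	return &vtgatepb.SplitQueryRequest{
		Keyspace:            keyspace,
		Query:               &querypb.BoundQuery{Sql: sql},
		SplitColumn:         []string{"id"}, // assumed: must be a prefix of an index or the primary key
		NumRowsPerQueryPart: rowsPerPart,    // split_count is left at zero
	}
}
```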
#### Response
|
||||
|
||||
SplitQueryResponse is the returned value from SplitQuery
|
||||
SplitQueryResponse is the returned value from SplitQuery.
|
||||
|
||||
##### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>splits</code> <br>list <[Part](#splitqueryresponse.part)>| |
|
||||
| <code>splits</code> <br>list <[Part](#splitqueryresponse.part)>| splits contains the queries to run to fetch the entire data set. |
|
||||
|
||||
#### Messages
|
||||
|
||||
|
@ -500,7 +482,7 @@ Split a query into non-overlapping sub queries
|
|||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>key_ranges</code> <br>list <[topodata.KeyRange](#topodata.keyrange)>| KeyRange describes a range of sharding keys, when range-based sharding is used. |
|
||||
|
||||
##### SplitQueryResponse.Part
|
||||
|
@ -510,9 +492,9 @@ Split a query into non-overlapping sub queries
|
|||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>key_range_part</code> <br>[KeyRangePart](#splitqueryresponse.keyrangepart)| |
|
||||
| <code>shard_part</code> <br>[ShardPart](#splitqueryresponse.shardpart)| |
|
||||
| <code>size</code> <br>int64| |
|
||||
| <code>key_range_part</code> <br>[KeyRangePart](#splitqueryresponse.keyrangepart)| key_range_part is set if the query should be executed by ExecuteKeyRanges. |
|
||||
| <code>shard_part</code> <br>[ShardPart](#splitqueryresponse.shardpart)| shard_part is set if the query should be executed by ExecuteShards. |
|
||||
| <code>size</code> <br>int64| size is the approximate number of rows this query will return. |
|
||||
|
||||
##### SplitQueryResponse.ShardPart
|
||||
|
||||
|
@ -520,8 +502,8 @@ Split a query into non-overlapping sub queries
|
|||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>shards</code> <br>list <string>| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>shards</code> <br>list <string>| shards to target the query to. |
|
||||
|
||||
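Tying the response messages together: each Part is replayed through ExecuteShards when shard_part is set, or through ExecuteKeyRanges when key_range_part is set. A hedged sketch follows; the two send callbacks stand in for whichever client call actually issues those RPCs.

```go
package example

import (
	querypb "github.com/youtube/vitess/go/vt/proto/query"
	topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
	vtgatepb "github.com/youtube/vitess/go/vt/proto/vtgate"
)

// dispatchParts routes each query-part to the RPC indicated by its routing message.
func dispatchParts(
	resp *vtgatepb.SplitQueryResponse,
	sendShards func(q *querypb.BoundQuery, keyspace string, shards []string) error,
	sendKeyRanges func(q *querypb.BoundQuery, keyspace string, krs []*topodatapb.KeyRange) error,
) error {
	for _, part := range resp.Splits {
		switch {
		case part.ShardPart != nil:
			// Custom sharding: execute against the listed shards.
			if err := sendShards(part.Query, part.ShardPart.Keyspace, part.ShardPart.Shards); err != nil {
				return err
			}
		case part.KeyRangePart != nil:
			// Range-based sharding: execute against the listed key ranges.
			if err := sendKeyRanges(part.Query, part.KeyRangePart.Keyspace, part.KeyRangePart.KeyRanges); err != nil {
				return err
			}
		}
	}
	return nil
}
```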
##Topology
|
||||
### GetSrvKeyspace
|
||||
|
@ -530,17 +512,17 @@ GetSrvKeyspace returns a SrvKeyspace object (as seen by this vtgate). This metho
|
|||
|
||||
#### Request
|
||||
|
||||
GetSrvKeyspaceRequest is the payload to GetSrvKeyspace
|
||||
GetSrvKeyspaceRequest is the payload to GetSrvKeyspace.
|
||||
|
||||
##### Parameters
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>keyspace</code> <br>string| keyspace name to fetch. |
|
||||
|
||||
#### Response
|
||||
|
||||
GetSrvKeyspaceResponse is the returned value from GetSrvKeyspace
|
||||
GetSrvKeyspaceResponse is the returned value from GetSrvKeyspace.
|
||||
|
||||
##### Properties
|
||||
|
||||
|
@ -555,7 +537,7 @@ Execute tries to route the query to the right shard. It depends on the query and
|
|||
|
||||
#### Request
|
||||
|
||||
ExecuteRequest is the payload to Execute
|
||||
ExecuteRequest is the payload to Execute.
|
||||
|
||||
##### Parameters
|
||||
|
||||
|
@ -565,11 +547,12 @@ Execute tries to route the query to the right shard. It depends on the query and
|
|||
| <code>session</code> <br>[Session](#session)| Session objects are session cookies and are invalidated on use. Query results will contain updated session values. Their content should be opaque to the user. |
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
|
||||
| <code>not_in_transaction</code> <br>bool| |
|
||||
| <code>not_in_transaction</code> <br>bool| not_in_transaction is deprecated and should not be used. |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
|
||||
#### Response
|
||||
|
||||
ExecuteResponse is the returned value from Execute
|
||||
ExecuteResponse is the returned value from Execute.
|
||||
|
||||
##### Properties
|
||||
|
||||
|
@ -585,7 +568,7 @@ StreamExecute executes a streaming query based on shards. It depends on the quer
|
|||
|
||||
#### Request
|
||||
|
||||
StreamExecuteRequest is the payload to StreamExecute
|
||||
StreamExecuteRequest is the payload to StreamExecute.
|
||||
|
||||
##### Parameters
|
||||
|
||||
|
@ -594,20 +577,56 @@ StreamExecute executes a streaming query based on shards. It depends on the quer
|
|||
| <code>caller_id</code> <br>[vtrpc.CallerID](#vtrpc.callerid)| CallerID is passed along RPCs to identify the originating client for a request. It is not meant to be secure, but only informational. The client can put whatever info they want in these fields, and they will be trusted by the servers. The fields will just be used for logging purposes, and to easily find a client. VtGate propagates it to VtTablet, and VtTablet may use this information for monitoring purposes, to display on dashboards, or for blacklisting purposes. |
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
|
||||
#### Response
|
||||
|
||||
StreamExecuteResponse is the returned value from StreamExecute
|
||||
StreamExecuteResponse is the returned value from StreamExecute.
|
||||
|
||||
##### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>error</code> <br>[vtrpc.RPCError](#vtrpc.rpcerror)| RPCError is an application-level error structure returned by VtTablet (and passed along by VtGate if appropriate). We use this so the clients don't have to parse the error messages, but instead can depend on the value of the code. |
|
||||
| <code>result</code> <br>[query.QueryResult](#query.queryresult)| QueryResult is returned by Execute and ExecuteStream. As returned by Execute, len(fields) is always equal to len(row) (for each row in rows). As returned by StreamExecute, the first QueryResult has the fields set, and subsequent QueryResult have rows set. And as Execute, len(QueryResult[0].fields) is always equal to len(row) (for each row in rows for each QueryResult in QueryResult[1:]). |
|
||||
|
||||
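As the QueryResult comment above spells out, a streaming call sends the field definitions once, in the first message, and only rows afterwards. A minimal sketch of stitching the stream back together (the channel is just a stand-in for however the client surfaces the stream):

```go
package example

import (
	querypb "github.com/youtube/vitess/go/vt/proto/query"
)

// collectStream merges a stream of QueryResults: fields come from the first
// message that carries them, rows are appended from every message.
func collectStream(stream <-chan *querypb.QueryResult) *querypb.QueryResult {
	out := &querypb.QueryResult{}
	for qr := range stream {
		if len(out.Fields) == 0 && len(qr.Fields) > 0 {
			out.Fields = qr.Fields
		}
		out.Rows = append(out.Rows, qr.Rows...)
	}
	return out
}
```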
## Enums
|
||||
|
||||
### query.Type
|
||||
|
||||
Type defines the various supported data types in bind vars and query results.
|
||||
|
||||
| Name |Value |Description |
|
||||
| :-------- | :-------- | :--------
|
||||
| <code>NULL_TYPE</code> | <code>0</code> | NULL_TYPE specifies a NULL type. |
|
||||
| <code>INT8</code> | <code>257</code> | INT8 specifies a TINYINT type. Properties: 1, IsNumber. |
|
||||
| <code>UINT8</code> | <code>770</code> | UINT8 specifies a TINYINT UNSIGNED type. Properties: 2, IsNumber, IsUnsigned. |
|
||||
| <code>INT16</code> | <code>259</code> | INT16 specifies a SMALLINT type. Properties: 3, IsNumber. |
|
||||
| <code>UINT16</code> | <code>772</code> | UINT16 specifies a SMALLINT UNSIGNED type. Properties: 4, IsNumber, IsUnsigned. |
|
||||
| <code>INT24</code> | <code>261</code> | INT24 specifies a MEDIUMINT type. Properties: 5, IsNumber. |
|
||||
| <code>UINT24</code> | <code>774</code> | UINT24 specifies a MEDIUMINT UNSIGNED type. Properties: 6, IsNumber, IsUnsigned. |
|
||||
| <code>INT32</code> | <code>263</code> | INT32 specifies a INTEGER type. Properties: 7, IsNumber. |
|
||||
| <code>UINT32</code> | <code>776</code> | UINT32 specifies a INTEGER UNSIGNED type. Properties: 8, IsNumber, IsUnsigned. |
|
||||
| <code>INT64</code> | <code>265</code> | INT64 specifies a BIGINT type. Properties: 9, IsNumber. |
|
||||
| <code>UINT64</code> | <code>778</code> | UINT64 specifies a BIGINT UNSIGNED type. Properties: 10, IsNumber, IsUnsigned. |
|
||||
| <code>FLOAT32</code> | <code>1035</code> | FLOAT32 specifies a FLOAT type. Properties: 11, IsFloat. |
|
||||
| <code>FLOAT64</code> | <code>1036</code> | FLOAT64 specifies a DOUBLE or REAL type. Properties: 12, IsFloat. |
|
||||
| <code>TIMESTAMP</code> | <code>2061</code> | TIMESTAMP specifies a TIMESTAMP type. Properties: 13, IsQuoted. |
|
||||
| <code>DATE</code> | <code>2062</code> | DATE specifies a DATE type. Properties: 14, IsQuoted. |
|
||||
| <code>TIME</code> | <code>2063</code> | TIME specifies a TIME type. Properties: 15, IsQuoted. |
|
||||
| <code>DATETIME</code> | <code>2064</code> | DATETIME specifies a DATETIME type. Properties: 16, IsQuoted. |
|
||||
| <code>YEAR</code> | <code>785</code> | YEAR specifies a YEAR type. Properties: 17, IsNumber, IsUnsigned. |
|
||||
| <code>DECIMAL</code> | <code>18</code> | DECIMAL specifies a DECIMAL or NUMERIC type. Properties: 18, None. |
|
||||
| <code>TEXT</code> | <code>6163</code> | TEXT specifies a TEXT type. Properties: 19, IsQuoted, IsText. |
|
||||
| <code>BLOB</code> | <code>10260</code> | BLOB specifies a BLOB type. Properties: 20, IsQuoted, IsBinary. |
|
||||
| <code>VARCHAR</code> | <code>6165</code> | VARCHAR specifies a VARCHAR type. Properties: 21, IsQuoted, IsText. |
|
||||
| <code>VARBINARY</code> | <code>10262</code> | VARBINARY specifies a VARBINARY type. Properties: 22, IsQuoted, IsBinary. |
|
||||
| <code>CHAR</code> | <code>6167</code> | CHAR specifies a CHAR type. Properties: 23, IsQuoted, IsText. |
|
||||
| <code>BINARY</code> | <code>10264</code> | BINARY specifies a BINARY type. Properties: 24, IsQuoted, IsBinary. |
|
||||
| <code>BIT</code> | <code>2073</code> | BIT specifies a BIT type. Properties: 25, IsQuoted. |
|
||||
| <code>ENUM</code> | <code>2074</code> | ENUM specifies an ENUM type. Properties: 26, IsQuoted. |
|
||||
| <code>SET</code> | <code>2075</code> | SET specifies a SET type. Properties: 27, IsQuoted. |
|
||||
| <code>TUPLE</code> | <code>28</code> | TUPLE specifies a tuple. This cannot be returned in a QueryResult, but it can be sent as a bind var. Properties: 28, None. |
|
||||
|
||||
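The numeric values of this enum are not arbitrary: each packs a small ordinal together with bit flags for the listed properties (for example 257 = 1 + 256 for INT8, 770 = 2 + 256 + 512 for UINT8, 10260 = 20 + 2048 + 8192 for BLOB). The flag constants below are inferred from the table rather than quoted from the proto, so treat them as an illustration:

```go
// Property bits inferred from the enum values listed above.
const (
	isNumber   = 1 << 8  // 256:  IsNumber
	isUnsigned = 1 << 9  // 512:  IsUnsigned
	isFloat    = 1 << 10 // 1024: IsFloat
	isQuoted   = 1 << 11 // 2048: IsQuoted
	isText     = 1 << 12 // 4096: IsText
	isBinary   = 1 << 13 // 8192: IsBinary
)

// isSignedIntegral reports whether a query.Type value is a signed integer
// type (INT8..INT64): the IsNumber bit is set and the IsUnsigned bit is not.
func isSignedIntegral(t int32) bool {
	return t&isNumber != 0 && t&isUnsigned == 0
}
```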
### topodata.KeyspaceIdType
|
||||
|
||||
KeyspaceIdType describes the type of the sharding key for a range-based sharded keyspace.
|
||||
|
@ -624,19 +643,16 @@ StreamExecute executes a streaming query based on shards. It depends on the quer
|
|||
|
||||
| Name |Value |Description |
|
||||
| :-------- | :-------- | :--------
|
||||
| <code>UNKNOWN</code> | <code>0</code> | |
|
||||
| <code>IDLE</code> | <code>1</code> | |
|
||||
| <code>MASTER</code> | <code>2</code> | |
|
||||
| <code>REPLICA</code> | <code>3</code> | |
|
||||
| <code>RDONLY</code> | <code>4</code> | |
|
||||
| <code>BATCH</code> | <code>4</code> | |
|
||||
| <code>SPARE</code> | <code>5</code> | |
|
||||
| <code>EXPERIMENTAL</code> | <code>6</code> | |
|
||||
| <code>SCHEMA_UPGRADE</code> | <code>7</code> | |
|
||||
| <code>BACKUP</code> | <code>8</code> | |
|
||||
| <code>RESTORE</code> | <code>9</code> | |
|
||||
| <code>WORKER</code> | <code>10</code> | |
|
||||
| <code>SCRAP</code> | <code>11</code> | |
|
||||
| <code>UNKNOWN</code> | <code>0</code> | UNKNOWN is not a valid value. |
|
||||
| <code>MASTER</code> | <code>1</code> | MASTER is the master server for the shard. Only MASTER allows DMLs. |
|
||||
| <code>REPLICA</code> | <code>2</code> | REPLICA is a slave type. It is used to serve live traffic. A REPLICA can be promoted to MASTER. A demoted MASTER will go to REPLICA. |
|
||||
| <code>RDONLY</code> | <code>3</code> | RDONLY (old name) / BATCH (new name) is used to serve traffic for long-running jobs. It is a separate type from REPLICA so long-running queries don't affect web-like traffic. |
|
||||
| <code>BATCH</code> | <code>3</code> | |
|
||||
| <code>SPARE</code> | <code>4</code> | SPARE is a type of servers that cannot serve queries, but is available in case an extra server is needed. |
|
||||
| <code>EXPERIMENTAL</code> | <code>5</code> | EXPERIMENTAL is like SPARE, except it can serve queries. This type can be used for usages not planned by Vitess, like online export to another storage engine. |
|
||||
| <code>BACKUP</code> | <code>6</code> | BACKUP is the type a server goes to when taking a backup. No queries can be served in BACKUP mode. |
|
||||
| <code>RESTORE</code> | <code>7</code> | RESTORE is the type a server uses when restoring a backup, at startup time. No queries can be served in RESTORE mode. |
|
||||
| <code>WORKER</code> | <code>8</code> | WORKER is the type a server goes into when used by a vtworker process to perform an offline action. It is a serving type (as the vtworker processes may need queries to run). In this state, this tablet is dedicated to the vtworker process that uses it. |
|
||||
|
||||
### vtrpc.ErrorCode
|
||||
|
||||
|
@ -644,17 +660,17 @@ StreamExecute executes a streaming query based on shards. It depends on the quer
|
|||
|
||||
| Name |Value |Description |
|
||||
| :-------- | :-------- | :--------
|
||||
| <code>SUCCESS</code> | <code>0</code> | SUCCESS is returned from a successful call |
|
||||
| <code>CANCELLED</code> | <code>1</code> | CANCELLED means that the context was cancelled (and noticed in the app layer, as opposed to the RPC layer) |
|
||||
| <code>SUCCESS</code> | <code>0</code> | SUCCESS is returned from a successful call. |
|
||||
| <code>CANCELLED</code> | <code>1</code> | CANCELLED means that the context was cancelled (and noticed in the app layer, as opposed to the RPC layer). |
|
||||
| <code>UNKNOWN_ERROR</code> | <code>2</code> | UNKNOWN_ERROR includes: 1. MySQL error codes that we don't explicitly handle. 2. MySQL response that wasn't as expected. For example, we might expect a MySQL timestamp to be returned in a particular way, but it wasn't. 3. Anything else that doesn't fall into a different bucket. |
|
||||
| <code>BAD_INPUT</code> | <code>3</code> | BAD_INPUT is returned when an end-user either sends SQL that couldn't be parsed correctly, or tries a query that isn't supported by Vitess. |
|
||||
| <code>DEADLINE_EXCEEDED</code> | <code>4</code> | DEADLINE_EXCEEDED is returned when an action is taking longer than a given timeout. |
|
||||
| <code>INTEGRITY_ERROR</code> | <code>5</code> | INTEGRITY_ERROR is returned on integrity error from MySQL, usually due to duplicate primary keys |
|
||||
| <code>INTEGRITY_ERROR</code> | <code>5</code> | INTEGRITY_ERROR is returned on integrity error from MySQL, usually due to duplicate primary keys. |
|
||||
| <code>PERMISSION_DENIED</code> | <code>6</code> | PERMISSION_DENIED errors are returned when a user requests access to something that they don't have permissions for. |
|
||||
| <code>RESOURCE_EXHAUSTED</code> | <code>7</code> | RESOURCE_EXHAUSTED is returned when a query exceeds its quota in some dimension and can't be completed due to that. Queries that return RESOURCE_EXHAUSTED should not be retried, as it could be detrimental to the server's health. Examples of errors that will cause the RESOURCE_EXHAUSTED code: 1. TxPoolFull: this is retried server-side, and is only returned as an error if the server-side retries failed. 2. Query is killed due to it taking too long. |
|
||||
| <code>QUERY_NOT_SERVED</code> | <code>8</code> | QUERY_NOT_SERVED means that a query could not be served right now. Client can interpret it as: "the tablet that you sent this query to cannot serve the query right now, try a different tablet or try again later." This could be due to various reasons: QueryService is not serving, should not be serving, wrong shard, wrong tablet type, blacklisted table, etc. Clients that receive this error should usually retry the query, but after taking the appropriate steps to make sure that the query will get sent to the correct tablet. |
|
||||
| <code>NOT_IN_TX</code> | <code>9</code> | NOT_IN_TX means that we're not currently in a transaction, but we should be. |
|
||||
| <code>INTERNAL_ERROR</code> | <code>10</code> | INTERNAL_ERRORs are problems that only the server can fix, not the client. These errors are not due to a query itself, but rather due to the state of the system. Generally, we don't expect the errors to go away by themselves, but they may go away after human intervention. Examples of scenarios where INTERNAL_ERROR is returned: 1. Something is not configured correctly internally. 2. A necessary resource is not available, and we don't expect it to become available by itself. 3. A sanity check fails 4. Some other internal error occurs Clients should not retry immediately, as there is little chance of success. However, it's acceptable for retries to happen internally, for example to multiple backends, in case only a subset of backend are not functional. |
|
||||
| <code>INTERNAL_ERROR</code> | <code>10</code> | INTERNAL_ERRORs are problems that only the server can fix, not the client. These errors are not due to a query itself, but rather due to the state of the system. Generally, we don't expect the errors to go away by themselves, but they may go away after human intervention. Examples of scenarios where INTERNAL_ERROR is returned: 1. Something is not configured correctly internally. 2. A necessary resource is not available, and we don't expect it to become available by itself. 3. A sanity check fails. 4. Some other internal error occurs. Clients should not retry immediately, as there is little chance of success. However, it's acceptable for retries to happen internally, for example to multiple backends, in case only a subset of backends are not functional. |
|
||||
| <code>TRANSIENT_ERROR</code> | <code>11</code> | TRANSIENT_ERROR is used for when there is some error that we expect we can recover from automatically - often due to a resource limit temporarily being reached. Retrying this error, with an exponential backoff, should succeed. Clients should be able to successfully retry the query on the same backends. Examples of things that can trigger this error: 1. Query has been throttled 2. VtGate could have request backlog |
|
||||
| <code>UNAUTHENTICATED</code> | <code>12</code> | UNAUTHENTICATED errors are returned when a user requests access to something, and we're unable to verify the user's authentication. |
|
||||
|
||||
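Read together, these descriptions amount to a retry policy: TRANSIENT_ERROR can be retried with backoff against the same backend, QUERY_NOT_SERVED should be retried only after re-resolving the target tablet, and most other codes (RESOURCE_EXHAUSTED, BAD_INPUT, INTEGRITY_ERROR, INTERNAL_ERROR) should not be retried as-is. A hedged sketch of such a classification follows; it is not an API Vitess provides.

```go
package example

import (
	vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc"
)

// retryAction is an illustrative classification derived from the
// ErrorCode descriptions above.
type retryAction int

const (
	noRetry          retryAction = iota
	retrySameTablet              // back off, then retry against the same backend
	retryOtherTablet             // re-resolve the tablet (wrong shard/type, not serving), then retry
)

func classify(code vtrpcpb.ErrorCode) retryAction {
	switch code {
	case vtrpcpb.ErrorCode_TRANSIENT_ERROR:
		return retrySameTablet
	case vtrpcpb.ErrorCode_QUERY_NOT_SERVED:
		return retryOtherTablet
	default:
		// BAD_INPUT, INTEGRITY_ERROR, RESOURCE_EXHAUSTED, INTERNAL_ERROR, ...:
		// retrying the same query immediately is unlikely to help.
		return noRetry
	}
}
```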
|
@ -669,8 +685,8 @@ BoundKeyspaceIdQuery represents a single query request for the specified list of
|
|||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>keyspace_ids</code> <br>list <bytes>| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>keyspace_ids</code> <br>list <bytes>| keyspace_ids contains the list of keyspace_ids affected by this query. Will be used to find the shards to send the query to. |
|
||||
|
||||
### BoundShardQuery
|
||||
|
||||
|
@ -681,8 +697,8 @@ BoundShardQuery represents a single query request for the specified list of shar
|
|||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>query</code> <br>[query.BoundQuery](#query.boundquery)| BoundQuery is a query with its bind variables |
|
||||
| <code>keyspace</code> <br>string| |
|
||||
| <code>shards</code> <br>list <string>| |
|
||||
| <code>keyspace</code> <br>string| keyspace to target the query to. |
|
||||
| <code>shards</code> <br>list <string>| shards to target the query to. A DML can only target one shard. |
|
||||
|
||||
### Session
|
||||
|
||||
|
@ -708,37 +724,15 @@ Session objects are session cookies and are invalidated on use. Query results wi
|
|||
|
||||
### query.BindVariable
|
||||
|
||||
BindVariable represents a single bind variable in a Query
|
||||
BindVariable represents a single bind variable in a Query.
|
||||
|
||||
#### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>type</code> <br>[Type](#bindvariable.type)| |
|
||||
| <code>value_bytes</code> <br>bytes| Depending on type, only one value below is set. |
|
||||
| <code>value_int</code> <br>int64| |
|
||||
| <code>value_uint</code> <br>uint64| |
|
||||
| <code>value_float</code> <br>double| |
|
||||
| <code>value_bytes_list</code> <br>list <bytes>| |
|
||||
| <code>value_int_list</code> <br>list <int64>| |
|
||||
| <code>value_uint_list</code> <br>list <uint64>| |
|
||||
| <code>value_float_list</code> <br>list <double>| |
|
||||
|
||||
#### Enums
|
||||
|
||||
##### BindVariable.Type
|
||||
|
||||
| Name |Value |Description |
|
||||
| :-------- | :-------- | :--------
|
||||
| <code>TYPE_NULL</code> | <code>0</code> | |
|
||||
| <code>TYPE_BYTES</code> | <code>1</code> | |
|
||||
| <code>TYPE_INT</code> | <code>2</code> | |
|
||||
| <code>TYPE_UINT</code> | <code>3</code> | |
|
||||
| <code>TYPE_FLOAT</code> | <code>4</code> | |
|
||||
| <code>TYPE_BYTES_LIST</code> | <code>5</code> | |
|
||||
| <code>TYPE_INT_LIST</code> | <code>6</code> | |
|
||||
| <code>TYPE_UINT_LIST</code> | <code>7</code> | |
|
||||
| <code>TYPE_FLOAT_LIST</code> | <code>8</code> | |
|
||||
| <code>type</code> <br>[Type](#query.type)| |
|
||||
| <code>value</code> <br>bytes| |
|
||||
| <code>values</code> <br>list <[Value](#query.value)>| Value represents a typed value. |
|
||||
|
||||
### query.BoundQuery
|
||||
|
||||
|
@ -748,8 +742,8 @@ BoundQuery is a query with its bind variables
|
|||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>sql</code> <br>bytes| |
|
||||
| <code>bind_variables</code> <br>map <string, [BindVariable](#query.bindvariable)>| |
|
||||
| <code>sql</code> <br>string| sql is the SQL query to execute |
|
||||
| <code>bind_variables</code> <br>map <string, [BindVariable](#query.bindvariable)>| bind_variables is a map of all bind variables to expand in the query |
|
||||
|
||||
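A minimal sketch of a BoundQuery with a single bind variable, using the generated querypb package; carrying the INT64 value as its decimal text in the value bytes is an assumption about the encoding, not something the tables above state.

```go
package example

import (
	"strconv"

	querypb "github.com/youtube/vitess/go/vt/proto/query"
)

// boundSelect binds :page into a parameterized query.
func boundSelect(page int64) *querypb.BoundQuery {
	return &querypb.BoundQuery{
		Sql: "SELECT page, message FROM messages WHERE page = :page",
		BindVariables: map[string]*querypb.BindVariable{
			"page": {
				Type:  querypb.Type_INT64,
				Value: []byte(strconv.FormatInt(page, 10)), // assumed textual encoding
			},
		},
	}
}
```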
### query.Field
|
||||
|
||||
|
@ -760,67 +754,7 @@ Field describes a single column returned by a query
|
|||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>name</code> <br>string| name of the field as returned by mysql C API |
|
||||
| <code>type</code> <br>[Type](#field.type)| |
|
||||
| <code>flags</code> <br>int64| flags is essentially a bitset<Flag>. |
|
||||
|
||||
#### Enums
|
||||
|
||||
##### Field.Flag
|
||||
|
||||
Flag contains the MySQL field flags bitset values e.g. to distinguish between signed and unsigned integer. These numbers should exactly match values defined in dist/mysql-5.1.52/include/mysql_com.h
|
||||
|
||||
| Name |Value |Description |
|
||||
| :-------- | :-------- | :--------
|
||||
| <code>VT_ZEROVALUE_FLAG</code> | <code>0</code> | ZEROVALUE_FLAG is not part of the MySQL specification and only used in unit tests. |
|
||||
| <code>VT_NOT_NULL_FLAG</code> | <code>1</code> | |
|
||||
| <code>VT_PRI_KEY_FLAG</code> | <code>2</code> | |
|
||||
| <code>VT_UNIQUE_KEY_FLAG</code> | <code>4</code> | |
|
||||
| <code>VT_MULTIPLE_KEY_FLAG</code> | <code>8</code> | |
|
||||
| <code>VT_BLOB_FLAG</code> | <code>16</code> | |
|
||||
| <code>VT_UNSIGNED_FLAG</code> | <code>32</code> | |
|
||||
| <code>VT_ZEROFILL_FLAG</code> | <code>64</code> | |
|
||||
| <code>VT_BINARY_FLAG</code> | <code>128</code> | |
|
||||
| <code>VT_ENUM_FLAG</code> | <code>256</code> | |
|
||||
| <code>VT_AUTO_INCREMENT_FLAG</code> | <code>512</code> | |
|
||||
| <code>VT_TIMESTAMP_FLAG</code> | <code>1024</code> | |
|
||||
| <code>VT_SET_FLAG</code> | <code>2048</code> | |
|
||||
| <code>VT_NO_DEFAULT_VALUE_FLAG</code> | <code>4096</code> | |
|
||||
| <code>VT_ON_UPDATE_NOW_FLAG</code> | <code>8192</code> | |
|
||||
| <code>VT_NUM_FLAG</code> | <code>32768</code> | |
|
||||
|
||||
##### Field.Type
|
||||
|
||||
Type follows enum_field_types from mysql.h.
|
||||
|
||||
| Name |Value |Description |
|
||||
| :-------- | :-------- | :--------
|
||||
| <code>TYPE_DECIMAL</code> | <code>0</code> | |
|
||||
| <code>TYPE_TINY</code> | <code>1</code> | |
|
||||
| <code>TYPE_SHORT</code> | <code>2</code> | |
|
||||
| <code>TYPE_LONG</code> | <code>3</code> | |
|
||||
| <code>TYPE_FLOAT</code> | <code>4</code> | |
|
||||
| <code>TYPE_DOUBLE</code> | <code>5</code> | |
|
||||
| <code>TYPE_NULL</code> | <code>6</code> | |
|
||||
| <code>TYPE_TIMESTAMP</code> | <code>7</code> | |
|
||||
| <code>TYPE_LONGLONG</code> | <code>8</code> | |
|
||||
| <code>TYPE_INT24</code> | <code>9</code> | |
|
||||
| <code>TYPE_DATE</code> | <code>10</code> | |
|
||||
| <code>TYPE_TIME</code> | <code>11</code> | |
|
||||
| <code>TYPE_DATETIME</code> | <code>12</code> | |
|
||||
| <code>TYPE_YEAR</code> | <code>13</code> | |
|
||||
| <code>TYPE_NEWDATE</code> | <code>14</code> | |
|
||||
| <code>TYPE_VARCHAR</code> | <code>15</code> | |
|
||||
| <code>TYPE_BIT</code> | <code>16</code> | |
|
||||
| <code>TYPE_NEWDECIMAL</code> | <code>246</code> | |
|
||||
| <code>TYPE_ENUM</code> | <code>247</code> | |
|
||||
| <code>TYPE_SET</code> | <code>248</code> | |
|
||||
| <code>TYPE_TINY_BLOB</code> | <code>249</code> | |
|
||||
| <code>TYPE_MEDIUM_BLOB</code> | <code>250</code> | |
|
||||
| <code>TYPE_LONG_BLOB</code> | <code>251</code> | |
|
||||
| <code>TYPE_BLOB</code> | <code>252</code> | |
|
||||
| <code>TYPE_VAR_STRING</code> | <code>253</code> | |
|
||||
| <code>TYPE_STRING</code> | <code>254</code> | |
|
||||
| <code>TYPE_GEOMETRY</code> | <code>255</code> | |
|
||||
| <code>type</code> <br>[Type](#query.type)| vitess-defined type. Conversion function is in sqltypes package. |
|
||||
|
||||
### query.QueryResult
|
||||
|
||||
|
@ -843,7 +777,8 @@ Row is a database row.
|
|||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>values</code> <br>list <bytes>| |
|
||||
| <code>lengths</code> <br>list <sint64>| lengths contains the length of each value in values. A length of -1 means that the field is NULL. While reading values, you have to accumulate the length to know the offset where the next value begins in values. |
|
||||
| <code>values</code> <br>bytes| values contains a concatenation of all values in the row. |
|
||||
|
||||
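A small sketch of decoding a Row back into per-column values, following exactly the lengths/values convention described above:

```go
// decodeRow splits the packed values blob into one byte slice per column.
// A length of -1 marks a NULL column, returned here as a nil slice.
func decodeRow(lengths []int64, values []byte) [][]byte {
	cols := make([][]byte, 0, len(lengths))
	offset := int64(0)
	for _, l := range lengths {
		if l < 0 {
			cols = append(cols, nil) // NULL field
			continue
		}
		cols = append(cols, values[offset:offset+l])
		offset += l
	}
	return cols
}
```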
### query.Target
|
||||
|
||||
|
@ -857,6 +792,17 @@ Target describes what the client expects the tablet is. If the tablet does not m
|
|||
| <code>shard</code> <br>string| |
|
||||
| <code>tablet_type</code> <br>[topodata.TabletType](#topodata.tablettype)| TabletType represents the type of a given tablet. |
|
||||
|
||||
### query.Value
|
||||
|
||||
Value represents a typed value.
|
||||
|
||||
#### Properties
|
||||
|
||||
| Name |Description |
|
||||
| :-------- | :--------
|
||||
| <code>type</code> <br>[Type](#query.type)| |
|
||||
| <code>value</code> <br>bytes| |
|
||||
|
||||
### topodata.KeyRange
|
||||
|
||||
KeyRange describes a range of sharding keys, when range-based sharding is used.
|
||||
|
|
|
@ -636,7 +636,7 @@ Starts a transaction on the provided server.
|
|||
| connect_timeout | Duration | Connection timeout for vttablet client |
|
||||
| keyspace | string | keyspace the tablet belongs to |
|
||||
| shard | string | shard the tablet belongs to |
|
||||
| tablet_type | string | tablet type we expect from the tablet (use unknown to use sessionId) |
|
||||
| tablet_type | string | tablet type we expect from the tablet |
|
||||
|
||||
|
||||
#### Arguments
|
||||
|
@ -669,7 +669,7 @@ Commits a transaction on the provided server.
|
|||
| connect_timeout | Duration | Connection timeout for vttablet client |
|
||||
| keyspace | string | keyspace the tablet belongs to |
|
||||
| shard | string | shard the tablet belongs to |
|
||||
| tablet_type | string | tablet type we expect from the tablet (use unknown to use sessionId) |
|
||||
| tablet_type | string | tablet type we expect from the tablet |
|
||||
|
||||
|
||||
#### Arguments
|
||||
|
@ -703,7 +703,7 @@ Executes the given query on the given tablet.
|
|||
| json | Boolean | Output JSON instead of human-readable table |
|
||||
| keyspace | string | keyspace the tablet belongs to |
|
||||
| shard | string | shard the tablet belongs to |
|
||||
| tablet_type | string | tablet type we expect from the tablet (use unknown to use sessionId) |
|
||||
| tablet_type | string | tablet type we expect from the tablet |
|
||||
| transaction_id | Int | transaction id to use, if inside a transaction. |
|
||||
|
||||
|
||||
|
@ -738,7 +738,7 @@ Rollbacks a transaction on the provided server.
|
|||
| connect_timeout | Duration | Connection timeout for vttablet client |
|
||||
| keyspace | string | keyspace the tablet belongs to |
|
||||
| shard | string | shard the tablet belongs to |
|
||||
| tablet_type | string | tablet type we expect from the tablet (use unknown to use sessionId) |
|
||||
| tablet_type | string | tablet type we expect from the tablet |
|
||||
|
||||
|
||||
#### Arguments
|
||||
|
@ -2000,35 +2000,19 @@ Reparent a tablet to the current master in the shard. This only works if the cur
|
|||
|
||||
### RunHealthCheck
|
||||
|
||||
Runs a health check on a remote tablet with the specified target type.
|
||||
Runs a health check on a remote tablet.
|
||||
|
||||
#### Example
|
||||
|
||||
<pre class="command-example">RunHealthCheck <tablet alias> <target tablet type></pre>
|
||||
<pre class="command-example">RunHealthCheck <tablet alias></pre>
|
||||
|
||||
#### Arguments
|
||||
|
||||
* <code><tablet alias></code> – Required. A Tablet Alias uniquely identifies a vttablet. The argument value is in the format <code><cell name>-<uid></code>.
|
||||
* <code><target tablet type></code> – Required. The vttablet's role. Valid values are:
|
||||
|
||||
* <code>backup</code> – A slaved copy of data that is offline to queries other than for backup purposes
|
||||
* <code>batch</code> – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs)
|
||||
* <code>experimental</code> – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting.
|
||||
* <code>master</code> – A primary copy of data
|
||||
* <code>rdonly</code> – A slaved copy of data for OLAP load patterns
|
||||
* <code>replica</code> – A slaved copy of data ready to be promoted to master
|
||||
* <code>restore</code> – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state.
|
||||
* <code>schema_apply</code> – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type.
|
||||
* <code>snapshot_source</code> – A slaved copy of data where mysqld is <b>not</b> running and where Vitess is serving data files to clone slaves. Use this command to enter this mode: <pre>vtctl Snapshot -server-mode ...</pre> Use this command to exit this mode: <pre>vtctl SnapshotSourceEnd ...</pre>
|
||||
* <code>spare</code> – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet.
|
||||
* <code>worker</code> – A tablet that is in use by a vtworker process. The tablet is likely lagging in replication.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Errors
|
||||
|
||||
* The <code><tablet alias></code> and <code><target tablet type></code> arguments are required for the <code><RunHealthCheck></code> command. This error occurs if the command is not called with exactly 2 arguments.
|
||||
* The <code><tablet alias></code> argument is required for the <code><RunHealthCheck></code> command. This error occurs if the command is not called with exactly one argument.
|
||||
|
||||
|
||||
### SetReadOnly
|
||||
|
|
|
@ -21,6 +21,8 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins
|
|||
python-crypto \
|
||||
python-dev \
|
||||
python-mysqldb \
|
||||
ruby \
|
||||
ruby-dev \
|
||||
software-properties-common \
|
||||
virtualenv \
|
||||
unzip \
|
||||
|
@ -99,6 +101,12 @@ RUN cd /vt/src/github.com/youtube/vitess && \
|
|||
find php/vendor/grpc/grpc -mindepth 1 -maxdepth 1 ! -name src | xargs rm -rf && \
|
||||
find php/vendor/grpc/grpc/src -mindepth 1 -maxdepth 1 ! -name php | xargs rm -rf
|
||||
|
||||
# Install PHP protobuf compiler plugin
|
||||
RUN cd /vt/src/github.com/youtube/vitess/php/vendor/datto/protobuf-php && \
|
||||
gem install rake ronn && \
|
||||
rake pear:package version=1.0 && \
|
||||
pear install Protobuf-1.0.tgz
|
||||
|
||||
# Create vitess user
|
||||
RUN groupadd -r vitess && useradd -r -g vitess vitess && \
|
||||
mkdir -p /vt/vtdataroot /home/vitess && \
|
||||
|
|
|
@ -1,25 +0,0 @@
|
|||
FROM vitess/bootstrap:mariadb
|
||||
|
||||
USER root
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||
rubygems \
|
||||
ruby-dev
|
||||
RUN gem install rake ronn
|
||||
|
||||
# copy proto files
|
||||
COPY proto /vt/src/github.com/youtube/vitess/proto
|
||||
RUN chown -R vitess:vitess /vt/src/github.com/youtube/vitess/proto
|
||||
|
||||
# install necessary packages for proto compiler
|
||||
WORKDIR /vt
|
||||
RUN git clone https://github.com/stanley-cheung/Protobuf-PHP
|
||||
WORKDIR /vt/Protobuf-PHP
|
||||
RUN rake pear:package version=1.0
|
||||
RUN pear install Protobuf-1.0.tgz
|
||||
|
||||
# Bootstrap Vitess
|
||||
WORKDIR /vt/src/github.com/youtube/vitess
|
||||
USER vitess
|
||||
ENV MYSQL_FLAVOR MariaDB
|
||||
|
|
@ -29,6 +29,7 @@ spec:
|
|||
-cell {{cell}}
|
||||
-tablet_protocol grpc
|
||||
-tablet_manager_protocol grpc
|
||||
-use_v3_resharding_mode
|
||||
{{vtworker_command}}" vitess
|
||||
restartPolicy: Never
|
||||
volumes:
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// client.go is a sample for using the Vitess Go SQL driver with an unsharded keyspace.
|
||||
// client.go is a sample for using the Vitess Go SQL driver.
|
||||
//
|
||||
// Before running this, start up a local example cluster as described in the
|
||||
// README.md file.
|
||||
|
@ -12,6 +12,7 @@ package main
|
|||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
|
@ -24,50 +25,54 @@ var (
|
|||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
keyspace := "test_keyspace"
|
||||
shard := "0"
|
||||
timeout := 10 * time.Second
|
||||
|
||||
// Connect to vtgate.
|
||||
db, err := vitessdriver.OpenShard(*server, keyspace, shard, "master", timeout)
|
||||
db, err := vitessdriver.Open(*server, "master", timeout)
|
||||
if err != nil {
|
||||
fmt.Printf("client error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Insert something.
|
||||
// Insert some messages on random pages.
|
||||
fmt.Println("Inserting into master...")
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
fmt.Printf("begin failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if _, err := tx.Exec("INSERT INTO test_table (msg) VALUES (?)", "V is for speed"); err != nil {
|
||||
fmt.Printf("exec failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
fmt.Printf("commit failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
for i := 0; i < 3; i++ {
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
fmt.Printf("begin failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
page := rand.Intn(100) + 1
|
||||
timeCreated := time.Now().UnixNano()
|
||||
if _, err := tx.Exec("INSERT INTO messages (page,time_created_ns,message) VALUES (?,?,?)",
|
||||
page, timeCreated, "V is for speed"); err != nil {
|
||||
fmt.Printf("exec failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
fmt.Printf("commit failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Read it back from the master.
|
||||
fmt.Println("Reading from master...")
|
||||
rows, err := db.Query("SELECT id, msg FROM test_table")
|
||||
rows, err := db.Query("SELECT page, time_created_ns, message FROM messages")
|
||||
if err != nil {
|
||||
fmt.Printf("query failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
for rows.Next() {
|
||||
var id int
|
||||
var page, timeCreated uint64
|
||||
var msg string
|
||||
if err := rows.Scan(&id, &msg); err != nil {
|
||||
if err := rows.Scan(&page, &timeCreated, &msg); err != nil {
|
||||
fmt.Printf("scan failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("(%v, %q)\n", id, msg)
|
||||
fmt.Printf("(%v, %v, %q)\n", page, timeCreated, msg)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
fmt.Printf("row iteration failed: %v\n", err)
|
||||
|
@ -78,26 +83,26 @@ func main() {
|
|||
// Note that this may be behind master due to replication lag.
|
||||
fmt.Println("Reading from replica...")
|
||||
|
||||
dbr, err := vitessdriver.OpenShard(*server, keyspace, shard, "replica", timeout)
|
||||
dbr, err := vitessdriver.Open(*server, "replica", timeout)
|
||||
if err != nil {
|
||||
fmt.Printf("client error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer dbr.Close()
|
||||
|
||||
rows, err = dbr.Query("SELECT id, msg FROM test_table")
|
||||
rows, err = dbr.Query("SELECT page, time_created_ns, message FROM messages")
|
||||
if err != nil {
|
||||
fmt.Printf("query failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
for rows.Next() {
|
||||
var id int
|
||||
var page, timeCreated uint64
|
||||
var msg string
|
||||
if err := rows.Scan(&id, &msg); err != nil {
|
||||
if err := rows.Scan(&page, &timeCreated, &msg); err != nil {
|
||||
fmt.Printf("scan failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("(%v, %q)\n", id, msg)
|
||||
fmt.Printf("(%v, %v, %q)\n", page, timeCreated, msg)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
fmt.Printf("row iteration failed: %v\n", err)
|
||||
|
|
|
@ -1,11 +1,16 @@
|
|||
<?php
|
||||
|
||||
/*
|
||||
* This is a sample for using the PHP Vitess client with an unsharded keyspace.
|
||||
* This is a sample for using the low-level PHP Vitess client.
|
||||
* For a sample of using the PDO wrapper, see client_pdo.php.
|
||||
*
|
||||
* Before running this, start up a local example cluster as described in the
|
||||
* README.md file.
|
||||
*
|
||||
* You will also need to install the gRPC PHP extension as described in
|
||||
* vitess/php/README.md, and download dependencies with:
|
||||
* vitess$ composer install
|
||||
*
|
||||
* Then run:
|
||||
* vitess/examples/local$ php client.php --server=localhost:15991
|
||||
*/
|
||||
|
@ -19,41 +24,40 @@ $opts = getopt('', array(
|
|||
'server:'
|
||||
));
|
||||
|
||||
$keyspace = 'test_keyspace';
|
||||
|
||||
// An unsharded keyspace is the same as custom sharding (0, 1, 2, ...),
|
||||
// but with only a single shard (0).
|
||||
$shards = array(
|
||||
'0'
|
||||
);
|
||||
|
||||
// Create a connection.
|
||||
$ctx = Context::getDefault();
|
||||
$conn = new VTGateConn(new \Vitess\Grpc\Client($opts['server'], [
|
||||
'credentials' => Grpc\ChannelCredentials::createInsecure()
|
||||
]));
|
||||
|
||||
// Insert something.
|
||||
// Insert some messages on random pages.
|
||||
echo "Inserting into master...\n";
|
||||
$tx = $conn->begin($ctx);
|
||||
$tx->executeShards($ctx, 'INSERT INTO test_table (msg) VALUES (:msg)', $keyspace, $shards, array(
|
||||
'msg' => 'V is for speed'
|
||||
));
|
||||
$tx->commit($ctx);
|
||||
for ($i = 0; $i < 3; $i ++) {
|
||||
$page = rand(1, 100);
|
||||
$time_created = sprintf('%.0f', microtime(true) * 1000000000);
|
||||
|
||||
$tx = $conn->begin($ctx);
|
||||
$tx->execute($ctx, 'INSERT INTO messages (page,time_created_ns,message) VALUES (:page,:time_created_ns,:message)', array(
|
||||
'page' => $page,
|
||||
'time_created_ns' => $time_created,
|
||||
'message' => 'V is for speed'
|
||||
));
|
||||
$tx->commit($ctx);
|
||||
}
|
||||
|
||||
// Read it back from the master.
|
||||
echo "Reading from master...\n";
|
||||
$cursor = $conn->executeShards($ctx, 'SELECT * FROM test_table', $keyspace, $shards, array(), TabletType::MASTER);
|
||||
$cursor = $conn->execute($ctx, 'SELECT page, time_created_ns, message FROM messages', array(), TabletType::MASTER);
|
||||
while (($row = $cursor->next()) !== FALSE) {
|
||||
printf("(%s)\n", implode(', ', $row));
|
||||
printf("(%d, %d, %s)\n", $row[0], $row[1], $row[2]);
|
||||
}
|
||||
|
||||
// Read from a replica.
|
||||
// Note that this may be behind master due to replication lag.
|
||||
echo "Reading from replica...\n";
|
||||
$cursor = $conn->executeShards($ctx, 'SELECT * FROM test_table', $keyspace, $shards, array(), TabletType::REPLICA);
|
||||
$cursor = $conn->execute($ctx, 'SELECT page, time_created_ns, message FROM messages', array(), TabletType::REPLICA);
|
||||
while (($row = $cursor->next()) !== FALSE) {
|
||||
printf("(%s)\n", implode(', ', $row));
|
||||
printf("(%d, %d, %s)\n", $row[0], $row[1], $row[2]);
|
||||
}
|
||||
|
||||
$conn->close();
|
||||
|
|
|
@ -2,7 +2,9 @@
|
|||
|
||||
"""Sample Vitess client in Python.
|
||||
|
||||
This is a sample for using the Python Vitess client with an unsharded keyspace.
|
||||
This is a sample for using the Python Vitess client.
|
||||
It's a script that inserts some random messages on random pages of the
|
||||
guestbook sample app.
|
||||
|
||||
Before running this, start up a local example cluster as described in the
|
||||
README.md file.
|
||||
|
@ -12,17 +14,14 @@ vitess/examples/local$ ./client.sh
|
|||
"""
|
||||
|
||||
import argparse
|
||||
import random
|
||||
import time
|
||||
|
||||
from vtdb import keyrange
|
||||
from vtdb import keyrange_constants
|
||||
from vtdb import vtgate_client
|
||||
|
||||
# register the python gRPC client upon import
|
||||
from vtdb import grpc_vtgate_client # pylint: disable=unused-import
|
||||
|
||||
# Constants and params
|
||||
UNSHARDED = [keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)]
|
||||
|
||||
# Parse args
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--server', dest='server', default='localhost:15991')
|
||||
|
@ -33,20 +32,26 @@ args = parser.parse_args()
|
|||
conn = vtgate_client.connect('grpc', args.server, args.timeout)
|
||||
|
||||
try:
|
||||
# Insert something.
|
||||
# Insert some messages on random pages.
|
||||
print 'Inserting into master...'
|
||||
cursor = conn.cursor(
|
||||
tablet_type='master', keyspace='test_keyspace',
|
||||
keyranges=UNSHARDED, writable=True)
|
||||
cursor.begin()
|
||||
cursor.execute(
|
||||
'INSERT INTO test_table (msg) VALUES (:msg)',
|
||||
{'msg': 'V is for speed'})
|
||||
cursor.commit()
|
||||
cursor = conn.cursor(tablet_type='master', writable=True)
|
||||
for i in range(3):
|
||||
page = random.randint(1, 100)
|
||||
|
||||
cursor.begin()
|
||||
cursor.execute(
|
||||
'INSERT INTO messages (page, time_created_ns, message)'
|
||||
' VALUES (:page, :time_created_ns, :message)',
|
||||
{
|
||||
'page': page,
|
||||
'time_created_ns': int(time.time() * 1e9),
|
||||
'message': 'V is for speed',
|
||||
})
|
||||
cursor.commit()
|
||||
|
||||
# Read it back from the master.
|
||||
print 'Reading from master...'
|
||||
cursor.execute('SELECT * FROM test_table', {})
|
||||
cursor.execute('SELECT page, time_created_ns, message FROM messages', {})
|
||||
for row in cursor.fetchall():
|
||||
print row
|
||||
|
||||
|
@ -55,9 +60,8 @@ try:
|
|||
# Read from a replica.
|
||||
# Note that this may be behind master due to replication lag.
|
||||
print 'Reading from replica...'
|
||||
cursor = conn.cursor(
|
||||
tablet_type='replica', keyspace='test_keyspace', keyranges=UNSHARDED)
|
||||
cursor.execute('SELECT * FROM test_table', {})
|
||||
cursor = conn.cursor(tablet_type='replica')
|
||||
cursor.execute('SELECT page, time_created_ns, message FROM messages', {})
|
||||
for row in cursor.fetchall():
|
||||
print row
|
||||
cursor.close()
|
||||
|
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This is a wrapper script that installs and runs the example
|
||||
# client for the low-level Java interface.
|
||||
|
||||
set -e
|
||||
|
||||
script_root=`dirname "${BASH_SOURCE}"`
|
||||
|
||||
# We have to install the "example" module first because Maven cannot resolve
|
||||
# them when we run "exec:java". See also: http://stackoverflow.com/questions/11091311/maven-execjava-goal-on-a-multi-module-project
|
||||
# Install only "example". See also: http://stackoverflow.com/questions/1114026/maven-modules-building-a-single-specific-module
|
||||
mvn -f $script_root/../../java/pom.xml -pl example -am install -DskipTests
|
||||
mvn -f $script_root/../../java/example/pom.xml exec:java -Dexec.cleanupDaemonThreads=false -Dexec.mainClass="com.youtube.vitess.example.VitessClientExample" -Dexec.args="localhost:15991"
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This is a wrapper script that installs and runs the example
|
||||
# client for the JDBC interface.
|
||||
|
||||
set -e
|
||||
|
||||
script_root=`dirname "${BASH_SOURCE}"`
|
||||
|
||||
# We have to install the "example" module first because Maven cannot resolve
|
||||
# them when we run "exec:java". See also: http://stackoverflow.com/questions/11091311/maven-execjava-goal-on-a-multi-module-project
|
||||
# Install only "example". See also: http://stackoverflow.com/questions/1114026/maven-modules-building-a-single-specific-module
|
||||
mvn -f $script_root/../../java/pom.xml -pl example -am install -DskipTests
|
||||
mvn -f $script_root/../../java/example/pom.xml exec:java -Dexec.cleanupDaemonThreads=false -Dexec.mainClass="com.youtube.vitess.example.VitessJDBCExample" -Dexec.args="localhost:15991"
|
|
@ -0,0 +1,46 @@
|
|||
<?php
|
||||
|
||||
/*
|
||||
* This is a sample for using the PDO Vitess client.
|
||||
*
|
||||
* Before running this, start up a local example cluster as described in the
|
||||
* README.md file.
|
||||
*
|
||||
* You'll also need to install the gRPC PHP extension as described in
|
||||
* php/README.md, and then download dependencies for the PDO wrapper:
|
||||
* vitess/php/pdo$ composer install
|
||||
*
|
||||
* Then run:
|
||||
* vitess/examples/local$ php client.php --server=localhost:15991
|
||||
*/
|
||||
require_once __DIR__ . '/../../php/pdo/vendor/autoload.php';
|
||||
|
||||
$opts = getopt('', array(
|
||||
'server:'
|
||||
));
|
||||
list($host, $port) = explode(':', $opts['server']);
|
||||
|
||||
$keyspace = 'test_keyspace';
|
||||
|
||||
// Create a connection.
|
||||
$pdo = new \VitessPdo\PDO("vitess:dbname={$keyspace};host={$host};port={$port}");
|
||||
$pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
|
||||
|
||||
// Insert some messages on random pages.
|
||||
echo "Inserting into master...\n";
|
||||
for ($i = 0; $i < 3; $i ++) {
|
||||
$page = rand(1, 100);
|
||||
$time_created = sprintf('%.0f', microtime(true) * 1000000000);
|
||||
|
||||
$stmt = $pdo->prepare('INSERT INTO messages (page,time_created_ns,message) VALUES (?,?,?)');
|
||||
$stmt->execute([$page, $time_created, 'V is for speed']);
|
||||
}
|
||||
|
||||
// Read from a replica.
|
||||
// Note that this may be behind master due to replication lag.
|
||||
echo "Reading from replica...\n";
|
||||
$stmt = $pdo->prepare('SELECT page, time_created_ns, message FROM messages');
|
||||
$stmt->execute();
|
||||
while (($row = $stmt->fetch()) !== FALSE) {
|
||||
printf("(%d, %d, %s)\n", $row[0], $row[1], $row[2]);
|
||||
}
|
|
@ -1,6 +1,7 @@
|
|||
CREATE TABLE test_table (
|
||||
id BIGINT AUTO_INCREMENT,
|
||||
msg VARCHAR(250),
|
||||
PRIMARY KEY (id)
|
||||
CREATE TABLE messages (
|
||||
page BIGINT(20) UNSIGNED,
|
||||
time_created_ns BIGINT(20) UNSIGNED,
|
||||
message VARCHAR(10000),
|
||||
PRIMARY KEY (page, time_created_ns)
|
||||
) ENGINE=InnoDB
|
||||
|
||||
|
|
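For a concrete sense of the new schema: because the primary key is (page, time_created_ns), two messages inserted for page 42 at times t1 and t2 occupy the adjacent key entries (42, t1) and (42, t2), so all messages for a page are stored together in creation order. This is the same (page, time_created_ns, message) triple the example clients above insert and read back.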
|
@ -0,0 +1,5 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This is a convenience script to run vtctlclient against the local example.
|
||||
|
||||
exec vtctlclient -server localhost:15999 "$@"
|
|
@ -0,0 +1,10 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This is an example script that stops the mysqld and vttablet instances
|
||||
# created by sharded-vttablet-up.sh
|
||||
|
||||
script_root=`dirname "${BASH_SOURCE}"`
|
||||
|
||||
UID_BASE=200 $script_root/vttablet-down.sh "$@"
|
||||
UID_BASE=300 $script_root/vttablet-down.sh "$@"
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This is an example script that creates a sharded vttablet deployment.
|
||||
|
||||
set -e
|
||||
|
||||
script_root=`dirname "${BASH_SOURCE}"`
|
||||
|
||||
# Shard -80 contains all entries whose keyspace ID has a first byte < 0x80.
|
||||
# See: http://vitess.io/overview/concepts.html#keyspace-id
|
||||
SHARD=-80 UID_BASE=200 $script_root/vttablet-up.sh "$@"
|
||||
|
||||
# Shard 80- contains all entries whose keyspace ID has a first byte >= 0x80.
|
||||
SHARD=80- UID_BASE=300 $script_root/vttablet-up.sh "$@"
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This is an example script that runs vtworker.
|
||||
|
||||
set -e
|
||||
|
||||
script_root=`dirname "${BASH_SOURCE}"`
|
||||
source $script_root/env.sh
|
||||
|
||||
echo "Starting vtworker..."
|
||||
exec $VTROOT/bin/vtworker \
|
||||
-cell test \
|
||||
-tablet_protocol grpc \
|
||||
-tablet_manager_protocol grpc \
|
||||
-log_dir $VTDATAROOT/tmp \
|
||||
-alsologtostderr \
|
||||
-use_v3_resharding_mode \
|
||||
"$@"
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
{
|
||||
"Sharded": true,
|
||||
"Vindexes": {
|
||||
"hash": {
|
||||
"Type": "hash"
|
||||
}
|
||||
},
|
||||
"Tables": {
|
||||
"messages": {
|
||||
"ColVindexes": [
|
||||
{
|
||||
"Col": "page",
|
||||
"Name": "hash"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
|
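To make the vschema concrete: the hash vindex on page means a message row's keyspace ID is derived by hashing its page value, so every message for a given page lands on the same shard. Combined with the -80/80- split created by sharded-vttablet-up.sh above, a hashed keyspace ID whose first byte is below 0x80 lives on shard -80 and the rest on 80-.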
@ -3,17 +3,15 @@
|
|||
# This is an example script that stops the mysqld and vttablet instances
|
||||
# created by vttablet-up.sh
|
||||
|
||||
set -e
|
||||
|
||||
cell='test'
|
||||
uid_base=100
|
||||
uid_base=${UID_BASE:-'100'}
|
||||
|
||||
script_root=`dirname "${BASH_SOURCE}"`
|
||||
source $script_root/env.sh
|
||||
|
||||
# Stop 3 vttablets by default.
|
||||
# Pass a list of UID indices on the command line to override.
|
||||
uids=${@:-'0 1 2'}
|
||||
uids=${@:-'0 1 2 3 4'}
|
||||
|
||||
wait_pids=''
|
||||
|
||||
|
|
|
@ -6,12 +6,11 @@ set -e
|
|||
|
||||
cell='test'
|
||||
keyspace='test_keyspace'
|
||||
shard=0
|
||||
uid_base=100
|
||||
tablet_type='replica'
|
||||
port_base=15100
|
||||
grpc_port_base=16100
|
||||
mysql_port_base=33100
|
||||
shard=${SHARD:-'0'}
|
||||
uid_base=${UID_BASE:-'100'}
|
||||
port_base=$[15000 + $uid_base]
|
||||
grpc_port_base=$[16000 + $uid_base]
|
||||
mysql_port_base=$[17000 + $uid_base]
|
||||
tablet_hostname=''
|
||||
|
||||
# Travis hostnames are too long for MySQL, so we use IP.
|
||||
|
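A quick worked example of the new port arithmetic, assuming each tablet then adds its uid index to the base, as the per-tablet grpc_port line further down does: with UID_BASE=200 the bases become 15200 (web), 16200 (gRPC) and 17200 (MySQL), so uid index 1 uses 15201/16201/17201; with UID_BASE=300 the same index uses 15301/16301/17301, which keeps the two shards' tablets from colliding on ports.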
@ -59,9 +58,9 @@ if [ -z "$memcached_path" ]; then
|
|||
exit 1
|
||||
fi
|
||||
|
||||
# Start 3 vttablets by default.
|
||||
# Start 5 vttablets by default.
|
||||
# Pass a list of UID indices on the command line to override.
|
||||
uids=${@:-'0 1 2'}
|
||||
uids=${@:-'0 1 2 3 4'}
|
||||
|
||||
# Start all mysqlds in background.
|
||||
for uid_index in $uids; do
|
||||
|
@ -94,6 +93,10 @@ for uid_index in $uids; do
|
|||
grpc_port=$[$grpc_port_base + $uid_index]
|
||||
printf -v alias '%s-%010d' $cell $uid
|
||||
printf -v tablet_dir 'vt_%010d' $uid
|
||||
tablet_type=replica
|
||||
if [[ $uid_index -gt 2 ]]; then
|
||||
tablet_type=rdonly
|
||||
fi
|
||||
|
||||
echo "Starting vttablet for $alias..."
|
||||
$VTROOT/bin/vttablet \
|
||||
|
|
|
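Spelled out, the tablet type assignment above means: with the default uid list '0 1 2 3 4', indices 0, 1 and 2 come up as replica tablets and indices 3 and 4 as rdonly tablets, matching the new "Start 5 vttablets by default" comment, presumably so that resharding tools have rdonly tablets to work against.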
@ -0,0 +1,92 @@
|
|||
// Copyright 2016, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cistring implements a case-insensitive string type.
|
||||
package cistring
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CIString is an immutable case-insensitive string.
|
||||
// It precomputes and stores the lower case version of the string
|
||||
// internally. This increases the initial memory cost of the object
|
||||
// but saves the CPU (and memory) cost of lowercasing as needed.
|
||||
// This should generally trade off favorably because there are many
|
||||
// situations where comparisons are performed in a loop against
|
||||
// the same object.
|
||||
type CIString struct {
|
||||
// This artifact prevents this struct from being compared
|
||||
// with itself. It consumes no space as long as it's not the
|
||||
// last field in the struct.
|
||||
_ [0]struct{ notComparable []byte }
|
||||
val, lowered string
|
||||
}
|
||||
|
||||
// New creates a new CIString.
|
||||
func New(str string) CIString {
|
||||
return CIString{
|
||||
val: str,
|
||||
lowered: strings.ToLower(str),
|
||||
}
|
||||
}
|
||||
|
||||
func (s CIString) String() string {
|
||||
return s.val
|
||||
}
|
||||
|
||||
// Original returns the case-preserved value of the string.
|
||||
func (s CIString) Original() string {
|
||||
return s.val
|
||||
}
|
||||
|
||||
// Lowered returns the lower-case value of the string.
|
||||
// This function should generally be used only for optimizing
|
||||
// comparisons.
|
||||
func (s CIString) Lowered() string {
|
||||
return s.lowered
|
||||
}
|
||||
|
||||
// Equal performs a case-insensitive compare. For comparing
|
||||
// in a loop, it's beneficial to build a CIString outside
|
||||
// the loop and use it to compare with other CIString
|
||||
// variables inside the loop.
|
||||
func (s CIString) Equal(in CIString) bool {
|
||||
return s.lowered == in.lowered
|
||||
}
|
||||
|
||||
// EqualString performs a case-insensitive compare with str.
|
||||
// If the input is already lower-cased, it's more efficient
|
||||
// to check if s.Lowered()==in.
|
||||
func (s CIString) EqualString(in string) bool {
|
||||
return s.lowered == strings.ToLower(in)
|
||||
}
|
||||
|
||||
// MarshalJSON marshals into JSON.
|
||||
func (s CIString) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(s.val)
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals from JSON.
|
||||
func (s *CIString) UnmarshalJSON(b []byte) error {
|
||||
var result string
|
||||
err := json.Unmarshal(b, &result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.val = result
|
||||
s.lowered = strings.ToLower(result)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToStrings converts a []CIString to a case-preserved
|
||||
// []string.
|
||||
func ToStrings(in []CIString) []string {
|
||||
s := make([]string, len(in))
|
||||
for i := 0; i < len(in); i++ {
|
||||
s[i] = in[i].Original()
|
||||
}
|
||||
return s
|
||||
}
|
|
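A minimal usage sketch for the new package, assuming it is importable as github.com/youtube/vitess/go/cistring (the exact path is an assumption here). It follows the pattern the Equal comment recommends: build the CIString once outside the loop, then compare inside it.

package main

import (
	"fmt"

	// Assumed import path for the cistring package above.
	"github.com/youtube/vitess/go/cistring"
)

func main() {
	// Build the case-insensitive target once, outside the loop.
	target := cistring.New("CamelCase")

	columns := []cistring.CIString{
		cistring.New("eid"),
		cistring.New("camelcase"),
	}
	for _, col := range columns {
		// Equal compares the precomputed lower-case forms.
		if col.Equal(target) {
			fmt.Printf("matched column %q\n", col.Original())
		}
	}
}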
@ -0,0 +1,70 @@
|
|||
// Copyright 2016, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cistring
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func TestCIString(t *testing.T) {
|
||||
str := New("Ab")
|
||||
if str.String() != "Ab" {
|
||||
t.Errorf("String=%s, want Ab", str.Original())
|
||||
}
|
||||
if str.Original() != "Ab" {
|
||||
t.Errorf("Val=%s, want Ab", str.Original())
|
||||
}
|
||||
if str.Lowered() != "ab" {
|
||||
t.Errorf("Val=%s, want ab", str.Lowered())
|
||||
}
|
||||
str2 := New("aB")
|
||||
if !str.Equal(str2) {
|
||||
t.Error("str.Equal(New(aB))=false, want true")
|
||||
}
|
||||
if !str.EqualString("ab") {
|
||||
t.Error("str.Equal(ab)=false, want true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCIStringMarshal(t *testing.T) {
|
||||
str := New("Ab")
|
||||
b, err := json.Marshal(str)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got := string(b)
|
||||
want := `"Ab"`
|
||||
if got != want {
|
||||
t.Errorf("json.Marshal()= %s, want %s", got, want)
|
||||
}
|
||||
var out CIString
|
||||
err = json.Unmarshal(b, &out)
|
||||
if !reflect.DeepEqual(out, str) {
|
||||
t.Errorf("Unmarshal: %v, want %v", out, str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToStrings(t *testing.T) {
|
||||
in := []CIString{
|
||||
New("Ab"),
|
||||
New("aB"),
|
||||
}
|
||||
want := []string{"Ab", "aB"}
|
||||
got := ToStrings(in)
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("ToStrings(in)=%+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSize(t *testing.T) {
|
||||
size := unsafe.Sizeof(New(""))
|
||||
want := 2 * unsafe.Sizeof("")
|
||||
if size != want {
|
||||
t.Errorf("Size of CIString: %d, want 32", want)
|
||||
}
|
||||
}
|
|
@ -557,13 +557,13 @@ func (itmc *internalTabletManagerClient) RefreshState(ctx context.Context, table
|
|||
})
|
||||
}
|
||||
|
||||
func (itmc *internalTabletManagerClient) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo, targetTabletType topodatapb.TabletType) error {
|
||||
func (itmc *internalTabletManagerClient) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo) error {
|
||||
t, ok := tabletMap[tablet.Tablet.Alias.Uid]
|
||||
if !ok {
|
||||
return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid)
|
||||
}
|
||||
return t.agent.RPCWrap(ctx, actionnode.TabletActionRunHealthCheck, nil, nil, func() error {
|
||||
t.agent.RunHealthCheck(ctx, targetTabletType)
|
||||
t.agent.RunHealthCheck(ctx)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
|
|
@ -46,8 +46,11 @@ var (
|
|||
{{if .BlacklistedTables}}
|
||||
BlacklistedTables: {{range .BlacklistedTables}}{{.}} {{end}}<br>
|
||||
{{end}}
|
||||
{{if .DisableQueryService}}
|
||||
Query Service disabled by TabletControl<br>
|
||||
{{if .DisallowQueryService}}
|
||||
Query Service disabled: {{.DisallowQueryService}}<br>
|
||||
{{end}}
|
||||
{{if .DisableUpdateStream}}
|
||||
Update Stream disabled<br>
|
||||
{{end}}
|
||||
</td>
|
||||
<td width="25%" border="">
|
||||
|
@ -173,22 +176,21 @@ var onStatusRegistered func()
|
|||
func addStatusParts(qsc tabletserver.Controller) {
|
||||
servenv.AddStatusPart("Tablet", tabletTemplate, func() interface{} {
|
||||
return map[string]interface{}{
|
||||
"Tablet": topo.NewTabletInfo(agent.Tablet(), -1),
|
||||
"BlacklistedTables": agent.BlacklistedTables(),
|
||||
"DisableQueryService": agent.DisableQueryService(),
|
||||
"Tablet": topo.NewTabletInfo(agent.Tablet(), -1),
|
||||
"BlacklistedTables": agent.BlacklistedTables(),
|
||||
"DisallowQueryService": agent.DisallowQueryService(),
|
||||
"DisableUpdateStream": !agent.EnableUpdateStream(),
|
||||
}
|
||||
})
|
||||
servenv.AddStatusFuncs(template.FuncMap{
|
||||
"github_com_youtube_vitess_health_html_name": healthHTMLName,
|
||||
})
|
||||
servenv.AddStatusPart("Health", healthTemplate, func() interface{} {
|
||||
return &healthStatus{
|
||||
Records: agent.History.Records(),
|
||||
Config: tabletmanager.ConfigHTML(),
|
||||
}
|
||||
})
|
||||
if agent.IsRunningHealthCheck() {
|
||||
servenv.AddStatusFuncs(template.FuncMap{
|
||||
"github_com_youtube_vitess_health_html_name": healthHTMLName,
|
||||
})
|
||||
servenv.AddStatusPart("Health", healthTemplate, func() interface{} {
|
||||
return &healthStatus{
|
||||
Records: agent.History.Records(),
|
||||
Config: tabletmanager.ConfigHTML(),
|
||||
}
|
||||
})
|
||||
}
|
||||
qsc.AddStatusPart()
|
||||
servenv.AddStatusPart("Binlog Player", binlogTemplate, func() interface{} {
|
||||
return agent.BinlogPlayerMap.Status()
|
||||
|
|
|
@ -1,20 +1,25 @@
|
|||
package health
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/youtube/vitess/go/vt/concurrency"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultAggregator is the global aggregator to use for real
|
||||
// programs. Use a custom one for tests.
|
||||
DefaultAggregator *Aggregator
|
||||
|
||||
// ErrSlaveNotRunning is returned by health plugins when replication
|
||||
// is not running and we can't figure out the replication delay.
|
||||
// Note everything else should be operational, and the underlying
|
||||
// MySQL instance should be capable of answering queries.
|
||||
ErrSlaveNotRunning = errors.New("slave is not running")
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
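A minimal sketch of how a reporter plugs into the aggregator, assuming the package is importable as github.com/youtube/vitess/go/vt/health (the path the mysqlctl test below imports) and that FunctionReporter is exported, as the aggregator tests further down use it.

package main

import (
	"fmt"
	"time"

	"github.com/youtube/vitess/go/vt/health"
)

func main() {
	// Register a fake reporter on the global aggregator.
	health.DefaultAggregator.Register("fake", health.FunctionReporter(
		func(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) {
			// Pretend replication is 3 seconds behind the master.
			return 3 * time.Second, nil
		}))

	// Report returns the largest delay across the registered reporters.
	delay, err := health.DefaultAggregator.Report(true, true)
	fmt.Println(delay, err)
}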
@ -64,6 +69,12 @@ func NewAggregator() *Aggregator {
|
|||
}
|
||||
}
|
||||
|
||||
type singleResult struct {
|
||||
name string
|
||||
delay time.Duration
|
||||
err error
|
||||
}
|
||||
|
||||
// Report aggregates health statuses from all the reporters. If any
|
||||
// errors occur during the reporting, they will be logged, but only
|
||||
// the first error will be returned.
|
||||
|
@ -71,40 +82,40 @@ func NewAggregator() *Aggregator {
|
|||
// delays returned by the Reporter implementations (although typically
|
||||
// only one implementation will actually return a meaningful one).
|
||||
func (ag *Aggregator) Report(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) {
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
rec concurrency.AllErrorRecorder
|
||||
)
|
||||
|
||||
results := make(chan time.Duration, len(ag.reporters))
|
||||
wg := sync.WaitGroup{}
|
||||
results := make([]singleResult, len(ag.reporters))
|
||||
index := 0
|
||||
ag.mu.Lock()
|
||||
for name, rep := range ag.reporters {
|
||||
wg.Add(1)
|
||||
go func(name string, rep Reporter) {
|
||||
go func(index int, name string, rep Reporter) {
|
||||
defer wg.Done()
|
||||
replicationDelay, err := rep.Report(isSlaveType, shouldQueryServiceBeRunning)
|
||||
if err != nil {
|
||||
rec.RecordError(fmt.Errorf("%v: %v", name, err))
|
||||
return
|
||||
}
|
||||
results <- replicationDelay
|
||||
}(name, rep)
|
||||
results[index].name = name
|
||||
results[index].delay, results[index].err = rep.Report(isSlaveType, shouldQueryServiceBeRunning)
|
||||
}(index, name, rep)
|
||||
index++
|
||||
}
|
||||
ag.mu.Unlock()
|
||||
wg.Wait()
|
||||
close(results)
|
||||
if err := rec.Error(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// merge and return the results
|
||||
var result time.Duration
|
||||
for replicationDelay := range results {
|
||||
if replicationDelay > result {
|
||||
result = replicationDelay
|
||||
var err error
|
||||
for _, s := range results {
|
||||
switch s.err {
|
||||
case ErrSlaveNotRunning:
|
||||
// Return the ErrSlaveNotRunning sentinel
|
||||
// value, only if there are no other errors.
|
||||
err = ErrSlaveNotRunning
|
||||
case nil:
|
||||
if s.delay > result {
|
||||
result = s.delay
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("%v: %v", s.name, s.err)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Register registers rep with ag. Only keys specified in keys will be
|
||||
|
|
|
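To make the merge rules in Report concrete: if three reporters return 5s, 10s and ErrSlaveNotRunning, Report returns 10s together with ErrSlaveNotRunning; if any reporter returns some other error, Report returns a zero delay and that error prefixed with the reporter's name.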
@ -8,18 +8,15 @@ import (
|
|||
|
||||
func TestReporters(t *testing.T) {
|
||||
|
||||
// two reporters returning valid numbers
|
||||
ag := NewAggregator()
|
||||
|
||||
ag.Register("a", FunctionReporter(func(bool, bool) (time.Duration, error) {
|
||||
return 10 * time.Second, nil
|
||||
}))
|
||||
|
||||
ag.Register("b", FunctionReporter(func(bool, bool) (time.Duration, error) {
|
||||
return 5 * time.Second, nil
|
||||
}))
|
||||
|
||||
delay, err := ag.Report(true, true)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
@ -27,13 +24,27 @@ func TestReporters(t *testing.T) {
|
|||
t.Errorf("delay=%v, want 10s", delay)
|
||||
}
|
||||
|
||||
// three reporters, the third one returning an error
|
||||
cReturns := errors.New("e error")
|
||||
ag.Register("c", FunctionReporter(func(bool, bool) (time.Duration, error) {
|
||||
return 0, errors.New("e error")
|
||||
return 0, cReturns
|
||||
}))
|
||||
if _, err := ag.Report(true, false); err == nil {
|
||||
t.Errorf("ag.Run: expected error")
|
||||
} else {
|
||||
want := "c: e error"
|
||||
if got := err.Error(); got != want {
|
||||
t.Errorf("got wrong error: got '%v' expected '%v'", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// three reporters, the third one returning ErrSlaveNotRunning
|
||||
cReturns = ErrSlaveNotRunning
|
||||
if _, err := ag.Report(true, false); err != ErrSlaveNotRunning {
|
||||
t.Errorf("ag.Run: expected error: %v", err)
|
||||
}
|
||||
|
||||
// check name is good
|
||||
name := ag.HTMLName()
|
||||
if string(name) != "FunctionReporter + FunctionReporter + FunctionReporter" {
|
||||
t.Errorf("ag.HTMLName() returned: %v", name)
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package mysqlctl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"time"
|
||||
|
||||
|
@ -10,7 +9,14 @@ import (
|
|||
|
||||
// mysqlReplicationLag implements health.Reporter
|
||||
type mysqlReplicationLag struct {
|
||||
mysqld *Mysqld
|
||||
// set at construction time
|
||||
mysqld MysqlDaemon
|
||||
now func() time.Time
|
||||
|
||||
// store the last time we successfully got the lag, so if we
|
||||
// can't get the lag any more, we can extrapolate.
|
||||
lastKnownValue time.Duration
|
||||
lastKnownTime time.Time
|
||||
}
|
||||
|
||||
// Report is part of the health.Reporter interface
|
||||
|
@ -21,12 +27,28 @@ func (mrl *mysqlReplicationLag) Report(isSlaveType, shouldQueryServiceBeRunning
|
|||
|
||||
slaveStatus, err := mrl.mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
// mysqld is not running. We can't report healthy.
|
||||
return 0, err
|
||||
}
|
||||
if !slaveStatus.SlaveRunning() {
|
||||
return 0, fmt.Errorf("Replication is not running")
|
||||
// mysqld is running, but slave is not replicating (most likely,
|
||||
// replication has been stopped). See if we can extrapolate.
|
||||
if mrl.lastKnownTime.IsZero() {
|
||||
// we can't.
|
||||
return 0, health.ErrSlaveNotRunning
|
||||
}
|
||||
|
||||
// we can extrapolate with the worst possible
|
||||
// value (that is we made no replication
|
||||
// progress since last time, and just fell more behind).
|
||||
elapsed := mrl.now().Sub(mrl.lastKnownTime)
|
||||
return elapsed + mrl.lastKnownValue, nil
|
||||
}
|
||||
return time.Duration(slaveStatus.SecondsBehindMaster) * time.Second, nil
|
||||
|
||||
// we got a real value, save it.
|
||||
mrl.lastKnownValue = time.Duration(slaveStatus.SecondsBehindMaster) * time.Second
|
||||
mrl.lastKnownTime = mrl.now()
|
||||
return mrl.lastKnownValue, nil
|
||||
}
|
||||
|
||||
// HTMLName is part of the health.Reporter interface
|
||||
|
@ -36,6 +58,9 @@ func (mrl *mysqlReplicationLag) HTMLName() template.HTML {
|
|||
|
||||
// MySQLReplicationLag returns a reporter that reports the MySQL
|
||||
// replication lag.
|
||||
func MySQLReplicationLag(mysqld *Mysqld) health.Reporter {
|
||||
return &mysqlReplicationLag{mysqld}
|
||||
func MySQLReplicationLag(mysqld MysqlDaemon) health.Reporter {
|
||||
return &mysqlReplicationLag{
|
||||
mysqld: mysqld,
|
||||
now: time.Now,
|
||||
}
|
||||
}
|
||||
|
|
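A concrete example of the extrapolation above, matching TestExtrapolatedMySQLReplicationLag below: if the last successful report saw 10s of lag and replication then stops, a report taken 20s later returns 10s + 20s = 30s; if mysqld itself stops answering SlaveStatus, the error is returned as-is and no extrapolation happens.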
|
@ -0,0 +1,91 @@
|
|||
package mysqlctl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/youtube/vitess/go/vt/health"
|
||||
)
|
||||
|
||||
func TestBasicMySQLReplicationLag(t *testing.T) {
|
||||
mysqld := NewFakeMysqlDaemon(nil)
|
||||
mysqld.Replicating = true
|
||||
mysqld.SecondsBehindMaster = 10
|
||||
|
||||
lag := &mysqlReplicationLag{
|
||||
mysqld: mysqld,
|
||||
now: time.Now,
|
||||
}
|
||||
dur, err := lag.Report(true, true)
|
||||
if err != nil || dur != 10*time.Second {
|
||||
t.Fatalf("wrong Report result: %v %v", dur, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoKnownMySQLReplicationLag(t *testing.T) {
|
||||
mysqld := NewFakeMysqlDaemon(nil)
|
||||
mysqld.Replicating = false
|
||||
|
||||
lag := &mysqlReplicationLag{
|
||||
mysqld: mysqld,
|
||||
now: time.Now,
|
||||
}
|
||||
dur, err := lag.Report(true, true)
|
||||
if err != health.ErrSlaveNotRunning {
|
||||
t.Fatalf("wrong Report result: %v %v", dur, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtrapolatedMySQLReplicationLag(t *testing.T) {
|
||||
mysqld := NewFakeMysqlDaemon(nil)
|
||||
mysqld.Replicating = true
|
||||
mysqld.SecondsBehindMaster = 10
|
||||
|
||||
now := time.Now()
|
||||
lag := &mysqlReplicationLag{
|
||||
mysqld: mysqld,
|
||||
now: func() time.Time { return now },
|
||||
}
|
||||
|
||||
// seed the last known value with a good value
|
||||
dur, err := lag.Report(true, true)
|
||||
if err != nil || dur != 10*time.Second {
|
||||
t.Fatalf("wrong Report result: %v %v", dur, err)
|
||||
}
|
||||
|
||||
// now 20 seconds later, we're not replicating any more,
|
||||
// we should get 20 more seconds in lag
|
||||
now = now.Add(20 * time.Second)
|
||||
mysqld.Replicating = false
|
||||
dur, err = lag.Report(true, true)
|
||||
if err != nil || dur != 30*time.Second {
|
||||
t.Fatalf("wrong Report result: %v %v", dur, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoExtrapolatedMySQLReplicationLag(t *testing.T) {
|
||||
mysqld := NewFakeMysqlDaemon(nil)
|
||||
mysqld.Replicating = true
|
||||
mysqld.SecondsBehindMaster = 10
|
||||
|
||||
now := time.Now()
|
||||
lag := &mysqlReplicationLag{
|
||||
mysqld: mysqld,
|
||||
now: func() time.Time { return now },
|
||||
}
|
||||
|
||||
// seed the last known value with a good value
|
||||
dur, err := lag.Report(true, true)
|
||||
if err != nil || dur != 10*time.Second {
|
||||
t.Fatalf("wrong Report result: %v %v", dur, err)
|
||||
}
|
||||
|
||||
// now 20 seconds later, mysqld is down
|
||||
now = now.Add(20 * time.Second)
|
||||
mysqld.SlaveStatusError = errors.New("mysql is down")
|
||||
dur, err = lag.Report(true, true)
|
||||
if err != mysqld.SlaveStatusError {
|
||||
t.Fatalf("wrong Report error: %v", err)
|
||||
}
|
||||
}
|
|
@ -121,12 +121,18 @@ type FakeMysqlDaemon struct {
|
|||
// and SlaveStatus
|
||||
CurrentMasterPosition replication.Position
|
||||
|
||||
// SlaveStatusError is used by SlaveStatus
|
||||
SlaveStatusError error
|
||||
|
||||
// CurrentMasterHost is returned by SlaveStatus
|
||||
CurrentMasterHost string
|
||||
|
||||
// CurrentMasterPort is returned by SlaveStatus
|
||||
CurrentMasterPort int
|
||||
|
||||
// SecondsBehindMaster is returned by SlaveStatus
|
||||
SecondsBehindMaster uint
|
||||
|
||||
// ReadOnly is the current value of the flag
|
||||
ReadOnly bool
|
||||
|
||||
|
@ -253,12 +259,16 @@ func (fmd *FakeMysqlDaemon) GetMysqlPort() (int32, error) {
|
|||
|
||||
// SlaveStatus is part of the MysqlDaemon interface
|
||||
func (fmd *FakeMysqlDaemon) SlaveStatus() (replication.Status, error) {
|
||||
if fmd.SlaveStatusError != nil {
|
||||
return replication.Status{}, fmd.SlaveStatusError
|
||||
}
|
||||
return replication.Status{
|
||||
Position: fmd.CurrentMasterPosition,
|
||||
SlaveIORunning: fmd.Replicating,
|
||||
SlaveSQLRunning: fmd.Replicating,
|
||||
MasterHost: fmd.CurrentMasterHost,
|
||||
MasterPort: fmd.CurrentMasterPort,
|
||||
Position: fmd.CurrentMasterPosition,
|
||||
SecondsBehindMaster: fmd.SecondsBehindMaster,
|
||||
SlaveIORunning: fmd.Replicating,
|
||||
SlaveSQLRunning: fmd.Replicating,
|
||||
MasterHost: fmd.CurrentMasterHost,
|
||||
MasterPort: fmd.CurrentMasterPort,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -23,12 +23,16 @@ import (
|
|||
// underlying GTIDSet might use slices, which are not comparable. Using == in
|
||||
// those cases will result in a run-time panic.
|
||||
type Position struct {
|
||||
GTIDSet GTIDSet
|
||||
|
||||
// This is a zero byte compile-time check that no one is trying to
|
||||
// use == or != with Position. Without this, we won't know there's
|
||||
// a problem until the runtime panic.
|
||||
// a problem until the runtime panic. Note that this must not be
|
||||
// the last field of the struct, or else the Go compiler will add
|
||||
// padding to prevent pointers to this field from becoming invalid.
|
||||
_ [0]struct{ notComparable []byte }
|
||||
|
||||
// GTIDSet is the underlying GTID set. It must not be anonymous,
|
||||
// or else Position would itself also implement the GTIDSet interface.
|
||||
GTIDSet GTIDSet
|
||||
}
|
||||
|
||||
// Equal returns true if this position is equal to another.
|
||||
|
|
|
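For illustration, a minimal sketch of the notComparable trick documented above, using a hypothetical positionLike struct rather than Position itself. The zero-length array's element type contains a slice, so the whole struct stops being comparable and any use of == on it is rejected at compile time.

package main

// positionLike is a hypothetical stand-in that uses the same trick as Position.
type positionLike struct {
	// Zero bytes of storage, but it makes the struct non-comparable.
	_ [0]struct{ notComparable []byte }

	// Payload stands in for the real GTIDSet field.
	Payload string
}

func main() {
	a := positionLike{Payload: "x"}
	b := positionLike{Payload: "x"}

	// Uncommenting the next line fails to compile, because positionLike
	// contains a non-comparable field:
	// _ = a == b

	_, _ = a, b
}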
@ -28,8 +28,9 @@ const (
|
|||
// TableDefinitionGetColumn returns the index of a column inside a
|
||||
// TableDefinition.
|
||||
func TableDefinitionGetColumn(td *tabletmanagerdatapb.TableDefinition, name string) (index int, ok bool) {
|
||||
lowered := strings.ToLower(name)
|
||||
for i, n := range td.Columns {
|
||||
if name == n {
|
||||
if lowered == strings.ToLower(n) {
|
||||
return i, true
|
||||
}
|
||||
}
|
||||
|
|
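Put concretely, with the change above a lookup like TableDefinitionGetColumn(td, "camelcase") now matches a column declared as CamelCase, since both the requested name and the stored column names are lower-cased before comparing.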
|
@ -417,7 +417,6 @@ func (*RefreshStateResponse) ProtoMessage() {}
|
|||
func (*RefreshStateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
|
||||
|
||||
type RunHealthCheckRequest struct {
|
||||
TabletType topodata.TabletType `protobuf:"varint,1,opt,name=tablet_type,json=tabletType,enum=topodata.TabletType" json:"tablet_type,omitempty"`
|
||||
}
|
||||
|
||||
func (m *RunHealthCheckRequest) Reset() { *m = RunHealthCheckRequest{} }
|
||||
|
@ -1201,127 +1200,127 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 1946 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x5b, 0x6f, 0xdb, 0xc8,
|
||||
0x15, 0x86, 0x6c, 0xc7, 0x71, 0x0e, 0x25, 0x59, 0xa2, 0x6f, 0x8a, 0x17, 0x48, 0x1c, 0x66, 0xdb,
|
||||
0x4d, 0x53, 0xd4, 0xbb, 0xf1, 0x6e, 0x8b, 0x45, 0x17, 0x5b, 0xd4, 0xb1, 0x9d, 0x4d, 0xf6, 0x92,
|
||||
0x78, 0x99, 0x5b, 0xd1, 0x17, 0x62, 0x44, 0x8e, 0x25, 0x22, 0x14, 0xc9, 0xe5, 0x0c, 0x15, 0x0b,
|
||||
0x28, 0xfa, 0x2f, 0xfa, 0xd6, 0xb7, 0x02, 0xed, 0x43, 0xdf, 0xfa, 0x63, 0x5a, 0xf4, 0x97, 0xf4,
|
||||
0xa1, 0x2f, 0x3d, 0x73, 0x93, 0x86, 0x92, 0x1c, 0x2b, 0x69, 0x0a, 0xf4, 0x45, 0xe0, 0x7c, 0x73,
|
||||
0xe6, 0xdc, 0xe7, 0x9c, 0x33, 0x82, 0x1d, 0x4e, 0xba, 0x09, 0xe5, 0x03, 0x92, 0x92, 0x1e, 0x2d,
|
||||
0x22, 0xc2, 0xc9, 0x7e, 0x5e, 0x64, 0x3c, 0x73, 0xdb, 0x33, 0x1b, 0xbb, 0xce, 0x0f, 0x25, 0x2d,
|
||||
0x46, 0x6a, 0x7f, 0xb7, 0xc9, 0xb3, 0x3c, 0x9b, 0xd0, 0xef, 0x6e, 0x15, 0x34, 0x4f, 0xe2, 0x90,
|
||||
0xf0, 0x38, 0x4b, 0x2d, 0xb8, 0x91, 0x64, 0xbd, 0x92, 0xc7, 0x89, 0x5a, 0x7a, 0xff, 0xac, 0xc1,
|
||||
0xfa, 0x33, 0xc1, 0xf8, 0x98, 0x9e, 0xc5, 0x69, 0x2c, 0x88, 0x5d, 0x17, 0x56, 0x52, 0x32, 0xa0,
|
||||
0x9d, 0xda, 0x5e, 0xed, 0xce, 0x35, 0x5f, 0x7e, 0xbb, 0xdb, 0xb0, 0xca, 0xc2, 0x3e, 0x1d, 0x90,
|
||||
0xce, 0x92, 0x44, 0xf5, 0xca, 0xed, 0xc0, 0xd5, 0x30, 0x4b, 0xca, 0x41, 0xca, 0x3a, 0xcb, 0x7b,
|
||||
0xcb, 0xb8, 0x61, 0x96, 0xee, 0x3e, 0x6c, 0xe4, 0x45, 0x3c, 0x20, 0xc5, 0x28, 0x78, 0x45, 0x47,
|
||||
0x81, 0xa1, 0x5a, 0x91, 0x54, 0x6d, 0xbd, 0xf5, 0x0d, 0x1d, 0x1d, 0x69, 0x7a, 0x94, 0xca, 0x47,
|
||||
0x39, 0xed, 0x5c, 0x51, 0x52, 0xc5, 0xb7, 0x7b, 0x13, 0x1c, 0xa1, 0x7a, 0x90, 0xd0, 0xb4, 0xc7,
|
||||
0xfb, 0x9d, 0x55, 0xdc, 0x5a, 0xf1, 0x41, 0x40, 0xdf, 0x4a, 0xc4, 0xfd, 0x00, 0xae, 0x15, 0xd9,
|
||||
0x6b, 0x64, 0x5e, 0xa6, 0xbc, 0x73, 0x55, 0x6e, 0xaf, 0x21, 0x70, 0x24, 0xd6, 0xde, 0x9f, 0x6b,
|
||||
0xd0, 0x7a, 0x2a, 0xd5, 0xb4, 0x8c, 0xfb, 0x08, 0xd6, 0xc5, 0xf9, 0x2e, 0x61, 0x34, 0xd0, 0x16,
|
||||
0x29, 0x3b, 0x9b, 0x06, 0x56, 0x47, 0xdc, 0x27, 0xa0, 0x3c, 0x1e, 0x44, 0xe3, 0xc3, 0x0c, 0x8d,
|
||||
0x5f, 0xbe, 0xe3, 0x1c, 0x78, 0xfb, 0xb3, 0x41, 0x9a, 0x72, 0xa2, 0xdf, 0xe2, 0x55, 0x80, 0x09,
|
||||
0x57, 0x0d, 0x69, 0xc1, 0xf0, 0x1b, 0x5d, 0x25, 0x24, 0x9a, 0xa5, 0xf7, 0xaf, 0x1a, 0x34, 0x9f,
|
||||
0x33, 0x5a, 0x9c, 0xd2, 0x62, 0x10, 0x33, 0xa6, 0x63, 0xd0, 0xcf, 0x18, 0x37, 0x31, 0x10, 0xdf,
|
||||
0x02, 0x2b, 0x91, 0x4a, 0x47, 0x40, 0x7e, 0xbb, 0x3f, 0x85, 0x76, 0x4e, 0x18, 0x7b, 0x9d, 0x15,
|
||||
0x51, 0x80, 0x7a, 0x87, 0xaf, 0x58, 0x39, 0x90, 0xec, 0x57, 0xfc, 0x96, 0xd9, 0x38, 0xd2, 0xb8,
|
||||
0xfb, 0x3d, 0x00, 0xfa, 0x7d, 0x18, 0x27, 0xb4, 0x47, 0x55, 0x24, 0x9c, 0x83, 0x7b, 0x73, 0x6c,
|
||||
0xa9, 0xea, 0xb2, 0x7f, 0x3a, 0x3e, 0x73, 0x92, 0xf2, 0x62, 0xe4, 0x5b, 0x4c, 0x76, 0xbf, 0x84,
|
||||
0xf5, 0xa9, 0x6d, 0xb7, 0x05, 0xcb, 0x18, 0x70, 0xad, 0xb9, 0xf8, 0x74, 0x37, 0xe1, 0xca, 0x90,
|
||||
0x24, 0x25, 0xd5, 0x9a, 0xab, 0xc5, 0x2f, 0x97, 0x3e, 0xaf, 0x79, 0x7f, 0xaf, 0x41, 0xfd, 0xb8,
|
||||
0x7b, 0x89, 0xdd, 0x4d, 0x58, 0x8a, 0xba, 0xfa, 0x2c, 0x7e, 0x8d, 0xfd, 0xb0, 0x6c, 0xf9, 0xe1,
|
||||
0xc9, 0x1c, 0xd3, 0x3e, 0x9e, 0x63, 0x9a, 0x2d, 0xec, 0x7f, 0x69, 0xd8, 0x9f, 0x6a, 0xe0, 0x4c,
|
||||
0x24, 0x31, 0xf7, 0x5b, 0x68, 0x09, 0x3d, 0x83, 0x7c, 0x82, 0x21, 0x23, 0xa1, 0xe5, 0xad, 0x4b,
|
||||
0x03, 0xe0, 0xaf, 0x97, 0x95, 0x35, 0x73, 0x1f, 0x40, 0x33, 0xea, 0x56, 0x78, 0xa9, 0xc4, 0xbc,
|
||||
0x79, 0x89, 0xc5, 0x7e, 0x23, 0xb2, 0x56, 0xcc, 0xfb, 0x02, 0x9c, 0xfb, 0x49, 0x7e, 0x9a, 0x31,
|
||||
0x75, 0x37, 0xd0, 0xc0, 0x32, 0x8e, 0xa4, 0x81, 0x0d, 0x5f, 0x7c, 0xba, 0xbb, 0xb0, 0x96, 0xeb,
|
||||
0x5d, 0x6d, 0xe3, 0x78, 0xed, 0x7d, 0x84, 0x16, 0xc6, 0x69, 0xcf, 0xa7, 0x58, 0x85, 0x30, 0x4a,
|
||||
0x98, 0xde, 0x39, 0x19, 0x25, 0x19, 0x89, 0xb4, 0x87, 0xcc, 0xd2, 0xbb, 0x03, 0x75, 0x45, 0xc8,
|
||||
0x72, 0x14, 0x4a, 0xdf, 0x40, 0x79, 0x17, 0xea, 0x4f, 0x13, 0x4a, 0x73, 0xc3, 0x13, 0xc5, 0x47,
|
||||
0x65, 0x21, 0x4b, 0x98, 0x24, 0x5d, 0xf6, 0xc7, 0x6b, 0x6f, 0x1d, 0x1a, 0x9a, 0x56, 0xb1, 0xf5,
|
||||
0xfe, 0x51, 0x03, 0xf7, 0xe4, 0x9c, 0x86, 0x25, 0xa7, 0x0f, 0xb3, 0xec, 0x95, 0xe1, 0x31, 0xaf,
|
||||
0x9a, 0xdd, 0xc0, 0x6c, 0x21, 0x05, 0x7e, 0x71, 0xbc, 0x81, 0xd2, 0x77, 0xd7, 0x7c, 0x0b, 0x71,
|
||||
0x4f, 0xe1, 0x1a, 0x3d, 0xe7, 0x05, 0x09, 0x68, 0x3a, 0x94, 0x75, 0xcd, 0x39, 0xf8, 0x74, 0x8e,
|
||||
0x6b, 0x67, 0xa5, 0x21, 0x84, 0xc7, 0x4e, 0xd2, 0xa1, 0x4a, 0xa8, 0x35, 0xaa, 0x97, 0xbb, 0x5f,
|
||||
0x40, 0xa3, 0xb2, 0xf5, 0x56, 0xc9, 0x74, 0x06, 0x1b, 0x15, 0x51, 0xda, 0x8f, 0x58, 0x1d, 0xe9,
|
||||
0x79, 0xcc, 0x03, 0xc6, 0x09, 0x2f, 0x99, 0x76, 0x10, 0x08, 0xe8, 0xa9, 0x44, 0x64, 0xd1, 0xe6,
|
||||
0x51, 0x56, 0xf2, 0x71, 0xd1, 0x96, 0x2b, 0x8d, 0xd3, 0xc2, 0x5c, 0x21, 0xbd, 0xf2, 0x86, 0xd0,
|
||||
0xfa, 0x8a, 0x72, 0x55, 0xff, 0x8c, 0xfb, 0x90, 0x56, 0x1a, 0xae, 0xd2, 0x15, 0x69, 0xd5, 0xca,
|
||||
0xbd, 0x0d, 0x8d, 0x38, 0x0d, 0x93, 0x32, 0xa2, 0xc1, 0x30, 0xa6, 0xaf, 0x99, 0x14, 0xb1, 0xe6,
|
||||
0xd7, 0x35, 0xf8, 0x42, 0x60, 0xee, 0x8f, 0xa0, 0x49, 0xcf, 0x15, 0x91, 0x66, 0xa2, 0x9a, 0x44,
|
||||
0x43, 0xa3, 0xb2, 0x68, 0x32, 0x8f, 0x42, 0xdb, 0x92, 0xab, 0xad, 0x3b, 0x85, 0xb6, 0xaa, 0xcf,
|
||||
0x56, 0x01, 0x96, 0x36, 0x3a, 0x07, 0xb7, 0xe7, 0xc4, 0x62, 0xba, 0xd0, 0xfb, 0x2d, 0x36, 0x85,
|
||||
0x78, 0x3b, 0xb0, 0x85, 0x62, 0xac, 0xfc, 0xd7, 0x36, 0x7a, 0xbf, 0x85, 0xed, 0xe9, 0x0d, 0xad,
|
||||
0xc4, 0xaf, 0xc1, 0xa9, 0xde, 0x58, 0x21, 0xfe, 0xc6, 0x1c, 0xf1, 0xf6, 0x61, 0xfb, 0x88, 0xb7,
|
||||
0x09, 0xee, 0x53, 0xca, 0x7d, 0x4a, 0xa2, 0x27, 0x69, 0x32, 0x32, 0x12, 0xb7, 0x60, 0xa3, 0x82,
|
||||
0xea, 0x14, 0x9e, 0xc0, 0x2f, 0x8b, 0x98, 0x53, 0x43, 0xbd, 0x0d, 0x9b, 0x55, 0x58, 0x93, 0x7f,
|
||||
0x0d, 0xed, 0xa3, 0x3e, 0x49, 0x7b, 0xf4, 0x19, 0x36, 0x4b, 0x13, 0xb0, 0x9f, 0x83, 0xa3, 0xd4,
|
||||
0x0b, 0x64, 0x3b, 0x15, 0x2a, 0x37, 0x0f, 0x36, 0xf7, 0xc7, 0xd3, 0x81, 0xf4, 0x39, 0x97, 0x27,
|
||||
0x80, 0x8f, 0xbf, 0x85, 0x9e, 0x36, 0xaf, 0x89, 0x42, 0x3e, 0x3d, 0x2b, 0x28, 0xeb, 0x8b, 0x94,
|
||||
0xb2, 0x15, 0xaa, 0xc2, 0x9a, 0xfc, 0x31, 0x6c, 0xf9, 0x65, 0xfa, 0x90, 0x92, 0x84, 0xf7, 0x65,
|
||||
0xd7, 0xf9, 0x2f, 0x95, 0xea, 0xc0, 0xf6, 0x34, 0x3f, 0x2d, 0xe9, 0x33, 0xe8, 0x3c, 0xea, 0xa5,
|
||||
0x59, 0x41, 0xd5, 0xe6, 0x49, 0x51, 0x64, 0x45, 0xa5, 0x12, 0x71, 0xbc, 0xc8, 0xe9, 0xa4, 0xbe,
|
||||
0xc8, 0xa5, 0xf7, 0x01, 0x5c, 0x9f, 0x73, 0xca, 0xb6, 0x55, 0x94, 0xa1, 0xca, 0x05, 0x50, 0xb6,
|
||||
0xda, 0xb0, 0x26, 0xff, 0x04, 0xb6, 0x4f, 0x0b, 0x7a, 0x96, 0xc4, 0xbd, 0xfe, 0xec, 0x95, 0x09,
|
||||
0xa5, 0x2b, 0xb5, 0x78, 0xbd, 0xf2, 0xfe, 0x5a, 0x83, 0x9d, 0x99, 0x23, 0x3a, 0xd1, 0x1e, 0x42,
|
||||
0xa3, 0x4b, 0xcf, 0x50, 0x33, 0x7b, 0x28, 0x59, 0x30, 0xd3, 0xeb, 0xea, 0xa4, 0x9e, 0x5b, 0x1e,
|
||||
0x40, 0x9d, 0x9c, 0xa1, 0xb5, 0x81, 0x35, 0xaf, 0x2d, 0xc8, 0xc8, 0x91, 0x07, 0x15, 0xec, 0xfd,
|
||||
0x1b, 0xcb, 0xe9, 0x61, 0x9e, 0x27, 0xa3, 0xaa, 0x71, 0x58, 0xb7, 0xd8, 0x0f, 0x89, 0xa9, 0x5b,
|
||||
0xf8, 0x29, 0xea, 0x16, 0x8a, 0x0f, 0xa9, 0xae, 0x00, 0x6a, 0x21, 0x06, 0x13, 0x92, 0x24, 0x38,
|
||||
0x9b, 0x59, 0x63, 0xa8, 0x2c, 0x37, 0x6b, 0x7e, 0x4b, 0x6e, 0xf8, 0x13, 0x7c, 0xd6, 0xfa, 0x95,
|
||||
0xf7, 0x65, 0xfd, 0x95, 0x77, 0xb4, 0xfe, 0x2f, 0x35, 0xd8, 0xa8, 0x58, 0xff, 0x7f, 0x1b, 0xa7,
|
||||
0xbf, 0xd5, 0xa0, 0xa3, 0xbb, 0xc3, 0x03, 0xca, 0xc3, 0xfe, 0x21, 0x3b, 0xee, 0x8e, 0xa3, 0x85,
|
||||
0xb1, 0x91, 0x6f, 0x04, 0x1d, 0x2f, 0xb5, 0x70, 0x77, 0xe0, 0x2a, 0x8e, 0x0f, 0xb2, 0x2b, 0xea,
|
||||
0xc6, 0x10, 0x75, 0x1f, 0x8b, 0xbe, 0x78, 0x1d, 0xd6, 0x06, 0xe4, 0x3c, 0xc0, 0x09, 0x9a, 0xe9,
|
||||
0x21, 0xf2, 0x2a, 0xae, 0x7d, 0x5c, 0xca, 0xb9, 0x39, 0x66, 0x72, 0x20, 0xee, 0xc6, 0x29, 0x3e,
|
||||
0x22, 0x98, 0x0c, 0xd2, 0x1a, 0xce, 0xcd, 0x0a, 0xbe, 0xaf, 0x50, 0xd1, 0x18, 0x0a, 0x79, 0x5f,
|
||||
0xec, 0x10, 0x60, 0x63, 0x28, 0xac, 0x4b, 0xe4, 0x7d, 0x05, 0xd7, 0xe7, 0xe8, 0xac, 0x7d, 0x7c,
|
||||
0x17, 0x56, 0xb1, 0xb4, 0x94, 0x09, 0xd7, 0xce, 0x75, 0xf7, 0xd5, 0x3b, 0xe7, 0x7b, 0xf1, 0xeb,
|
||||
0xcb, 0x1d, 0x5f, 0x53, 0x78, 0xdf, 0x4c, 0x1b, 0x8f, 0x41, 0x7b, 0xb3, 0xf1, 0xb6, 0x8d, 0x4b,
|
||||
0x15, 0x1b, 0x67, 0xb5, 0x92, 0xcc, 0xde, 0x41, 0x2b, 0x51, 0xf4, 0x13, 0x32, 0xa4, 0xaa, 0x0f,
|
||||
0x9b, 0x4a, 0xf2, 0x00, 0xab, 0xbb, 0x8d, 0x6a, 0xc6, 0x1f, 0x8b, 0x6e, 0x3c, 0xee, 0xe0, 0xce,
|
||||
0xc1, 0xce, 0xfe, 0xf4, 0xcb, 0x4d, 0x1f, 0xd0, 0x64, 0xa2, 0x8f, 0x7d, 0x47, 0x18, 0x66, 0x80,
|
||||
0x19, 0xdc, 0x8c, 0x80, 0xcf, 0x60, 0x7b, 0x7a, 0x43, 0xcb, 0xb0, 0xe7, 0xb8, 0xda, 0xd4, 0x1c,
|
||||
0xe7, 0xe2, 0x2b, 0x09, 0x0b, 0xb1, 0x54, 0xcd, 0x70, 0xda, 0x80, 0xb6, 0x85, 0xe9, 0x8a, 0xf7,
|
||||
0x1b, 0xd8, 0x19, 0x83, 0xdf, 0x61, 0x2e, 0x0e, 0xca, 0x81, 0x35, 0xa8, 0x5d, 0xc4, 0xdf, 0xbd,
|
||||
0x05, 0xf5, 0xd7, 0x04, 0xc7, 0x14, 0x1e, 0x0f, 0xa8, 0x99, 0x45, 0x96, 0x7d, 0x47, 0x60, 0xcf,
|
||||
0x14, 0xe4, 0xfd, 0x02, 0x3a, 0xb3, 0x9c, 0x17, 0x50, 0x5d, 0xaa, 0x49, 0x0a, 0x5e, 0xd1, 0x5d,
|
||||
0x38, 0xdf, 0x02, 0xb5, 0xf2, 0xc7, 0x70, 0x4b, 0x35, 0x19, 0x1c, 0xc3, 0xb0, 0x15, 0x60, 0x09,
|
||||
0xc2, 0xa0, 0xe1, 0xc8, 0x47, 0x53, 0x4e, 0x23, 0x63, 0x86, 0x9c, 0xa8, 0xd4, 0x76, 0x10, 0x9b,
|
||||
0xe9, 0x14, 0x0c, 0xf4, 0x28, 0xf2, 0x3e, 0x04, 0xef, 0x4d, 0x5c, 0xb4, 0xac, 0x3d, 0xb8, 0x31,
|
||||
0x4d, 0x75, 0x92, 0xd0, 0x70, 0x22, 0xc8, 0xbb, 0x05, 0x37, 0x2f, 0xa4, 0xd0, 0x4c, 0x5c, 0x35,
|
||||
0x8c, 0x09, 0x23, 0xc6, 0x19, 0xf4, 0x13, 0x35, 0x28, 0x69, 0x4c, 0x3b, 0x08, 0xd3, 0x9c, 0x44,
|
||||
0x51, 0x61, 0x06, 0x34, 0xb5, 0xf0, 0x7e, 0x0f, 0xdb, 0x2f, 0xd1, 0xc3, 0xd6, 0x78, 0x6f, 0x8c,
|
||||
0x3c, 0x84, 0x7a, 0x37, 0xc9, 0x83, 0x8a, 0x53, 0xe7, 0x0f, 0x35, 0xf6, 0x61, 0xa7, 0x6b, 0x3d,
|
||||
0x14, 0x16, 0x08, 0xe9, 0x75, 0xd8, 0x99, 0x91, 0xaf, 0x2d, 0x6b, 0x41, 0x53, 0x44, 0x1b, 0xb7,
|
||||
0x8c, 0x5d, 0x2f, 0x60, 0x7d, 0x8c, 0x68, 0xab, 0x8e, 0xb0, 0xd0, 0x5a, 0x5a, 0x9a, 0xd7, 0xd2,
|
||||
0x65, 0x6a, 0xd6, 0x2d, 0x35, 0x99, 0xd7, 0x16, 0x7c, 0x31, 0x15, 0x2c, 0x51, 0x32, 0xdb, 0x0d,
|
||||
0xa4, 0x15, 0xfa, 0x1d, 0xb8, 0x38, 0x66, 0x20, 0xf2, 0x3c, 0xe5, 0x71, 0x62, 0xfc, 0xf4, 0x3e,
|
||||
0x34, 0x58, 0xc4, 0x53, 0xf7, 0x70, 0xee, 0xb0, 0xa5, 0x2f, 0x90, 0xf7, 0xe8, 0x5c, 0xa4, 0x13,
|
||||
0x23, 0xe1, 0xb8, 0x50, 0x18, 0xfb, 0x76, 0xa1, 0x33, 0xbb, 0xa5, 0xed, 0xc4, 0xeb, 0xf2, 0x08,
|
||||
0x5b, 0x88, 0xaa, 0x11, 0xe6, 0xc0, 0x27, 0xe0, 0xda, 0xe0, 0x02, 0xd2, 0xf1, 0xd1, 0x7e, 0xe3,
|
||||
0x34, 0xcb, 0xcb, 0x44, 0x8e, 0x7e, 0x2a, 0xfb, 0xbf, 0xce, 0x4a, 0x91, 0xc6, 0xc6, 0x77, 0x3f,
|
||||
0x86, 0x75, 0x61, 0x71, 0x10, 0x16, 0x14, 0x89, 0xa2, 0x20, 0x35, 0xcf, 0x93, 0x86, 0x80, 0x8f,
|
||||
0x14, 0xfa, 0x98, 0x89, 0x0b, 0x47, 0x42, 0xc1, 0xd4, 0xee, 0x46, 0xa0, 0x20, 0xd9, 0x91, 0x3e,
|
||||
0x87, 0xfa, 0x40, 0x6a, 0x16, 0x90, 0x24, 0x26, 0xaa, 0x2b, 0x39, 0x07, 0x5b, 0xd3, 0x93, 0xe3,
|
||||
0xa1, 0xd8, 0xf4, 0x1d, 0x45, 0x2a, 0x17, 0xee, 0x3d, 0xd8, 0xb4, 0xea, 0xe8, 0x24, 0xdd, 0x57,
|
||||
0xa4, 0x8c, 0x0d, 0x6b, 0xcf, 0x44, 0x4b, 0xdc, 0xca, 0x0b, 0xed, 0xd2, 0x2e, 0xfc, 0x63, 0x0d,
|
||||
0x5a, 0xc2, 0x5d, 0x76, 0xc5, 0x71, 0x7f, 0x06, 0xab, 0x8a, 0x5a, 0xdf, 0xa5, 0x0b, 0xd4, 0xd3,
|
||||
0x44, 0x17, 0x6a, 0xb6, 0x74, 0xa1, 0x66, 0xf3, 0xfc, 0xb9, 0x3c, 0xc7, 0x9f, 0x26, 0xc2, 0xd5,
|
||||
0xd2, 0x87, 0x83, 0xed, 0x31, 0x1d, 0x64, 0x9c, 0x56, 0x03, 0x7f, 0x00, 0x9b, 0x55, 0x78, 0x81,
|
||||
0xd0, 0x7f, 0x89, 0x1e, 0x2a, 0x32, 0x71, 0x48, 0x8a, 0x78, 0xd9, 0xa7, 0xe9, 0x11, 0x29, 0x71,
|
||||
0x9c, 0x7d, 0x9e, 0x2f, 0xd0, 0x0a, 0xbc, 0x5f, 0xc1, 0xde, 0xc5, 0xc7, 0x17, 0xcb, 0x7b, 0x75,
|
||||
0x90, 0x30, 0xcd, 0x27, 0xb2, 0xf2, 0x7e, 0x76, 0x4b, 0x3b, 0xe0, 0x0f, 0xe2, 0x8f, 0x40, 0x5a,
|
||||
0xcd, 0xfb, 0xb7, 0x0d, 0xda, 0x9c, 0x08, 0x2c, 0xcd, 0xcb, 0xe8, 0xbb, 0xd0, 0x96, 0x03, 0xb0,
|
||||
0x78, 0x95, 0x17, 0xf8, 0x36, 0x17, 0x3a, 0xe9, 0xb9, 0x77, 0x5d, 0x6e, 0x4c, 0x7a, 0x93, 0x6c,
|
||||
0x5f, 0x74, 0xea, 0xe6, 0x79, 0x8f, 0x26, 0x86, 0x20, 0x26, 0x88, 0x27, 0xfd, 0xe9, 0xed, 0x74,
|
||||
0x16, 0xcf, 0x9d, 0x39, 0xac, 0xb4, 0x1c, 0x6c, 0x65, 0xa2, 0xe6, 0x5a, 0x75, 0xe2, 0x30, 0x8d,
|
||||
0x44, 0x77, 0xa9, 0xcc, 0x2c, 0x2f, 0xe0, 0xf6, 0x1b, 0xa9, 0xde, 0x75, 0x86, 0xc1, 0x9c, 0xb4,
|
||||
0x33, 0xc1, 0xca, 0xc9, 0x2a, 0xbc, 0x40, 0x52, 0xdc, 0x83, 0xc6, 0x7d, 0x12, 0xbe, 0x2a, 0xc7,
|
||||
0x19, 0xb8, 0x07, 0x4e, 0x98, 0xa5, 0x61, 0x59, 0xa0, 0x13, 0xc2, 0x91, 0x2e, 0x3c, 0x36, 0x84,
|
||||
0xf3, 0x46, 0xd3, 0x1c, 0xd1, 0x02, 0x3e, 0x84, 0x2b, 0x74, 0x38, 0x71, 0x6c, 0x73, 0xdf, 0xfc,
|
||||
0x4d, 0x7e, 0x22, 0x50, 0x5f, 0x6d, 0x76, 0x57, 0xe5, 0x9f, 0xe6, 0x9f, 0xfe, 0x27, 0x00, 0x00,
|
||||
0xff, 0xff, 0x57, 0x17, 0x30, 0xf4, 0xa5, 0x17, 0x00, 0x00,
|
||||
// 1945 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7,
|
||||
0x15, 0x07, 0x25, 0x59, 0x96, 0xdf, 0x92, 0x14, 0xb9, 0x92, 0x25, 0x5a, 0x01, 0x6c, 0x79, 0x9d,
|
||||
0x36, 0xae, 0x8b, 0x2a, 0xb1, 0x92, 0x16, 0x41, 0x83, 0x14, 0x95, 0x25, 0x39, 0x76, 0xfe, 0x59,
|
||||
0x59, 0xff, 0x2b, 0x7a, 0x59, 0x0c, 0xb9, 0x23, 0x71, 0xe1, 0xe5, 0xee, 0x66, 0x77, 0x96, 0x16,
|
||||
0x81, 0xa2, 0xdf, 0xa2, 0xb7, 0xde, 0x0a, 0xb4, 0x87, 0xde, 0xfa, 0x61, 0x5a, 0xf4, 0x93, 0xf4,
|
||||
0xd0, 0x4b, 0xdf, 0xcc, 0xbc, 0x21, 0x67, 0x49, 0xca, 0xa2, 0x8d, 0x14, 0xc8, 0x85, 0x98, 0xf9,
|
||||
0xbd, 0x37, 0xef, 0xff, 0xbc, 0x79, 0x4b, 0xd8, 0x16, 0xac, 0x1b, 0x73, 0x31, 0x60, 0x09, 0x3b,
|
||||
0xe3, 0x79, 0xc8, 0x04, 0xdb, 0xcb, 0xf2, 0x54, 0xa4, 0x6e, 0x7b, 0x86, 0xb0, 0xe3, 0x7c, 0x5f,
|
||||
0xf2, 0x7c, 0xa4, 0xe9, 0x3b, 0x4d, 0x91, 0x66, 0xe9, 0x84, 0x7f, 0xe7, 0x7a, 0xce, 0xb3, 0x38,
|
||||
0xea, 0x31, 0x11, 0xa5, 0x89, 0x05, 0x37, 0xe2, 0xf4, 0xac, 0x14, 0x51, 0xac, 0xb7, 0xde, 0xbf,
|
||||
0x6b, 0xb0, 0xfe, 0x4c, 0x0a, 0x3e, 0xe2, 0xa7, 0x51, 0x12, 0x49, 0x66, 0xd7, 0x85, 0x95, 0x84,
|
||||
0x0d, 0x78, 0xa7, 0xb6, 0x5b, 0xbb, 0x7b, 0xcd, 0x57, 0x6b, 0x77, 0x0b, 0x56, 0x8b, 0x5e, 0x9f,
|
||||
0x0f, 0x58, 0x67, 0x49, 0xa1, 0xb4, 0x73, 0x3b, 0x70, 0xb5, 0x97, 0xc6, 0xe5, 0x20, 0x29, 0x3a,
|
||||
0xcb, 0xbb, 0xcb, 0x48, 0x30, 0x5b, 0x77, 0x0f, 0x36, 0xb2, 0x3c, 0x1a, 0xb0, 0x7c, 0x14, 0xbc,
|
||||
0xe2, 0xa3, 0xc0, 0x70, 0xad, 0x28, 0xae, 0x36, 0x91, 0xbe, 0xe2, 0xa3, 0x43, 0xe2, 0x47, 0xad,
|
||||
0x62, 0x94, 0xf1, 0xce, 0x15, 0xad, 0x55, 0xae, 0xdd, 0x5b, 0xe0, 0x48, 0xd3, 0x83, 0x98, 0x27,
|
||||
0x67, 0xa2, 0xdf, 0x59, 0x45, 0xd2, 0x8a, 0x0f, 0x12, 0xfa, 0x5a, 0x21, 0xee, 0x7b, 0x70, 0x2d,
|
||||
0x4f, 0x5f, 0xa3, 0xf0, 0x32, 0x11, 0x9d, 0xab, 0x8a, 0xbc, 0x86, 0xc0, 0xa1, 0xdc, 0x7b, 0x7f,
|
||||
0xad, 0x41, 0xeb, 0xa9, 0x32, 0xd3, 0x72, 0xee, 0x03, 0x58, 0x97, 0xe7, 0xbb, 0xac, 0xe0, 0x01,
|
||||
0x79, 0xa4, 0xfd, 0x6c, 0x1a, 0x58, 0x1f, 0x71, 0x9f, 0x80, 0x8e, 0x78, 0x10, 0x8e, 0x0f, 0x17,
|
||||
0xe8, 0xfc, 0xf2, 0x5d, 0x67, 0xdf, 0xdb, 0x9b, 0x4d, 0xd2, 0x54, 0x10, 0xfd, 0x96, 0xa8, 0x02,
|
||||
0x85, 0x0c, 0xd5, 0x90, 0xe7, 0x05, 0xae, 0x31, 0x54, 0x52, 0xa3, 0xd9, 0x7a, 0xff, 0xa9, 0x41,
|
||||
0xf3, 0x79, 0xc1, 0xf3, 0x13, 0x9e, 0x0f, 0xa2, 0xa2, 0xa0, 0x1c, 0xf4, 0xd3, 0x42, 0x98, 0x1c,
|
||||
0xc8, 0xb5, 0xc4, 0x4a, 0xe4, 0xa2, 0x0c, 0xa8, 0xb5, 0xfb, 0x73, 0x68, 0x67, 0xac, 0x28, 0x5e,
|
||||
0xa7, 0x79, 0x18, 0xa0, 0xdd, 0xbd, 0x57, 0x45, 0x39, 0x50, 0xe2, 0x57, 0xfc, 0x96, 0x21, 0x1c,
|
||||
0x12, 0xee, 0x7e, 0x07, 0x80, 0x71, 0x1f, 0x46, 0x31, 0x3f, 0xe3, 0x3a, 0x13, 0xce, 0xfe, 0xfd,
|
||||
0x39, 0xbe, 0x54, 0x6d, 0xd9, 0x3b, 0x19, 0x9f, 0x39, 0x4e, 0x44, 0x3e, 0xf2, 0x2d, 0x21, 0x3b,
|
||||
0x9f, 0xc3, 0xfa, 0x14, 0xd9, 0x6d, 0xc1, 0x32, 0x26, 0x9c, 0x2c, 0x97, 0x4b, 0x77, 0x13, 0xae,
|
||||
0x0c, 0x59, 0x5c, 0x72, 0xb2, 0x5c, 0x6f, 0x7e, 0xbd, 0xf4, 0x69, 0xcd, 0xfb, 0x67, 0x0d, 0xea,
|
||||
0x47, 0xdd, 0x4b, 0xfc, 0x6e, 0xc2, 0x52, 0xd8, 0xa5, 0xb3, 0xb8, 0x1a, 0xc7, 0x61, 0xd9, 0x8a,
|
||||
0xc3, 0x93, 0x39, 0xae, 0x7d, 0x38, 0xc7, 0x35, 0x5b, 0xd9, 0xff, 0xd3, 0xb1, 0xbf, 0xd4, 0xc0,
|
||||
0x99, 0x68, 0x2a, 0xdc, 0xaf, 0xa1, 0x25, 0xed, 0x0c, 0xb2, 0x09, 0x86, 0x82, 0xa4, 0x95, 0xb7,
|
||||
0x2f, 0x4d, 0x80, 0xbf, 0x5e, 0x56, 0xf6, 0x85, 0xfb, 0x10, 0x9a, 0x61, 0xb7, 0x22, 0x4b, 0x17,
|
||||
0xe6, 0xad, 0x4b, 0x3c, 0xf6, 0x1b, 0xa1, 0xb5, 0x2b, 0xbc, 0xcf, 0xc0, 0x79, 0x10, 0x67, 0x27,
|
||||
0x69, 0xa1, 0xef, 0x06, 0x3a, 0x58, 0x46, 0xa1, 0x72, 0xb0, 0xe1, 0xcb, 0xa5, 0xbb, 0x03, 0x6b,
|
||||
0x19, 0x51, 0xc9, 0xc7, 0xf1, 0xde, 0xfb, 0x00, 0x3d, 0x8c, 0x92, 0x33, 0x9f, 0x63, 0x17, 0xc2,
|
||||
0x2c, 0x61, 0x79, 0x67, 0x6c, 0x14, 0xa7, 0x2c, 0xa4, 0x08, 0x99, 0xad, 0x77, 0x17, 0xea, 0x9a,
|
||||
0xb1, 0xc8, 0x50, 0x29, 0x7f, 0x03, 0xe7, 0x3d, 0xa8, 0x3f, 0x8d, 0x39, 0xcf, 0x8c, 0x4c, 0x54,
|
||||
0x1f, 0x96, 0xb9, 0x6a, 0x61, 0x8a, 0x75, 0xd9, 0x1f, 0xef, 0xbd, 0x75, 0x68, 0x10, 0xaf, 0x16,
|
||||
0xeb, 0xfd, 0xab, 0x06, 0xee, 0xf1, 0x39, 0xef, 0x95, 0x82, 0x3f, 0x4a, 0xd3, 0x57, 0x46, 0xc6,
|
||||
0xbc, 0x6e, 0x76, 0x13, 0xab, 0x85, 0xe5, 0xb8, 0x12, 0x78, 0x03, 0x55, 0xec, 0xae, 0xf9, 0x16,
|
||||
0xe2, 0x9e, 0xc0, 0x35, 0x7e, 0x2e, 0x72, 0x16, 0xf0, 0x64, 0xa8, 0xfa, 0x9a, 0xb3, 0xff, 0xf1,
|
||||
0x9c, 0xd0, 0xce, 0x6a, 0x43, 0x08, 0x8f, 0x1d, 0x27, 0x43, 0x5d, 0x50, 0x6b, 0x9c, 0xb6, 0x3b,
|
||||
0x9f, 0x41, 0xa3, 0x42, 0x7a, 0xab, 0x62, 0x3a, 0x85, 0x8d, 0x8a, 0x2a, 0x8a, 0x23, 0x76, 0x47,
|
||||
0x7e, 0x1e, 0x89, 0xa0, 0x10, 0x4c, 0x94, 0x05, 0x05, 0x08, 0x24, 0xf4, 0x54, 0x21, 0xaa, 0x69,
|
||||
0x8b, 0x30, 0x2d, 0xc5, 0xb8, 0x69, 0xab, 0x1d, 0xe1, 0x3c, 0x37, 0x57, 0x88, 0x76, 0xde, 0x10,
|
||||
0x5a, 0x5f, 0x70, 0xa1, 0xfb, 0x9f, 0x09, 0x1f, 0xf2, 0x2a, 0xc7, 0x75, 0xb9, 0x22, 0xaf, 0xde,
|
||||
0xb9, 0x77, 0xa0, 0x11, 0x25, 0xbd, 0xb8, 0x0c, 0x79, 0x30, 0x8c, 0xf8, 0xeb, 0x42, 0xa9, 0x58,
|
||||
0xf3, 0xeb, 0x04, 0xbe, 0x90, 0x98, 0xfb, 0x13, 0x68, 0xf2, 0x73, 0xcd, 0x44, 0x42, 0xf4, 0x23,
|
||||
0xd1, 0x20, 0x54, 0x35, 0xcd, 0xc2, 0xe3, 0xd0, 0xb6, 0xf4, 0x92, 0x77, 0x27, 0xd0, 0xd6, 0xfd,
|
||||
0xd9, 0x6a, 0xc0, 0xca, 0x47, 0x67, 0xff, 0xce, 0x9c, 0x5c, 0x4c, 0x37, 0x7a, 0xbf, 0x55, 0x4c,
|
||||
0x21, 0xde, 0x36, 0x5c, 0x47, 0x35, 0x56, 0xfd, 0x93, 0x8f, 0xde, 0xef, 0x61, 0x6b, 0x9a, 0x40,
|
||||
0x46, 0xfc, 0x16, 0x9c, 0xea, 0x8d, 0x95, 0xea, 0x6f, 0xce, 0x51, 0x6f, 0x1f, 0xb6, 0x8f, 0x78,
|
||||
0x9b, 0xe0, 0x3e, 0xe5, 0xc2, 0xe7, 0x2c, 0x7c, 0x92, 0xc4, 0x23, 0xa3, 0xf1, 0x3a, 0x6c, 0x54,
|
||||
0x50, 0x2a, 0xe1, 0x09, 0xfc, 0x32, 0x8f, 0x04, 0x37, 0xdc, 0x5b, 0xb0, 0x59, 0x85, 0x89, 0xfd,
|
||||
0x4b, 0x68, 0x1f, 0xf6, 0x59, 0x72, 0xc6, 0x9f, 0xe1, 0x63, 0x69, 0x12, 0xf6, 0x4b, 0x70, 0xb4,
|
||||
0x79, 0x81, 0x7a, 0x4e, 0xa5, 0xc9, 0xcd, 0xfd, 0xcd, 0xbd, 0xf1, 0x74, 0xa0, 0x62, 0x2e, 0xd4,
|
||||
0x09, 0x10, 0xe3, 0xb5, 0xb4, 0xd3, 0x96, 0x35, 0x31, 0xc8, 0xe7, 0xa7, 0x39, 0x2f, 0xfa, 0xb2,
|
||||
0xa4, 0x6c, 0x83, 0xaa, 0x30, 0xb1, 0x63, 0x84, 0xfd, 0x32, 0x79, 0xc4, 0x59, 0x2c, 0xfa, 0xea,
|
||||
0xd5, 0x31, 0x07, 0x3a, 0xb0, 0x35, 0x4d, 0xa0, 0x23, 0x9f, 0x40, 0xe7, 0xf1, 0x59, 0x92, 0xe6,
|
||||
0x5c, 0x13, 0x8f, 0xf3, 0x3c, 0xcd, 0x2b, 0x2d, 0x45, 0xe0, 0x8d, 0x4c, 0x26, 0x8d, 0x42, 0x6d,
|
||||
0xbd, 0xf7, 0xe0, 0xc6, 0x9c, 0x53, 0xb6, 0xd1, 0xb2, 0x9f, 0x54, 0x2a, 0x59, 0x1b, 0x6d, 0xc3,
|
||||
0xc4, 0xfe, 0x11, 0x6c, 0x9d, 0xe4, 0xfc, 0x34, 0x8e, 0xce, 0xfa, 0xb3, 0xb5, 0xdf, 0x53, 0x31,
|
||||
0x21, 0xf5, 0xb4, 0xf3, 0xfe, 0x5e, 0x83, 0xed, 0x99, 0x23, 0x54, 0x31, 0x8f, 0xa0, 0xd1, 0xe5,
|
||||
0xa7, 0x68, 0x99, 0x3d, 0x5d, 0x2c, 0x58, 0xb2, 0x75, 0x7d, 0x92, 0x06, 0x90, 0x87, 0x50, 0x67,
|
||||
0xa7, 0xe8, 0x6d, 0x60, 0x0d, 0x5e, 0x0b, 0x0a, 0x72, 0xd4, 0x41, 0x0d, 0x7b, 0xff, 0xc5, 0xbe,
|
||||
0x78, 0x90, 0x65, 0xf1, 0xa8, 0xea, 0x1c, 0x36, 0xa0, 0xe2, 0xfb, 0xd8, 0x34, 0x20, 0x5c, 0xca,
|
||||
0x06, 0x84, 0xea, 0x7b, 0x9c, 0xae, 0xb2, 0xde, 0xc8, 0x09, 0x83, 0xc5, 0x31, 0x0e, 0x59, 0xd6,
|
||||
0x3c, 0xa9, 0xfa, 0xc6, 0x9a, 0xdf, 0x52, 0x04, 0x7f, 0x82, 0xcf, 0x7a, 0xbf, 0xf2, 0x43, 0x79,
|
||||
0x7f, 0xe5, 0x1d, 0xbd, 0xff, 0x5b, 0x0d, 0x36, 0x2a, 0xde, 0xff, 0x68, 0xf3, 0xf4, 0x8f, 0x1a,
|
||||
0x74, 0xa8, 0xcd, 0x3f, 0xe4, 0xa2, 0xd7, 0x3f, 0x28, 0x8e, 0xba, 0xe3, 0x6c, 0x61, 0x6e, 0xd4,
|
||||
0xb0, 0x4f, 0xf9, 0xd2, 0x1b, 0x77, 0x1b, 0xae, 0xe2, 0x1c, 0xa0, 0x9e, 0x37, 0xea, 0xf0, 0x61,
|
||||
0xf7, 0x5b, 0xf9, 0xc0, 0xdd, 0x80, 0xb5, 0x01, 0x3b, 0x0f, 0x70, 0x14, 0x2e, 0x68, 0x1a, 0xbc,
|
||||
0x8a, 0x7b, 0x1f, 0xb7, 0x6a, 0x00, 0x8e, 0x0a, 0x35, 0xd9, 0x76, 0xa3, 0x04, 0xbf, 0x06, 0x0a,
|
||||
0x95, 0xa4, 0x35, 0x1c, 0x80, 0x35, 0xfc, 0x40, 0xa3, 0xb2, 0xc3, 0xe7, 0xea, 0xbe, 0xd8, 0x29,
|
||||
0xc0, 0x0e, 0x9f, 0x5b, 0x97, 0xc8, 0xfb, 0x02, 0x6e, 0xcc, 0xb1, 0x99, 0x62, 0x7c, 0x0f, 0x56,
|
||||
0xb1, 0x47, 0x94, 0xb1, 0xa0, 0xe0, 0xba, 0x7b, 0xfa, 0x83, 0xe5, 0x3b, 0xf9, 0xeb, 0x2b, 0x8a,
|
||||
0x4f, 0x1c, 0xde, 0x57, 0xd3, 0xce, 0x63, 0xd2, 0xde, 0xec, 0xbc, 0xed, 0xe3, 0x52, 0xc5, 0xc7,
|
||||
0x59, 0xab, 0x94, 0xb0, 0x77, 0xb0, 0x4a, 0x76, 0xef, 0x98, 0x0d, 0xb9, 0x7e, 0x50, 0x4d, 0x27,
|
||||
0x79, 0x88, 0x6d, 0xda, 0x46, 0x49, 0xf0, 0x87, 0xf2, 0x59, 0x1d, 0x3f, 0xc5, 0xce, 0xfe, 0xf6,
|
||||
0xde, 0xf4, 0x27, 0x18, 0x1d, 0x20, 0x36, 0xd9, 0x2e, 0xbf, 0x61, 0x05, 0x56, 0x80, 0x99, 0xc0,
|
||||
0x8c, 0x82, 0x4f, 0x60, 0x6b, 0x9a, 0x40, 0x3a, 0xec, 0x81, 0xac, 0x36, 0x35, 0x90, 0xb9, 0xf8,
|
||||
0xb9, 0x83, 0x6d, 0x5e, 0x99, 0x66, 0x24, 0x6d, 0x40, 0xdb, 0xc2, 0xa8, 0xe3, 0xfd, 0x0e, 0xb6,
|
||||
0xc7, 0xe0, 0x37, 0x58, 0x8b, 0x83, 0x72, 0x60, 0x4d, 0x5c, 0x17, 0xc9, 0x77, 0x6f, 0x43, 0xfd,
|
||||
0x35, 0xc3, 0x79, 0x43, 0x44, 0x03, 0x6e, 0x86, 0x8a, 0x65, 0xdf, 0x91, 0xd8, 0x33, 0x0d, 0x79,
|
||||
0xbf, 0x82, 0xce, 0xac, 0xe4, 0x05, 0x4c, 0x57, 0x66, 0xb2, 0x5c, 0x54, 0x6c, 0x97, 0xc1, 0xb7,
|
||||
0x40, 0x32, 0xfe, 0x08, 0x6e, 0xeb, 0x27, 0x0c, 0xe7, 0x29, 0x7c, 0x0a, 0xb0, 0x05, 0x61, 0xd2,
|
||||
0x70, 0x76, 0xe3, 0x89, 0xe0, 0xa1, 0x71, 0x43, 0x8d, 0x46, 0x9a, 0x1c, 0x44, 0x66, 0xcc, 0x04,
|
||||
0x03, 0x3d, 0x0e, 0xbd, 0xf7, 0xc1, 0x7b, 0x93, 0x14, 0xd2, 0xb5, 0x0b, 0x37, 0xa7, 0xb9, 0x8e,
|
||||
0x63, 0xde, 0x9b, 0x28, 0xf2, 0x6e, 0xc3, 0xad, 0x0b, 0x39, 0x48, 0x88, 0xab, 0xa7, 0x2a, 0xe9,
|
||||
0xc4, 0xb8, 0x82, 0x7e, 0xa6, 0x27, 0x1e, 0xc2, 0x28, 0x40, 0x58, 0xe6, 0x2c, 0x0c, 0x73, 0x33,
|
||||
0x69, 0xe9, 0x8d, 0xf7, 0x47, 0xd8, 0x7a, 0x89, 0x11, 0xb6, 0xe6, 0x74, 0xe3, 0xe4, 0x01, 0xd4,
|
||||
0xbb, 0x71, 0x16, 0x54, 0x82, 0x3a, 0x7f, 0x3a, 0xb1, 0x0f, 0x3b, 0x5d, 0x6b, 0xe2, 0x5f, 0x20,
|
||||
0xa5, 0x37, 0x60, 0x7b, 0x46, 0x3f, 0x79, 0xd6, 0x82, 0xa6, 0xcc, 0x36, 0x92, 0x8c, 0x5f, 0x2f,
|
||||
0x60, 0x7d, 0x8c, 0x90, 0x57, 0x87, 0xd8, 0x68, 0x2d, 0x2b, 0xcd, 0x67, 0xcf, 0x65, 0x66, 0xd6,
|
||||
0x2d, 0x33, 0x0b, 0xaf, 0x2d, 0xe5, 0x62, 0x29, 0x58, 0xaa, 0x54, 0xb5, 0x1b, 0x88, 0x0c, 0xfa,
|
||||
0x03, 0xb8, 0x38, 0x66, 0x20, 0xf2, 0x3c, 0x11, 0x51, 0x6c, 0xe2, 0xf4, 0x43, 0x58, 0xb0, 0x48,
|
||||
0xa4, 0xee, 0xe3, 0xdc, 0x61, 0x6b, 0x5f, 0xa0, 0xee, 0x31, 0xb8, 0xc8, 0x27, 0x67, 0xbb, 0x71,
|
||||
0xa3, 0x30, 0xfe, 0xed, 0x40, 0x67, 0x96, 0x44, 0x7e, 0xe2, 0x75, 0x79, 0x8c, 0x4f, 0x88, 0xee,
|
||||
0x11, 0xe6, 0xc0, 0x47, 0xe0, 0xda, 0xe0, 0x02, 0xda, 0xf1, 0xeb, 0xfb, 0xe6, 0x49, 0x9a, 0x95,
|
||||
0xb1, 0x9a, 0xe1, 0x74, 0xf5, 0x7f, 0x99, 0x96, 0xb2, 0x8c, 0x4d, 0xec, 0x7e, 0x0a, 0xeb, 0xd2,
|
||||
0xe3, 0xa0, 0x97, 0x73, 0x64, 0x0a, 0x83, 0xc4, 0x7c, 0x67, 0x34, 0x24, 0x7c, 0xa8, 0xd1, 0x6f,
|
||||
0x0b, 0x79, 0xe1, 0x58, 0x4f, 0x0a, 0xb5, 0x5f, 0x23, 0xd0, 0x90, 0x7a, 0x91, 0x3e, 0x85, 0xfa,
|
||||
0x40, 0x59, 0x16, 0xb0, 0x38, 0x62, 0xfa, 0x55, 0x72, 0xf6, 0xaf, 0x4f, 0xcf, 0xa5, 0x07, 0x92,
|
||||
0xe8, 0x3b, 0x9a, 0x55, 0x6d, 0xdc, 0xfb, 0xb0, 0x69, 0xf5, 0xd1, 0x49, 0xb9, 0xaf, 0x28, 0x1d,
|
||||
0x1b, 0x16, 0xcd, 0x64, 0x4b, 0xde, 0xca, 0x0b, 0xfd, 0xa2, 0x10, 0xfe, 0xb9, 0x06, 0x2d, 0x19,
|
||||
0x2e, 0xbb, 0xe3, 0xb8, 0xbf, 0x80, 0x55, 0xcd, 0x4d, 0x77, 0xe9, 0x02, 0xf3, 0x88, 0xe9, 0x42,
|
||||
0xcb, 0x96, 0x2e, 0xb4, 0x6c, 0x5e, 0x3c, 0x97, 0xe7, 0xc4, 0xd3, 0x64, 0xb8, 0xda, 0xfa, 0x70,
|
||||
0xb0, 0x3d, 0xe2, 0x83, 0x54, 0xf0, 0x6a, 0xe2, 0xf7, 0x61, 0xb3, 0x0a, 0x2f, 0x90, 0xfa, 0xcf,
|
||||
0x31, 0x42, 0x79, 0x2a, 0x0f, 0x29, 0x15, 0x2f, 0xfb, 0x3c, 0x39, 0x64, 0x25, 0x8e, 0xb3, 0xcf,
|
||||
0xb3, 0x05, 0x9e, 0x02, 0xef, 0x37, 0xb0, 0x7b, 0xf1, 0xf1, 0xc5, 0xea, 0x5e, 0x1f, 0x64, 0x05,
|
||||
0xc9, 0x09, 0xad, 0xba, 0x9f, 0x25, 0x51, 0x00, 0xfe, 0x24, 0xff, 0xd1, 0xe3, 0xd5, 0xba, 0x7f,
|
||||
0xdb, 0xa4, 0xcd, 0xc9, 0xc0, 0xd2, 0xbc, 0x8a, 0xbe, 0x07, 0x6d, 0x35, 0x00, 0xcb, 0xcf, 0xeb,
|
||||
0x1c, 0x3f, 0xb2, 0xa5, 0x4d, 0x34, 0xf7, 0xae, 0x2b, 0xc2, 0xe4, 0x6d, 0x52, 0xcf, 0x17, 0x9f,
|
||||
0xba, 0x79, 0xde, 0xe3, 0x89, 0x23, 0x88, 0x49, 0xe6, 0xc9, 0xfb, 0xf4, 0x76, 0x36, 0xcb, 0xcf,
|
||||
0x9d, 0x39, 0xa2, 0x48, 0x0f, 0x3e, 0x65, 0xb2, 0xe7, 0x5a, 0x7d, 0xe2, 0x20, 0x09, 0xe5, 0xeb,
|
||||
0x52, 0x99, 0x59, 0x5e, 0xc0, 0x9d, 0x37, 0x72, 0xbd, 0xeb, 0x0c, 0x83, 0x35, 0x69, 0x57, 0x82,
|
||||
0x55, 0x93, 0x55, 0x78, 0x81, 0xa2, 0xb8, 0x0f, 0x8d, 0x07, 0xac, 0xf7, 0xaa, 0x1c, 0x57, 0xe0,
|
||||
0x2e, 0x38, 0xbd, 0x34, 0xe9, 0x95, 0x39, 0x06, 0xa1, 0x37, 0xa2, 0xc6, 0x63, 0x43, 0x38, 0x6f,
|
||||
0x34, 0xcd, 0x11, 0x52, 0xf0, 0x3e, 0x5c, 0xe1, 0xc3, 0x49, 0x60, 0x9b, 0x7b, 0xe6, 0xff, 0xee,
|
||||
0x63, 0x89, 0xfa, 0x9a, 0xd8, 0x5d, 0x55, 0xff, 0x7e, 0x7f, 0xfc, 0xbf, 0x00, 0x00, 0x00, 0xff,
|
||||
0xff, 0x02, 0xd3, 0x4c, 0xf2, 0x6e, 0x17, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -71,16 +71,36 @@ func (KeyspaceIdType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0,
|
|||
type TabletType int32
|
||||
|
||||
const (
|
||||
TabletType_UNKNOWN TabletType = 0
|
||||
TabletType_MASTER TabletType = 1
|
||||
TabletType_REPLICA TabletType = 2
|
||||
TabletType_RDONLY TabletType = 3
|
||||
TabletType_BATCH TabletType = 3
|
||||
TabletType_SPARE TabletType = 4
|
||||
// UNKNOWN is not a valid value.
|
||||
TabletType_UNKNOWN TabletType = 0
|
||||
// MASTER is the master server for the shard. Only MASTER allows DMLs.
|
||||
TabletType_MASTER TabletType = 1
|
||||
// REPLICA is a slave type. It is used to serve live traffic.
|
||||
// A REPLICA can be promoted to MASTER. A demoted MASTER will go to REPLICA.
|
||||
TabletType_REPLICA TabletType = 2
|
||||
// RDONLY (old name) / BATCH (new name) is used to serve traffic for
|
||||
// long-running jobs. It is a separate type from REPLICA so
|
||||
// long-running queries don't affect web-like traffic.
|
||||
TabletType_RDONLY TabletType = 3
|
||||
TabletType_BATCH TabletType = 3
|
||||
// SPARE is a type of servers that cannot serve queries, but is available
|
||||
// in case an extra server is needed.
|
||||
TabletType_SPARE TabletType = 4
|
||||
// EXPERIMENTAL is like SPARE, except it can serve queries. This
|
||||
// type can be used for usages not planned by Vitess, like online
|
||||
// export to another storage engine.
|
||||
TabletType_EXPERIMENTAL TabletType = 5
|
||||
TabletType_BACKUP TabletType = 6
|
||||
TabletType_RESTORE TabletType = 7
|
||||
TabletType_WORKER TabletType = 8
|
||||
// BACKUP is the type a server goes to when taking a backup. No queries
|
||||
// can be served in BACKUP mode.
|
||||
TabletType_BACKUP TabletType = 6
|
||||
// RESTORE is the type a server uses when restoring a backup, at
|
||||
// startup time. No queries can be served in RESTORE mode.
|
||||
TabletType_RESTORE TabletType = 7
|
||||
// WORKER is the type a server goes into when used by a vtworker
|
||||
// process to perform an offline action. It is a serving type (as
|
||||
// the vtworker processes may need queries to run). In this state,
|
||||
// this tablet is dedicated to the vtworker process that uses it.
|
||||
TabletType_WORKER TabletType = 8
|
||||
)
|
||||
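As a rough usage sketch of the documented values (the helper and its classification are illustrative, not part of this change; EXPERIMENTAL and WORKER are left out for brevity):

package main

import (
	topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)

// servesQueryTraffic loosely mirrors the comments above: MASTER, REPLICA and
// RDONLY/BATCH serve queries, the remaining types do not (simplified).
func servesQueryTraffic(tt topodatapb.TabletType) bool {
	switch tt {
	case topodatapb.TabletType_MASTER,
		topodatapb.TabletType_REPLICA,
		topodatapb.TabletType_RDONLY: // BATCH shares value 3 with RDONLY
		return true
	}
	return false
}

func main() {
	_ = servesQueryTraffic(topodatapb.TabletType_REPLICA) // true
}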
|
||||
var TabletType_name = map[int32]string{
|
||||
|
@ -163,8 +183,6 @@ type Tablet struct {
|
|||
DbNameOverride string `protobuf:"bytes,9,opt,name=db_name_override,json=dbNameOverride" json:"db_name_override,omitempty"`
|
||||
// tablet tags
|
||||
Tags map[string]string `protobuf:"bytes,10,rep,name=tags" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
// tablet health information
|
||||
HealthMap map[string]string `protobuf:"bytes,11,rep,name=health_map,json=healthMap" json:"health_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
}
|
||||
|
||||
func (m *Tablet) Reset() { *m = Tablet{} }
|
||||
|
@ -200,13 +218,6 @@ func (m *Tablet) GetTags() map[string]string {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *Tablet) GetHealthMap() map[string]string {
|
||||
if m != nil {
|
||||
return m.HealthMap
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A Shard contains data about a subset of the data within a keyspace.
|
||||
type Shard struct {
|
||||
// master_alias is the tablet alias of the master for the shard.
|
||||
|
@ -412,8 +423,6 @@ type EndPoint struct {
|
|||
Host string `protobuf:"bytes,2,opt,name=host" json:"host,omitempty"`
|
||||
// The ports opened for service.
|
||||
PortMap map[string]int32 `protobuf:"bytes,3,rep,name=port_map,json=portMap" json:"port_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
|
||||
// The health entries.
|
||||
HealthMap map[string]string `protobuf:"bytes,4,rep,name=health_map,json=healthMap" json:"health_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
}
|
||||
|
||||
func (m *EndPoint) Reset() { *m = EndPoint{} }
|
||||
|
@ -428,13 +437,6 @@ func (m *EndPoint) GetPortMap() map[string]int32 {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *EndPoint) GetHealthMap() map[string]string {
|
||||
if m != nil {
|
||||
return m.HealthMap
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EndPoints corresponds to a list of tablets.
|
||||
type EndPoints struct {
|
||||
Entries []*EndPoint `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
|
||||
|
@ -581,80 +583,78 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 1196 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x72, 0x22, 0x45,
|
||||
0x14, 0x96, 0xe1, 0x27, 0x70, 0x60, 0xd9, 0xd9, 0x36, 0x6b, 0x4d, 0x8d, 0x65, 0x6d, 0xe4, 0xc6,
|
||||
0xad, 0xa8, 0x68, 0x65, 0xfd, 0x89, 0x29, 0xb5, 0x42, 0x90, 0x75, 0x63, 0x12, 0x82, 0x0d, 0xa9,
|
||||
0x35, 0x57, 0x53, 0x03, 0xf4, 0x26, 0x53, 0x0b, 0x33, 0xe3, 0x74, 0x43, 0x15, 0xcf, 0xb0, 0x17,
|
||||
0xde, 0xfb, 0x04, 0x3e, 0x81, 0xb7, 0x3e, 0x91, 0x97, 0xde, 0xdb, 0x7d, 0x7a, 0x06, 0x06, 0x48,
|
||||
0x62, 0xa2, 0xf1, 0x8a, 0x73, 0xfa, 0xfc, 0xf4, 0xf9, 0xfd, 0x7a, 0x80, 0xaa, 0x08, 0xc2, 0x60,
|
||||
0xe8, 0x0a, 0xb7, 0x1e, 0x46, 0x81, 0x08, 0x48, 0x31, 0xe1, 0x6b, 0x3b, 0x50, 0x3c, 0x62, 0x33,
|
||||
0xea, 0xfa, 0x17, 0x8c, 0x6c, 0x42, 0x9e, 0x0b, 0x37, 0x12, 0x56, 0x66, 0x2b, 0xf3, 0xb4, 0x42,
|
||||
0x35, 0x43, 0x4c, 0xc8, 0x32, 0x7f, 0x68, 0x19, 0x78, 0xa6, 0xc8, 0xda, 0x33, 0x28, 0xf7, 0xdc,
|
||||
0xfe, 0x88, 0x89, 0xc6, 0xc8, 0x73, 0x39, 0x21, 0x90, 0x1b, 0xb0, 0xd1, 0x08, 0xad, 0x4a, 0x14,
|
||||
0x69, 0x65, 0x34, 0xf1, 0xb4, 0xd1, 0x03, 0xaa, 0xc8, 0xda, 0x5f, 0x39, 0x28, 0x68, 0x2b, 0xf2,
|
||||
0x21, 0xe4, 0x5d, 0x65, 0x89, 0x16, 0xe5, 0x9d, 0xc7, 0xf5, 0x79, 0x74, 0x29, 0xb7, 0x54, 0xeb,
|
||||
0x10, 0x1b, 0x8a, 0x97, 0x01, 0x17, 0xbe, 0x3b, 0x66, 0xe8, 0xae, 0x44, 0xe7, 0x3c, 0xa9, 0x82,
|
||||
0xe1, 0x85, 0x56, 0x16, 0x4f, 0x25, 0x45, 0x76, 0xa1, 0x18, 0x06, 0x91, 0x70, 0xc6, 0x6e, 0x68,
|
||||
0xe5, 0xb6, 0xb2, 0xd2, 0xf7, 0x7b, 0xab, 0xbe, 0xeb, 0x1d, 0xa9, 0x70, 0xe2, 0x86, 0x2d, 0x5f,
|
||||
0x44, 0x33, 0xba, 0x11, 0x6a, 0x4e, 0xdd, 0xf2, 0x9a, 0xcd, 0x78, 0xe8, 0x0e, 0x98, 0x95, 0xd7,
|
||||
0xb7, 0x24, 0x3c, 0x96, 0xe5, 0xd2, 0x8d, 0x86, 0x56, 0x01, 0x05, 0x9a, 0x21, 0x9f, 0x40, 0x49,
|
||||
0x6a, 0x38, 0x91, 0xaa, 0x9c, 0xb5, 0x81, 0x89, 0x90, 0xc5, 0x65, 0x49, 0x4d, 0xd1, 0x8d, 0xae,
|
||||
0xee, 0x53, 0xc8, 0x89, 0x59, 0xc8, 0xac, 0xa2, 0xd4, 0xad, 0xee, 0x6c, 0xae, 0x06, 0xd6, 0x93,
|
||||
0x32, 0x8a, 0x1a, 0x52, 0xd3, 0x1c, 0xf6, 0x1d, 0x95, 0xa1, 0x13, 0x4c, 0x59, 0x14, 0x79, 0x43,
|
||||
0x66, 0x95, 0xf0, 0xee, 0xea, 0xb0, 0xdf, 0x96, 0xc7, 0xa7, 0xf1, 0x29, 0xa9, 0x4b, 0x9f, 0xee,
|
||||
0x05, 0xb7, 0x00, 0x93, 0xb5, 0xd7, 0x92, 0xed, 0x49, 0xa1, 0xce, 0x14, 0xf5, 0xc8, 0xb7, 0x00,
|
||||
0x97, 0xcc, 0x1d, 0x89, 0x4b, 0x2c, 0x51, 0x19, 0xad, 0x9e, 0xac, 0x59, 0xbd, 0x40, 0x95, 0x79,
|
||||
0x91, 0x4a, 0x97, 0x09, 0x6f, 0xef, 0x41, 0x25, 0x5d, 0x3f, 0xd5, 0x66, 0x99, 0x5f, 0xdc, 0x79,
|
||||
0x45, 0xaa, 0x62, 0x4d, 0xdd, 0xd1, 0x44, 0xf7, 0x2a, 0x4f, 0x35, 0xb3, 0x67, 0xec, 0x66, 0xec,
|
||||
0x2f, 0xa1, 0x34, 0x0f, 0xe7, 0x9f, 0x0c, 0x4b, 0x69, 0xc3, 0xaf, 0xa1, 0xba, 0x1c, 0xd1, 0x5d,
|
||||
0xac, 0x6b, 0x6f, 0x0a, 0x90, 0xef, 0x62, 0xc7, 0x76, 0xa1, 0x32, 0x76, 0xb9, 0x60, 0x91, 0x73,
|
||||
0x8b, 0xe9, 0x2b, 0x6b, 0x55, 0x3d, 0xe1, 0x4b, 0xbd, 0x36, 0x6e, 0xd1, 0xeb, 0x6f, 0xa0, 0xc2,
|
||||
0x59, 0x34, 0x65, 0x43, 0x47, 0x35, 0x94, 0xcb, 0x11, 0x5d, 0xe9, 0x0f, 0x46, 0x54, 0xef, 0xa2,
|
||||
0x0e, 0x76, 0xbe, 0xcc, 0xe7, 0x34, 0x27, 0xfb, 0xf0, 0x80, 0x07, 0x93, 0x68, 0xc0, 0x1c, 0x9c,
|
||||
0x35, 0x1e, 0x0f, 0xf3, 0xbb, 0x6b, 0xf6, 0xa8, 0x84, 0x34, 0xad, 0xf0, 0x05, 0xc3, 0x55, 0x3d,
|
||||
0xd4, 0x1e, 0x72, 0x39, 0xcc, 0x59, 0x55, 0x0f, 0x64, 0xc8, 0x73, 0x78, 0x28, 0x30, 0x47, 0x67,
|
||||
0x10, 0xc8, 0x42, 0x06, 0x52, 0x5e, 0x58, 0x5d, 0x13, 0xed, 0x59, 0x97, 0xa2, 0xa9, 0xb5, 0x68,
|
||||
0x55, 0xa4, 0x59, 0x6e, 0x9f, 0x03, 0x2c, 0x42, 0x27, 0x9f, 0x43, 0x39, 0xf6, 0x8a, 0xf3, 0x9d,
|
||||
0xb9, 0x61, 0xbe, 0x41, 0xcc, 0xe9, 0x45, 0x88, 0x46, 0x2a, 0x44, 0xfb, 0xd7, 0x0c, 0x94, 0x53,
|
||||
0x69, 0x25, 0x40, 0x92, 0x99, 0x03, 0xc9, 0xd2, 0xaa, 0x1a, 0xd7, 0xad, 0x6a, 0xf6, 0xda, 0x55,
|
||||
0xcd, 0xdd, 0xa2, 0x7d, 0xef, 0x40, 0x01, 0x03, 0x4d, 0xca, 0x17, 0x73, 0xf6, 0x1f, 0x19, 0x78,
|
||||
0xb0, 0x54, 0x99, 0x7b, 0xcd, 0x9d, 0xec, 0xc0, 0xe3, 0xa1, 0xc7, 0x95, 0x96, 0xf3, 0xf3, 0x84,
|
||||
0x45, 0x33, 0x47, 0xcd, 0x84, 0x27, 0xd3, 0x54, 0xd9, 0x14, 0xe9, 0xdb, 0xb1, 0xf0, 0x47, 0x25,
|
||||
0xeb, 0x6a, 0x11, 0xf9, 0x18, 0x48, 0x7f, 0xe4, 0x0e, 0x5e, 0x8f, 0x3c, 0x39, 0xae, 0x72, 0xdc,
|
||||
0x74, 0xd8, 0x39, 0x74, 0xfb, 0x28, 0x25, 0xc1, 0x40, 0x78, 0xed, 0x4f, 0x03, 0xf1, 0x5e, 0x57,
|
||||
0xeb, 0x53, 0xd8, 0xc4, 0x02, 0x79, 0xfe, 0x85, 0x1c, 0x88, 0xd1, 0x64, 0xec, 0x23, 0xe8, 0xc4,
|
||||
0x7b, 0x45, 0x12, 0x59, 0x13, 0x45, 0x0a, 0x77, 0xc8, 0x0f, 0xeb, 0x16, 0x98, 0xb7, 0x81, 0x79,
|
||||
0x5b, 0x4b, 0x45, 0xc5, 0x3b, 0x0e, 0xf5, 0x74, 0xaf, 0xf8, 0xc2, 0x1a, 0x6c, 0xc3, 0x23, 0x1e,
|
||||
0x8e, 0x3c, 0xa1, 0x67, 0x5c, 0xba, 0x9b, 0xf8, 0x02, 0x33, 0xcd, 0xd3, 0x87, 0x28, 0xc0, 0x01,
|
||||
0x68, 0xaa, 0x63, 0xb9, 0x10, 0xc9, 0x3e, 0xbd, 0x8a, 0x82, 0x31, 0x5f, 0x07, 0xf7, 0xe4, 0xbe,
|
||||
0x78, 0xa5, 0x9e, 0x4b, 0xad, 0x64, 0xa5, 0x14, 0xcd, 0xed, 0x49, 0x32, 0xb2, 0x8a, 0xbd, 0xdf,
|
||||
0xb6, 0xa5, 0x07, 0x32, 0xbb, 0x3c, 0x90, 0xb5, 0x37, 0x19, 0x30, 0xf5, 0x7e, 0x32, 0x99, 0xd2,
|
||||
0xc0, 0x15, 0x5e, 0xe0, 0xcb, 0xdb, 0xf3, 0x7e, 0x30, 0x64, 0x0a, 0x81, 0x56, 0x00, 0x78, 0x55,
|
||||
0xb5, 0xde, 0x96, 0x7a, 0x54, 0x6b, 0xdb, 0xfb, 0x90, 0x53, 0xac, 0xc2, 0xb1, 0x38, 0xf8, 0xdb,
|
||||
0xe0, 0x98, 0x58, 0x30, 0xb5, 0xdf, 0x64, 0xf7, 0x5b, 0xfe, 0xb0, 0x13, 0x78, 0xbe, 0xb8, 0x62,
|
||||
0xb3, 0xe4, 0x43, 0xae, 0x9e, 0xd6, 0x78, 0xab, 0x90, 0x26, 0x7b, 0xa9, 0x27, 0x35, 0xbb, 0x1a,
|
||||
0x6e, 0xe2, 0xeb, 0x9a, 0x47, 0x75, 0x7f, 0xe9, 0xb5, 0xd1, 0x3d, 0x7b, 0xff, 0x0a, 0xeb, 0xff,
|
||||
0xe7, 0xbd, 0xf9, 0x6f, 0xcf, 0xc6, 0x57, 0x50, 0x4a, 0xe2, 0xe3, 0xe4, 0x23, 0xd8, 0x60, 0xd2,
|
||||
0x83, 0x37, 0x6f, 0x19, 0x59, 0xcf, 0x82, 0x26, 0x2a, 0xb5, 0x10, 0x8a, 0xdd, 0x68, 0xaa, 0xe1,
|
||||
0x4b, 0x96, 0x34, 0xb5, 0x52, 0x48, 0xdf, 0xfd, 0x35, 0x79, 0x02, 0xf1, 0x6b, 0xe4, 0xe0, 0x77,
|
||||
0x96, 0x9e, 0x31, 0xd0, 0x47, 0x4d, 0x79, 0x52, 0x3b, 0x83, 0x6a, 0x3c, 0x39, 0xaf, 0x58, 0xc4,
|
||||
0x7c, 0xb9, 0xda, 0xf7, 0x71, 0x6f, 0xed, 0xf7, 0x9c, 0xc4, 0xe2, 0x68, 0x3a, 0xc7, 0x8b, 0xef,
|
||||
0x01, 0x42, 0xf9, 0x45, 0xe8, 0xa9, 0xc9, 0x4c, 0x2a, 0xf1, 0x41, 0x6a, 0x78, 0x17, 0xaa, 0xf3,
|
||||
0x7d, 0xec, 0x24, 0xfa, 0x34, 0x65, 0x7a, 0x2d, 0xf0, 0x18, 0x77, 0x06, 0x9e, 0xec, 0xbf, 0x00,
|
||||
0x9e, 0x06, 0x94, 0x53, 0x60, 0x12, 0xcf, 0xe5, 0xd6, 0xd5, 0x79, 0xa4, 0xe0, 0x04, 0x16, 0x70,
|
||||
0x72, 0x35, 0x76, 0xe5, 0xaf, 0xc4, 0x2e, 0xfb, 0x97, 0x0c, 0x3c, 0x5a, 0x2b, 0x87, 0x42, 0xa0,
|
||||
0xd4, 0x17, 0xc2, 0xcd, 0x08, 0xb4, 0xf8, 0x34, 0x20, 0x4d, 0x30, 0xf5, 0x95, 0x51, 0xd2, 0x6a,
|
||||
0x0d, 0x46, 0xe5, 0x74, 0x0d, 0x96, 0x67, 0x41, 0x46, 0xb4, 0xc4, 0x73, 0xdb, 0xb9, 0x0f, 0x2c,
|
||||
0xbc, 0xe1, 0x19, 0xde, 0xde, 0x81, 0xea, 0x72, 0x1f, 0x48, 0x09, 0xf2, 0x67, 0xed, 0x6e, 0xab,
|
||||
0x67, 0xbe, 0x45, 0x00, 0x0a, 0x67, 0x87, 0xed, 0xde, 0x17, 0x9f, 0x99, 0x19, 0x75, 0x7c, 0x70,
|
||||
0xde, 0x6b, 0x75, 0x4d, 0x63, 0x5b, 0x96, 0x09, 0x16, 0x57, 0x91, 0x32, 0x6c, 0x9c, 0xb5, 0x8f,
|
||||
0xda, 0xa7, 0x2f, 0xdb, 0xda, 0xe4, 0xa4, 0xd1, 0xed, 0xb5, 0xa8, 0x34, 0x91, 0x02, 0xda, 0xea,
|
||||
0x1c, 0x1f, 0x36, 0x1b, 0xa6, 0xa1, 0x04, 0xf4, 0xbb, 0xd3, 0xf6, 0xf1, 0xb9, 0x99, 0x45, 0x5f,
|
||||
0x8d, 0x5e, 0xf3, 0x85, 0x26, 0xbb, 0x9d, 0x06, 0x6d, 0x99, 0x39, 0xb9, 0xf3, 0x95, 0xd6, 0x4f,
|
||||
0x9d, 0x16, 0x3d, 0x3c, 0x69, 0xb5, 0x7b, 0x8d, 0x63, 0x33, 0xaf, 0x6c, 0x0e, 0x1a, 0xcd, 0xa3,
|
||||
0xb3, 0x8e, 0x59, 0xd0, 0xce, 0xba, 0xbd, 0x53, 0xa9, 0xba, 0xa1, 0x04, 0x2f, 0x4f, 0xe9, 0x91,
|
||||
0xbc, 0xa5, 0x68, 0x1b, 0x66, 0xe6, 0xc0, 0x06, 0x6b, 0x10, 0x8c, 0xeb, 0xb3, 0x60, 0x22, 0x26,
|
||||
0x7d, 0x56, 0x9f, 0x7a, 0x82, 0x71, 0xae, 0xff, 0x40, 0xf5, 0x0b, 0xf8, 0xf3, 0xec, 0xef, 0x00,
|
||||
0x00, 0x00, 0xff, 0xff, 0xb0, 0x7e, 0xeb, 0x5d, 0x59, 0x0d, 0x00, 0x00,
|
||||
// 1162 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x6e, 0xe3, 0x44,
|
||||
0x14, 0xc6, 0x4e, 0x9c, 0x26, 0x27, 0xd9, 0xac, 0x3b, 0x74, 0x91, 0x65, 0x84, 0xb6, 0xf2, 0x0d,
|
||||
0xab, 0x02, 0x01, 0x75, 0xf9, 0x29, 0x95, 0x90, 0x36, 0x0d, 0x59, 0xe8, 0xb6, 0x4d, 0xc3, 0x24,
|
||||
0xd5, 0xd2, 0x2b, 0xcb, 0x49, 0x66, 0xbb, 0xd6, 0x26, 0xb6, 0xf1, 0x38, 0x91, 0xf2, 0x0c, 0x7b,
|
||||
0xc1, 0x3d, 0x0f, 0xc1, 0x25, 0xb7, 0x3c, 0x11, 0x8f, 0x80, 0xc4, 0xcc, 0x19, 0x3b, 0x71, 0x92,
|
||||
0xb6, 0x74, 0xa1, 0x57, 0x9d, 0xe3, 0xf3, 0xff, 0xcd, 0xf9, 0xce, 0xa4, 0x50, 0x4f, 0xc2, 0x28,
|
||||
0x1c, 0x79, 0x89, 0xd7, 0x88, 0xe2, 0x30, 0x09, 0x49, 0x39, 0x93, 0x9d, 0x7d, 0x28, 0x9f, 0xb0,
|
||||
0x39, 0xf5, 0x82, 0x2b, 0x46, 0x76, 0xc0, 0xe0, 0x89, 0x17, 0x27, 0x96, 0xb6, 0xab, 0x3d, 0xa9,
|
||||
0x51, 0x25, 0x10, 0x13, 0x0a, 0x2c, 0x18, 0x59, 0x3a, 0x7e, 0x93, 0x47, 0xe7, 0x29, 0x54, 0xfb,
|
||||
0xde, 0x60, 0xcc, 0x92, 0xe6, 0xd8, 0xf7, 0x38, 0x21, 0x50, 0x1c, 0xb2, 0xf1, 0x18, 0xbd, 0x2a,
|
||||
0x14, 0xcf, 0xd2, 0x69, 0xea, 0x2b, 0xa7, 0x07, 0x54, 0x1e, 0x9d, 0xbf, 0x0b, 0x50, 0x52, 0x5e,
|
||||
0xe4, 0x13, 0x30, 0x3c, 0xe9, 0x89, 0x1e, 0xd5, 0xfd, 0x47, 0x8d, 0x45, 0x75, 0xb9, 0xb0, 0x54,
|
||||
0xd9, 0x10, 0x1b, 0xca, 0xaf, 0x43, 0x9e, 0x04, 0xde, 0x84, 0x61, 0xb8, 0x0a, 0x5d, 0xc8, 0xa4,
|
||||
0x0e, 0xba, 0x1f, 0x59, 0x05, 0xfc, 0x2a, 0x4e, 0xe4, 0x00, 0xca, 0x51, 0x18, 0x27, 0xee, 0xc4,
|
||||
0x8b, 0xac, 0xe2, 0x6e, 0x41, 0xc4, 0xfe, 0x68, 0x3d, 0x76, 0xa3, 0x2b, 0x0c, 0xce, 0xbc, 0xa8,
|
||||
0x1d, 0x24, 0xf1, 0x9c, 0x6e, 0x45, 0x4a, 0x92, 0x59, 0xde, 0xb0, 0x39, 0x8f, 0xbc, 0x21, 0xb3,
|
||||
0x0c, 0x95, 0x25, 0x93, 0x11, 0x96, 0xd7, 0x5e, 0x3c, 0xb2, 0x4a, 0xa8, 0x50, 0x02, 0xf9, 0x1c,
|
||||
0x2a, 0xc2, 0xc2, 0x8d, 0x25, 0x72, 0xd6, 0x16, 0x36, 0x42, 0x96, 0xc9, 0x32, 0x4c, 0x31, 0x8c,
|
||||
0x42, 0xf7, 0x09, 0x14, 0x93, 0x79, 0xc4, 0xac, 0xb2, 0xb0, 0xad, 0xef, 0xef, 0xac, 0x17, 0xd6,
|
||||
0x17, 0x3a, 0x8a, 0x16, 0xc2, 0xd2, 0x1c, 0x0d, 0x5c, 0xd9, 0xa1, 0x1b, 0xce, 0x58, 0x1c, 0xfb,
|
||||
0x23, 0x66, 0x55, 0x30, 0x77, 0x7d, 0x34, 0xe8, 0x88, 0xcf, 0xe7, 0xe9, 0x57, 0xd2, 0x10, 0x31,
|
||||
0xbd, 0x2b, 0x6e, 0x01, 0x36, 0x6b, 0x6f, 0x34, 0xdb, 0x17, 0x4a, 0xd5, 0x29, 0xda, 0xd9, 0x87,
|
||||
0x50, 0xcb, 0xf7, 0x2f, 0xaf, 0x49, 0xd4, 0x97, 0xde, 0x9c, 0x3c, 0xca, 0x66, 0x67, 0xde, 0x78,
|
||||
0xaa, 0xb0, 0x36, 0xa8, 0x12, 0x0e, 0xf5, 0x03, 0xcd, 0xfe, 0x06, 0x2a, 0x8b, 0x70, 0xff, 0xe6,
|
||||
0x58, 0xc9, 0x39, 0xbe, 0x28, 0x96, 0xab, 0x66, 0xcd, 0x79, 0x5b, 0x02, 0xa3, 0x87, 0xc8, 0x1d,
|
||||
0x40, 0x6d, 0xe2, 0xf1, 0x84, 0xc5, 0xee, 0x1d, 0xa6, 0xa0, 0xaa, 0x4c, 0xd5, 0xa4, 0xad, 0x60,
|
||||
0xae, 0xdf, 0x01, 0xf3, 0xef, 0xa0, 0xc6, 0x59, 0x3c, 0x63, 0x23, 0x57, 0x02, 0xcb, 0xc5, 0xa8,
|
||||
0xac, 0xe1, 0x84, 0x15, 0x35, 0x7a, 0x68, 0x83, 0x37, 0x50, 0xe5, 0x8b, 0x33, 0x27, 0xcf, 0xe0,
|
||||
0x01, 0x0f, 0xa7, 0xf1, 0x90, 0xb9, 0x78, 0xe7, 0x3c, 0x1d, 0xaa, 0x0f, 0x37, 0xfc, 0xd1, 0x08,
|
||||
0xcf, 0xb4, 0xc6, 0x97, 0x02, 0x97, 0xa8, 0x48, 0x3e, 0x70, 0x31, 0x54, 0x05, 0x89, 0x0a, 0x0a,
|
||||
0xe4, 0x39, 0x3c, 0x4c, 0xb0, 0x47, 0x77, 0x18, 0x0a, 0x38, 0x43, 0xa1, 0x2f, 0xad, 0x8f, 0xab,
|
||||
0x8a, 0xac, 0xa0, 0x68, 0x29, 0x2b, 0x5a, 0x4f, 0xf2, 0x22, 0xb7, 0x2f, 0x01, 0x96, 0xa5, 0x93,
|
||||
0xaf, 0xa0, 0x9a, 0x46, 0xc5, 0x39, 0xd3, 0x6e, 0x99, 0x33, 0x48, 0x16, 0xe7, 0x65, 0x89, 0x7a,
|
||||
0xae, 0x44, 0xfb, 0x37, 0x0d, 0xaa, 0xb9, 0xb6, 0x32, 0x42, 0x6b, 0x0b, 0x42, 0xaf, 0x50, 0x46,
|
||||
0xbf, 0x89, 0x32, 0x85, 0x1b, 0x29, 0x53, 0xbc, 0xc3, 0xf5, 0x7d, 0x00, 0x25, 0x2c, 0x34, 0x83,
|
||||
0x2f, 0x95, 0xec, 0x3f, 0x35, 0x78, 0xb0, 0x82, 0xcc, 0xbd, 0xf6, 0x4e, 0xf6, 0xe1, 0xd1, 0xc8,
|
||||
0xe7, 0xd2, 0xca, 0xfd, 0x65, 0xca, 0xe2, 0xb9, 0x2b, 0x67, 0xc2, 0x17, 0x6d, 0xca, 0x6e, 0xca,
|
||||
0xf4, 0xfd, 0x54, 0xf9, 0x93, 0xd4, 0xf5, 0x94, 0x8a, 0x7c, 0x06, 0x64, 0x30, 0xf6, 0x86, 0x6f,
|
||||
0xc6, 0xbe, 0x18, 0x57, 0x31, 0x6e, 0xaa, 0xec, 0x22, 0x86, 0xdd, 0xce, 0x69, 0xb0, 0x10, 0xee,
|
||||
0xfc, 0xa5, 0xe3, 0xde, 0x55, 0x68, 0x7d, 0x01, 0x3b, 0x08, 0x90, 0x1f, 0x5c, 0x89, 0x81, 0x18,
|
||||
0x4f, 0x27, 0x01, 0x92, 0x3f, 0x65, 0x17, 0xc9, 0x74, 0x2d, 0x54, 0x49, 0xfe, 0x93, 0x17, 0x9b,
|
||||
0x1e, 0xd8, 0xb7, 0x8e, 0x7d, 0x5b, 0x2b, 0xa0, 0x62, 0x8e, 0x63, 0x35, 0xdd, 0x6b, 0xb1, 0x10,
|
||||
0x83, 0x3d, 0xd8, 0xe6, 0xd1, 0xd8, 0x4f, 0xd4, 0x8c, 0x8b, 0x70, 0xd3, 0x20, 0xc1, 0x4e, 0x0d,
|
||||
0xfa, 0x10, 0x15, 0x38, 0x00, 0x2d, 0xf9, 0x59, 0x10, 0x22, 0xe3, 0xd3, 0xab, 0x38, 0x9c, 0xf0,
|
||||
0xcd, 0x25, 0x9b, 0xe5, 0x4b, 0x29, 0xf5, 0x5c, 0x58, 0x65, 0x94, 0x92, 0x67, 0x6e, 0x4f, 0xb3,
|
||||
0x91, 0x95, 0xe2, 0xfd, 0x5e, 0x5b, 0x7e, 0x20, 0x0b, 0xab, 0x03, 0xe9, 0xbc, 0xd5, 0xc0, 0x54,
|
||||
0xfc, 0x64, 0xa2, 0xa5, 0xa1, 0x97, 0xf8, 0x61, 0x20, 0xb2, 0x1b, 0x41, 0x38, 0x62, 0x72, 0x03,
|
||||
0xc9, 0x36, 0x1e, 0xaf, 0x91, 0x2f, 0x67, 0xda, 0xe8, 0x08, 0x3b, 0xaa, 0xac, 0xed, 0x67, 0x50,
|
||||
0x94, 0xa2, 0xdc, 0x63, 0x69, 0xf1, 0x77, 0xd9, 0x63, 0xc9, 0x52, 0x70, 0x7e, 0xd7, 0xa0, 0xdc,
|
||||
0x0e, 0x46, 0xdd, 0xd0, 0x0f, 0x92, 0x6b, 0x98, 0x25, 0x1e, 0x54, 0xf9, 0xc4, 0xa5, 0xac, 0xc2,
|
||||
0x33, 0x39, 0xcc, 0x3d, 0x6d, 0x85, 0xf5, 0x72, 0xb3, 0x58, 0xd7, 0x3f, 0x6e, 0xff, 0x67, 0xeb,
|
||||
0x8b, 0xe5, 0x5d, 0x34, 0x0d, 0xe7, 0x5b, 0xa8, 0x64, 0x39, 0x38, 0xf9, 0x14, 0xb6, 0x98, 0x88,
|
||||
0xe3, 0x2f, 0x80, 0x23, 0x9b, 0x95, 0xd0, 0xcc, 0xc4, 0x89, 0xa0, 0xdc, 0x8b, 0x67, 0x6a, 0x89,
|
||||
0x88, 0xc6, 0x72, 0x83, 0x8d, 0xe7, 0x77, 0xdf, 0xe9, 0x8f, 0x21, 0x7d, 0x13, 0x5c, 0xfc, 0xd5,
|
||||
0xa1, 0x6e, 0x1a, 0xd4, 0xa7, 0x96, 0xf8, 0xe2, 0x5c, 0x40, 0x3d, 0xbd, 0xbf, 0x57, 0x2c, 0x66,
|
||||
0x81, 0x20, 0xd8, 0x7d, 0xe4, 0x75, 0xfe, 0x28, 0x8a, 0x8d, 0x18, 0xcf, 0x16, 0xac, 0xfd, 0x01,
|
||||
0x20, 0x12, 0xbf, 0x8f, 0x7c, 0x39, 0x1f, 0x19, 0x12, 0x1f, 0xe7, 0x46, 0x68, 0x69, 0xba, 0x60,
|
||||
0x45, 0x37, 0xb3, 0xa7, 0x39, 0xd7, 0x1b, 0xe9, 0xaf, 0xbf, 0x33, 0xfd, 0x0b, 0xff, 0x81, 0xfe,
|
||||
0x4d, 0xa8, 0xe6, 0x28, 0x9d, 0x32, 0x7a, 0xf7, 0xfa, 0x3e, 0x72, 0xa4, 0x86, 0x25, 0xa9, 0xaf,
|
||||
0xdf, 0x20, 0xc6, 0xb5, 0x1b, 0xc4, 0xfe, 0x55, 0x83, 0xed, 0x0d, 0x38, 0xe4, 0x1e, 0xc8, 0xbd,
|
||||
0xd3, 0xb7, 0xef, 0x81, 0xe5, 0x03, 0x4d, 0x5a, 0x60, 0xaa, 0x94, 0x71, 0x76, 0xd5, 0x6a, 0x25,
|
||||
0x54, 0xf3, 0x18, 0xac, 0xce, 0x82, 0xa8, 0x68, 0x45, 0xe6, 0xb6, 0x7b, 0x1f, 0x1b, 0xe9, 0x96,
|
||||
0xc7, 0x70, 0x6f, 0x1f, 0xea, 0xab, 0xf7, 0x40, 0x2a, 0x60, 0x5c, 0x74, 0x7a, 0xed, 0xbe, 0xf9,
|
||||
0x1e, 0x01, 0x28, 0x5d, 0x1c, 0x77, 0xfa, 0x5f, 0x7f, 0x69, 0x6a, 0xf2, 0xf3, 0xd1, 0x65, 0xbf,
|
||||
0xdd, 0x33, 0xf5, 0x3d, 0x01, 0x13, 0x2c, 0x53, 0x91, 0x2a, 0x6c, 0x5d, 0x74, 0x4e, 0x3a, 0xe7,
|
||||
0x2f, 0x3b, 0xca, 0xe5, 0xac, 0xd9, 0xeb, 0xb7, 0xa9, 0x70, 0x11, 0x0a, 0xda, 0xee, 0x9e, 0x1e,
|
||||
0xb7, 0x9a, 0xa6, 0x2e, 0x15, 0xf4, 0xfb, 0xf3, 0xce, 0xe9, 0xa5, 0x59, 0xc0, 0x58, 0xcd, 0x7e,
|
||||
0xeb, 0x47, 0x75, 0xec, 0x75, 0x9b, 0xb4, 0x6d, 0x16, 0x05, 0xf3, 0x6b, 0xed, 0x9f, 0xbb, 0x6d,
|
||||
0x7a, 0x7c, 0xd6, 0xee, 0xf4, 0x9b, 0xa7, 0xa6, 0x21, 0x7d, 0x8e, 0x9a, 0xad, 0x93, 0x8b, 0xae,
|
||||
0x59, 0x52, 0xc1, 0x7a, 0xfd, 0x73, 0x61, 0xba, 0x25, 0x15, 0x2f, 0xcf, 0xe9, 0x89, 0xc8, 0x52,
|
||||
0xb6, 0x75, 0x53, 0x3b, 0xb2, 0xc1, 0x1a, 0x86, 0x93, 0xc6, 0x3c, 0x9c, 0x26, 0xd3, 0x01, 0x6b,
|
||||
0xcc, 0xfc, 0x84, 0x71, 0xae, 0xfe, 0x9d, 0x18, 0x94, 0xf0, 0xcf, 0xd3, 0x7f, 0x02, 0x00, 0x00,
|
||||
0xff, 0xff, 0x24, 0x31, 0x3d, 0x8a, 0x67, 0x0c, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -8,8 +8,7 @@ package schema
|
|||
// It contains a data structure that's shared between sqlparser & tabletserver
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
"github.com/youtube/vitess/go/sync2"
|
||||
querypb "github.com/youtube/vitess/go/vt/proto/query"
|
||||
|
@ -25,7 +24,7 @@ const (
|
|||
|
||||
// TableColumn contains info about a table's column.
|
||||
type TableColumn struct {
|
||||
Name string
|
||||
Name cistring.CIString
|
||||
Type querypb.Type
|
||||
IsAuto bool
|
||||
Default sqltypes.Value
|
||||
|
@ -67,7 +66,7 @@ func (ta *Table) IsReadCached() bool {
|
|||
// AddColumn adds a column to the Table.
|
||||
func (ta *Table) AddColumn(name string, columnType querypb.Type, defval sqltypes.Value, extra string) {
|
||||
index := len(ta.Columns)
|
||||
ta.Columns = append(ta.Columns, TableColumn{Name: strings.ToLower(name)})
|
||||
ta.Columns = append(ta.Columns, TableColumn{Name: cistring.New(name)})
|
||||
ta.Columns[index].Type = columnType
|
||||
if extra == "auto_increment" {
|
||||
ta.Columns[index].IsAuto = true
|
||||
|
@ -84,8 +83,9 @@ func (ta *Table) AddColumn(name string, columnType querypb.Type, defval sqltypes
|
|||
// FindColumn finds a column in the table. It returns the index if found.
|
||||
// Otherwise, it returns -1.
|
||||
func (ta *Table) FindColumn(name string) int {
|
||||
ciName := cistring.New(name)
|
||||
for i, col := range ta.Columns {
|
||||
if col.Name == name {
|
||||
if col.Name.Equal(ciName) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
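A minimal sketch of the new lookup behavior, written as if it sat beside this package (the column name and type are illustrative; querypb and sqltypes are already imported by the package):

// exampleFindColumn is a hypothetical package-internal sketch, not part of
// this change. It shows that FindColumn now matches regardless of casing.
func exampleFindColumn() {
	var ta Table
	ta.AddColumn("UserId", querypb.Type_INT64, sqltypes.Value{}, "")

	_ = ta.FindColumn("userid")  // 0: lookup goes through cistring, so case is ignored
	_ = ta.FindColumn("missing") // -1: still returned when nothing matches
}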
|
@ -118,25 +118,25 @@ func (ta *Table) SetMysqlStats(tr, dl, il, df sqltypes.Value) {
|
|||
|
||||
// Index contains info about a table index.
|
||||
type Index struct {
|
||||
Name string
|
||||
Name cistring.CIString
|
||||
// Columns are the columns comprising the index.
|
||||
Columns []string
|
||||
Columns []cistring.CIString
|
||||
// Cardinality[i] is the number of distinct values of Columns[i] in the
|
||||
// table.
|
||||
Cardinality []uint64
|
||||
// DataColumns are the primary-key columns for secondary indices and
|
||||
// all the columns for the primary-key index.
|
||||
DataColumns []string
|
||||
DataColumns []cistring.CIString
|
||||
}
|
||||
|
||||
// NewIndex creates a new Index.
|
||||
func NewIndex(name string) *Index {
|
||||
return &Index{name, make([]string, 0, 8), make([]uint64, 0, 8), nil}
|
||||
return &Index{Name: cistring.New(name)}
|
||||
}
|
||||
|
||||
// AddColumn adds a column to the index.
|
||||
func (idx *Index) AddColumn(name string, cardinality uint64) {
|
||||
idx.Columns = append(idx.Columns, strings.ToLower(name))
|
||||
idx.Columns = append(idx.Columns, cistring.New(name))
|
||||
if cardinality == 0 {
|
||||
cardinality = uint64(len(idx.Cardinality) + 1)
|
||||
}
|
||||
|
@ -146,8 +146,9 @@ func (idx *Index) AddColumn(name string, cardinality uint64) {
|
|||
// FindColumn finds a column in the index. It returns the index if found.
|
||||
// Otherwise, it returns -1.
|
||||
func (idx *Index) FindColumn(name string) int {
|
||||
ciName := cistring.New(name)
|
||||
for i, colName := range idx.Columns {
|
||||
if name == colName {
|
||||
if colName.Equal(ciName) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
@ -157,8 +158,9 @@ func (idx *Index) FindColumn(name string) int {
|
|||
// FindDataColumn finds a data column in the index. It returns the index if found.
|
||||
// Otherwise, it returns -1.
|
||||
func (idx *Index) FindDataColumn(name string) int {
|
||||
ciName := cistring.New(name)
|
||||
for i, colName := range idx.DataColumns {
|
||||
if name == colName {
|
||||
if colName.Equal(ciName) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,11 +24,11 @@ func GetTableName(node SimpleTableExpr) string {
|
|||
|
||||
// GetColName returns the column name, only if
|
||||
// it's a simple expression. Otherwise, it returns "".
|
||||
func GetColName(node Expr) string {
|
||||
func GetColName(node Expr) ColIdent {
|
||||
if n, ok := node.(*ColName); ok {
|
||||
return string(n.Name)
|
||||
return n.Name
|
||||
}
|
||||
return ""
|
||||
return ColIdent{}
|
||||
}
|
||||
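A hedged sketch of the adjusted caller pattern (the import path and the constructed expression are assumptions, not taken from this diff); the zero ColIdent now plays the role the empty string used to play:

package main

import (
	"fmt"

	"github.com/youtube/vitess/go/vt/sqlparser"
)

func main() {
	col := sqlparser.GetColName(&sqlparser.ColName{Name: sqlparser.NewColIdent("Foo")})
	if col.Original() != "" {
		fmt.Println(col.Lowered()) // foo
	}
}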
|
||||
// IsColName returns true if the ValExpr is a *ColName.
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
)
|
||||
|
||||
|
@ -376,8 +377,8 @@ func (node *Set) WalkSubtree(visit Visit) error {
|
|||
// NewName is set for AlterStr, CreateStr, RenameStr.
|
||||
type DDL struct {
|
||||
Action string
|
||||
Table SQLName
|
||||
NewName SQLName
|
||||
Table TableIdent
|
||||
NewName TableIdent
|
||||
}
|
||||
|
||||
// DDL strings.
|
||||
|
@ -476,7 +477,7 @@ func (Nextval) iSelectExpr() {}
|
|||
|
||||
// StarExpr defines a '*' or 'table.*' expression.
|
||||
type StarExpr struct {
|
||||
TableName SQLName
|
||||
TableName TableIdent
|
||||
}
|
||||
|
||||
// Format formats the node.
|
||||
|
@ -501,13 +502,13 @@ func (node *StarExpr) WalkSubtree(visit Visit) error {
|
|||
// NonStarExpr defines a non-'*' select expr.
|
||||
type NonStarExpr struct {
|
||||
Expr Expr
|
||||
As SQLName
|
||||
As ColIdent
|
||||
}
|
||||
|
||||
// Format formats the node.
|
||||
func (node *NonStarExpr) Format(buf *TrackedBuffer) {
|
||||
buf.Myprintf("%v", node.Expr)
|
||||
if node.As != "" {
|
||||
if node.As.Original() != "" {
|
||||
buf.Myprintf(" as %v", node.As)
|
||||
}
|
||||
}
|
||||
|
@ -598,7 +599,7 @@ func (*JoinTableExpr) iTableExpr() {}
|
|||
// If As is empty, no alias was used.
|
||||
type AliasedTableExpr struct {
|
||||
Expr SimpleTableExpr
|
||||
As SQLName
|
||||
As TableIdent
|
||||
Hints *IndexHints
|
||||
}
|
||||
|
||||
|
@ -641,7 +642,7 @@ func (*Subquery) iSimpleTableExpr() {}
|
|||
// It's generally not supported because vitess has its own
|
||||
// rules about which database to send a query to.
|
||||
type TableName struct {
|
||||
Name, Qualifier SQLName
|
||||
Name, Qualifier TableIdent
|
||||
}
|
||||
|
||||
// Format formats the node.
|
||||
|
@ -737,7 +738,7 @@ func (node *JoinTableExpr) WalkSubtree(visit Visit) error {
|
|||
// IndexHints represents a list of index hints.
|
||||
type IndexHints struct {
|
||||
Type string
|
||||
Indexes []SQLName
|
||||
Indexes []ColIdent
|
||||
}
|
||||
|
||||
// Index hints.
|
||||
|
@ -1187,7 +1188,7 @@ type ColName struct {
|
|||
// additional data, typically info about which
|
||||
// table or column this node references.
|
||||
Metadata interface{}
|
||||
Name SQLName
|
||||
Name ColIdent
|
||||
Qualifier *TableName
|
||||
}
|
||||
|
||||
|
@ -1365,7 +1366,7 @@ func (node *UnaryExpr) WalkSubtree(visit Visit) error {
|
|||
// IntervalExpr represents a date-time INTERVAL expression.
|
||||
type IntervalExpr struct {
|
||||
Expr Expr
|
||||
Unit SQLName
|
||||
Unit ColIdent
|
||||
}
|
||||
|
||||
// Format formats the node.
|
||||
|
@ -1398,6 +1399,9 @@ func (node *FuncExpr) Format(buf *TrackedBuffer) {
|
|||
if node.Distinct {
|
||||
distinct = "distinct "
|
||||
}
|
||||
// Function names should not be back-quoted even
|
||||
// if they match a reserved word. So, print the
|
||||
// name as is.
|
||||
buf.Myprintf("%s(%s%v)", node.Name, distinct, node.Exprs)
|
||||
}
|
||||
|
||||
|
@ -1434,7 +1438,7 @@ var Aggregates = map[string]bool{
|
|||
|
||||
// IsAggregate returns true if the function is an aggregate.
|
||||
func (node *FuncExpr) IsAggregate() bool {
|
||||
return Aggregates[string(node.Name)]
|
||||
return Aggregates[strings.ToLower(node.Name)]
|
||||
}
|
||||
|
||||
// CaseExpr represents a CASE expression.
|
||||
|
@ -1737,12 +1741,61 @@ func (node OnDup) WalkSubtree(visit Visit) error {
|
|||
return Walk(visit, UpdateExprs(node))
|
||||
}
|
||||
|
||||
// SQLName is an SQL identifier. It will be escaped with
|
||||
// ColIdent is a case insensitive SQL identifier. It will be escaped with
|
||||
// backquotes if it matches a keyword.
|
||||
type SQLName string
|
||||
type ColIdent cistring.CIString
|
||||
|
||||
// NewColIdent makes a new ColIdent.
|
||||
func NewColIdent(str string) ColIdent {
|
||||
return ColIdent(cistring.New(str))
|
||||
}
|
||||
|
||||
// Format formats the node.
|
||||
func (node SQLName) Format(buf *TrackedBuffer) {
|
||||
func (node ColIdent) Format(buf *TrackedBuffer) {
|
||||
if _, ok := keywords[node.Lowered()]; ok {
|
||||
buf.Myprintf("`%s`", node.Original())
|
||||
return
|
||||
}
|
||||
buf.Myprintf("%s", node.Original())
|
||||
}
|
||||
|
||||
// WalkSubtree walks the nodes of the subtree
|
||||
func (node ColIdent) WalkSubtree(visit Visit) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Original returns the case-preserved column name.
|
||||
func (node ColIdent) Original() string {
|
||||
return cistring.CIString(node).Original()
|
||||
}
|
||||
|
||||
func (node ColIdent) String() string {
|
||||
return cistring.CIString(node).String()
|
||||
}
|
||||
|
||||
// Lowered returns a lower-cased column name.
|
||||
// This function should generally be used only for optimizing
|
||||
// comparisons.
|
||||
func (node ColIdent) Lowered() string {
|
||||
return cistring.CIString(node).Lowered()
|
||||
}
|
||||
|
||||
// Equal performs a case-insensitive compare.
|
||||
func (node ColIdent) Equal(in ColIdent) bool {
|
||||
return cistring.CIString(node).Equal(cistring.CIString(in))
|
||||
}
|
||||
|
||||
// EqualString performs a case-insensitive compare with str.
|
||||
func (node ColIdent) EqualString(str string) bool {
|
||||
return cistring.CIString(node).EqualString(str)
|
||||
}
|
||||
|
||||
// TableIdent is a case sensitive SQL identifier. It will be escaped with
|
||||
// backquotes if it matches a keyword.
|
||||
type TableIdent string
|
||||
|
||||
// Format formats the node.
|
||||
func (node TableIdent) Format(buf *TrackedBuffer) {
|
||||
name := string(node)
|
||||
if _, ok := keywords[strings.ToLower(name)]; ok {
|
||||
buf.Myprintf("`%s`", name)
|
||||
|
@ -1752,6 +1805,6 @@ func (node SQLName) Format(buf *TrackedBuffer) {
|
|||
}
|
||||
|
||||
// WalkSubtree walks the nodes of the subtree
|
||||
func (node SQLName) WalkSubtree(visit Visit) error {
|
||||
func (node TableIdent) WalkSubtree(visit Visit) error {
|
||||
return nil
|
||||
}
|
||||
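A short sketch of the intended asymmetry between the two identifier types (import path and identifier values are illustrative): ColIdent comparisons ignore case, while TableIdent stays a plain case-sensitive string.

package main

import (
	"fmt"

	"github.com/youtube/vitess/go/vt/sqlparser"
)

func main() {
	a := sqlparser.NewColIdent("UserID")
	fmt.Println(a.Equal(sqlparser.NewColIdent("userid")))                       // true
	fmt.Println(sqlparser.TableIdent("Users") == sqlparser.TableIdent("users")) // false
}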
|
|
|
@ -163,8 +163,32 @@ func TestIsAggregate(t *testing.T) {
|
|||
t.Error("IsAggregate: false, want true")
|
||||
}
|
||||
|
||||
f = FuncExpr{Name: "Avg"}
|
||||
if !f.IsAggregate() {
|
||||
t.Error("IsAggregate: false, want true")
|
||||
}
|
||||
|
||||
f = FuncExpr{Name: "foo"}
|
||||
if f.IsAggregate() {
|
||||
t.Error("IsAggregate: true, want false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestColIdent(t *testing.T) {
|
||||
str := NewColIdent("Ab")
|
||||
if str.String() != "Ab" {
|
||||
t.Errorf("String=%s, want Ab", str.Original())
|
||||
}
|
||||
if str.Original() != "Ab" {
|
||||
t.Errorf("Val=%s, want Ab", str.Original())
|
||||
}
|
||||
if str.Lowered() != "ab" {
|
||||
t.Errorf("Val=%s, want ab", str.Lowered())
|
||||
}
|
||||
if !str.Equal(NewColIdent("aB")) {
|
||||
t.Error("str.Equal(NewColIdent(aB))=false, want true")
|
||||
}
|
||||
if !str.EqualString("ab") {
|
||||
t.Error("str.EqualString(ab)=false, want true")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -36,8 +36,7 @@ func TestValid(t *testing.T) {
|
|||
}, {
|
||||
input: "select /* simplest */ 1 from t",
|
||||
}, {
|
||||
input: "select /* keyword col */ `By` from t",
|
||||
output: "select /* keyword col */ `by` from t",
|
||||
input: "select /* keyword col */ `By` from t",
|
||||
}, {
|
||||
input: "select /* double star **/ 1 from t",
|
||||
}, {
|
||||
|
@ -81,8 +80,7 @@ func TestValid(t *testing.T) {
|
|||
}, {
|
||||
input: "select /* column alias with as */ a as b from t",
|
||||
}, {
|
||||
input: "select /* keyword column alias */ a as `By` from t",
|
||||
output: "select /* keyword column alias */ a as `by` from t",
|
||||
input: "select /* keyword column alias */ a as `By` from t",
|
||||
}, {
|
||||
input: "select /* a.* */ a.* from t",
|
||||
}, {
|
||||
|
@ -115,8 +113,7 @@ func TestValid(t *testing.T) {
|
|||
}, {
|
||||
input: "select /* use */ 1 from t1 use index (a) where b = 1",
|
||||
}, {
|
||||
input: "select /* keyword index */ 1 from t1 use index (`By`) where b = 1",
|
||||
output: "select /* keyword index */ 1 from t1 use index (`by`) where b = 1",
|
||||
input: "select /* keyword index */ 1 from t1 use index (`By`) where b = 1",
|
||||
}, {
|
||||
input: "select /* ignore */ 1 from t1 as t2 ignore index (a), t3 use index (b) where b = 1",
|
||||
}, {
|
||||
|
@ -308,8 +305,7 @@ func TestValid(t *testing.T) {
|
|||
}, {
|
||||
input: "select /* a.b.c */ a.b.c from t",
|
||||
}, {
|
||||
input: "select /* keyword a.b */ `By`.`bY` from t",
|
||||
output: "select /* keyword a.b */ `By`.`by` from t",
|
||||
input: "select /* keyword a.b */ `By`.`bY` from t",
|
||||
}, {
|
||||
input: "select /* string */ 'a' from t",
|
||||
}, {
|
||||
|
@ -574,6 +570,10 @@ func TestCaseSensitivity(t *testing.T) {
|
|||
}, {
|
||||
input: "alter table A foo",
|
||||
output: "alter table A",
|
||||
}, {
|
||||
// View names get lower-cased.
|
||||
input: "alter view A foo",
|
||||
output: "alter table a",
|
||||
}, {
|
||||
input: "alter table A rename to B",
|
||||
output: "rename table A B",
|
||||
|
@ -588,13 +588,11 @@ func TestCaseSensitivity(t *testing.T) {
|
|||
}, {
|
||||
input: "select a from B",
|
||||
}, {
|
||||
input: "select A as B from C",
|
||||
output: "select a as b from C",
|
||||
input: "select A as B from C",
|
||||
}, {
|
||||
input: "select B.* from c",
|
||||
}, {
|
||||
input: "select B.A from c",
|
||||
output: "select B.a from c",
|
||||
input: "select B.A from c",
|
||||
}, {
|
||||
input: "select * from B as C",
|
||||
}, {
|
||||
|
@ -604,23 +602,19 @@ func TestCaseSensitivity(t *testing.T) {
|
|||
}, {
|
||||
input: "update A.B set b = 1",
|
||||
}, {
|
||||
input: "select A() from b",
|
||||
output: "select a() from b",
|
||||
input: "select A() from b",
|
||||
}, {
|
||||
input: "select A(B, C) from b",
|
||||
output: "select a(b, c) from b",
|
||||
input: "select A(B, C) from b",
|
||||
}, {
|
||||
input: "select A(distinct B, C) from b",
|
||||
output: "select a(distinct b, c) from b",
|
||||
input: "select A(distinct B, C) from b",
|
||||
}, {
|
||||
// IF is an exception. It's always lower-cased.
|
||||
input: "select IF(B, C) from b",
|
||||
output: "select if(b, c) from b",
|
||||
output: "select if(B, C) from b",
|
||||
}, {
|
||||
input: "select * from b use index (A)",
|
||||
output: "select * from b use index (a)",
|
||||
input: "select * from b use index (A)",
|
||||
}, {
|
||||
input: "insert into A(A, B) values (1, 2)",
|
||||
output: "insert into A(a, b) values (1, 2)",
|
||||
input: "insert into A(A, B) values (1, 2)",
|
||||
}, {
|
||||
input: "CREATE TABLE A",
|
||||
output: "create table A",
|
||||
|
@ -633,6 +627,14 @@ func TestCaseSensitivity(t *testing.T) {
|
|||
}, {
|
||||
input: "drop view A",
|
||||
output: "drop table a",
|
||||
}, {
|
||||
input: "select /* lock in SHARE MODE */ 1 from t lock in SHARE MODE",
|
||||
output: "select /* lock in SHARE MODE */ 1 from t lock in share mode",
|
||||
}, {
|
||||
input: "select next VALUE from t",
|
||||
output: "select next value from t",
|
||||
}, {
|
||||
input: "select /* use */ 1 from t1 use index (A) where b = 1",
|
||||
}}
|
||||
for _, tcase := range validSQL {
|
||||
if tcase.output == "" {
|
||||
|
|
File diff not shown because of its large size
|
@ -5,8 +5,6 @@
|
|||
%{
|
||||
package sqlparser
|
||||
|
||||
import "strings"
|
||||
|
||||
func setParseTree(yylex interface{}, stmt Statement) {
|
||||
yylex.(*Tokenizer).ParseTree = stmt
|
||||
}
|
||||
|
@ -66,8 +64,9 @@ func forceEOF(yylex interface{}) {
|
|||
insRows InsertRows
|
||||
updateExprs UpdateExprs
|
||||
updateExpr *UpdateExpr
|
||||
sqlID SQLName
|
||||
sqlIDs []SQLName
|
||||
colIdent ColIdent
|
||||
colIdents []ColIdent
|
||||
tableIdent TableIdent
|
||||
}
|
||||
|
||||
%token LEX_ERROR
|
||||
|
@ -126,7 +125,7 @@ func forceEOF(yylex interface{}) {
|
|||
%type <str> inner_join outer_join natural_join
|
||||
%type <tableName> table_name
|
||||
%type <indexHints> index_hint_list
|
||||
%type <sqlIDs> index_list
|
||||
%type <colIdents> index_list
|
||||
%type <boolExpr> where_expression_opt
|
||||
%type <boolExpr> boolean_expression condition
|
||||
%type <str> compare
|
||||
|
@ -157,8 +156,8 @@ func forceEOF(yylex interface{}) {
|
|||
%type <empty> for_from
|
||||
%type <str> ignore_opt
|
||||
%type <empty> exists_opt not_exists_opt non_rename_operation to_opt constraint_opt using_opt
|
||||
%type <sqlID> sql_id as_lower_opt
|
||||
%type <sqlID> table_id as_opt_id
|
||||
%type <colIdent> sql_id as_ci_opt
|
||||
%type <tableIdent> table_id as_opt_id
|
||||
%type <empty> as_opt
|
||||
%type <empty> force_eof
|
||||
|
||||
|
@ -195,7 +194,7 @@ select_statement:
|
|||
}
|
||||
| SELECT comment_opt NEXT sql_id for_from table_name
|
||||
{
|
||||
if $4 != "value" {
|
||||
if $4.Lowered() != "value" {
|
||||
yylex.Error("expecting value after next")
|
||||
return 1
|
||||
}
|
||||
|
@ -252,7 +251,7 @@ create_statement:
|
|||
}
|
||||
| CREATE VIEW sql_id force_eof
|
||||
{
|
||||
$$ = &DDL{Action: CreateStr, NewName: SQLName($3)}
|
||||
$$ = &DDL{Action: CreateStr, NewName: TableIdent($3.Lowered())}
|
||||
}
|
||||
|
||||
alter_statement:
|
||||
|
@ -267,7 +266,7 @@ alter_statement:
|
|||
}
|
||||
| ALTER VIEW sql_id force_eof
|
||||
{
|
||||
$$ = &DDL{Action: AlterStr, Table: SQLName($3), NewName: SQLName($3)}
|
||||
$$ = &DDL{Action: AlterStr, Table: TableIdent($3.Lowered()), NewName: TableIdent($3.Lowered())}
|
||||
}
|
||||
|
||||
rename_statement:
|
||||
|
@ -288,7 +287,7 @@ drop_statement:
|
|||
}
|
||||
| DROP VIEW exists_opt sql_id force_eof
|
||||
{
|
||||
$$ = &DDL{Action: DropStr, Table: SQLName($4)}
|
||||
$$ = &DDL{Action: DropStr, Table: TableIdent($4.Lowered())}
|
||||
}
|
||||
|
||||
analyze_statement:
|
||||
|
@ -377,7 +376,7 @@ select_expression:
|
|||
{
|
||||
$$ = &StarExpr{}
|
||||
}
|
||||
| expression as_lower_opt
|
||||
| expression as_ci_opt
|
||||
{
|
||||
$$ = &NonStarExpr{Expr: $1, As: $2}
|
||||
}
|
||||
|
@ -396,9 +395,9 @@ expression:
|
|||
$$ = $1
|
||||
}
|
||||
|
||||
as_lower_opt:
|
||||
as_ci_opt:
|
||||
{
|
||||
$$ = ""
|
||||
$$ = ColIdent{}
|
||||
}
|
||||
| sql_id
|
||||
{
|
||||
|
@ -560,7 +559,7 @@ index_hint_list:
|
|||
index_list:
|
||||
sql_id
|
||||
{
|
||||
$$ = []SQLName{$1}
|
||||
$$ = []ColIdent{$1}
|
||||
}
|
||||
| index_list ',' sql_id
|
||||
{
|
||||
|
@ -825,15 +824,15 @@ value_expression:
|
|||
// will be non-trivial because of grammar conflicts.
|
||||
$$ = &IntervalExpr{Expr: $2, Unit: $3}
|
||||
}
|
||||
| sql_id openb closeb
|
||||
| table_id openb closeb
|
||||
{
|
||||
$$ = &FuncExpr{Name: string($1)}
|
||||
}
|
||||
| sql_id openb select_expression_list closeb
|
||||
| table_id openb select_expression_list closeb
|
||||
{
|
||||
$$ = &FuncExpr{Name: string($1), Exprs: $3}
|
||||
}
|
||||
| sql_id openb DISTINCT select_expression_list closeb
|
||||
| table_id openb DISTINCT select_expression_list closeb
|
||||
{
|
||||
$$ = &FuncExpr{Name: string($1), Distinct: true, Exprs: $4}
|
||||
}
|
||||
|
@ -997,11 +996,11 @@ lock_opt:
|
|||
}
|
||||
| LOCK IN sql_id sql_id
|
||||
{
|
||||
if $3 != "share" {
|
||||
if $3.Lowered() != "share" {
|
||||
yylex.Error("expecting share")
|
||||
return 1
|
||||
}
|
||||
if $4 != "mode" {
|
||||
if $4.Lowered() != "mode" {
|
||||
yylex.Error("expecting mode")
|
||||
return 1
|
||||
}
|
||||
|
@ -1133,13 +1132,13 @@ using_opt:
|
|||
sql_id:
|
||||
ID
|
||||
{
|
||||
$$ = SQLName(strings.ToLower(string($1)))
|
||||
$$ = NewColIdent(string($1))
|
||||
}
|
||||
|
||||
table_id:
|
||||
ID
|
||||
{
|
||||
$$ = SQLName($1)
|
||||
$$ = TableIdent($1)
|
||||
}
|
||||
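A sketch of the visible effect of these grammar changes, assuming the package's usual Parse and String entry points (not shown in this diff); the query matches one of the round-trip test cases above:

package main

import (
	"fmt"

	"github.com/youtube/vitess/go/vt/sqlparser"
)

func main() {
	stmt, err := sqlparser.Parse("select A as B from C")
	if err != nil {
		panic(err)
	}
	// Identifier casing survives the round trip now that sql_id no longer lower-cases.
	fmt.Println(sqlparser.String(stmt)) // select A as B from C
}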
|
||||
openb:
|
||||
|
|
|
@ -94,8 +94,7 @@ type ActionAgent struct {
|
|||
|
||||
// History of the health checks, public so status
|
||||
// pages can display it
|
||||
History *history.History
|
||||
lastHealthMapCount *stats.Int
|
||||
History *history.History
|
||||
|
||||
// actionMutex is there to run only one action at a time. If
|
||||
// both agent.actionMutex and agent.mutex needs to be taken,
|
||||
|
@ -111,11 +110,32 @@ type ActionAgent struct {
|
|||
// the record.
|
||||
initialTablet *topodatapb.Tablet
|
||||
|
||||
// mutex protects the following fields, only hold the mutex
|
||||
// to update the fields, nothing else.
|
||||
mutex sync.Mutex
|
||||
_tablet *topodatapb.Tablet
|
||||
_tabletControl *topodatapb.Shard_TabletControl
|
||||
// mutex protects all the following fields (that start with '_'),
|
||||
// only hold the mutex to update the fields, nothing else.
|
||||
mutex sync.Mutex
|
||||
|
||||
// _tablet has the Tablet record we last read from the topology server.
|
||||
_tablet *topodatapb.Tablet
|
||||
|
||||
// _disallowQueryService is set to the reason we should be
|
||||
// disallowing queries from being served. It is set from changeCallback,
|
||||
// and used by healthcheck. If empty, we should allow queries.
|
||||
// It is set if the current type is not serving, if a TabletControl
|
||||
// tells us not to serve, or if filtered replication is running.
|
||||
_disallowQueryService string
|
||||
|
||||
// _enableUpdateStream is true if we should be running the
|
||||
// UpdateStream service. Note if we can't start the query
|
||||
// service, or if the server health check fails, we will
|
||||
// disable UpdateStream.
|
||||
_enableUpdateStream bool
|
||||
|
||||
// _blacklistedTables has the list of tables we are currently
|
||||
// blacklisting.
|
||||
_blacklistedTables []string
|
||||
|
||||
// set to true if mysql is not up when we start. That way, we
|
||||
// only log once that we're waiting for mysql.
|
||||
_waitingForMysql bool
|
||||
|
||||
// if the agent is healthy, this is nil. Otherwise it contains
|
||||
|
@ -184,7 +204,6 @@ func NewActionAgent(
|
|||
DBConfigs: dbcfgs,
|
||||
SchemaOverrides: schemaOverrides,
|
||||
History: history.New(historyLength),
|
||||
lastHealthMapCount: stats.NewInt("LastHealthMapCount"),
|
||||
_healthy: fmt.Errorf("healthcheck not run yet"),
|
||||
}
|
||||
agent.registerQueryRuleSources()
|
||||
|
@ -194,11 +213,6 @@ func NewActionAgent(
|
|||
return nil, fmt.Errorf("agent.InitTablet failed: %v", err)
|
||||
}
|
||||
|
||||
// Publish and set the TargetTabletType. Not a global var
|
||||
// since it should never be changed.
|
||||
statsTabletType := stats.NewString("TargetTabletType")
|
||||
statsTabletType.Set(*targetTabletType)
|
||||
|
||||
// Create the TabletType stats
|
||||
agent.exportStats = true
|
||||
agent.statsTabletType = stats.NewString("TabletType")
|
||||
|
@ -258,7 +272,7 @@ func NewActionAgent(
|
|||
|
||||
// NewTestActionAgent creates an agent for test purposes. Only a
|
||||
// subset of features are supported now, but we'll add more over time.
|
||||
func NewTestActionAgent(batchCtx context.Context, ts topo.Server, tabletAlias *topodatapb.TabletAlias, vtPort, grpcPort int32, mysqlDaemon mysqlctl.MysqlDaemon) *ActionAgent {
|
||||
func NewTestActionAgent(batchCtx context.Context, ts topo.Server, tabletAlias *topodatapb.TabletAlias, vtPort, grpcPort int32, mysqlDaemon mysqlctl.MysqlDaemon, preStart func(*ActionAgent)) *ActionAgent {
|
||||
agent := &ActionAgent{
|
||||
QueryServiceControl: tabletservermock.NewController(),
|
||||
UpdateStream: binlog.NewUpdateStreamControlMock(),
|
||||
|
@ -271,9 +285,11 @@ func NewTestActionAgent(batchCtx context.Context, ts topo.Server, tabletAlias *t
|
|||
SchemaOverrides: nil,
|
||||
BinlogPlayerMap: nil,
|
||||
History: history.New(historyLength),
|
||||
lastHealthMapCount: new(stats.Int),
|
||||
_healthy: fmt.Errorf("healthcheck not run yet"),
|
||||
}
|
||||
if preStart != nil {
|
||||
preStart(agent)
|
||||
}
|
||||
if err := agent.Start(batchCtx, 0, vtPort, grpcPort, false); err != nil {
|
||||
panic(fmt.Errorf("agent.Start(%v) failed: %v", tabletAlias, err))
|
||||
}
|
||||
|
@ -296,7 +312,6 @@ func NewComboActionAgent(batchCtx context.Context, ts topo.Server, tabletAlias *
|
|||
SchemaOverrides: nil,
|
||||
BinlogPlayerMap: nil,
|
||||
History: history.New(historyLength),
|
||||
lastHealthMapCount: new(stats.Int),
|
||||
_healthy: fmt.Errorf("healthcheck not run yet"),
|
||||
}
|
||||
agent.registerQueryRuleSources()
|
||||
|
@ -365,33 +380,38 @@ func (agent *ActionAgent) Healthy() (time.Duration, error) {
|
|||
return agent._replicationDelay, healthy
|
||||
}
|
||||
|
||||
// BlacklistedTables reads the list of blacklisted tables from the TabletControl
|
||||
// record (if any) stored in the agent, protected by mutex.
|
||||
// BlacklistedTables returns the list of currently blacklisted tables.
|
||||
func (agent *ActionAgent) BlacklistedTables() []string {
|
||||
var blacklistedTables []string
|
||||
agent.mutex.Lock()
|
||||
if agent._tabletControl != nil {
|
||||
blacklistedTables = agent._tabletControl.BlacklistedTables
|
||||
}
|
||||
agent.mutex.Unlock()
|
||||
return blacklistedTables
|
||||
defer agent.mutex.Unlock()
|
||||
return agent._blacklistedTables
|
||||
}
|
||||
|
||||
// DisableQueryService reads the DisableQueryService field from the TabletControl
|
||||
// record (if any) stored in the agent, protected by mutex.
|
||||
func (agent *ActionAgent) DisableQueryService() bool {
|
||||
disable := false
|
||||
// DisallowQueryService returns the reason the query service should be
|
||||
// disabled, if any.
|
||||
func (agent *ActionAgent) DisallowQueryService() string {
|
||||
agent.mutex.Lock()
|
||||
if agent._tabletControl != nil {
|
||||
disable = agent._tabletControl.DisableQueryService
|
||||
}
|
||||
agent.mutex.Unlock()
|
||||
return disable
|
||||
defer agent.mutex.Unlock()
|
||||
return agent._disallowQueryService
|
||||
}
|
||||
|
||||
func (agent *ActionAgent) setTabletControl(tc *topodatapb.Shard_TabletControl) {
|
||||
// EnableUpdateStream returns if we should enable update stream or not
|
||||
func (agent *ActionAgent) EnableUpdateStream() bool {
|
||||
agent.mutex.Lock()
|
||||
agent._tabletControl = proto.Clone(tc).(*topodatapb.Shard_TabletControl)
|
||||
defer agent.mutex.Unlock()
|
||||
return agent._enableUpdateStream
|
||||
}
|
||||
|
||||
func (agent *ActionAgent) setServicesDesiredState(disallowQueryService string, enableUpdateStream bool) {
|
||||
agent.mutex.Lock()
|
||||
agent._disallowQueryService = disallowQueryService
|
||||
agent._enableUpdateStream = enableUpdateStream
|
||||
agent.mutex.Unlock()
|
||||
}
|
||||
|
||||
func (agent *ActionAgent) setBlacklistedTables(value []string) {
|
||||
agent.mutex.Lock()
|
||||
agent._blacklistedTables = value
|
||||
agent.mutex.Unlock()
|
||||
}
|
||||
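A hedged sketch of how these accessors are meant to be consumed (the helper is hypothetical and written as if it lived in package tabletmanager; it is not part of this change):

// shouldServeQueries illustrates the health-check side of the new state:
// an empty reason means queries may be served.
func shouldServeQueries(agent *ActionAgent) bool {
	if reason := agent.DisallowQueryService(); reason != "" {
		// A non-empty reason means changeCallback decided queries must stay off.
		return false
	}
	return true
}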
|
||||
|
|
|
@ -443,13 +443,10 @@ func agentRPCTestRefreshStatePanic(ctx context.Context, t *testing.T, client tmc
|
|||
expectRPCWrapLockActionPanic(t, err)
|
||||
}
|
||||
|
||||
var testRunHealthCheckValue = topodatapb.TabletType_RDONLY
|
||||
|
||||
func (fra *fakeRPCAgent) RunHealthCheck(ctx context.Context, targetTabletType topodatapb.TabletType) {
|
||||
func (fra *fakeRPCAgent) RunHealthCheck(ctx context.Context) {
|
||||
if fra.panics {
|
||||
panic(fmt.Errorf("test-triggered panic"))
|
||||
}
|
||||
compare(fra.t, "RunHealthCheck tabletType", targetTabletType, testRunHealthCheckValue)
|
||||
}
|
||||
|
||||
var testIgnoreHealthErrorValue = ".*"
|
||||
|
@ -463,14 +460,14 @@ func (fra *fakeRPCAgent) IgnoreHealthError(ctx context.Context, pattern string)
|
|||
}
|
||||
|
||||
func agentRPCTestRunHealthCheck(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) {
|
||||
err := client.RunHealthCheck(ctx, ti, testRunHealthCheckValue)
|
||||
err := client.RunHealthCheck(ctx, ti)
|
||||
if err != nil {
|
||||
t.Errorf("RunHealthCheck failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func agentRPCTestRunHealthCheckPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) {
|
||||
err := client.RunHealthCheck(ctx, ti, testRunHealthCheckValue)
|
||||
err := client.RunHealthCheck(ctx, ti)
|
||||
expectRPCWrapPanic(t, err)
|
||||
}
|
||||
|
||||
|
|
|
@ -109,7 +109,7 @@ func (client *FakeTabletManagerClient) RefreshState(ctx context.Context, tablet
|
|||
}
|
||||
|
||||
// RunHealthCheck is part of the tmclient.TabletManagerClient interface.
|
||||
func (client *FakeTabletManagerClient) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo, targetTabletType topodatapb.TabletType) error {
|
||||
func (client *FakeTabletManagerClient) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -219,15 +219,13 @@ func (client *Client) RefreshState(ctx context.Context, tablet *topo.TabletInfo)
|
|||
}
|
||||
|
||||
// RunHealthCheck is part of the tmclient.TabletManagerClient interface.
|
||||
func (client *Client) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo, targetTabletType topodatapb.TabletType) error {
|
||||
func (client *Client) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo) error {
|
||||
cc, c, err := client.dial(ctx, tablet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cc.Close()
|
||||
_, err = c.RunHealthCheck(ctx, &tabletmanagerdatapb.RunHealthCheckRequest{
|
||||
TabletType: targetTabletType,
|
||||
})
|
||||
_, err = c.RunHealthCheck(ctx, &tabletmanagerdatapb.RunHealthCheckRequest{})
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -130,7 +130,7 @@ func (s *server) RunHealthCheck(ctx context.Context, request *tabletmanagerdatap
|
|||
ctx = callinfo.GRPCCallInfo(ctx)
|
||||
response := &tabletmanagerdatapb.RunHealthCheckResponse{}
|
||||
return response, s.agent.RPCWrap(ctx, actionnode.TabletActionRunHealthCheck, request, response, func() error {
|
||||
s.agent.RunHealthCheck(ctx, request.TabletType)
|
||||
s.agent.RunHealthCheck(ctx)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
|
|
@ -4,23 +4,29 @@

package tabletmanager

// This file handles the health check. It is enabled by passing a
// target_tablet_type command line parameter. The tablet will then go
// to the target tablet type if healthy, and to 'spare' if not.
// This file handles the health check. It is always enabled in production
// vttablets (but not in vtcombo, and not in unit tests by default).
// If we are unhealthy, we'll stop the query service. In any case,
// we report our replication delay so vtgate's discovery can use this tablet
// or not.
//
// Note: we used to go to SPARE when unhealthy, and back to the target
// tablet type when healthy. Now that we use the discovery module,
// health is handled by clients subscribing to the health stream, so
// we don't need to do that any more.
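
// Illustrative sketch (not part of this file): a consumer of the broadcast
// health, such as vtgate's discovery module, can use the serving flag and
// the reported replication delay to decide whether to route to this tablet.
// The type and function below are hypothetical and only show the idea.

type reportedHealth struct {
	serving             bool   // is the query service up on this tablet
	secondsBehindMaster uint32 // replication delay reported by the healthcheck
}

func usable(h reportedHealth, maxLagSeconds uint32) bool {
	// A tablet is a routing candidate only if it is serving and is not
	// lagging too far behind; unhealthy tablets report the maximum delay.
	return h.serving && h.secondsBehindMaster <= maxLagSeconds
}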

import (
"flag"
"fmt"
"html/template"
"sync"
"time"

log "github.com/golang/glog"
"github.com/golang/protobuf/proto"
"github.com/youtube/vitess/go/timer"
"github.com/youtube/vitess/go/vt/health"
"github.com/youtube/vitess/go/vt/servenv"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/topotools"

topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"

@ -33,8 +39,8 @@ const (

var (
healthCheckInterval = flag.Duration("health_check_interval", 20*time.Second, "Interval between health checks")
targetTabletType = flag.String("target_tablet_type", "", "The tablet type we are thriving to be when healthy. When not healthy, we'll go to spare.")
degradedThreshold = flag.Duration("degraded_threshold", defaultDegradedThreshold, "replication lag after which a replica is considered degraded")
targetTabletType = flag.String("target_tablet_type", "", "DEPRECATED, use init_tablet_type now.")
degradedThreshold = flag.Duration("degraded_threshold", defaultDegradedThreshold, "replication lag after which a replica is considered degraded (only used in status UI)")
unhealthyThreshold = flag.Duration("unhealthy_threshold", defaultUnhealthyThreshold, "replication lag after which a replica is considered unhealthy")
)
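
// A minimal sketch of how the two thresholds are applied by runHealthCheck
// below: lag above unhealthyThreshold becomes a health error, lag above
// degradedThreshold only marks the tablet as degraded in the status UI, and
// health.ErrSlaveNotRunning is mapped to the maximum delay instead of an
// error. The standalone function and its names are illustrative only.
func classifyLag(lag, degraded, unhealthy time.Duration, slaveNotRunning bool) (isDegraded bool, err error) {
	if slaveNotRunning {
		// Delay is unknown: treat it as the maximum so vtgate prefers
		// other replicas (the check below is a strict > operator).
		lag = unhealthy
	}
	if lag > unhealthy {
		return false, fmt.Errorf("reported replication lag: %v higher than unhealthy threshold: %v", lag.Seconds(), unhealthy.Seconds())
	}
	// Between the two thresholds the tablet keeps serving but is flagged
	// as ReplicationLagHigh in the status UI.
	return lag > degraded, nil
}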
@ -118,23 +124,11 @@ func ConfigHTML() template.HTML {
healthCheckInterval, degradedThreshold, unhealthyThreshold))
}

// IsRunningHealthCheck indicates if the agent is configured to run healthchecks.
func (agent *ActionAgent) IsRunningHealthCheck() bool {
return *targetTabletType != ""
}

// initHealthCheck will start the health check background go routine,
// and configure the healthcheck shutdown. It is only run by NewActionAgent
// for real vttablet agents (not by tests, nor vtcombo).
func (agent *ActionAgent) initHealthCheck() {
if !agent.IsRunningHealthCheck() {
log.Infof("No target_tablet_type specified, disabling any health check")
return
}

tt, err := topoproto.ParseTabletType(*targetTabletType)
if err != nil {
log.Fatalf("Invalid target tablet type %v: %v", *targetTabletType, err)
}

log.Infof("Starting periodic health check every %v with target_tablet_type=%v", *healthCheckInterval, *targetTabletType)
log.Infof("Starting periodic health check every %v", *healthCheckInterval)
t := timer.NewTimer(*healthCheckInterval)
servenv.OnTermSync(func() {
// When we enter lameduck mode, we want to not call

@ -144,74 +138,70 @@ func (agent *ActionAgent) initHealthCheck() {
t.Stop()

// Now we can finish up and force ourselves to not healthy.
agent.terminateHealthChecks(tt)
agent.terminateHealthChecks()
})
t.Start(func() {
agent.runHealthCheck(tt)
agent.runHealthCheck()
})
t.Trigger()
}
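
// Condensed sketch of the pattern used by initHealthCheck above: a periodic
// go/timer that runs the check, plus an OnTermSync hook that stops the timer
// and forces one last "not healthy" report on shutdown. This restates the
// code above for illustration; it is not additional production code.
func startPeriodicCheck(interval time.Duration, check, terminate func()) {
	t := timer.NewTimer(interval)
	servenv.OnTermSync(func() {
		t.Stop()    // no more periodic checks
		terminate() // e.g. terminateHealthChecks: lameduck, then stop serving
	})
	t.Start(check) // run check every interval...
	t.Trigger()    // ...and once right away
}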
|
||||
|
||||
// runHealthCheck takes the action mutex, runs the health check,
// and if we need to change our state, do it.
// If we are the master, we don't change our type, healthy or not.
// If we are not the master, we change to spare if not healthy,
// or to the passed in targetTabletType if healthy.
//
// Note we only update the topo record if we need to, that is if our type or
// health details changed.
// and if we need to change our state, do it. We never change our type,
// just the health we report (so we do not change the topo server at all).
// We do not interact with topo server, we use cached values for everything.
//
// This will not change the BinlogPlayerMap, but if it is not empty,
// we will think we should not be running the query service.
//
// This will not change the TabletControl record, but will use it
// to see if we should be running the query service.
func (agent *ActionAgent) runHealthCheck(targetTabletType topodatapb.TabletType) {
func (agent *ActionAgent) runHealthCheck() {
agent.actionMutex.Lock()
defer agent.actionMutex.Unlock()

agent.runHealthCheckProtected()
}

func (agent *ActionAgent) runHealthCheckProtected() {
|
||||
// read the current tablet record and tablet control
|
||||
agent.mutex.Lock()
|
||||
tablet := proto.Clone(agent._tablet).(*topodatapb.Tablet)
|
||||
tabletControl := proto.Clone(agent._tabletControl).(*topodatapb.Shard_TabletControl)
|
||||
shouldBeServing := agent._disallowQueryService == ""
|
||||
runUpdateStream := agent._enableUpdateStream
|
||||
ignoreErrorExpr := agent._ignoreHealthErrorExpr
|
||||
agent.mutex.Unlock()
|
||||
|
||||
// figure out if we should be running the query service
|
||||
shouldBeServing := false
|
||||
if topo.IsRunningQueryService(targetTabletType) && !agent.BinlogPlayerMap.isRunningFilteredReplication() {
|
||||
shouldBeServing = true
|
||||
if tabletControl != nil {
|
||||
if tabletControl.DisableQueryService {
|
||||
shouldBeServing = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// run the health check
|
||||
record := &HealthRecord{}
|
||||
isSlaveType := true
|
||||
if tablet.Type == topodatapb.TabletType_MASTER {
|
||||
isSlaveType = false
|
||||
}
|
||||
// Remember the health error as healthErr to be sure we don't accidentally
|
||||
// overwrite it with some other err.
|
||||
|
||||
// Remember the health error as healthErr to be sure we don't
|
||||
// accidentally overwrite it with some other err.
|
||||
replicationDelay, healthErr := agent.HealthReporter.Report(isSlaveType, shouldBeServing)
|
||||
if healthErr != nil && ignoreErrorExpr != nil &&
|
||||
ignoreErrorExpr.MatchString(healthErr.Error()) {
|
||||
// we need to ignore this health error
|
||||
record.IgnoredError = healthErr
|
||||
record.IgnoreErrorExpr = ignoreErrorExpr.String()
|
||||
healthErr = nil
|
||||
}
|
||||
health := make(map[string]string)
|
||||
if healthErr == health.ErrSlaveNotRunning {
|
||||
// The slave is not running, so we just don't know the
|
||||
// delay. Use a maximum delay, so we can let vtgate
|
||||
// find the right replica, instead of erroring out.
|
||||
// (this works as the check below is a strict > operator).
|
||||
replicationDelay = *unhealthyThreshold
|
||||
healthErr = nil
|
||||
}
|
||||
if healthErr == nil {
|
||||
if replicationDelay > *unhealthyThreshold {
|
||||
healthErr = fmt.Errorf("reported replication lag: %v higher than unhealthy threshold: %v", replicationDelay.Seconds(), unhealthyThreshold.Seconds())
|
||||
} else if replicationDelay > *degradedThreshold {
|
||||
health[topo.ReplicationLag] = topo.ReplicationLagHigh
|
||||
}
|
||||
}
|
||||
agent.lastHealthMapCount.Set(int64(len(health)))
|
||||
|
||||
// Figure out if we should be running QueryService, see if we are,
|
||||
// and reconcile.
|
||||
|
@ -226,34 +216,54 @@ func (agent *ActionAgent) runHealthCheck(targetTabletType topodatapb.TabletType)
|
|||
isServing := agent.QueryServiceControl.IsServing()
|
||||
if shouldBeServing {
|
||||
if !isServing {
|
||||
// It might be that we're ready to serve, but we just need to start
|
||||
// queryservice. Send the type we want to be, not the type we are.
|
||||
desiredType := tablet.Type
|
||||
if desiredType == topodatapb.TabletType_SPARE {
|
||||
desiredType = targetTabletType
|
||||
}
|
||||
|
||||
// If starting queryservice fails, that's our new reason for being unhealthy.
|
||||
// If starting queryservice fails, that's our
|
||||
// new reason for being unhealthy.
|
||||
//
|
||||
// We don't care if the QueryService state actually changed because we'll
|
||||
// broadcast the latest health status after this immediately anway.
|
||||
_ /* state changed */, healthErr = agent.allowQueries(desiredType)
|
||||
// We don't care if the QueryService state actually
|
||||
// changed because we'll broadcast the latest health
|
||||
// status after this immediately anway.
|
||||
_ /* state changed */, healthErr = agent.QueryServiceControl.SetServingType(tablet.Type, true, nil)
|
||||
|
||||
if healthErr == nil {
|
||||
// we were unhealthy, are now healthy,
|
||||
// make sure we have the right mysql port.
|
||||
if updatedTablet := agent.checkTabletMysqlPort(agent.batchCtx, tablet); updatedTablet != nil {
|
||||
agent.setTablet(updatedTablet)
|
||||
tablet = updatedTablet
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if isServing {
|
||||
// We are not healthy or should not be running the query service.
|
||||
//
|
||||
// We don't care if the QueryService state actually changed because we'll
|
||||
// broadcast the latest health status after this immediately anway.
|
||||
_ /* state changed */, err := agent.disallowQueries(tablet.Type,
|
||||
fmt.Sprintf("health-check failure(%v)", healthErr),
|
||||
)
|
||||
if err != nil {
|
||||
log.Errorf("disallowQueries failed: %v", err)
|
||||
// We are not healthy or should not be running
|
||||
// the query service.
|
||||
|
||||
// First enter lameduck during gracePeriod to
|
||||
// limit client errors.
|
||||
if topo.IsSubjectToLameduck(tablet.Type) && *gracePeriod > 0 {
|
||||
agent.lameduck("health check failed")
|
||||
}
|
||||
|
||||
// We don't care if the QueryService state actually
|
||||
// changed because we'll broadcast the latest health
|
||||
// status after this immediately anway.
|
||||
log.Infof("Disabling query service because of health-check failure: %v", healthErr)
|
||||
if _ /* state changed */, err := agent.QueryServiceControl.SetServingType(tablet.Type, false, nil); err != nil {
|
||||
log.Errorf("SetServingType(serving=false) failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
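
// The serving reconciliation above boils down to the following decision,
// shown here as an illustrative helper (error handling, the mysql port
// fix-up, lameduck and broadcasting are omitted):
func reconcileServing(shouldBeServing, isServing bool, setServing func(bool) error) error {
	switch {
	case shouldBeServing && !isServing:
		// Healthy but not serving yet: start the query service.
		return setServing(true)
	case !shouldBeServing && isServing:
		// Unhealthy or told not to serve: stop the query service.
		return setServing(false)
	default:
		// Already in the desired state: nothing to do.
		return nil
	}
}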
|
||||
|
||||
// change UpdateStream state if necessary
|
||||
if healthErr != nil {
|
||||
runUpdateStream = false
|
||||
}
|
||||
if topo.IsRunningUpdateStream(tablet.Type) && runUpdateStream {
|
||||
agent.UpdateStream.Enable()
|
||||
} else {
|
||||
agent.UpdateStream.Disable()
|
||||
}
|
||||
|
||||
// save the health record
|
||||
record.Time = time.Now()
|
||||
record.Error = healthErr
|
||||
|
@ -304,134 +314,44 @@ func (agent *ActionAgent) runHealthCheck(targetTabletType topodatapb.TabletType)
|
|||
|
||||
// send it to our observers
|
||||
agent.broadcastHealth()
|
||||
|
||||
// Update our topo.Server state, start with no change
|
||||
newTabletType := tablet.Type
|
||||
if healthErr != nil {
|
||||
// The tablet is not healthy, let's see what we need to do
|
||||
if tablet.Type != targetTabletType {
|
||||
if tablet.Type != topodatapb.TabletType_SPARE {
|
||||
// we only log if we're not in spare,
|
||||
// as the spare state is normal for a
|
||||
// failed health check.
|
||||
log.Infof("Tablet not healthy and in state %v, not changing it: %v", tablet.Type, healthErr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Note that if the query service is running, we may
|
||||
// need to stop it. The post-action callback will do
|
||||
// it, and it will be done after we change our state,
|
||||
// so it's the right order, let it do it.
|
||||
log.Infof("Tablet not healthy, converting it from %v to spare: %v", targetTabletType, healthErr)
|
||||
newTabletType = topodatapb.TabletType_SPARE
|
||||
} else {
|
||||
// We are healthy, maybe with health, see if we need
|
||||
// to update the record. We only change from spare to
|
||||
// our target type.
|
||||
if tablet.Type == topodatapb.TabletType_SPARE {
|
||||
newTabletType = targetTabletType
|
||||
}
|
||||
if tablet.Type == newTabletType && topo.IsHealthEqual(health, tablet.HealthMap) {
|
||||
// no change in health, not logging anything,
|
||||
// and we're done
|
||||
return
|
||||
}
|
||||
|
||||
// we need to update our state
|
||||
log.Infof("Updating tablet record as healthy type %v -> %v with health details %v -> %v", tablet.Type, newTabletType, tablet.HealthMap, health)
|
||||
}
|
||||
|
||||
// Change the Type, update the health. Note we pass in a map
|
||||
// that's not nil, meaning if it's empty, we will clear it.
|
||||
tablet, err := topotools.ChangeOwnType(agent.batchCtx, agent.TopoServer, agent.initialTablet, newTabletType, health)
|
||||
if err != nil {
|
||||
log.Infof("Error updating tablet record: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Rebuild the serving graph in our cell, only if we're dealing with
|
||||
// a serving type
|
||||
if err := agent.updateServingGraph(tablet, targetTabletType); err != nil {
|
||||
log.Warningf("updateServingGraph failed (will still run post action callbacks, serving graph might be out of date): %v", err)
|
||||
}
|
||||
|
||||
// Run the post action callbacks.
|
||||
// Note that this is where we might block for *gracePeriod, depending on the
|
||||
// type of state change. See changeCallback() for details.
|
||||
if err := agent.refreshTablet(agent.batchCtx, "healthcheck"); err != nil {
|
||||
log.Warningf("refreshTablet failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// terminateHealthChecks is called when we enter lame duck mode.
|
||||
// We will clean up our state, and set query service to lame duck mode.
|
||||
// We only do something if we are in targetTabletType state, and then
|
||||
// we just go to spare.
|
||||
func (agent *ActionAgent) terminateHealthChecks(targetTabletType topodatapb.TabletType) {
|
||||
// We only do something if we are in a serving state, and not a master.
|
||||
func (agent *ActionAgent) terminateHealthChecks() {
|
||||
agent.actionMutex.Lock()
|
||||
defer agent.actionMutex.Unlock()
|
||||
log.Info("agent.terminateHealthChecks is starting")
|
||||
|
||||
// read the current tablet record
|
||||
tablet := agent.Tablet()
|
||||
|
||||
if tablet.Type != targetTabletType {
|
||||
// If we're MASTER, SPARE, WORKER, etc. then the healthcheck shouldn't
|
||||
// touch it. We also skip gracePeriod in that case.
|
||||
log.Infof("Tablet in state %v, not changing it", tablet.Type)
|
||||
if !topo.IsSubjectToLameduck(tablet.Type) {
|
||||
// If we're MASTER, SPARE, WORKER, etc. then we
|
||||
// shouldn't enter lameduck. We do lameduck to not
|
||||
// trigger errors on clients.
|
||||
log.Infof("Tablet in state %v, not entering lameduck", tablet.Type)
|
||||
return
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Go lameduck for gracePeriod.
|
||||
// We've already checked above that we're not MASTER.
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
// Enter new lameduck mode for gracePeriod, then shut down queryservice.
|
||||
// New lameduck mode means keep accepting queries, but advertise unhealthy.
|
||||
// After we return from this synchronous OnTermSync hook, servenv may decide
|
||||
// to wait even longer, for the rest of the time specified by its own
|
||||
// "-lameduck-period" flag. During that extra period, queryservice will be
|
||||
// in old lameduck mode, meaning stay alive but reject new queries.
|
||||
agent.enterLameduck("terminating healthchecks")
|
||||
agent.broadcastHealth()
|
||||
time.Sleep(*gracePeriod)
|
||||
agent.disallowQueries(tablet.Type, "terminating healthchecks")
|
||||
}()
|
||||
// Enter new lameduck mode for gracePeriod, then shut down
|
||||
// queryservice. New lameduck mode means keep accepting
|
||||
// queries, but advertise unhealthy. After we return from
|
||||
// this synchronous OnTermSync hook, servenv may decide to
|
||||
// wait even longer, for the rest of the time specified by its
|
||||
// own "-lameduck-period" flag. During that extra period,
|
||||
// queryservice will be in old lameduck mode, meaning stay
|
||||
// alive but reject new queries.
|
||||
agent.lameduck("terminating healthchecks")
|
||||
|
||||
// Change Type to spare and clear HealthMap.
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
// We don't wait until after the lameduck period, because we want to make
|
||||
// sure this gets done before servenv onTermTimeout.
|
||||
tablet, err := topotools.ChangeOwnType(agent.batchCtx, agent.TopoServer, agent.initialTablet, topodatapb.TabletType_SPARE, topotools.ClearHealthMap)
|
||||
if err != nil {
|
||||
log.Infof("Error updating tablet record: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Update the serving graph in our cell, only if we're dealing with
|
||||
// a serving type
|
||||
if err := agent.updateServingGraph(tablet, targetTabletType); err != nil {
|
||||
log.Warningf("updateServingGraph failed (will still run post action callbacks, serving graph might be out of date): %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// updateServingGraph will update the serving graph if we need to.
|
||||
func (agent *ActionAgent) updateServingGraph(tablet *topodatapb.Tablet, targetTabletType topodatapb.TabletType) error {
|
||||
if topo.IsInServingGraph(targetTabletType) {
|
||||
if err := topotools.UpdateTabletEndpoints(agent.batchCtx, agent.TopoServer, tablet); err != nil {
|
||||
return fmt.Errorf("UpdateTabletEndpoints failed: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
// Note we only do this now if we entered lameduck. In the
|
||||
// master case for instance, we want to keep serving until
|
||||
// vttablet dies entirely (where else is the client going to
|
||||
// go?). After servenv lameduck, the queryservice is stopped
|
||||
// from a servenv.OnClose() hook anyway.
|
||||
log.Infof("Disabling query service after lameduck in terminating healthchecks")
|
||||
agent.QueryServiceControl.SetServingType(tablet.Type, false, nil)
|
||||
}
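
// Rough shape of the shutdown path above, assuming the same helpers exist
// (illustrative only): only serving, non-master tablet types enter lameduck,
// and the query service is stopped only after the lameduck grace period.
func terminateSketch(tabletType topodatapb.TabletType, lameduck func(reason string), stopServing func()) {
	if !topo.IsSubjectToLameduck(tabletType) {
		// MASTER, SPARE, WORKER, etc.: keep serving until the process exits.
		return
	}
	// Keep serving but advertise unhealthy for gracePeriod...
	lameduck("terminating healthchecks")
	// ...then disable the query service (SetServingType(type, false, nil)).
	stopServing()
}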
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
"github.com/youtube/vitess/go/vt/binlog/binlogplayer"
|
||||
"github.com/youtube/vitess/go/vt/health"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl"
|
||||
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
|
||||
"github.com/youtube/vitess/go/vt/tabletserver"
|
||||
|
@ -113,7 +114,7 @@ func (fhc *fakeHealthCheck) HTMLName() template.HTML {
|
|||
return template.HTML("fakeHealthCheck")
|
||||
}
|
||||
|
||||
func createTestAgent(ctx context.Context, t *testing.T) (*ActionAgent, chan<- *binlogplayer.VtClientMock) {
|
||||
func createTestAgent(ctx context.Context, t *testing.T, preStart func(*ActionAgent)) (*ActionAgent, chan<- *binlogplayer.VtClientMock) {
|
||||
ts := zktestserver.New(t, []string{"cell1"})
|
||||
|
||||
if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil {
|
||||
|
@ -134,14 +135,14 @@ func createTestAgent(ctx context.Context, t *testing.T) (*ActionAgent, chan<- *b
|
|||
Ip: "1.0.0.1",
|
||||
Keyspace: "test_keyspace",
|
||||
Shard: "0",
|
||||
Type: topodatapb.TabletType_SPARE,
|
||||
Type: topodatapb.TabletType_REPLICA,
|
||||
}
|
||||
if err := ts.CreateTablet(ctx, tablet); err != nil {
|
||||
t.Fatalf("CreateTablet failed: %v", err)
|
||||
}
|
||||
|
||||
mysqlDaemon := &mysqlctl.FakeMysqlDaemon{MysqlPort: 3306}
|
||||
agent := NewTestActionAgent(ctx, ts, tabletAlias, port, 0, mysqlDaemon)
|
||||
agent := NewTestActionAgent(ctx, ts, tabletAlias, port, 0, mysqlDaemon, preStart)
|
||||
|
||||
vtClientMocksChannel := make(chan *binlogplayer.VtClientMock, 1)
|
||||
agent.BinlogPlayerMap = NewBinlogPlayerMap(ts, mysqlDaemon, func() binlogplayer.VtClient {
|
||||
|
@ -156,29 +157,41 @@ func createTestAgent(ctx context.Context, t *testing.T) (*ActionAgent, chan<- *b
|
|||
// TestHealthCheckControlsQueryService verifies that a tablet going healthy
|
||||
// starts the query service, and going unhealthy stops it.
|
||||
func TestHealthCheckControlsQueryService(t *testing.T) {
|
||||
// we need an actual grace period set, so lameduck is enabled
|
||||
*gracePeriod = 10 * time.Millisecond
|
||||
defer func() {
|
||||
*gracePeriod = 0
|
||||
}()
|
||||
|
||||
ctx := context.Background()
|
||||
agent, _ := createTestAgent(ctx, t)
|
||||
targetTabletType := topodatapb.TabletType_REPLICA
|
||||
agent, _ := createTestAgent(ctx, t, nil)
|
||||
|
||||
// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (SPARE, SERVING) goes to (SPARE, NOT_SERVING).
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 0); err != nil {
|
||||
/// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (REPLICA, NOT_SERVING) goes to (REPLICA, SERVING). And we
|
||||
// should be serving.
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "healthcheck not run yet", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_SPARE); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should be running")
|
||||
}
|
||||
if !agent.UpdateStream.IsEnabled() {
|
||||
t.Errorf("UpdateStream should be running")
|
||||
}
|
||||
|
||||
// first health check, should change us to replica, and update the
|
||||
// mysql port to 3306
|
||||
// first health check, should keep us as replica and serving,
|
||||
// and update the mysql port to 3306
|
||||
before := time.Now()
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 12 * time.Second
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != targetTabletType {
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("First health check failed to go to replica: %v", ti.Type)
|
||||
}
|
||||
if ti.PortMap["mysql"] != 3306 {
|
||||
|
@ -196,10 +209,7 @@ func TestHealthCheckControlsQueryService(t *testing.T) {
|
|||
if agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("invalid tabletserver target: %v", agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType)
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 12); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_REPLICA); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "", 12); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -207,13 +217,13 @@ func TestHealthCheckControlsQueryService(t *testing.T) {
|
|||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 13 * time.Second
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportError = fmt.Errorf("tablet is unhealthy")
|
||||
before = time.Now()
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != topodatapb.TabletType_SPARE {
|
||||
t.Errorf("Unhappy health check failed to go to spare: %v", ti.Type)
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("Unhappy health check failed to stay as replica: %v", ti.Type)
|
||||
}
|
||||
if agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should not be running")
|
||||
|
@ -224,36 +234,82 @@ func TestHealthCheckControlsQueryService(t *testing.T) {
|
|||
if agent._healthyTime.Sub(before) < 0 {
|
||||
t.Errorf("runHealthCheck did not update agent._healthyTime")
|
||||
}
|
||||
want := topodatapb.TabletType_SPARE
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != want {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, want)
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, topodatapb.TabletType_REPLICA)
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 13); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// QueryService disabled since we are unhealthy now.
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Consume second health broadcast (runHealthCheck() called refreshTablet()
|
||||
// which broadcasts since we go from REPLICA to SPARE and into lameduck.)
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 13); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// NOTE: No state change here because the type during lameduck is still
|
||||
// REPLICA and the QueryService is already set to NOT_SERVING.
|
||||
//
|
||||
// Consume third health broadcast (runHealthCheck() called refreshTablet()
|
||||
// which broadcasts that the QueryService state changed from REPLICA to SPARE
|
||||
// (NOT_SERVING was already set before when we went into lameduck).)
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 13); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// After the lameduck grace period, the type changed from REPLICA to SPARE.
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_SPARE); err != nil {
|
||||
|
||||
// first we get the lameduck broadcast, with no error and old
|
||||
// replication delay
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, false, "", 12); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// then query service is disabled since we are unhealthy now.
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// and the associated broadcast
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, false, "tablet is unhealthy", 13); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// and nothing more.
|
||||
if err := expectBroadcastDataEmpty(agent.QueryServiceControl); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChangesEmpty(agent.QueryServiceControl); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestErrSlaveNotRunningIsHealthy verifies that a tablet whose
|
||||
// healthcheck reports health.ErrSlaveNotRunning is still considered
|
||||
// healthy with high replication lag.
|
||||
func TestErrSlaveNotRunningIsHealthy(t *testing.T) {
|
||||
*unhealthyThreshold = 10 * time.Minute
|
||||
ctx := context.Background()
|
||||
agent, _ := createTestAgent(ctx, t, nil)
|
||||
|
||||
/// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (REPLICA, NOT_SERVING) goes to (REPLICA, SERVING). And we
|
||||
// should be serving.
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "healthcheck not run yet", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should be running")
|
||||
}
|
||||
if !agent.UpdateStream.IsEnabled() {
|
||||
t.Errorf("UpdateStream should be running")
|
||||
}
|
||||
|
||||
// health check returning health.ErrSlaveNotRunning, should
|
||||
// keep us as replica and serving
|
||||
before := time.Now()
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 12 * time.Second
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportError = health.ErrSlaveNotRunning
|
||||
agent.runHealthCheck()
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should be running")
|
||||
}
|
||||
if !agent.UpdateStream.IsEnabled() {
|
||||
t.Errorf("UpdateStream should be running")
|
||||
}
|
||||
if agent._healthyTime.Sub(before) < 0 {
|
||||
t.Errorf("runHealthCheck did not update agent._healthyTime")
|
||||
}
|
||||
if agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("invalid tabletserver target: %v", agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType)
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "", 10*60); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// and nothing more.
|
||||
if err := expectBroadcastDataEmpty(agent.QueryServiceControl); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -263,30 +319,35 @@ func TestHealthCheckControlsQueryService(t *testing.T) {
|
|||
}
|
||||
|
||||
// TestQueryServiceNotStarting verifies that if a tablet cannot start the
|
||||
// query service, it should not go healthy
|
||||
// query service, it should not go healthy.
|
||||
func TestQueryServiceNotStarting(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
agent, _ := createTestAgent(ctx, t)
|
||||
targetTabletType := topodatapb.TabletType_REPLICA
|
||||
agent.QueryServiceControl.(*tabletservermock.Controller).SetServingTypeError = fmt.Errorf("test cannot start query service")
|
||||
agent, _ := createTestAgent(ctx, t, func(a *ActionAgent) {
|
||||
// The SetServingType that will fail is part of Start()
|
||||
// so we have to do this here.
|
||||
a.QueryServiceControl.(*tabletservermock.Controller).SetServingTypeError = fmt.Errorf("test cannot start query service")
|
||||
})
|
||||
|
||||
// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (SPARE, SERVING) goes to (SPARE, NOT_SERVING).
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
// we should not be serving.
|
||||
if agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should not be running")
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_SPARE); err != nil {
|
||||
t.Fatal(err)
|
||||
if agent.UpdateStream.IsEnabled() {
|
||||
t.Errorf("UpdateStream should not be running")
|
||||
}
|
||||
|
||||
// There is no broadcast data to consume, we're just not
|
||||
// healthy from startup
|
||||
|
||||
// Now we can run another health check, it will stay unhealthy forever.
|
||||
before := time.Now()
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != topodatapb.TabletType_SPARE {
|
||||
t.Errorf("Happy health check which cannot start query service should stay spare: %v", ti.Type)
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("Happy health check which cannot start query service should stay replica: %v", ti.Type)
|
||||
}
|
||||
if agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should not be running")
|
||||
|
@ -301,7 +362,7 @@ func TestQueryServiceNotStarting(t *testing.T) {
|
|||
if bd.RealtimeStats.HealthError != "test cannot start query service" {
|
||||
t.Errorf("unexpected HealthError: %v", *bd)
|
||||
}
|
||||
if agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType != topodatapb.TabletType_SPARE {
|
||||
if agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("invalid tabletserver target: %v", agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType)
|
||||
}
|
||||
|
||||
|
@ -317,28 +378,34 @@ func TestQueryServiceNotStarting(t *testing.T) {
|
|||
// service is shut down, the tablet goes unhealthy
|
||||
func TestQueryServiceStopped(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
agent, _ := createTestAgent(ctx, t)
|
||||
targetTabletType := topodatapb.TabletType_REPLICA
|
||||
agent, _ := createTestAgent(ctx, t, nil)
|
||||
|
||||
// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (SPARE, SERVING) goes to (SPARE, NOT_SERVING).
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 0); err != nil {
|
||||
/// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (REPLICA, NOT_SERVING) goes to (REPLICA, SERVING). And we
|
||||
// should be serving.
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "healthcheck not run yet", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_SPARE); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should be running")
|
||||
}
|
||||
if !agent.UpdateStream.IsEnabled() {
|
||||
t.Errorf("UpdateStream should be running")
|
||||
}
|
||||
|
||||
// first health check, should change us to replica
|
||||
// first health check, should keep us in replica / healthy
|
||||
before := time.Now()
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 14 * time.Second
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != targetTabletType {
|
||||
t.Errorf("First health check failed to go to replica: %v", ti.Type)
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("First health check failed to stay in replica: %v", ti.Type)
|
||||
}
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should be running")
|
||||
|
@ -354,29 +421,29 @@ func TestQueryServiceStopped(t *testing.T) {
|
|||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, want)
|
||||
}
|
||||
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 14); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, want); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "", 14); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// shut down query service and prevent it from starting again
|
||||
// (this is to simulate mysql going away, tablet server detecting it
|
||||
// and shutting itself down)
|
||||
// and shutting itself down). Intercept the message
|
||||
agent.QueryServiceControl.SetServingType(topodatapb.TabletType_REPLICA, false, nil)
|
||||
agent.QueryServiceControl.(*tabletservermock.Controller).SetServingTypeError = fmt.Errorf("test cannot start query service")
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// health check should now fail
|
||||
before = time.Now()
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 15 * time.Second
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != topodatapb.TabletType_SPARE {
|
||||
t.Errorf("Happy health check which cannot start query service should stay spare: %v", ti.Type)
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("Happy health check which cannot start query service should stay replica: %v", ti.Type)
|
||||
}
|
||||
if agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should not be running")
|
||||
|
@ -391,19 +458,7 @@ func TestQueryServiceStopped(t *testing.T) {
|
|||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != want {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, want)
|
||||
}
|
||||
if bd, err := expectBroadcastData(agent.QueryServiceControl, 15); err == nil {
|
||||
if bd.RealtimeStats.HealthError != "test cannot start query service" {
|
||||
t.Errorf("unexpected HealthError: %v", *bd)
|
||||
}
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, want); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Consume second health broadcast (runHealthCheck() called refreshTablet()
|
||||
// which broadcasts since we go from REPLICA to SPARE and into lameduck.)
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 15); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, false, "test cannot start query service", 15); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// NOTE: No more broadcasts or state changes since SetServingTypeError is set
|
||||
|
@ -421,27 +476,27 @@ func TestQueryServiceStopped(t *testing.T) {
|
|||
// query service in a tablet.
|
||||
func TestTabletControl(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
agent, _ := createTestAgent(ctx, t)
|
||||
targetTabletType := topodatapb.TabletType_REPLICA
|
||||
agent, _ := createTestAgent(ctx, t, nil)
|
||||
|
||||
// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (SPARE, SERVING) goes to (SPARE, NOT_SERVING).
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 0); err != nil {
|
||||
/// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (REPLICA, NOT_SERVING) goes to (REPLICA, SERVING). And we
|
||||
// should be serving.
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "healthcheck not run yet", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_SPARE); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// first health check, should change us to replica
|
||||
// first health check, should keep us in replica, just broadcast
|
||||
before := time.Now()
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 16 * time.Second
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != targetTabletType {
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("First health check failed to go to replica: %v", ti.Type)
|
||||
}
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
|
@ -453,13 +508,10 @@ func TestTabletControl(t *testing.T) {
|
|||
if agent._healthyTime.Sub(before) < 0 {
|
||||
t.Errorf("runHealthCheck did not update agent._healthyTime")
|
||||
}
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != targetTabletType {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, targetTabletType)
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, topodatapb.TabletType_REPLICA)
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 16); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, targetTabletType); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "", 16); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -470,7 +522,7 @@ func TestTabletControl(t *testing.T) {
|
|||
}
|
||||
si.TabletControls = []*topodatapb.Shard_TabletControl{
|
||||
{
|
||||
TabletType: targetTabletType,
|
||||
TabletType: topodatapb.TabletType_REPLICA,
|
||||
DisableQueryService: true,
|
||||
},
|
||||
}
|
||||
|
@ -496,22 +548,22 @@ func TestTabletControl(t *testing.T) {
|
|||
|
||||
// Consume the health broadcast which was triggered due to the QueryService
|
||||
// state change from SERVING to NOT_SERVING.
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 16); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, false, "", 16); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, targetTabletType); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// check running a health check will not start it again
|
||||
before = time.Now()
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 17 * time.Second
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != targetTabletType {
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("Health check failed to go to replica: %v", ti.Type)
|
||||
}
|
||||
if agent.QueryServiceControl.IsServing() {
|
||||
|
@ -523,25 +575,25 @@ func TestTabletControl(t *testing.T) {
|
|||
if agent._healthyTime.Sub(before) < 0 {
|
||||
t.Errorf("runHealthCheck did not update agent._healthyTime")
|
||||
}
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != targetTabletType {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, targetTabletType)
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, topodatapb.TabletType_REPLICA)
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 17); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, false, "", 17); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// NOTE: No state change here since nothing has changed.
|
||||
|
||||
// go unhealthy, check we go to spare and QS is not running
|
||||
// go unhealthy, check we go to error state and QS is not running
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportError = fmt.Errorf("tablet is unhealthy")
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 18 * time.Second
|
||||
before = time.Now()
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != topodatapb.TabletType_SPARE {
|
||||
t.Errorf("Unhealthy health check should go to spare: %v", ti.Type)
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("Unhealthy health check should stay replica: %v", ti.Type)
|
||||
}
|
||||
if agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should not be running")
|
||||
|
@ -552,41 +604,25 @@ func TestTabletControl(t *testing.T) {
|
|||
if agent._healthyTime.Sub(before) < 0 {
|
||||
t.Errorf("runHealthCheck did not update agent._healthyTime")
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 18); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, false, "tablet is unhealthy", 18); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// NOTE: No state change here since QueryService is already NOT_SERVING.
|
||||
want := topodatapb.TabletType_SPARE
|
||||
want := topodatapb.TabletType_REPLICA
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != want {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, want)
|
||||
}
|
||||
// Consume second health broadcast (runHealthCheck() called refreshTablet()
|
||||
// which broadcasts since we go from REPLICA to SPARE into lameduck.)
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 18); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Consume third health broadcast (runHealthCheck() called refreshTablet()
|
||||
// which broadcasts since the QueryService state changes from REPLICA to SPARE.
|
||||
// TODO(mberlin): With this, the cached TabletControl in the agent is also
|
||||
// cleared since it was only meant for REPLICA and now we are a SPARE.
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 18); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_SPARE); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// go back healthy, check QS is still not running
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportError = nil
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 19 * time.Second
|
||||
before = time.Now()
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != targetTabletType {
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("Healthy health check should go to replica: %v", ti.Type)
|
||||
}
|
||||
if agent.QueryServiceControl.IsServing() {
|
||||
|
@ -598,29 +634,36 @@ func TestTabletControl(t *testing.T) {
|
|||
if agent._healthyTime.Sub(before) < 0 {
|
||||
t.Errorf("runHealthCheck did not update agent._healthyTime")
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 19); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, false, "", 19); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != targetTabletType {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, targetTabletType)
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, topodatapb.TabletType_REPLICA)
|
||||
}
|
||||
// NOTE: At this point in time, the QueryService is actually visible as
|
||||
// SERVING since the previous change from REPLICA to SPARE cleared the
|
||||
// cached TabletControl and now the healthcheck assumes that the REPLICA type
|
||||
// is allowed to serve. This problem will be fixed when the healthcheck calls
|
||||
// refreshTablet() due to the seen state change from SPARE to REPLICA. Then,
|
||||
// the topology is read again and TabletControl becomes effective again.
|
||||
// TODO(mberlin): Fix this bug.
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, targetTabletType); err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
// now clear TabletControl, run health check, make sure we go back healthy
|
||||
// and serving.
|
||||
si, err = agent.TopoServer.GetShard(ctx, "test_keyspace", "0")
|
||||
if err != nil {
|
||||
t.Fatalf("GetShard failed: %v", err)
|
||||
}
|
||||
si.TabletControls = nil
|
||||
if err := agent.TopoServer.UpdateShard(ctx, si); err != nil {
|
||||
t.Fatalf("UpdateShard failed: %v", err)
|
||||
}
|
||||
|
||||
// now refresh the tablet state, as the resharding process would do
|
||||
agent.RPCWrapLockAction(ctx, actionnode.TabletActionRefreshState, "", "", true, func() error {
|
||||
agent.RefreshState(ctx)
|
||||
return nil
|
||||
})
|
||||
|
||||
// QueryService changed back from SERVING to NOT_SERVING since refreshTablet()
|
||||
// re-read the topology and saw that REPLICA is still not allowed to serve.
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 19); err != nil {
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "", 19); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, targetTabletType); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -641,38 +684,80 @@ func TestStateChangeImmediateHealthBroadcast(t *testing.T) {
|
|||
flag.Set("binlog_player_retry_delay", "100ms")
|
||||
|
||||
ctx := context.Background()
|
||||
agent, vtClientMocksChannel := createTestAgent(ctx, t)
|
||||
targetTabletType := topodatapb.TabletType_MASTER
|
||||
agent, vtClientMocksChannel := createTestAgent(ctx, t, nil)
|
||||
|
||||
// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (SPARE, SERVING) goes to (SPARE, NOT_SERVING).
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 0); err != nil {
|
||||
/// Consume the first health broadcast triggered by ActionAgent.Start():
|
||||
// (REPLICA, NOT_SERVING) goes to (REPLICA, SERVING). And we
|
||||
// should be serving.
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "healthcheck not run yet", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_SPARE); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_REPLICA); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Run health check to get changed from SPARE to MASTER.
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 20 * time.Second
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
// Run health check to turn into a healthy replica
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 12 * time.Second
|
||||
agent.runHealthCheck()
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should be running")
|
||||
}
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, topodatapb.TabletType_REPLICA)
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "", 12); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Run TER to turn us into a proper master, wait for it to finish.
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 19 * time.Second
|
||||
if err := agent.RPCWrapLock(ctx, actionnode.TabletActionExternallyReparented, "", "", false, func() error {
|
||||
return agent.TabletExternallyReparented(ctx, "unused_id")
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
select {
|
||||
case <-agent.finalizeReparentCtx.Done():
|
||||
}
|
||||
ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != targetTabletType {
|
||||
t.Errorf("First health check failed to go to replica: %v", ti.Type)
|
||||
if ti.Type != topodatapb.TabletType_MASTER {
|
||||
t.Errorf("TER failed to go to master: %v", ti.Type)
|
||||
}
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should be running")
|
||||
}
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != targetTabletType {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, targetTabletType)
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != topodatapb.TabletType_MASTER {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, topodatapb.TabletType_MASTER)
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, 20); err != nil {
|
||||
|
||||
// Consume the health broadcast (no replication delay as we are master)
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, targetTabletType); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_MASTER); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Run health check to make sure we stay good
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 20 * time.Second
|
||||
agent.runHealthCheck()
|
||||
ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != topodatapb.TabletType_MASTER {
|
||||
t.Errorf("First health check failed to go to master: %v", ti.Type)
|
||||
}
|
||||
if !agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should be running")
|
||||
}
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != topodatapb.TabletType_MASTER {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, topodatapb.TabletType_MASTER)
|
||||
}
|
||||
if _, err := expectBroadcastData(agent.QueryServiceControl, true, "", 20); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -725,34 +810,34 @@ func TestStateChangeImmediateHealthBroadcast(t *testing.T) {
|
|||
// (MASTER, SERVING) to (MASTER, NOT_SERVING).
|
||||
// Since we didn't run healthcheck again yet, the broadcast data contains the
|
||||
// cached replication lag of 20 instead of 21.
|
||||
if bd, err := expectBroadcastData(agent.QueryServiceControl, 20); err == nil {
|
||||
if bd, err := expectBroadcastData(agent.QueryServiceControl, false, "", 20); err == nil {
|
||||
if bd.RealtimeStats.BinlogPlayersCount != 1 {
|
||||
t.Fatalf("filtered replication must be enabled: %v", bd)
|
||||
}
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, targetTabletType); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, false, topodatapb.TabletType_MASTER); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Running a healthcheck won't put the QueryService back to SERVING.
|
||||
agent.HealthReporter.(*fakeHealthCheck).reportReplicationDelay = 22 * time.Second
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
agent.runHealthCheck()
|
||||
ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != targetTabletType {
|
||||
if ti.Type != topodatapb.TabletType_MASTER {
|
||||
t.Errorf("Health check failed to go to replica: %v", ti.Type)
|
||||
}
|
||||
if agent.QueryServiceControl.IsServing() {
|
||||
t.Errorf("Query service should not be running")
|
||||
}
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != targetTabletType {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, targetTabletType)
|
||||
if got := agent.QueryServiceControl.(*tabletservermock.Controller).CurrentTarget.TabletType; got != topodatapb.TabletType_MASTER {
|
||||
t.Errorf("invalid tabletserver target: got = %v, want = %v", got, topodatapb.TabletType_MASTER)
|
||||
}
|
||||
if bd, err := expectBroadcastData(agent.QueryServiceControl, 22); err == nil {
|
||||
if bd, err := expectBroadcastData(agent.QueryServiceControl, false, "", 22); err == nil {
|
||||
if bd.RealtimeStats.BinlogPlayersCount != 1 {
|
||||
t.Fatalf("filtered replication must be still running: %v", bd)
|
||||
}
|
||||
|
@ -784,14 +869,14 @@ func TestStateChangeImmediateHealthBroadcast(t *testing.T) {
|
|||
}
|
||||
// Since we didn't run healthcheck again yet, the broadcast data contains the
|
||||
// cached replication lag of 22 instead of 23.
|
||||
if bd, err := expectBroadcastData(agent.QueryServiceControl, 22); err == nil {
|
||||
if bd, err := expectBroadcastData(agent.QueryServiceControl, true, "", 22); err == nil {
|
||||
if bd.RealtimeStats.BinlogPlayersCount != 0 {
|
||||
t.Fatalf("filtered replication must be disabled now: %v", bd)
|
||||
}
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, targetTabletType); err != nil {
|
||||
if err := expectStateChange(agent.QueryServiceControl, true, topodatapb.TabletType_MASTER); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -807,7 +892,7 @@ func TestStateChangeImmediateHealthBroadcast(t *testing.T) {
|
|||
// return an error
|
||||
func TestOldHealthCheck(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
agent, _ := createTestAgent(ctx, t)
|
||||
agent, _ := createTestAgent(ctx, t, nil)
|
||||
*healthCheckInterval = 20 * time.Second
|
||||
agent._healthy = nil
|
||||
|
||||
|
@ -832,13 +917,16 @@ func TestOldHealthCheck(t *testing.T) {
|
|||
|
||||
// expectBroadcastData checks that runHealthCheck() broadcast the expected
// stats (in particular the value for secondsBehindMaster).
// Note that it may be necessary to call this function twice when
// runHealthCheck() also calls refreshTablet(), which might trigger another
// broadcast, e.g. because we went from REPLICA to SPARE and into lameduck.
func expectBroadcastData(qsc tabletserver.Controller, secondsBehindMaster uint32) (*tabletservermock.BroadcastData, error) {
|
||||
func expectBroadcastData(qsc tabletserver.Controller, serving bool, healthError string, secondsBehindMaster uint32) (*tabletservermock.BroadcastData, error) {
|
||||
bd := <-qsc.(*tabletservermock.Controller).BroadcastData
|
||||
if got := bd.Serving; got != serving {
|
||||
return nil, fmt.Errorf("unexpected BroadcastData.Serving, got: %v want: %v with bd: %+v", got, serving, bd)
|
||||
}
|
||||
if got := bd.RealtimeStats.HealthError; got != healthError {
|
||||
return nil, fmt.Errorf("unexpected BroadcastData.HealthError, got: %v want: %v with bd: %+v", got, healthError, bd)
|
||||
}
|
||||
if got := bd.RealtimeStats.SecondsBehindMaster; got != secondsBehindMaster {
|
||||
return nil, fmt.Errorf("unexpected BroadcastData. got: %v want: %v got bd: %+v", got, secondsBehindMaster, bd)
|
||||
return nil, fmt.Errorf("unexpected BroadcastData.SecondsBehindMaster, got: %v want: %v with bd: %+v", got, secondsBehindMaster, bd)
|
||||
}
|
||||
return bd, nil
|
||||
}
|
||||
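For readers who have not seen the channel-backed mock pattern this helper relies on, here is a self-contained sketch of the idea. The type and field names below are simplified stand-ins for illustration only, not the actual tabletservermock types.

package main

import "fmt"

// broadcastData is a simplified stand-in for the BroadcastData struct above.
type broadcastData struct {
	Serving             bool
	HealthError         string
	SecondsBehindMaster uint32
}

// mockController buffers every health broadcast on a channel so a test can
// assert on them one by one, in order.
type mockController struct {
	broadcasts chan *broadcastData
}

func (c *mockController) broadcastHealth(bd *broadcastData) {
	c.broadcasts <- bd
}

// expectBroadcastData mirrors the helper in this diff: it pops exactly one
// buffered broadcast and checks the fields the caller cares about.
func expectBroadcastData(c *mockController, serving bool, healthError string, secondsBehindMaster uint32) (*broadcastData, error) {
	bd := <-c.broadcasts
	if bd.Serving != serving {
		return nil, fmt.Errorf("unexpected Serving, got: %v want: %v with bd: %+v", bd.Serving, serving, bd)
	}
	if bd.HealthError != healthError {
		return nil, fmt.Errorf("unexpected HealthError, got: %q want: %q with bd: %+v", bd.HealthError, healthError, bd)
	}
	if bd.SecondsBehindMaster != secondsBehindMaster {
		return nil, fmt.Errorf("unexpected SecondsBehindMaster, got: %v want: %v with bd: %+v", bd.SecondsBehindMaster, secondsBehindMaster, bd)
	}
	return bd, nil
}

func main() {
	c := &mockController{broadcasts: make(chan *broadcastData, 10)}
	c.broadcastHealth(&broadcastData{Serving: true, SecondsBehindMaster: 20})
	if _, err := expectBroadcastData(c, true, "", 20); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("broadcast matched expectations")
}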
@@ -10,13 +10,11 @@ package tabletmanager
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"github.com/youtube/vitess/go/flagutil"
|
||||
"github.com/youtube/vitess/go/netutil"
|
||||
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
|
||||
"github.com/youtube/vitess/go/vt/topo"
|
||||
"github.com/youtube/vitess/go/vt/topo/topoproto"
|
||||
"github.com/youtube/vitess/go/vt/topotools"
|
||||
|
@ -58,26 +56,32 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error {
|
|||
var err error
|
||||
tabletType, err = topoproto.ParseTabletType(*initTabletType)
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid init tablet type %v: %v", *initTabletType, err)
|
||||
log.Fatalf("Invalid init_tablet_type %v: %v", *initTabletType, err)
|
||||
}
|
||||
|
||||
if tabletType == topodatapb.TabletType_MASTER {
|
||||
// We disallow MASTER, so we don't have to change
|
||||
// shard.MasterAlias, and deal with the corner cases.
|
||||
log.Fatalf("init_tablet_type cannot be %v", tabletType)
|
||||
log.Fatalf("init_tablet_type cannot be master, use replica instead")
|
||||
}
|
||||
|
||||
} else if *targetTabletType != "" {
|
||||
if strings.ToUpper(*targetTabletType) == topodatapb.TabletType_name[int32(topodatapb.TabletType_MASTER)] {
|
||||
log.Fatalf("target_tablet_type cannot be '%v'. Use '%v' instead.", tabletType, topodatapb.TabletType_REPLICA)
|
||||
// use the targetTabletType, check it's not master.
|
||||
// FIXME(alainjobart): refactor the flags: we should switch
|
||||
// to init_tablet_type, and always enable healthcheck.
|
||||
var err error
|
||||
tabletType, err = topoproto.ParseTabletType(*targetTabletType)
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid target_tablet_type %v: %v", *targetTabletType, err)
|
||||
}
|
||||
|
||||
// use spare, the healthcheck will turn us into what
|
||||
// we need to be eventually
|
||||
tabletType = topodatapb.TabletType_SPARE
|
||||
|
||||
if tabletType == topodatapb.TabletType_MASTER {
|
||||
// We disallow MASTER, so we don't have to change
|
||||
// shard.MasterAlias, and deal with the corner cases.
|
||||
log.Fatalf("target_tablet_type cannot be master. Use replica instead.")
|
||||
}
|
||||
} else {
|
||||
log.Fatalf("if init tablet is enabled, one of init_tablet_type or target_tablet_type needs to be specified")
|
||||
log.Fatalf("if init tablet is enabled (by specifying init_keyspace), one of init_tablet_type or target_tablet_type needs to be specified")
|
||||
}
|
||||
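As a side note, the flag handling above boils down to: parse whichever of init_tablet_type / target_tablet_type is set, and refuse MASTER. A minimal self-contained sketch of that validation follows; the parse table and function names are illustrative stand-ins, not the real topoproto API.

package main

import (
	"fmt"
	"log"
	"strings"
)

// parseTabletType is a stand-in for topoproto.ParseTabletType: it maps a
// flag value to a canonical tablet type name, case-insensitively.
func parseTabletType(s string) (string, error) {
	switch strings.ToLower(s) {
	case "replica", "rdonly", "spare", "master":
		return strings.ToUpper(s), nil
	default:
		return "", fmt.Errorf("unknown tablet type: %q", s)
	}
}

// pickInitType mirrors the decision above: prefer init_tablet_type, fall
// back to the deprecated target_tablet_type, and never allow MASTER.
func pickInitType(initTabletType, targetTabletType string) (string, error) {
	flagName, value := "init_tablet_type", initTabletType
	if value == "" {
		flagName, value = "target_tablet_type", targetTabletType
	}
	if value == "" {
		return "", fmt.Errorf("one of init_tablet_type or target_tablet_type needs to be specified")
	}
	tt, err := parseTabletType(value)
	if err != nil {
		return "", fmt.Errorf("invalid %s %v: %v", flagName, value, err)
	}
	if tt == "MASTER" {
		// Disallow MASTER so we never have to touch shard.MasterAlias here.
		return "", fmt.Errorf("%s cannot be master, use replica instead", flagName)
	}
	return tt, nil
}

func main() {
	tt, err := pickInitType("replica", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("initializing tablet as", tt)
}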
|
||||
// create a context for this whole operation
|
||||
|
@ -123,34 +127,18 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error {
|
|||
}
|
||||
}
|
||||
|
||||
// See if we need to add the tablet's cell to the shard's cell
|
||||
// list. If we do, it has to be under the shard lock.
|
||||
// See if we need to add the tablet's cell to the shard's cell list.
|
||||
if !si.HasCell(agent.TabletAlias.Cell) {
|
||||
actionNode := actionnode.UpdateShard()
|
||||
lockPath, err := actionNode.LockShard(ctx, agent.TopoServer, *initKeyspace, shard)
|
||||
if err != nil {
|
||||
return fmt.Errorf("LockShard(%v/%v) failed: %v", *initKeyspace, shard, err)
|
||||
}
|
||||
|
||||
// re-read the shard with the lock
|
||||
si, err = agent.TopoServer.GetShard(ctx, *initKeyspace, shard)
|
||||
if err != nil {
|
||||
return actionNode.UnlockShard(ctx, agent.TopoServer, *initKeyspace, shard, lockPath, err)
|
||||
}
|
||||
|
||||
// see if we really need to update it now
|
||||
if !si.HasCell(agent.TabletAlias.Cell) {
|
||||
si.Cells = append(si.Cells, agent.TabletAlias.Cell)
|
||||
|
||||
// write it back
|
||||
if err := agent.TopoServer.UpdateShard(ctx, si); err != nil {
|
||||
return actionNode.UnlockShard(ctx, agent.TopoServer, *initKeyspace, shard, lockPath, err)
|
||||
si, err = agent.TopoServer.UpdateShardFields(ctx, *initKeyspace, shard, func(shard *topodatapb.Shard) error {
|
||||
if topoproto.ShardHasCell(shard, agent.TabletAlias.Cell) {
|
||||
// Someone else already did it.
|
||||
return topo.ErrNoUpdateNeeded
|
||||
}
|
||||
}
|
||||
|
||||
// and unlock
|
||||
if err := actionNode.UnlockShard(ctx, agent.TopoServer, *initKeyspace, shard, lockPath, nil); err != nil {
|
||||
return err
|
||||
shard.Cells = append(shard.Cells, agent.TabletAlias.Cell)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't add tablet's cell to shard record: %v", err)
|
||||
}
|
||||
}
|
||||
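The new code replaces the explicit lock / re-read / write / unlock sequence with the closure-based UpdateShardFields idiom, where returning the sentinel topo.ErrNoUpdateNeeded skips the write. A minimal self-contained sketch of that idiom follows; the store, types, and sentinel below are illustrative stand-ins, not the actual topo package.

package main

import (
	"errors"
	"fmt"
)

// errNoUpdateNeeded stands in for the sentinel exposed by the topo package
// (topo.ErrNoUpdateNeeded in the diff above).
var errNoUpdateNeeded = errors.New("no update needed")

type shard struct {
	Cells []string
}

// updateShardFields reads the record, applies the closure, and only writes
// it back if the closure did not return the sentinel error.
func updateShardFields(store map[string]*shard, name string, update func(*shard) error) (*shard, error) {
	s := store[name] // the real code does a (retried) topo read here
	if err := update(s); err != nil {
		if err == errNoUpdateNeeded {
			return s, nil
		}
		return nil, err
	}
	store[name] = s // the real code writes the record back to the topo server
	return s, nil
}

func hasCell(s *shard, cell string) bool {
	for _, c := range s.Cells {
		if c == cell {
			return true
		}
	}
	return false
}

func main() {
	store := map[string]*shard{"ks/0": {Cells: []string{"cell1"}}}
	si, err := updateShardFields(store, "ks/0", func(s *shard) error {
		if hasCell(s, "cell2") {
			return errNoUpdateNeeded // someone else already added it
		}
		s.Cells = append(s.Cells, "cell2")
		return nil
	})
	if err != nil {
		fmt.Println("couldn't add tablet's cell to shard record:", err)
		return
	}
	fmt.Println("cells:", si.Cells)
}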
log.Infof("Initializing the tablet for type %v", tabletType)
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/youtube/vitess/go/history"
|
||||
"github.com/youtube/vitess/go/stats"
|
||||
"github.com/youtube/vitess/go/vt/dbconfigs"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl"
|
||||
"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
|
||||
|
@ -36,16 +35,15 @@ func TestInitTablet(t *testing.T) {
|
|||
gRPCPort := int32(3456)
|
||||
mysqlDaemon := mysqlctl.NewFakeMysqlDaemon(db)
|
||||
agent := &ActionAgent{
|
||||
TopoServer: ts,
|
||||
TabletAlias: tabletAlias,
|
||||
MysqlDaemon: mysqlDaemon,
|
||||
DBConfigs: dbconfigs.DBConfigs{},
|
||||
SchemaOverrides: nil,
|
||||
BinlogPlayerMap: nil,
|
||||
batchCtx: ctx,
|
||||
History: history.New(historyLength),
|
||||
lastHealthMapCount: new(stats.Int),
|
||||
_healthy: fmt.Errorf("healthcheck not run yet"),
|
||||
TopoServer: ts,
|
||||
TabletAlias: tabletAlias,
|
||||
MysqlDaemon: mysqlDaemon,
|
||||
DBConfigs: dbconfigs.DBConfigs{},
|
||||
SchemaOverrides: nil,
|
||||
BinlogPlayerMap: nil,
|
||||
batchCtx: ctx,
|
||||
History: history.New(historyLength),
|
||||
_healthy: fmt.Errorf("healthcheck not run yet"),
|
||||
}
|
||||
|
||||
// let's use a real tablet in a shard, that will create
|
||||
|
@ -86,7 +84,7 @@ func TestInitTablet(t *testing.T) {
|
|||
t.Errorf("wrong gRPC port for tablet: %v", ti.PortMap["grpc"])
|
||||
}
|
||||
|
||||
// try to init again, this time with health check on
|
||||
// try to init again, this time with old deprecated target_tablet_type
|
||||
*initTabletType = ""
|
||||
*targetTabletType = "replica"
|
||||
if err := agent.InitTablet(port, gRPCPort); err != nil {
|
||||
|
@ -96,7 +94,7 @@ func TestInitTablet(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
if ti.Type != topodatapb.TabletType_SPARE {
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("wrong tablet type: %v", ti.Type)
|
||||
}
|
||||
|
||||
|
@ -116,8 +114,8 @@ func TestInitTablet(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("GetTablet failed: %v", err)
|
||||
}
|
||||
// It should still be spare, because the tablet record doesn't agree.
|
||||
if ti.Type != topodatapb.TabletType_SPARE {
|
||||
// It should still be replica, because the tablet record doesn't agree.
|
||||
if ti.Type != topodatapb.TabletType_REPLICA {
|
||||
t.Errorf("wrong tablet type: %v", ti.Type)
|
||||
}
|
||||
|
||||
|
@ -137,7 +135,7 @@ func TestInitTablet(t *testing.T) {
|
|||
t.Errorf("wrong tablet type: %v", ti.Type)
|
||||
}
|
||||
|
||||
// init again with the tablet_type set, no healthcheck
|
||||
// init again with the tablet_type set, using init_tablet_type
|
||||
// (also check db name override and tags here)
|
||||
*initTabletType = "replica"
|
||||
*targetTabletType = ""
|
||||
|
|
|
@ -49,8 +49,12 @@ func (agent *ActionAgent) SetReadOnly(ctx context.Context, rdonly bool) error {
|
|||
// ChangeType changes the tablet type
|
||||
// Should be called under RPCWrapLockAction.
|
||||
func (agent *ActionAgent) ChangeType(ctx context.Context, tabletType topodatapb.TabletType) error {
|
||||
_, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, tabletType, nil)
|
||||
return err
|
||||
_, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, tabletType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
agent.runHealthCheckProtected()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sleep sleeps for the duration
|
||||
|
@ -73,8 +77,8 @@ func (agent *ActionAgent) RefreshState(ctx context.Context) {
|
|||
|
||||
// RunHealthCheck will manually run the health check on the tablet.
|
||||
// Should be called under RPCWrap.
|
||||
func (agent *ActionAgent) RunHealthCheck(ctx context.Context, targetTabletType topodatapb.TabletType) {
|
||||
agent.runHealthCheck(targetTabletType)
|
||||
func (agent *ActionAgent) RunHealthCheck(ctx context.Context) {
|
||||
agent.runHealthCheck()
|
||||
}
|
||||
|
||||
// IgnoreHealthError sets the regexp for health check errors to ignore.
|
||||
@@ -44,7 +44,7 @@ type RPCAgent interface {
|
||||
RefreshState(ctx context.Context)
|
||||
|
||||
RunHealthCheck(ctx context.Context, targetTabletType topodatapb.TabletType)
|
||||
RunHealthCheck(ctx context.Context)
|
||||
|
||||
IgnoreHealthError(ctx context.Context, pattern string) error
@@ -29,7 +29,7 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo
return fmt.Errorf("type MASTER cannot take backup, if you really need to do this, restart vttablet in replica mode")
|
||||
}
|
||||
originalType := tablet.Type
|
||||
if _, err := topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, topodatapb.TabletType_BACKUP, make(map[string]string)); err != nil {
|
||||
if _, err := topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, topodatapb.TabletType_BACKUP); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -46,13 +46,8 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo
|
|||
name := fmt.Sprintf("%v.%v", time.Now().UTC().Format("2006-01-02.150405"), topoproto.TabletAliasString(tablet.Alias))
|
||||
returnErr := mysqlctl.Backup(ctx, agent.MysqlDaemon, l, dir, name, concurrency, agent.hookExtraEnv())
|
||||
|
||||
// and change our type back to the appropriate value:
|
||||
// - if healthcheck is enabled, go to spare
|
||||
// - if not, go back to original type
|
||||
if agent.IsRunningHealthCheck() {
|
||||
originalType = topodatapb.TabletType_SPARE
|
||||
}
|
||||
_, err = topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, originalType, nil)
|
||||
// change our type back to the original value
|
||||
_, err = topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, originalType)
|
||||
if err != nil {
|
||||
// failure in changing the topology type is probably worse,
|
||||
// so returning that (we logged the snapshot error anyway)
|
||||
|
@ -62,5 +57,13 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo
|
|||
returnErr = err
|
||||
}
|
||||
|
||||
// let's update our internal state (start query service and other things)
|
||||
if err := agent.refreshTablet(ctx, "after backup"); err != nil {
|
||||
return fmt.Errorf("failed to update state after backup: %v", err)
|
||||
}
|
||||
|
||||
// and re-run health check to be sure to capture any replication delay
|
||||
agent.runHealthCheckProtected()
|
||||
|
||||
return returnErr
|
||||
}
|
||||
@@ -102,7 +102,7 @@ func (agent *ActionAgent) TabletExternallyReparented(ctx context.Context, extern
log.Infof("fastTabletExternallyReparented: executing change callback for state change to MASTER")
|
||||
oldTablet := proto.Clone(tablet).(*topodatapb.Tablet)
|
||||
tablet.Type = topodatapb.TabletType_MASTER
|
||||
tablet.HealthMap = nil
|
||||
agent.setTablet(tablet)
|
||||
|
||||
wg.Add(1)
|
||||
|
@ -183,7 +182,6 @@ func (agent *ActionAgent) finalizeTabletExternallyReparented(ctx context.Context
|
|||
updatedTablet, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias,
|
||||
func(tablet *topodatapb.Tablet) error {
|
||||
tablet.Type = topodatapb.TabletType_MASTER
|
||||
tablet.HealthMap = nil
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -205,7 +203,7 @@ func (agent *ActionAgent) finalizeTabletExternallyReparented(ctx context.Context
|
|||
// old master to be up to change its own record.
|
||||
oldMasterTablet, err := agent.TopoServer.UpdateTabletFields(ctx, oldMasterAlias,
|
||||
func(tablet *topodatapb.Tablet) error {
|
||||
tablet.Type = topodatapb.TabletType_SPARE
|
||||
tablet.Type = topodatapb.TabletType_REPLICA
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
|
|
|
@ -9,13 +9,15 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl/replication"
|
||||
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
|
||||
"github.com/youtube/vitess/go/vt/topo"
|
||||
"github.com/youtube/vitess/go/vt/topo/topoproto"
|
||||
"github.com/youtube/vitess/go/vt/topotools"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
replicationdatapb "github.com/youtube/vitess/go/vt/proto/replicationdata"
|
||||
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
|
||||
|
@ -134,7 +136,6 @@ func (agent *ActionAgent) InitMaster(ctx context.Context) (string, error) {
|
|||
// Change our type to master if not already
|
||||
if _, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error {
|
||||
tablet.Type = topodatapb.TabletType_MASTER
|
||||
tablet.HealthMap = nil
|
||||
return nil
|
||||
}); err != nil {
|
||||
return "", err
|
||||
|
@ -210,8 +211,9 @@ func (agent *ActionAgent) DemoteMaster(ctx context.Context) (string, error) {
|
|||
tablet := agent.Tablet()
|
||||
// We don't care if the QueryService state actually changed because we'll
|
||||
// let vtgate keep serving read traffic from this master (see comment below).
|
||||
if _ /* state changed */, err := agent.disallowQueries(tablet.Type, "DemoteMaster marks server rdonly"); err != nil {
|
||||
return "", fmt.Errorf("disallowQueries failed: %v", err)
|
||||
log.Infof("DemoteMaster disabling query service")
|
||||
if _ /* state changed */, err := agent.QueryServiceControl.SetServingType(tablet.Type, false, nil); err != nil {
|
||||
return "", fmt.Errorf("SetServingType(serving=false) failed: %v", err)
|
||||
}
|
||||
|
||||
// If using semi-sync, we need to disable master-side.
|
||||
|
@ -270,7 +272,7 @@ func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, position
|
|||
return "", err
|
||||
}
|
||||
|
||||
if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER, topotools.ClearHealthMap); err != nil {
|
||||
if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -280,12 +282,13 @@ func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, position
|
|||
// SlaveWasPromoted promotes a slave to master, no questions asked.
|
||||
// Should be called under RPCWrapLockAction.
|
||||
func (agent *ActionAgent) SlaveWasPromoted(ctx context.Context) error {
|
||||
_, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER, topotools.ClearHealthMap)
|
||||
_, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER)
|
||||
return err
|
||||
}
|
||||
|
||||
// SetMaster sets replication master, and waits for the
|
||||
// reparent_journal table entry up to context timeout
|
||||
// Should be called under RPCWrapLockAction.
|
||||
func (agent *ActionAgent) SetMaster(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error {
|
||||
parent, err := agent.TopoServer.GetTablet(ctx, parentAlias)
|
||||
if err != nil {
|
||||
|
@ -328,11 +331,12 @@ func (agent *ActionAgent) SetMaster(ctx context.Context, parentAlias *topodatapb
|
|||
return err
|
||||
}
|
||||
|
||||
// change our type to spare if we used to be the master
|
||||
// change our type to REPLICA if we used to be the master
|
||||
runHealthCheck := false
|
||||
_, err = agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error {
|
||||
if tablet.Type == topodatapb.TabletType_MASTER {
|
||||
tablet.Type = topodatapb.TabletType_SPARE
|
||||
tablet.HealthMap = nil
|
||||
tablet.Type = topodatapb.TabletType_REPLICA
|
||||
runHealthCheck = true
|
||||
return nil
|
||||
}
|
||||
return topo.ErrNoUpdateNeeded
|
||||
|
@ -346,21 +350,36 @@ func (agent *ActionAgent) SetMaster(ctx context.Context, parentAlias *topodatapb
|
|||
if !shouldbeReplicating || timeCreatedNS == 0 {
|
||||
return nil
|
||||
}
|
||||
return agent.MysqlDaemon.WaitForReparentJournal(ctx, timeCreatedNS)
|
||||
if err := agent.MysqlDaemon.WaitForReparentJournal(ctx, timeCreatedNS); err != nil {
|
||||
return err
|
||||
}
|
||||
if runHealthCheck {
|
||||
agent.runHealthCheckProtected()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SlaveWasRestarted updates the parent record for a tablet.
|
||||
// Should be called under RPCWrapLockAction.
|
||||
func (agent *ActionAgent) SlaveWasRestarted(ctx context.Context, swrd *actionnode.SlaveWasRestartedArgs) error {
|
||||
runHealthCheck := false
|
||||
|
||||
// Once this action completes, update authoritative tablet node first.
|
||||
_, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error {
|
||||
if _, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error {
|
||||
if tablet.Type == topodatapb.TabletType_MASTER {
|
||||
tablet.Type = topodatapb.TabletType_SPARE
|
||||
tablet.Type = topodatapb.TabletType_REPLICA
|
||||
runHealthCheck = true
|
||||
return nil
|
||||
}
|
||||
return topo.ErrNoUpdateNeeded
|
||||
})
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if runHealthCheck {
|
||||
agent.runHealthCheckProtected()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
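Both SetMaster and SlaveWasRestarted now share a small idiom: the closure passed to the tablet update captures a local flag, and the health check only runs after the write if that flag was set. A stripped-down sketch of the idiom, with an illustrative update function and types rather than the real topo API:

package main

import (
	"errors"
	"fmt"
)

var errNoUpdateNeeded = errors.New("no update needed")

type tablet struct{ Type string }

// updateTabletFields applies the closure and pretends to write the record
// back unless the closure asked to skip the update.
func updateTabletFields(t *tablet, update func(*tablet) error) error {
	if err := update(t); err != nil {
		if err == errNoUpdateNeeded {
			return nil
		}
		return err
	}
	// the real code writes the modified record to the topology server here
	return nil
}

func main() {
	t := &tablet{Type: "MASTER"}

	runHealthCheck := false
	if err := updateTabletFields(t, func(t *tablet) error {
		if t.Type == "MASTER" {
			t.Type = "REPLICA"
			runHealthCheck = true // captured flag drives the follow-up action
			return nil
		}
		return errNoUpdateNeeded
	}); err != nil {
		fmt.Println("update failed:", err)
		return
	}
	if runHealthCheck {
		fmt.Println("tablet demoted to", t.Type, "- running health check")
	}
}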
|
||||
// StopReplicationAndGetStatus stops MySQL replication, and returns the
|
||||
|
@ -405,7 +424,7 @@ func (agent *ActionAgent) PromoteSlave(ctx context.Context) (string, error) {
|
|||
return "", err
|
||||
}
|
||||
|
||||
if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER, topotools.ClearHealthMap); err != nil {
|
||||
if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -416,15 +435,6 @@ func (agent *ActionAgent) isMasterEligible() (bool, error) {
|
|||
switch agent.Tablet().Type {
|
||||
case topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA:
|
||||
return true, nil
|
||||
case topodatapb.TabletType_SPARE:
|
||||
// If we're SPARE, it could be because healthcheck is enabled.
|
||||
tt, err := topoproto.ParseTabletType(*targetTabletType)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("can't determine if tablet is master-eligible: currently SPARE and no -target_tablet_type flag specified")
|
||||
}
|
||||
if tt == topodatapb.TabletType_REPLICA {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
|
|
|
@ -71,24 +71,15 @@ func (agent *ActionAgent) loadBlacklistRules(tablet *topodatapb.Tablet, blacklis
|
|||
return nil
|
||||
}
|
||||
|
||||
// allowQueries tells QueryService to go in the serving state.
|
||||
// Returns true if the state of QueryService or the tablet type changed.
|
||||
func (agent *ActionAgent) allowQueries(tabletType topodatapb.TabletType) (bool, error) {
|
||||
return agent.QueryServiceControl.SetServingType(tabletType, true, nil)
|
||||
}
|
||||
|
||||
// disallowQueries tells QueryService to go in the *not* serving state.
|
||||
// Returns true if the state of QueryService or the tablet type changed.
|
||||
func (agent *ActionAgent) disallowQueries(tabletType topodatapb.TabletType, reason string) (bool, error) {
|
||||
log.Infof("Agent is going to disallow queries, reason: %v", reason)
|
||||
|
||||
return agent.QueryServiceControl.SetServingType(tabletType, false, nil)
|
||||
}
|
||||
|
||||
func (agent *ActionAgent) enterLameduck(reason string) {
|
||||
// lameduck changes the QueryServiceControl state to lameduck,
// broadcasts the new health, then sleeps for the grace period, to give
// clients time to get the new status.
func (agent *ActionAgent) lameduck(reason string) {
|
||||
log.Infof("Agent is entering lameduck, reason: %v", reason)
|
||||
|
||||
agent.QueryServiceControl.EnterLameduck()
|
||||
agent.broadcastHealth()
|
||||
time.Sleep(*gracePeriod)
|
||||
log.Infof("Agent is leaving lameduck")
|
||||
}
|
||||
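The lameduck helper above is simply "flip state, broadcast once, then wait out the grace period". A tiny sketch of the same shape, using a hypothetical grace-period flag rather than the real one:

package main

import (
	"flag"
	"fmt"
	"time"
)

// gracePeriod is a hypothetical stand-in for the real grace-period flag.
var gracePeriod = flag.Duration("grace_period", 100*time.Millisecond, "how long to stay in lameduck")

type server struct{ lameduck bool }

func (s *server) enterLameduck()   { s.lameduck = true }
func (s *server) broadcastHealth() { fmt.Println("broadcast: lameduck =", s.lameduck) }

// runLameduck flips the state, tells clients once, then waits so they have
// time to act on the new status before queries actually stop.
func (s *server) runLameduck(reason string) {
	fmt.Println("entering lameduck:", reason)
	s.enterLameduck()
	s.broadcastHealth()
	time.Sleep(*gracePeriod)
	fmt.Println("leaving lameduck")
}

func main() {
	flag.Parse()
	(&server{}).runLameduck("demotion")
}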
|
||||
func (agent *ActionAgent) broadcastHealth() {
|
||||
|
@ -190,11 +181,11 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl
|
|||
|
||||
allowQuery := topo.IsRunningQueryService(newTablet.Type)
|
||||
broadcastHealth := false
|
||||
runUpdateStream := allowQuery
|
||||
|
||||
// Read the shard to get SourceShards / TabletControlMap if
|
||||
// we're going to use it.
|
||||
var shardInfo *topo.ShardInfo
|
||||
var tabletControl *topodatapb.Shard_TabletControl
|
||||
var err error
|
||||
var disallowQueryReason string
|
||||
var blacklistedTables []string
|
||||
|
@ -215,20 +206,22 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl
|
|||
if topo.InCellList(newTablet.Alias.Cell, tc.Cells) {
|
||||
if tc.DisableQueryService {
|
||||
allowQuery = false
|
||||
disallowQueryReason = "query service disabled by tablet control"
|
||||
disallowQueryReason = "TabletControl.DisableQueryService set"
|
||||
}
|
||||
blacklistedTables = tc.BlacklistedTables
|
||||
tabletControl = tc
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
disallowQueryReason = fmt.Sprintf("not a serving tablet type(%v)", newTablet.Type)
|
||||
}
|
||||
agent.setServicesDesiredState(disallowQueryReason, runUpdateStream)
|
||||
if updateBlacklistedTables {
|
||||
if err := agent.loadBlacklistRules(newTablet, blacklistedTables); err != nil {
|
||||
// FIXME(alainjobart) how to handle this error?
|
||||
log.Errorf("Cannot update blacklisted tables rule: %v", err)
|
||||
} else {
|
||||
agent.setBlacklistedTables(blacklistedTables)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -248,7 +241,7 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl
|
|||
}
|
||||
}
|
||||
|
||||
if stateChanged, err := agent.allowQueries(newTablet.Type); err == nil {
|
||||
if stateChanged, err := agent.QueryServiceControl.SetServingType(newTablet.Type, true, nil); err == nil {
|
||||
// If the state changed, broadcast to vtgate.
|
||||
// (e.g. this happens when the tablet was already master, but it just
|
||||
// changed from NOT_SERVING to SERVING due to
|
||||
|
@ -257,39 +250,35 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl
|
|||
broadcastHealth = true
|
||||
}
|
||||
} else {
|
||||
runUpdateStream = false
|
||||
log.Errorf("Cannot start query service: %v", err)
|
||||
}
|
||||
} else {
|
||||
// Query service should be stopped.
|
||||
if (oldTablet.Type == topodatapb.TabletType_REPLICA ||
|
||||
oldTablet.Type == topodatapb.TabletType_RDONLY) &&
|
||||
newTablet.Type == topodatapb.TabletType_SPARE {
|
||||
if topo.IsSubjectToLameduck(oldTablet.Type) &&
|
||||
newTablet.Type == topodatapb.TabletType_SPARE &&
|
||||
*gracePeriod > 0 {
|
||||
// When a non-MASTER serving type is going SPARE,
|
||||
// put query service in lameduck during gracePeriod.
|
||||
agent.enterLameduck(disallowQueryReason)
|
||||
agent.broadcastHealth()
|
||||
time.Sleep(*gracePeriod)
|
||||
agent.lameduck(disallowQueryReason)
|
||||
}
|
||||
|
||||
if stateChanged, err := agent.disallowQueries(newTablet.Type, disallowQueryReason); err == nil {
|
||||
log.Infof("Disabling query service on type change, reason: %v", disallowQueryReason)
|
||||
if stateChanged, err := agent.QueryServiceControl.SetServingType(newTablet.Type, false, nil); err == nil {
|
||||
// If the state changed, broadcast to vtgate.
|
||||
// (e.g. this happens when the tablet was already master, but it just
|
||||
// changed from NOT_SERVING to SERVING because filtered replication was
|
||||
// enabled.
|
||||
// changed from SERVING to NOT_SERVING because filtered replication was
|
||||
// enabled.)
|
||||
if stateChanged {
|
||||
broadcastHealth = true
|
||||
}
|
||||
} else {
|
||||
log.Errorf("disallowQueries failed: %v", err)
|
||||
log.Errorf("SetServingType(serving=false) failed: %v", err)
|
||||
}
|
||||
}
|
||||
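The two branches above share one pattern: ask the query service for the desired serving state, and only re-broadcast health when the call reports that the state actually changed. A compact sketch of that contract, with a simplified stand-in type rather than the real QueryServiceControl interface:

package main

import "fmt"

// queryService is a minimal stand-in for QueryServiceControl:
// SetServingType returns whether the serving state actually changed.
type queryService struct {
	tabletType string
	serving    bool
}

func (qs *queryService) SetServingType(tabletType string, serving bool) (stateChanged bool, err error) {
	if qs.tabletType == tabletType && qs.serving == serving {
		return false, nil
	}
	qs.tabletType = tabletType
	qs.serving = serving
	return true, nil
}

func main() {
	qs := &queryService{tabletType: "MASTER", serving: true}
	broadcastHealth := false

	// Same shape as changeCallback: flip to NOT_SERVING and broadcast only
	// if the state really changed.
	if stateChanged, err := qs.SetServingType("MASTER", false); err == nil {
		if stateChanged {
			broadcastHealth = true
		}
	} else {
		fmt.Println("SetServingType(serving=false) failed:", err)
	}
	fmt.Println("broadcast needed:", broadcastHealth)
}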
|
||||
// save the tabletControl we've been using, so the background
|
||||
// healthcheck makes the same decisions as we've been making.
|
||||
agent.setTabletControl(tabletControl)
|
||||
|
||||
// update stream needs to be started or stopped too
|
||||
if topo.IsRunningUpdateStream(newTablet.Type) {
|
||||
if topo.IsRunningUpdateStream(newTablet.Type) && runUpdateStream {
|
||||
agent.UpdateStream.Enable()
|
||||
} else {
|
||||
agent.UpdateStream.Disable()
|
||||
@@ -64,7 +64,7 @@ type TabletManagerClient interface {
RefreshState(ctx context.Context, tablet *topo.TabletInfo) error
|
||||
|
||||
// RunHealthCheck asks the remote tablet to run a health check cycle
|
||||
RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo, targetTabletType topodatapb.TabletType) error
|
||||
RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo) error
|
||||
|
||||
// IgnoreHealthError sets the regexp for health errors to ignore.
|
||||
IgnoreHealthError(ctx context.Context, tablet *topo.TabletInfo, pattern string) error
|
||||
|
|
|
@ -228,7 +228,7 @@ func buildStreamComment(tableInfo *TableInfo, pkValueList [][]sqltypes.Value, se
|
|||
fmt.Fprintf(buf, " /* _stream %s (", tableInfo.Name)
|
||||
// We assume the first index exists, and is the pk
|
||||
for _, pkName := range tableInfo.Indexes[0].Columns {
|
||||
buf.WriteString(pkName)
|
||||
buf.WriteString(pkName.Original())
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
buf.WriteString(")")
|
||||
|
|
|
@ -577,7 +577,7 @@ func TestNocacheCases(t *testing.T) {
|
|||
&framework.TestCase{
|
||||
Query: "insert into vitess_mixed_case(col1, col2) values(1, 2)",
|
||||
Rewritten: []string{
|
||||
"insert into vitess_mixed_case(col1, col2) values (1, 2) /* _stream vitess_mixed_case (col1 ) (1 )",
|
||||
"insert into vitess_mixed_case(col1, col2) values (1, 2) /* _stream vitess_mixed_case (Col1 ) (1 )",
|
||||
},
|
||||
RowsAffected: 1,
|
||||
},
|
||||
|
|
|
@ -30,7 +30,7 @@ func analyzeUpdate(upd *sqlparser.Update, getTable TableGetter) (plan *ExecPlan,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name != "PRIMARY" {
|
||||
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name.Lowered() != "primary" {
|
||||
log.Warningf("no primary key for table %s", tableName)
|
||||
plan.Reason = ReasonTableNoIndex
|
||||
return plan, nil
|
||||
|
@ -80,7 +80,7 @@ func analyzeDelete(del *sqlparser.Delete, getTable TableGetter) (plan *ExecPlan,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name != "PRIMARY" {
|
||||
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name.Lowered() != "primary" {
|
||||
log.Warningf("no primary key for table %s", tableName)
|
||||
plan.Reason = ReasonTableNoIndex
|
||||
return plan, nil
|
||||
|
@ -114,7 +114,7 @@ func analyzeSet(set *sqlparser.Set) (plan *ExecPlan) {
|
|||
return plan
|
||||
}
|
||||
updateExpr := set.Exprs[0]
|
||||
plan.SetKey = string(updateExpr.Name.Name)
|
||||
plan.SetKey = updateExpr.Name.Name.Original()
|
||||
numExpr, ok := updateExpr.Expr.(sqlparser.NumVal)
|
||||
if !ok {
|
||||
return plan
|
||||
|
@ -130,7 +130,7 @@ func analyzeSet(set *sqlparser.Set) (plan *ExecPlan) {
|
|||
|
||||
func analyzeUpdateExpressions(exprs sqlparser.UpdateExprs, pkIndex *schema.Index) (pkValues []interface{}, err error) {
|
||||
for _, expr := range exprs {
|
||||
index := pkIndex.FindColumn(sqlparser.GetColName(expr.Name))
|
||||
index := pkIndex.FindColumn(sqlparser.GetColName(expr.Name).Original())
|
||||
if index == -1 {
|
||||
continue
|
||||
}
|
||||
|
@ -228,7 +228,7 @@ func analyzeSelect(sel *sqlparser.Select, getTable TableGetter) (plan *ExecPlan,
|
|||
}
|
||||
|
||||
// This check should never fail because we only cache tables with primary keys.
|
||||
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name != "PRIMARY" {
|
||||
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name.Lowered() != "primary" {
|
||||
panic("unexpected")
|
||||
}
|
||||
|
||||
|
@ -264,14 +264,14 @@ func analyzeSelect(sel *sqlparser.Select, getTable TableGetter) (plan *ExecPlan,
|
|||
plan.Reason = ReasonNoIndexMatch
|
||||
return plan, nil
|
||||
}
|
||||
plan.IndexUsed = indexUsed.Name
|
||||
plan.IndexUsed = indexUsed.Name.Original()
|
||||
if plan.IndexUsed == "PRIMARY" {
|
||||
plan.Reason = ReasonPKIndex
|
||||
return plan, nil
|
||||
}
|
||||
var missing bool
|
||||
for _, cnum := range selects {
|
||||
if indexUsed.FindDataColumn(tableInfo.Columns[cnum].Name) != -1 {
|
||||
if indexUsed.FindDataColumn(tableInfo.Columns[cnum].Name.Original()) != -1 {
|
||||
continue
|
||||
}
|
||||
missing = true
|
||||
|
@ -298,11 +298,11 @@ func analyzeSelectExprs(exprs sqlparser.SelectExprs, table *schema.Table) (selec
|
|||
}
|
||||
case *sqlparser.NonStarExpr:
|
||||
name := sqlparser.GetColName(expr.Expr)
|
||||
if name == "" {
|
||||
if name.Original() == "" {
|
||||
// Not a simple column name.
|
||||
return nil, nil
|
||||
}
|
||||
colIndex := table.FindColumn(name)
|
||||
colIndex := table.FindColumn(name.Original())
|
||||
if colIndex == -1 {
|
||||
return nil, fmt.Errorf("column %s not found in table %s", name, table.Name)
|
||||
}
|
||||
|
@ -391,7 +391,7 @@ func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name != "PRIMARY" {
|
||||
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name.Lowered() != "primary" {
|
||||
log.Warningf("no primary key for table %s", tableName)
|
||||
plan.Reason = ReasonTableNoIndex
|
||||
return plan, nil
|
||||
|
@ -479,7 +479,7 @@ func getInsertPKColumns(columns sqlparser.Columns, tableInfo *schema.Table) (pkC
|
|||
pkColumnNumbers[i] = -1
|
||||
}
|
||||
for i, column := range columns {
|
||||
index := pkIndex.FindColumn(sqlparser.GetColName(column.(*sqlparser.NonStarExpr).Expr))
|
||||
index := pkIndex.FindColumn(sqlparser.GetColName(column.(*sqlparser.NonStarExpr).Expr).Original())
|
||||
if index == -1 {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ func getPKValues(conditions []sqlparser.BoolExpr, pkIndex *schema.Index) (pkValu
|
|||
if !sqlparser.StringIn(condition.Operator, sqlparser.EqualStr, sqlparser.InStr) {
|
||||
return nil, nil
|
||||
}
|
||||
index := pkindexScore.FindMatch(string(condition.Left.(*sqlparser.ColName).Name))
|
||||
index := pkindexScore.FindMatch(condition.Left.(*sqlparser.ColName).Name.Original())
|
||||
if index == -1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -103,9 +103,9 @@ func getIndexMatch(conditions []sqlparser.BoolExpr, indexes []*schema.Index) *sc
|
|||
var col string
|
||||
switch condition := condition.(type) {
|
||||
case *sqlparser.ComparisonExpr:
|
||||
col = string(condition.Left.(*sqlparser.ColName).Name)
|
||||
col = condition.Left.(*sqlparser.ColName).Name.Original()
|
||||
case *sqlparser.RangeCond:
|
||||
col = string(condition.Left.(*sqlparser.ColName).Name)
|
||||
col = condition.Left.(*sqlparser.ColName).Name.Original()
|
||||
default:
|
||||
panic("unreachaable")
|
||||
}
|
||||
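Most of the planner changes in this diff are mechanical: raw strings become sqlparser.ColIdent / cistring.CIString values that remember the original spelling but compare case-insensitively. The following self-contained sketch shows the idea behind such a type with the accessors used above (a constructor plus Original, Lowered, and Equal); it is an illustration of the pattern, not the vitess implementation.

package main

import (
	"fmt"
	"strings"
)

// ciString keeps the original spelling for display and SQL generation but
// compares on the lowercased form.
type ciString struct {
	original string
	lowered  string
}

func newCIString(s string) ciString {
	return ciString{original: s, lowered: strings.ToLower(s)}
}

func (c ciString) Original() string      { return c.original }
func (c ciString) Lowered() string       { return c.lowered }
func (c ciString) Equal(o ciString) bool { return c.lowered == o.lowered }

func main() {
	indexName := newCIString("PRIMARY")
	fmt.Println(indexName.Lowered() == "primary") // index lookups stay case-insensitive

	col := newCIString("CamelCase")
	fmt.Println(col.Original())                  // generated SQL keeps the original case: CamelCase
	fmt.Println(col.Equal(newCIString("camelcase"))) // true
}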
@@ -7,6 +7,7 @@ package planbuilder
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/vt/schema"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
)
|
||||
|
@ -104,7 +105,7 @@ func GenerateDeleteOuterQuery(del *sqlparser.Delete) *sqlparser.ParsedQuery {
|
|||
|
||||
// GenerateSelectSubquery generates the subquery for selects.
|
||||
func GenerateSelectSubquery(sel *sqlparser.Select, tableInfo *schema.Table, index string) *sqlparser.ParsedQuery {
|
||||
hint := &sqlparser.IndexHints{Type: sqlparser.UseStr, Indexes: []sqlparser.SQLName{sqlparser.SQLName(index)}}
|
||||
hint := &sqlparser.IndexHints{Type: sqlparser.UseStr, Indexes: []sqlparser.ColIdent{sqlparser.NewColIdent(index)}}
|
||||
tableExpr := sel.From[0].(*sqlparser.AliasedTableExpr)
|
||||
savedHint := tableExpr.Hints
|
||||
tableExpr.Hints = hint
|
||||
|
@ -146,7 +147,7 @@ func GenerateDeleteSubquery(del *sqlparser.Delete, tableInfo *schema.Table) *sql
|
|||
}
|
||||
|
||||
// GenerateSubquery generates a subquery based on the input parameters.
|
||||
func GenerateSubquery(columns []string, table *sqlparser.AliasedTableExpr, where *sqlparser.Where, order sqlparser.OrderBy, limit *sqlparser.Limit, forUpdate bool) *sqlparser.ParsedQuery {
|
||||
func GenerateSubquery(columns []cistring.CIString, table *sqlparser.AliasedTableExpr, where *sqlparser.Where, order sqlparser.OrderBy, limit *sqlparser.Limit, forUpdate bool) *sqlparser.ParsedQuery {
|
||||
buf := sqlparser.NewTrackedBuffer(nil)
|
||||
if limit == nil {
|
||||
limit = execLimit
|
||||
|
@ -154,9 +155,9 @@ func GenerateSubquery(columns []string, table *sqlparser.AliasedTableExpr, where
|
|||
fmt.Fprintf(buf, "select ")
|
||||
i := 0
|
||||
for i = 0; i < len(columns)-1; i++ {
|
||||
fmt.Fprintf(buf, "%s, ", columns[i])
|
||||
fmt.Fprintf(buf, "%s, ", columns[i].Original())
|
||||
}
|
||||
fmt.Fprintf(buf, "%s", columns[i])
|
||||
fmt.Fprintf(buf, "%s", columns[i].Original())
|
||||
buf.Myprintf(" from %v%v%v%v", table, where, order, limit)
|
||||
if forUpdate {
|
||||
buf.Myprintf(sqlparser.ForUpdateStr)
|
||||
|
@ -167,7 +168,7 @@ func GenerateSubquery(columns []string, table *sqlparser.AliasedTableExpr, where
|
|||
func writeColumnList(buf *sqlparser.TrackedBuffer, columns []schema.TableColumn) {
|
||||
i := 0
|
||||
for i = 0; i < len(columns)-1; i++ {
|
||||
fmt.Fprintf(buf, "%s, ", columns[i].Name)
|
||||
fmt.Fprintf(buf, "%v, ", columns[i].Name)
|
||||
}
|
||||
fmt.Fprintf(buf, "%s", columns[i].Name)
|
||||
fmt.Fprintf(buf, "%v", columns[i].Name)
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"time"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/hack"
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
|
@ -443,7 +444,7 @@ func (qre *QueryExecutor) fetchMulti(pkRows [][]sqltypes.Value, limit int64) (*s
|
|||
if len(missingRows) != 0 {
|
||||
bv := map[string]interface{}{
|
||||
"#pk": sqlparser.TupleEqualityList{
|
||||
Columns: qre.plan.TableInfo.Indexes[0].Columns,
|
||||
Columns: cistring.ToStrings(qre.plan.TableInfo.Indexes[0].Columns),
|
||||
Rows: missingRows,
|
||||
},
|
||||
}
|
||||
|
@ -487,7 +488,7 @@ func (qre *QueryExecutor) spotCheck(rcresult RCResult, pk []sqltypes.Value) erro
|
|||
qre.qe.queryServiceStats.SpotCheckCount.Add(1)
|
||||
bv := map[string]interface{}{
|
||||
"#pk": sqlparser.TupleEqualityList{
|
||||
Columns: qre.plan.TableInfo.Indexes[0].Columns,
|
||||
Columns: cistring.ToStrings(qre.plan.TableInfo.Indexes[0].Columns),
|
||||
Rows: [][]sqltypes.Value{pk},
|
||||
},
|
||||
}
|
||||
|
@ -664,7 +665,7 @@ func (qre *QueryExecutor) execDMLPKRows(conn poolConn, query *sqlparser.ParsedQu
|
|||
}
|
||||
bsc := buildStreamComment(qre.plan.TableInfo, pkRows, secondaryList)
|
||||
qre.bindVars["#pk"] = sqlparser.TupleEqualityList{
|
||||
Columns: qre.plan.TableInfo.Indexes[0].Columns,
|
||||
Columns: cistring.ToStrings(qre.plan.TableInfo.Indexes[0].Columns),
|
||||
Rows: pkRows,
|
||||
}
|
||||
r, err := qre.directFetch(conn, query, qre.bindVars, bsc)
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
querypb "github.com/youtube/vitess/go/vt/proto/query"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
|
@ -24,7 +25,7 @@ type QuerySplitter struct {
|
|||
schemaInfo *SchemaInfo
|
||||
sel *sqlparser.Select
|
||||
tableName string
|
||||
splitColumn string
|
||||
splitColumn cistring.CIString
|
||||
rowCount int64
|
||||
}
|
||||
|
||||
|
@ -50,7 +51,7 @@ func NewQuerySplitter(
|
|||
bindVariables: bindVariables,
|
||||
splitCount: splitCount,
|
||||
schemaInfo: schemaInfo,
|
||||
splitColumn: splitColumn,
|
||||
splitColumn: cistring.New(splitColumn),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -88,15 +89,15 @@ func (qs *QuerySplitter) validateQuery() error {
|
|||
if len(tableInfo.PKColumns) == 0 {
|
||||
return fmt.Errorf("no primary keys")
|
||||
}
|
||||
if qs.splitColumn != "" {
|
||||
if qs.splitColumn.Original() != "" {
|
||||
for _, index := range tableInfo.Indexes {
|
||||
for _, column := range index.Columns {
|
||||
if qs.splitColumn == column {
|
||||
if qs.splitColumn.Equal(column) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("split column is not indexed or does not exist in table schema, SplitColumn: %s, TableInfo.Table: %v", qs.splitColumn, tableInfo.Table)
|
||||
return fmt.Errorf("split column is not indexed or does not exist in table schema, SplitColumn: %v, TableInfo.Table: %v", qs.splitColumn, tableInfo.Table)
|
||||
}
|
||||
qs.splitColumn = tableInfo.GetPKColumn(0).Name
|
||||
return nil
|
||||
|
@ -151,7 +152,7 @@ func (qs *QuerySplitter) getWhereClause(whereClause *sqlparser.Where, bindVars m
|
|||
return whereClause
|
||||
}
|
||||
pk := &sqlparser.ColName{
|
||||
Name: sqlparser.SQLName(qs.splitColumn),
|
||||
Name: sqlparser.ColIdent(qs.splitColumn),
|
||||
}
|
||||
if !start.IsNull() {
|
||||
startClause = &sqlparser.ComparisonExpr{
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
querypb "github.com/youtube/vitess/go/vt/proto/query"
|
||||
"github.com/youtube/vitess/go/vt/schema"
|
||||
|
@ -143,7 +144,7 @@ func TestGetWhereClause(t *testing.T) {
|
|||
sql := "select * from test_table where count > :count"
|
||||
statement, _ := sqlparser.Parse(sql)
|
||||
splitter.sel, _ = statement.(*sqlparser.Select)
|
||||
splitter.splitColumn = "id"
|
||||
splitter.splitColumn = cistring.New("id")
|
||||
bindVars := make(map[string]interface{})
|
||||
// no boundary case, start = end = nil, should not change the where clause
|
||||
nilValue := sqltypes.Value{}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"github.com/golang/mock/gomock"
|
||||
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
"github.com/youtube/vitess/go/vt/tabletserver/splitquery/splitquery_testing"
|
||||
)
|
||||
|
||||
|
@ -111,7 +112,7 @@ func TestEqualSplitsAlgorithm(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenSplitCount(
|
||||
"select * from test_table where int_col > 5",
|
||||
/* bindVariables */ nil,
|
||||
[]string{testCase.SplitColumn},
|
||||
[]sqlparser.ColIdent{sqlparser.NewColIdent(testCase.SplitColumn)},
|
||||
testCase.SplitCount,
|
||||
GetSchema(),
|
||||
)
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/youtube/vitess/go/vt/schema"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
|
@ -19,7 +20,10 @@ func Example() {
|
|||
splitParams, err := NewSplitParamsGivenSplitCount(
|
||||
"SELECT * FROM table WHERE id > :id", // SQL query
|
||||
map[string]interface{}{"id": int64(5)}, // Bind Variables
|
||||
[]string{"id", "user_id"}, // SplitColumns
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
}, // SplitColumns
|
||||
1000, // SplitCount
|
||||
schema)
|
||||
if err != nil {
|
||||
|
|
|
@ -155,13 +155,13 @@ func buildNoninitialQuery(
|
|||
}
|
||||
}
|
||||
|
||||
func convertColumnNamesToSelectExprs(columnNames []string) sqlparser.SelectExprs {
|
||||
func convertColumnNamesToSelectExprs(columnNames []sqlparser.ColIdent) sqlparser.SelectExprs {
|
||||
result := make([]sqlparser.SelectExpr, 0, len(columnNames))
|
||||
for _, columnName := range columnNames {
|
||||
result = append(result,
|
||||
&sqlparser.NonStarExpr{
|
||||
Expr: &sqlparser.ColName{
|
||||
Name: sqlparser.SQLName(columnName),
|
||||
Name: columnName,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -175,12 +175,12 @@ func buildLimitClause(offset, rowcount int64) *sqlparser.Limit {
|
|||
}
|
||||
}
|
||||
|
||||
func buildOrderByClause(splitColumns []string) sqlparser.OrderBy {
|
||||
func buildOrderByClause(splitColumns []sqlparser.ColIdent) sqlparser.OrderBy {
|
||||
result := make(sqlparser.OrderBy, 0, len(splitColumns))
|
||||
for _, splitColumn := range splitColumns {
|
||||
result = append(result,
|
||||
&sqlparser.Order{
|
||||
Expr: &sqlparser.ColName{Name: sqlparser.SQLName(splitColumn)},
|
||||
Expr: &sqlparser.ColName{Name: splitColumn},
|
||||
Direction: sqlparser.AscScr,
|
||||
},
|
||||
)
|
||||
|
@ -192,10 +192,10 @@ const (
|
|||
prevBindVariablePrefix string = "_splitquery_prev_"
|
||||
)
|
||||
|
||||
func buildPrevBindVariableNames(splitColumns []string) []string {
|
||||
func buildPrevBindVariableNames(splitColumns []sqlparser.ColIdent) []string {
|
||||
result := make([]string, 0, len(splitColumns))
|
||||
for _, splitColumn := range splitColumns {
|
||||
result = append(result, prevBindVariablePrefix+splitColumn)
|
||||
result = append(result, prevBindVariablePrefix+splitColumn.Lowered())
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
"github.com/youtube/vitess/go/vt/tabletserver/splitquery/splitquery_testing"
|
||||
)
|
||||
|
||||
|
@ -16,7 +17,10 @@ func TestMultipleBoundaries(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table where int_col > 5",
|
||||
nil, /* bindVariables */
|
||||
[]string{"id", "user_id"}, /* splitColumns */
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
}, /* splitColumns */
|
||||
1000,
|
||||
GetSchema(),
|
||||
)
|
||||
|
@ -92,7 +96,10 @@ func TestSmallNumberOfRows(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table where int_col > 5",
|
||||
nil, /* bindVariables */
|
||||
[]string{"id", "user_id"}, /* splitColumns */
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
}, /* splitColumns */
|
||||
1000,
|
||||
GetSchema(),
|
||||
)
|
||||
|
@ -129,7 +136,10 @@ func TestSQLExecuterReturnsError(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table where int_col > 5",
|
||||
nil, /* bindVariables */
|
||||
[]string{"id", "user_id"}, /* splitColumns */
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
}, /* splitColumns */
|
||||
1000,
|
||||
GetSchema(),
|
||||
)
|
||||
|
|
|
@ -2,8 +2,8 @@ package splitquery
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
querypb "github.com/youtube/vitess/go/vt/proto/query"
|
||||
"github.com/youtube/vitess/go/vt/schema"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
|
@ -17,7 +17,7 @@ type SplitParams struct {
|
|||
// parameter in each constructor.
|
||||
sql string
|
||||
bindVariables map[string]interface{}
|
||||
splitColumns []string
|
||||
splitColumns []sqlparser.ColIdent
|
||||
// Exactly one of splitCount, numRowsPerQueryPart will be given by the caller.
|
||||
// See the two NewSplitParams... constructors below. The other field member
|
||||
// will be computed using the equation: max(1, floor(numTableRows / x)),
|
||||
|
@ -59,7 +59,7 @@ type SplitParams struct {
|
|||
func NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
sql string,
|
||||
bindVariables map[string]interface{},
|
||||
splitColumns []string,
|
||||
splitColumns []sqlparser.ColIdent,
|
||||
numRowsPerQueryPart int64,
|
||||
schema map[string]*schema.Table) (*SplitParams, error) {
|
||||
if numRowsPerQueryPart <= 0 {
|
||||
|
@ -99,7 +99,7 @@ func NewSplitParamsGivenNumRowsPerQueryPart(
|
|||
func NewSplitParamsGivenSplitCount(
|
||||
sql string,
|
||||
bindVariables map[string]interface{},
|
||||
splitColumns []string,
|
||||
splitColumns []sqlparser.ColIdent,
|
||||
splitCount int64,
|
||||
schema map[string]*schema.Table) (*SplitParams, error) {
|
||||
|
||||
|
@ -116,13 +116,14 @@ func NewSplitParamsGivenSplitCount(
|
|||
return result, nil
|
||||
}
|
||||
|
||||
// GetSplitTableName returns the name of the table to split.
|
||||
func (sp *SplitParams) GetSplitTableName() string {
|
||||
return sp.splitTableSchema.Name
|
||||
}
|
||||
|
||||
// newSplitParams validates and initializes all the fields except splitCount and
|
||||
// numRowsPerQueryPart. It contains the common code for the constructors above.
|
||||
func newSplitParams(sql string, bindVariables map[string]interface{}, splitColumns []string,
|
||||
func newSplitParams(sql string, bindVariables map[string]interface{}, splitColumns []sqlparser.ColIdent,
|
||||
schema map[string]*schema.Table) (*SplitParams, error) {
|
||||
|
||||
statement, err := sqlparser.Parse(sql)
|
||||
|
@ -166,7 +167,7 @@ func newSplitParams(sql string, bindVariables map[string]interface{}, splitColum
|
|||
// Get the split-columns types.
|
||||
splitColumnTypes := make([]querypb.Type, 0, len(splitColumns))
|
||||
for _, splitColumn := range splitColumns {
|
||||
i := tableSchema.FindColumn(splitColumn)
|
||||
i := tableSchema.FindColumn(splitColumn.Original())
|
||||
if i == -1 {
|
||||
return nil, fmt.Errorf("can't find split-column: %v", splitColumn)
|
||||
}
|
||||
|
@ -185,33 +186,33 @@ func newSplitParams(sql string, bindVariables map[string]interface{}, splitColum
|
|||
|
||||
// getPrimaryKeyColumns returns the list of primary-key column names, in order, for the
|
||||
// given table.
|
||||
func getPrimaryKeyColumns(table *schema.Table) []string {
|
||||
result := make([]string, 0, len(table.PKColumns))
|
||||
func getPrimaryKeyColumns(table *schema.Table) []sqlparser.ColIdent {
|
||||
result := make([]sqlparser.ColIdent, 0, len(table.PKColumns))
|
||||
for _, pkColIndex := range table.PKColumns {
|
||||
result = append(result, table.Columns[pkColIndex].Name)
|
||||
result = append(result, sqlparser.ColIdent(table.Columns[pkColIndex].Name))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// areColumnsAPrefixOfAnIndex returns true if 'columns' form a prefix of the columns that
|
||||
// make up some index in 'table'.
|
||||
func areColumnsAPrefixOfAnIndex(columns []string, table *schema.Table) bool {
|
||||
func areColumnsAPrefixOfAnIndex(columns []sqlparser.ColIdent, table *schema.Table) bool {
|
||||
for _, index := range table.Indexes {
|
||||
if isStringSlicePrefix(columns, index.Columns) {
|
||||
if isColIdentSlicePrefix(columns, index.Columns) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isStringSlicePrefix returns true if 'potentialPrefix' is a prefix of the slice
|
||||
// isColIdentSlicePrefix returns true if 'potentialPrefix' is a prefix of the slice
|
||||
// 'slice'.
|
||||
func isStringSlicePrefix(potentialPrefix []string, slice []string) bool {
|
||||
func isColIdentSlicePrefix(potentialPrefix []sqlparser.ColIdent, slice []cistring.CIString) bool {
|
||||
if len(potentialPrefix) > len(slice) {
|
||||
return false
|
||||
}
|
||||
for i := range potentialPrefix {
|
||||
if potentialPrefix[i] != slice[i] {
|
||||
if !potentialPrefix[i].Equal(sqlparser.ColIdent(slice[i])) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -221,5 +222,14 @@ func isStringSlicePrefix(potentialPrefix []string, slice []string) bool {
|
|||
// areSplitColumnsPrimaryKey returns true if the splitColumns in 'splitParams'
|
||||
// are the primary key columns in order.
|
||||
func (sp *SplitParams) areSplitColumnsPrimaryKey() bool {
|
||||
return reflect.DeepEqual(sp.splitColumns, getPrimaryKeyColumns(sp.splitTableSchema))
|
||||
pkCols := getPrimaryKeyColumns(sp.splitTableSchema)
|
||||
if len(sp.splitColumns) != len(pkCols) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(sp.splitColumns); i++ {
|
||||
if !sp.splitColumns[i].Equal(pkCols[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
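The helpers above replace reflect.DeepEqual and plain string comparison with element-wise, case-insensitive checks. A small sketch of those two checks over a simplified identifier type (again a stand-in, not the real ColIdent/CIString):

package main

import (
	"fmt"
	"strings"
)

type ident string

func (i ident) equal(o ident) bool {
	return strings.EqualFold(string(i), string(o))
}

// isPrefix reports whether potentialPrefix is a case-insensitive prefix of slice.
func isPrefix(potentialPrefix, slice []ident) bool {
	if len(potentialPrefix) > len(slice) {
		return false
	}
	for i := range potentialPrefix {
		if !potentialPrefix[i].equal(slice[i]) {
			return false
		}
	}
	return true
}

// sameColumns reports whether the two lists name the same columns in order,
// the way areSplitColumnsPrimaryKey now compares split columns to the PK.
func sameColumns(a, b []ident) bool {
	return len(a) == len(b) && isPrefix(a, b)
}

func main() {
	splitColumns := []ident{"ID", "user_id"}
	pkColumns := []ident{"id", "User_ID"}
	indexColumns := []ident{"id", "user_id", "extra"}

	fmt.Println(isPrefix(splitColumns, indexColumns)) // true: prefix of an index
	fmt.Println(sameColumns(splitColumns, pkColumns)) // true: same PK, different case
}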
@@ -29,9 +29,9 @@ func NewSplitter(splitParams *SplitParams, algorithm SplitAlgorithmInterface) *S
splitter.endBindVariableNames = make([]string, 0, len(splitter.splitParams.splitColumns))
|
||||
for _, splitColumn := range splitter.splitParams.splitColumns {
|
||||
splitter.startBindVariableNames = append(
|
||||
splitter.startBindVariableNames, startBindVariablePrefix+splitColumn)
|
||||
splitter.startBindVariableNames, startBindVariablePrefix+splitColumn.Lowered())
|
||||
splitter.endBindVariableNames = append(
|
||||
splitter.endBindVariableNames, endBindVariablePrefix+splitColumn)
|
||||
splitter.endBindVariableNames, endBindVariablePrefix+splitColumn.Lowered())
|
||||
}
|
||||
splitter.initQueryPartSQLs()
|
||||
return &splitter
|
||||
|
@ -129,10 +129,10 @@ func populateBoundaryBindVariables(
|
|||
}
|
||||
}
|
||||
|
||||
func convertColumnNamesToValExpr(colNames []string) []sqlparser.ValExpr {
|
||||
func convertColumnNamesToValExpr(colNames []sqlparser.ColIdent) []sqlparser.ValExpr {
|
||||
valExprs := make([]sqlparser.ValExpr, 0, len(colNames))
|
||||
for _, colName := range colNames {
|
||||
valExprs = append(valExprs, &sqlparser.ColName{Name: sqlparser.SQLName(colName)})
|
||||
valExprs = append(valExprs, &sqlparser.ColName{Name: colName})
|
||||
}
|
||||
return valExprs
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
"github.com/youtube/vitess/go/vt/tabletserver/querytypes"
|
||||
)
|
||||
|
||||
|
@ -46,7 +47,7 @@ func TestSplit1SplitColumn(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table",
|
||||
map[string]interface{}{},
|
||||
[]string{"id"},
|
||||
[]sqlparser.ColIdent{sqlparser.NewColIdent("id")},
|
||||
1000, // numRowsPerQueryPart
|
||||
GetSchema())
|
||||
if err != nil {
|
||||
|
@ -107,7 +108,10 @@ func TestSplit2SplitColumns(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table",
|
||||
map[string]interface{}{},
|
||||
[]string{"id", "user_id"},
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
}, /* splitColumns */
|
||||
1000, // numRowsPerQueryPart
|
||||
GetSchema())
|
||||
if err != nil {
|
||||
|
@ -181,7 +185,11 @@ func TestSplit3SplitColumns(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table",
|
||||
map[string]interface{}{},
|
||||
[]string{"id", "user_id", "user_id2"},
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
sqlparser.NewColIdent("user_id2"),
|
||||
}, /* splitColumns */
|
||||
1000, // numRowsPerQueryPart
|
||||
GetSchema())
|
||||
if err != nil {
|
||||
|
@ -260,7 +268,10 @@ func TestSplitWithWhereClause(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table where name!='foo'",
|
||||
map[string]interface{}{},
|
||||
[]string{"id", "user_id"},
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
}, /* splitColumns */
|
||||
1000, // numRowsPerQueryPart
|
||||
GetSchema())
|
||||
if err != nil {
|
||||
|
@ -334,7 +345,10 @@ func TestSplitWithExistingBindVariables(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table",
|
||||
map[string]interface{}{"foo": int64(100)},
|
||||
[]string{"id", "user_id"},
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
}, /* splitColumns */
|
||||
1000, // numRowsPerQueryPart
|
||||
GetSchema())
|
||||
if err != nil {
|
||||
|
@ -412,7 +426,10 @@ func TestSplitWithEmptyBoundaryList(t *testing.T) {
|
|||
splitParams, err := NewSplitParamsGivenNumRowsPerQueryPart(
|
||||
"select * from test_table",
|
||||
map[string]interface{}{"foo": int64(100)},
|
||||
[]string{"id", "user_id"},
|
||||
[]sqlparser.ColIdent{
|
||||
sqlparser.NewColIdent("id"),
|
||||
sqlparser.NewColIdent("user_id"),
|
||||
}, /* splitColumns */
|
||||
1000,
|
||||
GetSchema())
|
||||
if err != nil {
|
||||
|
|
|
@ -101,7 +101,7 @@ func (ti *TableInfo) SetPK(colnames []string) error {
|
|||
}
|
||||
if len(ti.Indexes) == 0 {
|
||||
ti.Indexes = make([]*schema.Index, 1)
|
||||
} else if ti.Indexes[0].Name != "PRIMARY" {
|
||||
} else if ti.Indexes[0].Name.Lowered() != "primary" {
|
||||
ti.Indexes = append(ti.Indexes, nil)
|
||||
copy(ti.Indexes[1:], ti.Indexes[:len(ti.Indexes)-1])
|
||||
} // else we replace the current primary key
|
||||
|
@ -136,12 +136,12 @@ func (ti *TableInfo) fetchIndexes(conn *DBConn) error {
|
|||
return nil
|
||||
}
|
||||
pkIndex := ti.Indexes[0]
|
||||
if pkIndex.Name != "PRIMARY" {
|
||||
if pkIndex.Name.Lowered() != "primary" {
|
||||
return nil
|
||||
}
|
||||
ti.PKColumns = make([]int, len(pkIndex.Columns))
|
||||
for i, pkCol := range pkIndex.Columns {
|
||||
ti.PKColumns[i] = ti.FindColumn(pkCol)
|
||||
ti.PKColumns[i] = ti.FindColumn(pkCol.Original())
|
||||
}
|
||||
// Primary key contains all table columns
|
||||
for _, col := range ti.Columns {
|
||||
|
@ -155,7 +155,7 @@ func (ti *TableInfo) fetchIndexes(conn *DBConn) error {
|
|||
for _, c := range pkIndex.Columns {
|
||||
// pk columns may already be part of the index. So,
|
||||
// check before adding.
|
||||
if ti.Indexes[i].FindDataColumn(c) != -1 {
|
||||
if ti.Indexes[i].FindDataColumn(c.Original()) != -1 {
|
||||
continue
|
||||
}
|
||||
ti.Indexes[i].DataColumns = append(ti.Indexes[i].DataColumns, c)
|
||||
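The schema loader changes follow the same rule: compare the index name case-insensitively, but keep the original column spelling when looking columns up. A rough sketch of that flow with simplified types (not the TableInfo API):

package main

import (
	"fmt"
	"strings"
)

type column struct{ Name string }

type index struct {
	Name    string
	Columns []string
}

type table struct {
	Columns []column
	Indexes []index
}

// findColumn looks a column up by name, case-insensitively, and returns its position.
func (t *table) findColumn(name string) int {
	for i, c := range t.Columns {
		if strings.EqualFold(c.Name, name) {
			return i
		}
	}
	return -1
}

// pkColumns resolves the primary-key column positions, mirroring fetchIndexes:
// the first index must be named PRIMARY (any case) for the table to have a PK.
func (t *table) pkColumns() []int {
	if len(t.Indexes) == 0 || strings.ToLower(t.Indexes[0].Name) != "primary" {
		return nil
	}
	pks := make([]int, len(t.Indexes[0].Columns))
	for i, name := range t.Indexes[0].Columns {
		pks[i] = t.findColumn(name)
	}
	return pks
}

func main() {
	t := &table{
		Columns: []column{{"eid"}, {"id"}, {"name"}, {"CamelCase"}},
		Indexes: []index{{Name: "PRIMARY", Columns: []string{"eid", "ID"}}},
	}
	fmt.Println(t.pkColumns()) // [0 1]
}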
@@ -14,6 +14,7 @@ import (
|
||||
log "github.com/golang/glog"
|
||||
"github.com/youtube/vitess/go/acl"
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/history"
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
"github.com/youtube/vitess/go/sqltypes"
|
||||
|
@ -862,6 +863,11 @@ func (tsv *TabletServer) SplitQueryV2(
|
|||
// we don't expect too many of these queries to run concurrently.
|
||||
defer tsv.endRequest(false)
|
||||
|
||||
ciSplitColumns := make([]sqlparser.ColIdent, 0, len(splitColumns))
|
||||
for _, s := range splitColumns {
|
||||
ciSplitColumns = append(ciSplitColumns, sqlparser.NewColIdent(s))
|
||||
}
|
||||
|
||||
if err := validateSplitQueryParameters(
|
||||
target,
|
||||
sql,
|
||||
|
@ -875,7 +881,7 @@ func (tsv *TabletServer) SplitQueryV2(
|
|||
}
|
||||
schema := getSchemaForSplitQuery(tsv.qe.schemaInfo)
|
||||
splitParams, err := createSplitParams(
|
||||
sql, bindVariables, splitColumns, splitCount, numRowsPerQueryPart, schema)
|
||||
sql, bindVariables, ciSplitColumns, splitCount, numRowsPerQueryPart, schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -953,7 +959,7 @@ func validateSplitQueryParameters(
|
|||
func createSplitParams(
|
||||
sql string,
|
||||
bindVariables map[string]interface{},
|
||||
splitColumns []string,
|
||||
splitColumns []sqlparser.ColIdent,
|
||||
splitCount int64,
|
||||
numRowsPerQueryPart int64,
|
||||
schema map[string]*schema.Table,
|
||||
|
@ -1328,7 +1334,7 @@ func withTimeout(ctx context.Context, timeout time.Duration) (context.Context, c
|
|||
return context.WithTimeout(ctx, timeout)
|
||||
}
|
||||
|
||||
func getColumnType(qre *QueryExecutor, columnName, tableName string) (querypb.Type, error) {
|
||||
func getColumnType(qre *QueryExecutor, columnName cistring.CIString, tableName string) (querypb.Type, error) {
|
||||
conn, err := qre.getConn(qre.qe.connPool)
|
||||
if err != nil {
|
||||
return sqltypes.Null, err
|
||||
|
@ -1348,7 +1354,7 @@ func getColumnType(qre *QueryExecutor, columnName, tableName string) (querypb.Ty
|
|||
return result.Fields[0].Type, nil
|
||||
}
|
||||
|
||||
func getColumnMinMax(qre *QueryExecutor, columnName, tableName string) (*sqltypes.Result, error) {
|
||||
func getColumnMinMax(qre *QueryExecutor, columnName cistring.CIString, tableName string) (*sqltypes.Result, error) {
|
||||
conn, err := qre.getConn(qre.qe.connPool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -22,6 +22,9 @@ type BroadcastData struct {
|
|||
|
||||
// RealtimeStats stores the last broadcast stats.
|
||||
RealtimeStats querypb.RealtimeStats
|
||||
|
||||
// Serving contains the QueryServiceEnabled flag
|
||||
Serving bool
|
||||
}
|
||||
|
||||
// StateChange stores the state the controller changed to.
|
||||
|
@ -41,8 +44,8 @@ type Controller struct {
|
|||
// QueryServiceEnabled is a state variable
|
||||
QueryServiceEnabled bool
|
||||
|
||||
// InitDBConfigError is the return value for InitDBConfig
|
||||
InitDBConfigError error
|
||||
// IsInLameduck is a state variable
|
||||
IsInLameduck bool
|
||||
|
||||
// SetServingTypeError is the return value for SetServingType
|
||||
SetServingTypeError error
|
||||
|
@ -64,7 +67,6 @@ type Controller struct {
|
|||
func NewController() *Controller {
|
||||
return &Controller{
|
||||
QueryServiceEnabled: false,
|
||||
InitDBConfigError: nil,
|
||||
IsHealthyError: nil,
|
||||
ReloadSchemaCount: 0,
|
||||
BroadcastData: make(chan *BroadcastData, 10),
|
||||
|
@ -82,13 +84,8 @@ func (tqsc *Controller) AddStatusPart() {
|
|||
|
||||
// InitDBConfig is part of the tabletserver.Controller interface
|
||||
func (tqsc *Controller) InitDBConfig(target querypb.Target, dbConfigs dbconfigs.DBConfigs, schemaOverrides []tabletserver.SchemaOverride, mysqld mysqlctl.MysqlDaemon) error {
|
||||
if tqsc.InitDBConfigError == nil {
|
||||
tqsc.CurrentTarget = target
|
||||
tqsc.QueryServiceEnabled = true
|
||||
} else {
|
||||
tqsc.QueryServiceEnabled = false
|
||||
}
|
||||
return tqsc.InitDBConfigError
|
||||
tqsc.CurrentTarget = target
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetServingType is part of the tabletserver.Controller interface
|
||||
|
@ -105,6 +102,7 @@ func (tqsc *Controller) SetServingType(tabletType topodatapb.TabletType, serving
|
|||
TabletType: tabletType,
|
||||
}
|
||||
}
|
||||
tqsc.IsInLameduck = false
|
||||
return stateChanged, tqsc.SetServingTypeError
|
||||
}
|
||||
|
||||
|
@ -155,9 +153,11 @@ func (tqsc *Controller) BroadcastHealth(terTimestamp int64, stats *querypb.Realt
|
|||
tqsc.BroadcastData <- &BroadcastData{
|
||||
TERTimestamp: terTimestamp,
|
||||
RealtimeStats: *stats,
|
||||
Serving: tqsc.QueryServiceEnabled && (!tqsc.IsInLameduck),
|
||||
}
|
||||
}
|
||||
|
||||
// EnterLameduck implements tabletserver.Controller.
|
||||
func (tqsc *Controller) EnterLameduck() {
|
||||
tqsc.IsInLameduck = true
|
||||
}
|
||||
|
|
|
@ -55,18 +55,6 @@ func EndPointEquality(left, right *topodatapb.EndPoint) bool {
return false
}
}
if len(left.HealthMap) != len(right.HealthMap) {
return false
}
for key, lvalue := range left.HealthMap {
rvalue, ok := right.HealthMap[key]
if !ok {
return false
}
if lvalue != rvalue {
return false
}
}
return true
}
|
||||
|
||||
|
|
|
@ -6,7 +6,6 @@ package topo
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
@ -21,16 +20,6 @@ import (
|
|||
"github.com/youtube/vitess/go/vt/topo/topoproto"
|
||||
)
|
||||
|
||||
const (
|
||||
// ReplicationLag is the key in the health map to indicate high
|
||||
// replication lag
|
||||
ReplicationLag = "replication_lag"
|
||||
|
||||
// ReplicationLagHigh is the value in the health map to indicate high
|
||||
// replication lag
|
||||
ReplicationLagHigh = "high"
|
||||
)
|
||||
|
||||
// IsTrivialTypeChange returns whether this db type can be trivially reassigned
|
||||
// without changes to the replication graph
|
||||
func IsTrivialTypeChange(oldTabletType, newTabletType topodatapb.TabletType) bool {
|
||||
|
@ -61,7 +50,30 @@ func IsInServingGraph(tt topodatapb.TabletType) bool {
|
|||
// IsRunningQueryService returns if a tablet is running the query service
|
||||
func IsRunningQueryService(tt topodatapb.TabletType) bool {
|
||||
switch tt {
|
||||
case topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, topodatapb.TabletType_WORKER:
|
||||
case topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, topodatapb.TabletType_EXPERIMENTAL, topodatapb.TabletType_WORKER:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsSubjectToLameduck returns whether a tablet is subject to being
// lameduck. Lameduck is a transition period where we are still
// allowed to serve, but we tell the clients we are going away
// soon. Typically, a vttablet will still serve, but broadcast a
// non-serving state through its health check. Then vtgate will catch
// that non-serving state, and stop sending queries.
//
// Masters are not subject to lameduck, as we usually want to transition
// them as fast as possible.
//
// Replica and rdonly will use lameduck when going from healthy to
// unhealthy (either because health check fails, or they're shutting down).
//
// Other types are probably not serving user visible traffic, so they
// need to transition as fast as possible too.
func IsSubjectToLameduck(tt topodatapb.TabletType) bool {
switch tt {
case topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY:
return true
}
return false
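For illustration only, a minimal sketch of how a shutdown path might consult IsSubjectToLameduck as described in the comment above. The broadcastNotServing callback and lameduckPeriod value are hypothetical names introduced for this example, and the topo import path is assumed from the surrounding code; this is not part of the diff.

package main

import (
	"time"

	topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
	"github.com/youtube/vitess/go/vt/topo"
)

// shutdownTablet is a hypothetical helper: replica/rdonly tablets keep serving
// during a lameduck period while advertising a non-serving state, so vtgate
// can drain traffic; masters and other types stop immediately.
func shutdownTablet(tt topodatapb.TabletType, broadcastNotServing func(), lameduckPeriod time.Duration) {
	if topo.IsSubjectToLameduck(tt) {
		broadcastNotServing()
		time.Sleep(lameduckPeriod)
	}
}
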
@ -111,12 +123,6 @@ func TabletEndPoint(tablet *topodatapb.Tablet) (*topodatapb.EndPoint, error) {
|
|||
entry.PortMap[name] = int32(port)
|
||||
}
|
||||
|
||||
if len(tablet.HealthMap) > 0 {
|
||||
entry.HealthMap = make(map[string]string, len(tablet.HealthMap))
|
||||
for k, v := range tablet.HealthMap {
|
||||
entry.HealthMap[k] = v
|
||||
}
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
|
@ -180,16 +186,6 @@ func (ti *TabletInfo) IsSlaveType() bool {
|
|||
return IsSlaveType(ti.Type)
|
||||
}
|
||||
|
||||
// IsHealthEqual compares the two health maps, and
|
||||
// returns true if they're equivalent.
|
||||
func IsHealthEqual(left, right map[string]string) bool {
|
||||
if len(left) == 0 && len(right) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
return reflect.DeepEqual(left, right)
|
||||
}
|
||||
|
||||
// NewTabletInfo returns a TabletInfo basing on tablet with the
|
||||
// version set. This function should be only used by Server
|
||||
// implementations.
|
||||
|
|
|
@ -44,35 +44,14 @@ func ConfigureTabletHook(hk *hook.Hook, tabletAlias *topodatapb.TabletAlias) {
|
|||
hk.ExtraEnv["TABLET_ALIAS"] = topoproto.TabletAliasString(tabletAlias)
|
||||
}
|
||||
|
||||
// changeType is a single iteration of the update loop for ChangeType().
|
||||
func changeType(tablet *topodatapb.Tablet, newType topodatapb.TabletType, health map[string]string) error {
|
||||
tablet.Type = newType
|
||||
if health != nil {
|
||||
if len(health) == 0 {
|
||||
tablet.HealthMap = nil
|
||||
} else {
|
||||
tablet.HealthMap = health
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearHealthMap is a sentinel value that tells ChangeType to clear the health
|
||||
// map, as opposed to passing nil, which will leave the health map unchanged.
|
||||
var ClearHealthMap = make(map[string]string)
|
||||
|
||||
// ChangeType changes the type of the tablet and possibly also updates
|
||||
// the health information for it. Make this external, since these
|
||||
// ChangeType changes the type of the tablet. Make this external, since these
|
||||
// transitions need to be forced from time to time.
|
||||
//
|
||||
// - if health is nil, we don't touch the Tablet's Health record.
|
||||
// - if health is an empty map, we clear the Tablet's Health record.
|
||||
// - if health has values, we overwrite the Tablet's Health record.
|
||||
//
|
||||
// If successful, the updated tablet record is returned.
|
||||
func ChangeType(ctx context.Context, ts topo.Server, tabletAlias *topodatapb.TabletAlias, newType topodatapb.TabletType, health map[string]string) (*topodatapb.Tablet, error) {
|
||||
func ChangeType(ctx context.Context, ts topo.Server, tabletAlias *topodatapb.TabletAlias, newType topodatapb.TabletType) (*topodatapb.Tablet, error) {
|
||||
return ts.UpdateTabletFields(ctx, tabletAlias, func(tablet *topodatapb.Tablet) error {
|
||||
return changeType(tablet, newType, health)
|
||||
tablet.Type = newType
|
||||
return nil
|
||||
})
|
||||
}
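For illustration only, a sketch of a call against the simplified signature above, which now just sets the tablet type and no longer takes a health map. The topotools package path and the makeReplica helper are assumptions made for this example, not part of the diff.

package main

import (
	"golang.org/x/net/context"

	topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
	"github.com/youtube/vitess/go/vt/topo"
	"github.com/youtube/vitess/go/vt/topotools"
)

// makeReplica is a hypothetical caller: with the health-map parameter removed,
// ChangeType only needs the alias and the new type.
func makeReplica(ctx context.Context, ts topo.Server, alias *topodatapb.TabletAlias) (*topodatapb.Tablet, error) {
	return topotools.ChangeType(ctx, ts, alias, topodatapb.TabletType_REPLICA)
}
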
|
||||
|
||||
|
@ -81,16 +60,16 @@ func ChangeType(ctx context.Context, ts topo.Server, tabletAlias *topodatapb.Tab
|
|||
//
|
||||
// Note that oldTablet is only used for its Alias, and to call CheckOwnership().
|
||||
// Other fields in oldTablet have no effect on the update, which will read the
|
||||
// latest tablet record before setting the type and health info (just like
|
||||
// ChangeType() does).
|
||||
// latest tablet record before setting the type (just like ChangeType() does).
|
||||
//
|
||||
// If successful, the updated tablet record is returned.
|
||||
func ChangeOwnType(ctx context.Context, ts topo.Server, oldTablet *topodatapb.Tablet, newType topodatapb.TabletType, health map[string]string) (*topodatapb.Tablet, error) {
|
||||
func ChangeOwnType(ctx context.Context, ts topo.Server, oldTablet *topodatapb.Tablet, newType topodatapb.TabletType) (*topodatapb.Tablet, error) {
|
||||
return ts.UpdateTabletFields(ctx, oldTablet.Alias, func(tablet *topodatapb.Tablet) error {
|
||||
if err := CheckOwnership(oldTablet, tablet); err != nil {
|
||||
return err
|
||||
}
|
||||
return changeType(tablet, newType, health)
|
||||
tablet.Type = newType
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -169,8 +169,8 @@ var commands = []commandGroup{
|
|||
"<tablet alias>",
|
||||
"Reloads the tablet record on the specified tablet."},
|
||||
{"RunHealthCheck", commandRunHealthCheck,
|
||||
"<tablet alias> <target tablet type>",
|
||||
"Runs a health check on a remote tablet with the specified target type."},
|
||||
"<tablet alias>",
|
||||
"Runs a health check on a remote tablet."},
|
||||
{"IgnoreHealthError", commandIgnoreHealthError,
|
||||
"<tablet alias> <ignore regexp>",
|
||||
"Sets the regexp for health check errors to ignore on the specified tablet. The pattern has implicit ^$ anchors. Set to empty string or restart vttablet to stop ignoring anything."},
|
||||
|
@ -881,22 +881,18 @@ func commandRunHealthCheck(ctx context.Context, wr *wrangler.Wrangler, subFlags
|
|||
if err := subFlags.Parse(args); err != nil {
|
||||
return err
|
||||
}
|
||||
if subFlags.NArg() != 2 {
|
||||
return fmt.Errorf("The <tablet alias> and <target tablet type> arguments are required for the RunHealthCheck command.")
|
||||
if subFlags.NArg() != 1 {
|
||||
return fmt.Errorf("The <tablet alias> argument is required for the RunHealthCheck command.")
|
||||
}
|
||||
tabletAlias, err := topoproto.ParseTabletAlias(subFlags.Arg(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
servedType, err := parseTabletType(subFlags.Arg(1), []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return wr.TabletManagerClient().RunHealthCheck(ctx, tabletInfo, servedType)
|
||||
return wr.TabletManagerClient().RunHealthCheck(ctx, tabletInfo)
|
||||
}
|
||||
|
||||
func commandIgnoreHealthError(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
|
||||
|
@ -1412,7 +1408,7 @@ func commandWaitForFilteredReplication(ctx context.Context, wr *wrangler.Wrangle
|
|||
// Always run an explicit healthcheck first to make sure we don't see any outdated values.
|
||||
// This is especially true for tests and automation where there is no pause of multiple seconds
|
||||
// between commands and the periodic healthcheck did not run again yet.
|
||||
if err := wr.TabletManagerClient().RunHealthCheck(ctx, tabletInfo, topodatapb.TabletType_REPLICA); err != nil {
|
||||
if err := wr.TabletManagerClient().RunHealthCheck(ctx, tabletInfo); err != nil {
|
||||
return fmt.Errorf("failed to run explicit healthcheck on tablet: %v err: %v", tabletInfo, err)
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
"github.com/youtube/vitess/go/vt/vtgate/engine"
|
||||
"github.com/youtube/vitess/go/vt/vtgate/vindexes"
|
||||
|
@ -63,13 +64,11 @@ func generateQuery(statement sqlparser.Statement) string {
|
|||
// isIndexChanging returns true if any of the update
|
||||
// expressions modify a vindex column.
|
||||
func isIndexChanging(setClauses sqlparser.UpdateExprs, colVindexes []*vindexes.ColVindex) bool {
|
||||
vindexCols := make([]string, len(colVindexes))
|
||||
for i, index := range colVindexes {
|
||||
vindexCols[i] = index.Col
|
||||
}
|
||||
for _, assignment := range setClauses {
|
||||
if sqlparser.StringIn(string(assignment.Name.Name), vindexCols...) {
|
||||
return true
|
||||
for _, vcol := range colVindexes {
|
||||
if vcol.Col.Equal(cistring.CIString(assignment.Name.Name)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
|
@ -115,7 +114,7 @@ func generateDeleteSubquery(del *sqlparser.Delete, table *vindexes.Table) string
|
|||
prefix := ""
|
||||
for _, cv := range table.Owned {
|
||||
buf.WriteString(prefix)
|
||||
buf.WriteString(cv.Col)
|
||||
buf.WriteString(cv.Col.Original())
|
||||
prefix = ", "
|
||||
}
|
||||
fmt.Fprintf(buf, " from %s", table.Name)
|
||||
|
@ -146,7 +145,7 @@ func getDMLRouting(where *sqlparser.Where, route *engine.Route) error {
|
|||
// getMatch returns the matched value if there is an equality
|
||||
// constraint on the specified column that can be used to
|
||||
// decide on a route.
|
||||
func getMatch(node sqlparser.BoolExpr, col string) interface{} {
|
||||
func getMatch(node sqlparser.BoolExpr, col cistring.CIString) interface{} {
|
||||
filters := splitAndExpression(nil, node)
|
||||
for _, filter := range filters {
|
||||
comparison, ok := filter.(*sqlparser.ComparisonExpr)
|
||||
|
@ -171,7 +170,7 @@ func getMatch(node sqlparser.BoolExpr, col string) interface{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
func nameMatch(node sqlparser.ValExpr, col string) bool {
|
||||
func nameMatch(node sqlparser.ValExpr, col cistring.CIString) bool {
|
||||
colname, ok := node.(*sqlparser.ColName)
|
||||
return ok && string(colname.Name) == col
|
||||
return ok && colname.Name.Equal(sqlparser.ColIdent(col))
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
"github.com/youtube/vitess/go/vt/vtgate/engine"
|
||||
"github.com/youtube/vitess/go/vt/vtgate/vindexes"
|
||||
|
@ -61,7 +62,7 @@ func processAliasedTable(tableExpr *sqlparser.AliasedTableExpr, vschema VSchema)
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
alias := sqlparser.SQLName(sqlparser.String(expr))
|
||||
alias := sqlparser.TableIdent(sqlparser.String(expr))
|
||||
astName := expr.Name
|
||||
if tableExpr.As != "" {
|
||||
alias = tableExpr.As
|
||||
|
@ -96,7 +97,7 @@ func processAliasedTable(tableExpr *sqlparser.AliasedTableExpr, vschema VSchema)
|
|||
continue
|
||||
}
|
||||
table.ColVindexes = append(table.ColVindexes, &vindexes.ColVindex{
|
||||
Col: string(colsyms.Alias),
|
||||
Col: cistring.CIString(colsyms.Alias),
|
||||
Vindex: colsyms.Vindex,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
"github.com/youtube/vitess/go/vt/vtgate/engine"
|
||||
"github.com/youtube/vitess/go/vt/vtgate/vindexes"
|
||||
|
@ -78,7 +79,7 @@ func buildIndexPlan(ins *sqlparser.Insert, colVindex *vindexes.ColVindex, route
|
|||
return fmt.Errorf("could not convert val: %s, pos: %d: %v", sqlparser.String(row[pos]), pos, err)
|
||||
}
|
||||
route.Values = append(route.Values.([]interface{}), val)
|
||||
row[pos] = sqlparser.ValArg([]byte(":_" + colVindex.Col))
|
||||
row[pos] = sqlparser.ValArg([]byte(":_" + colVindex.Col.Original()))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -105,17 +106,17 @@ func buildAutoincPlan(ins *sqlparser.Insert, autoinc *vindexes.Autoinc, route *e
|
|||
return nil
|
||||
}
|
||||
|
||||
func findOrInsertPos(ins *sqlparser.Insert, col string) (row sqlparser.ValTuple, pos int) {
|
||||
func findOrInsertPos(ins *sqlparser.Insert, col cistring.CIString) (row sqlparser.ValTuple, pos int) {
|
||||
pos = -1
|
||||
for i, column := range ins.Columns {
|
||||
if col == sqlparser.GetColName(column.(*sqlparser.NonStarExpr).Expr) {
|
||||
if col.Equal(cistring.CIString(sqlparser.GetColName(column.(*sqlparser.NonStarExpr).Expr))) {
|
||||
pos = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if pos == -1 {
|
||||
pos = len(ins.Columns)
|
||||
ins.Columns = append(ins.Columns, &sqlparser.NonStarExpr{Expr: &sqlparser.ColName{Name: sqlparser.SQLName(col)}})
|
||||
ins.Columns = append(ins.Columns, &sqlparser.NonStarExpr{Expr: &sqlparser.ColName{Name: sqlparser.ColIdent(col)}})
|
||||
ins.Rows.(sqlparser.Values)[0] = append(ins.Rows.(sqlparser.Values)[0].(sqlparser.ValTuple), &sqlparser.NullVal{})
|
||||
}
|
||||
return ins.Rows.(sqlparser.Values)[0].(sqlparser.ValTuple), pos
|
||||
|
|
|
@ -38,9 +38,9 @@ func (jt *jointab) Procure(bldr builder, col *sqlparser.ColName, to int) string
i := 0
for {
if !col.Qualifier.IsEmpty() {
joinVar = string(col.Qualifier.Name) + "_" + string(col.Name) + suffix
joinVar = string(col.Qualifier.Name) + "_" + col.Name.Original() + suffix
} else {
joinVar = string(col.Name) + suffix
joinVar = col.Name.Original() + suffix
}
if _, ok := jt.vars[joinVar]; !ok {
break
|
||||
|
|
|
@ -36,7 +36,7 @@ type route struct {
|
|||
ERoute *engine.Route
|
||||
}
|
||||
|
||||
func newRoute(from sqlparser.TableExprs, eroute *engine.Route, table *vindexes.Table, vschema VSchema, alias, astName sqlparser.SQLName) *route {
|
||||
func newRoute(from sqlparser.TableExprs, eroute *engine.Route, table *vindexes.Table, vschema VSchema, alias, astName sqlparser.TableIdent) *route {
|
||||
// We have some circular pointer references here:
|
||||
// The route points to the symtab indicating
|
||||
// the symtab that should be used to resolve symbols
|
||||
|
@ -313,12 +313,12 @@ func (rb *route) computeINPlan(comparison *sqlparser.ComparisonExpr) (opcode eng
|
|||
// PushSelect pushes the select expression into the route.
|
||||
func (rb *route) PushSelect(expr *sqlparser.NonStarExpr, _ *route) (colsym *colsym, colnum int, err error) {
|
||||
colsym = newColsym(rb, rb.Symtab())
|
||||
if expr.As != "" {
|
||||
if expr.As.Original() != "" {
|
||||
colsym.Alias = expr.As
|
||||
}
|
||||
if col, ok := expr.Expr.(*sqlparser.ColName); ok {
|
||||
if colsym.Alias == "" {
|
||||
colsym.Alias = sqlparser.SQLName(sqlparser.String(col))
|
||||
if colsym.Alias.Original() == "" {
|
||||
colsym.Alias = sqlparser.NewColIdent(sqlparser.String(col))
|
||||
}
|
||||
colsym.Vindex = rb.Symtab().Vindex(col, rb, true)
|
||||
colsym.Underlying = newColref(col)
|
||||
|
@ -335,7 +335,7 @@ func (rb *route) PushSelect(expr *sqlparser.NonStarExpr, _ *route) (colsym *cols
|
|||
// PushStar pushes the '*' expression into the route.
|
||||
func (rb *route) PushStar(expr *sqlparser.StarExpr) *colsym {
|
||||
colsym := newColsym(rb, rb.Symtab())
|
||||
colsym.Alias = sqlparser.SQLName(sqlparser.String(expr))
|
||||
colsym.Alias = sqlparser.NewColIdent(sqlparser.String(expr))
|
||||
rb.Select.SelectExprs = append(rb.Select.SelectExprs, expr)
|
||||
rb.Colsyms = append(rb.Colsyms, colsym)
|
||||
return colsym
|
||||
|
@ -517,7 +517,7 @@ func (rb *route) SupplyCol(ref colref) int {
|
|||
}
|
||||
ts := ref.Meta.(*tabsym)
|
||||
rb.Colsyms = append(rb.Colsyms, &colsym{
|
||||
Alias: ts.Alias + "." + ref.Name,
|
||||
Alias: sqlparser.NewColIdent(string(ts.Alias) + "." + ref.Name),
|
||||
Underlying: ref,
|
||||
})
|
||||
rb.Select.SelectExprs = append(
|
||||
|
@ -526,7 +526,7 @@ func (rb *route) SupplyCol(ref colref) int {
|
|||
Expr: &sqlparser.ColName{
|
||||
Metadata: ref.Meta,
|
||||
Qualifier: &sqlparser.TableName{Name: ts.ASTName},
|
||||
Name: ref.Name,
|
||||
Name: sqlparser.NewColIdent(ref.Name),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
|
|
@ -7,6 +7,7 @@ package planbuilder
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
"github.com/youtube/vitess/go/vt/sqlparser"
|
||||
"github.com/youtube/vitess/go/vt/vtgate/vindexes"
|
||||
)
|
||||
|
@ -62,7 +63,7 @@ func newSymtab(vschema VSchema) *symtab {
|
|||
}
|
||||
|
||||
// AddAlias adds a table alias to symtab.
|
||||
func (st *symtab) AddAlias(alias, astName sqlparser.SQLName, table *vindexes.Table, rb *route) error {
|
||||
func (st *symtab) AddAlias(alias, astName sqlparser.TableIdent, table *vindexes.Table, rb *route) error {
|
||||
if found := st.findTable(alias); found != nil {
|
||||
return fmt.Errorf("duplicate symbol: %s", alias)
|
||||
}
|
||||
|
@ -99,7 +100,7 @@ func (st *symtab) Merge(newsyms *symtab) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (st *symtab) findTable(alias sqlparser.SQLName) *tabsym {
|
||||
func (st *symtab) findTable(alias sqlparser.TableIdent) *tabsym {
|
||||
for i, t := range st.tables {
|
||||
if t.Alias == alias {
|
||||
return st.tables[i]
|
||||
|
@ -137,13 +138,13 @@ func (st *symtab) Find(col *sqlparser.ColName, autoResolve bool) (rb *route, isL
|
|||
return m.Route(), m.Symtab() == st, nil
|
||||
}
|
||||
if len(st.Colsyms) != 0 {
|
||||
name := sqlparser.SQLName(sqlparser.String(col))
|
||||
starname := sqlparser.SQLName(sqlparser.String(&sqlparser.ColName{
|
||||
Name: "*",
|
||||
name := sqlparser.String(col)
|
||||
starname := sqlparser.String(&sqlparser.ColName{
|
||||
Name: sqlparser.NewColIdent("*"),
|
||||
Qualifier: col.Qualifier,
|
||||
}))
|
||||
})
|
||||
for _, colsym := range st.Colsyms {
|
||||
if name == colsym.Alias || starname == colsym.Alias || colsym.Alias == "*" {
|
||||
if colsym.Alias.EqualString(name) || colsym.Alias.EqualString(starname) || colsym.Alias.EqualString("*") {
|
||||
col.Metadata = colsym
|
||||
return colsym.Route(), true, nil
|
||||
}
|
||||
|
@ -158,7 +159,7 @@ func (st *symtab) Find(col *sqlparser.ColName, autoResolve bool) (rb *route, isL
|
|||
}
|
||||
return nil, false, fmt.Errorf("symbol %s not found", sqlparser.String(col))
|
||||
}
|
||||
qualifier := sqlparser.SQLName(sqlparser.String(col.Qualifier))
|
||||
qualifier := sqlparser.TableIdent(sqlparser.String(col.Qualifier))
|
||||
if qualifier == "" && autoResolve && len(st.tables) == 1 {
|
||||
for _, t := range st.tables {
|
||||
qualifier = t.Alias
|
||||
|
@ -231,8 +232,8 @@ type sym interface {
|
|||
// from the table name, which is something that VTTablet and MySQL
|
||||
// can't recognize.
|
||||
type tabsym struct {
|
||||
Alias sqlparser.SQLName
|
||||
ASTName sqlparser.SQLName
|
||||
Alias sqlparser.TableIdent
|
||||
ASTName sqlparser.TableIdent
|
||||
route *route
|
||||
symtab *symtab
|
||||
Keyspace *vindexes.Keyspace
|
||||
|
@ -242,7 +243,7 @@ type tabsym struct {
|
|||
func (t *tabsym) newColRef(col *sqlparser.ColName) colref {
|
||||
return colref{
|
||||
Meta: t,
|
||||
Name: col.Name,
|
||||
Name: col.Name.Lowered(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -255,9 +256,9 @@ func (t *tabsym) Symtab() *symtab {
|
|||
}
|
||||
|
||||
// FindVindex returns the vindex if one was found for the column.
|
||||
func (t *tabsym) FindVindex(name sqlparser.SQLName) vindexes.Vindex {
|
||||
func (t *tabsym) FindVindex(name sqlparser.ColIdent) vindexes.Vindex {
|
||||
for _, colVindex := range t.ColVindexes {
|
||||
if string(name) == colVindex.Col {
|
||||
if colVindex.Col.Equal(cistring.CIString(name)) {
|
||||
return colVindex.Vindex
|
||||
}
|
||||
}
|
||||
|
@ -274,7 +275,7 @@ func (t *tabsym) FindVindex(name sqlparser.SQLName) vindexes.Vindex {
|
|||
// is set to the column it refers. If the referenced column has a Vindex,
|
||||
// the Vindex field is also accordingly set.
|
||||
type colsym struct {
|
||||
Alias sqlparser.SQLName
|
||||
Alias sqlparser.ColIdent
|
||||
route *route
|
||||
symtab *symtab
|
||||
Underlying colref
|
||||
|
@ -312,7 +313,7 @@ func (cs *colsym) Symtab() *symtab {
|
|||
// or colsym. This representation makes a colref unambiguous.
|
||||
type colref struct {
|
||||
Meta sym
|
||||
Name sqlparser.SQLName
|
||||
Name string
|
||||
}
|
||||
|
||||
// newColref builds a colref from a sqlparser.ColName that was
|
||||
|
|
|
@ -64,7 +64,6 @@ type endPointCounters struct {
|
|||
emptyResults *stats.MultiCounters
|
||||
remoteQueries *stats.MultiCounters
|
||||
numberReturned *stats.MultiCounters
|
||||
degradedResults *stats.MultiCounters
|
||||
cacheHits *stats.MultiCounters
|
||||
remoteLookups *stats.MultiCounters
|
||||
remoteLookupErrors *stats.MultiCounters
|
||||
|
@ -79,7 +78,6 @@ func newEndPointCounters(counterPrefix string) *endPointCounters {
|
|||
errors: stats.NewMultiCounters(counterPrefix+"EndPointErrorCount", labels),
|
||||
emptyResults: stats.NewMultiCounters(counterPrefix+"EndPointEmptyResultCount", labels),
|
||||
numberReturned: stats.NewMultiCounters(counterPrefix+"EndPointsReturnedCount", labels),
|
||||
degradedResults: stats.NewMultiCounters(counterPrefix+"EndPointDegradedResultCount", labels),
|
||||
cacheHits: stats.NewMultiCounters(counterPrefix+"EndPointCacheHitCount", labels),
|
||||
remoteQueries: stats.NewMultiCounters(counterPrefix+"EndPointRemoteQueryCount", labels),
|
||||
remoteLookups: stats.NewMultiCounters(counterPrefix+"EndPointRemoteLookupCount", labels),
|
||||
|
@ -176,47 +174,10 @@ type endPointsEntry struct {
|
|||
// value is the end points that were returned to the client.
|
||||
value *topodatapb.EndPoints
|
||||
|
||||
// originalValue is the end points that were returned from
|
||||
// the topology server.
|
||||
originalValue *topodatapb.EndPoints
|
||||
|
||||
lastError error
|
||||
lastErrorCtx context.Context
|
||||
}
|
||||
|
||||
func endPointIsHealthy(ep *topodatapb.EndPoint) bool {
|
||||
// if we are behind on replication, we're not 100% healthy
|
||||
return ep.HealthMap == nil || ep.HealthMap[topo.ReplicationLag] != topo.ReplicationLagHigh
|
||||
}
|
||||
|
||||
// filterUnhealthyServers removes the unhealthy servers from the list,
|
||||
// unless all servers are unhealthy, then it keeps them all.
|
||||
func filterUnhealthyServers(endPoints *topodatapb.EndPoints) *topodatapb.EndPoints {
|
||||
|
||||
// no endpoints, return right away
|
||||
if endPoints == nil || len(endPoints.Entries) == 0 {
|
||||
return endPoints
|
||||
}
|
||||
|
||||
healthyEndPoints := make([]*topodatapb.EndPoint, 0, len(endPoints.Entries))
|
||||
for _, ep := range endPoints.Entries {
|
||||
// if we are behind on replication, we're not 100% healthy
|
||||
if !endPointIsHealthy(ep) {
|
||||
continue
|
||||
}
|
||||
|
||||
healthyEndPoints = append(healthyEndPoints, ep)
|
||||
}
|
||||
|
||||
// we have healthy guys, we return them
|
||||
if len(healthyEndPoints) > 0 {
|
||||
return &topodatapb.EndPoints{Entries: healthyEndPoints}
|
||||
}
|
||||
|
||||
// we only have unhealthy guys, return them
|
||||
return endPoints
|
||||
}
|
||||
|
||||
// NewResilientSrvTopoServer creates a new ResilientSrvTopoServer
|
||||
// based on the provided topo.Server.
|
||||
func NewResilientSrvTopoServer(base topo.Server, counterPrefix string) *ResilientSrvTopoServer {
|
||||
|
@ -486,11 +447,6 @@ func (server *ResilientSrvTopoServer) GetEndPoints(ctx context.Context, cell, ke
|
|||
return
|
||||
}
|
||||
server.endPointCounters.numberReturned.Add(key, int64(len(result.Entries)))
|
||||
// We either serve all healthy endpoints or all degraded endpoints, so the first entry is representative.
|
||||
if !endPointIsHealthy(result.Entries[0]) {
|
||||
server.endPointCounters.degradedResults.Add(key, 1)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
// If the entry is fresh enough, return it
|
||||
|
@ -538,8 +494,7 @@ func (server *ResilientSrvTopoServer) GetEndPoints(ctx context.Context, cell, ke
|
|||
|
||||
// save the value we got and the current time in the cache
|
||||
entry.insertionTime = time.Now()
|
||||
entry.originalValue = result
|
||||
entry.value = filterUnhealthyServers(result)
|
||||
entry.value = result
|
||||
entry.lastError = err
|
||||
entry.lastErrorCtx = newCtx
|
||||
entry.remote = remote
|
||||
|
@ -679,64 +634,38 @@ func (sscsl SrvShardCacheStatusList) Swap(i, j int) {
|
|||
|
||||
// EndPointsCacheStatus is the current value for an EndPoints object
|
||||
type EndPointsCacheStatus struct {
|
||||
Cell string
|
||||
Keyspace string
|
||||
Shard string
|
||||
TabletType topodatapb.TabletType
|
||||
Value *topodatapb.EndPoints
|
||||
OriginalValue *topodatapb.EndPoints
|
||||
LastError error
|
||||
LastErrorCtx context.Context
|
||||
Cell string
|
||||
Keyspace string
|
||||
Shard string
|
||||
TabletType topodatapb.TabletType
|
||||
Value *topodatapb.EndPoints
|
||||
LastError error
|
||||
LastErrorCtx context.Context
|
||||
}
|
||||
|
||||
// StatusAsHTML returns an HTML version of our status.
|
||||
// It works best if there is data in the cache.
|
||||
func (st *EndPointsCacheStatus) StatusAsHTML() template.HTML {
|
||||
ovl := 0
|
||||
if st.OriginalValue != nil {
|
||||
ovl = len(st.OriginalValue.Entries)
|
||||
}
|
||||
vl := 0
|
||||
if st.Value != nil {
|
||||
vl = len(st.Value.Entries)
|
||||
if st.Value == nil || len(st.Value.Entries) == 0 {
|
||||
return template.HTML("<b>No endpoints</b>")
|
||||
}
|
||||
|
||||
// Assemble links to individual endpoints
|
||||
epLinks := "{ "
|
||||
if ovl > 0 {
|
||||
for _, ove := range st.OriginalValue.Entries {
|
||||
healthColor := "red"
|
||||
var vtPort int32
|
||||
if vl > 0 {
|
||||
for _, ve := range st.Value.Entries {
|
||||
if ove.Uid == ve.Uid {
|
||||
ok := false
|
||||
if vtPort, ok = ve.PortMap["vt"]; ok {
|
||||
// EndPoint is healthy
|
||||
healthColor = "green"
|
||||
if len(ve.HealthMap) > 0 {
|
||||
// EndPoint is half healthy
|
||||
healthColor = "orange"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
epLinks += fmt.Sprintf(
|
||||
"<a href=\"http://%v:%d\" style=\"color:%v\">%v:%d</a> ",
|
||||
ove.Host, vtPort, healthColor, ove.Host, vtPort)
|
||||
for _, ve := range st.Value.Entries {
|
||||
healthColor := "red"
|
||||
var vtPort int32
|
||||
var ok bool
|
||||
if vtPort, ok = ve.PortMap["vt"]; ok {
|
||||
// EndPoint is healthy
|
||||
healthColor = "green"
|
||||
}
|
||||
epLinks += fmt.Sprintf(
|
||||
"<a href=\"http://%v:%d\" style=\"color:%v\">%v:%d</a> ",
|
||||
ve.Host, vtPort, healthColor, ve.Host, vtPort)
|
||||
}
|
||||
epLinks += "}"
|
||||
if ovl == vl {
|
||||
if vl == 0 {
|
||||
return template.HTML(fmt.Sprintf("<b>No healthy endpoints</b>, %v", epLinks))
|
||||
}
|
||||
if len(st.OriginalValue.Entries[0].HealthMap) > 0 {
|
||||
return template.HTML(fmt.Sprintf("<b>Serving from %v degraded endpoints</b>, %v", vl, epLinks))
|
||||
}
|
||||
return template.HTML(fmt.Sprintf("All %v endpoints are healthy, %v", vl, epLinks))
|
||||
}
|
||||
return template.HTML(fmt.Sprintf("Serving from %v healthy endpoints out of %v, %v", vl, ovl, epLinks))
|
||||
return template.HTML(fmt.Sprintf("Serving from %v healthy endpoints: %v", len(st.Value.Entries), epLinks))
|
||||
}
|
||||
|
||||
// EndPointsCacheStatusList is used for sorting
|
||||
|
@ -810,14 +739,13 @@ func (server *ResilientSrvTopoServer) CacheStatus() *ResilientSrvTopoServerCache
|
|||
for _, entry := range server.endPointsCache {
|
||||
entry.mutex.Lock()
|
||||
result.EndPoints = append(result.EndPoints, &EndPointsCacheStatus{
|
||||
Cell: entry.cell,
|
||||
Keyspace: entry.keyspace,
|
||||
Shard: entry.shard,
|
||||
TabletType: entry.tabletType,
|
||||
Value: entry.value,
|
||||
OriginalValue: entry.originalValue,
|
||||
LastError: entry.lastError,
|
||||
LastErrorCtx: entry.lastErrorCtx,
|
||||
Cell: entry.cell,
|
||||
Keyspace: entry.keyspace,
|
||||
Shard: entry.shard,
|
||||
TabletType: entry.tabletType,
|
||||
Value: entry.value,
|
||||
LastError: entry.lastError,
|
||||
LastErrorCtx: entry.lastErrorCtx,
|
||||
})
|
||||
entry.mutex.Unlock()
|
||||
}
|
||||
|
|
|
@ -6,7 +6,6 @@ package vtgate
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -19,179 +18,6 @@ import (
|
|||
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
|
||||
)
|
||||
|
||||
func TestFilterUnhealthy(t *testing.T) {
|
||||
cases := []struct {
|
||||
source *topodatapb.EndPoints
|
||||
want *topodatapb.EndPoints
|
||||
}{
|
||||
{
|
||||
source: nil,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
source: &topodatapb.EndPoints{},
|
||||
want: &topodatapb.EndPoints{Entries: nil},
|
||||
},
|
||||
{
|
||||
source: &topodatapb.EndPoints{Entries: []*topodatapb.EndPoint{}},
|
||||
want: &topodatapb.EndPoints{Entries: []*topodatapb.EndPoint{}},
|
||||
},
|
||||
{
|
||||
// All are healthy and all should be returned.
|
||||
source: &topodatapb.EndPoints{
|
||||
Entries: []*topodatapb.EndPoint{
|
||||
{
|
||||
Uid: 1,
|
||||
HealthMap: nil,
|
||||
},
|
||||
{
|
||||
Uid: 2,
|
||||
HealthMap: map[string]string{},
|
||||
},
|
||||
{
|
||||
Uid: 3,
|
||||
HealthMap: map[string]string{
|
||||
"Random": "Value1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 4,
|
||||
HealthMap: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &topodatapb.EndPoints{
|
||||
Entries: []*topodatapb.EndPoint{
|
||||
{
|
||||
Uid: 1,
|
||||
HealthMap: nil,
|
||||
},
|
||||
{
|
||||
Uid: 2,
|
||||
HealthMap: map[string]string{},
|
||||
},
|
||||
{
|
||||
Uid: 3,
|
||||
HealthMap: map[string]string{
|
||||
"Random": "Value1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 4,
|
||||
HealthMap: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// 4 is unhealthy, it should be filtered out.
|
||||
source: &topodatapb.EndPoints{
|
||||
Entries: []*topodatapb.EndPoint{
|
||||
{
|
||||
Uid: 1,
|
||||
HealthMap: nil,
|
||||
},
|
||||
{
|
||||
Uid: 2,
|
||||
HealthMap: map[string]string{},
|
||||
},
|
||||
{
|
||||
Uid: 3,
|
||||
HealthMap: map[string]string{
|
||||
"Random": "Value2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 4,
|
||||
HealthMap: map[string]string{
|
||||
topo.ReplicationLag: topo.ReplicationLagHigh,
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 5,
|
||||
HealthMap: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &topodatapb.EndPoints{
|
||||
Entries: []*topodatapb.EndPoint{
|
||||
{
|
||||
Uid: 1,
|
||||
HealthMap: nil,
|
||||
},
|
||||
{
|
||||
Uid: 2,
|
||||
HealthMap: map[string]string{},
|
||||
},
|
||||
{
|
||||
Uid: 3,
|
||||
HealthMap: map[string]string{
|
||||
"Random": "Value2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 5,
|
||||
HealthMap: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// Only unhealthy servers, return all of them.
|
||||
source: &topodatapb.EndPoints{
|
||||
Entries: []*topodatapb.EndPoint{
|
||||
{
|
||||
Uid: 1,
|
||||
HealthMap: map[string]string{
|
||||
topo.ReplicationLag: topo.ReplicationLagHigh,
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 2,
|
||||
HealthMap: map[string]string{
|
||||
topo.ReplicationLag: topo.ReplicationLagHigh,
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 3,
|
||||
HealthMap: map[string]string{
|
||||
topo.ReplicationLag: topo.ReplicationLagHigh,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &topodatapb.EndPoints{
|
||||
Entries: []*topodatapb.EndPoint{
|
||||
{
|
||||
Uid: 1,
|
||||
HealthMap: map[string]string{
|
||||
topo.ReplicationLag: topo.ReplicationLagHigh,
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 2,
|
||||
HealthMap: map[string]string{
|
||||
topo.ReplicationLag: topo.ReplicationLagHigh,
|
||||
},
|
||||
},
|
||||
{
|
||||
Uid: 3,
|
||||
HealthMap: map[string]string{
|
||||
topo.ReplicationLag: topo.ReplicationLagHigh,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if got := filterUnhealthyServers(c.source); !reflect.DeepEqual(got, c.want) {
|
||||
t.Errorf("filterUnhealthy(%+v)=%+v, want %+v", c.source, got, c.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fakeTopo is used in testing ResilientSrvTopoServer logic.
|
||||
// It returns errors for everything except the one keyspace.
|
||||
type fakeTopo struct {
|
||||
|
@ -258,8 +84,7 @@ func (ft *fakeTopoRemoteMaster) GetEndPoints(ctx context.Context, cell, keyspace
|
|||
return &topodatapb.EndPoints{
|
||||
Entries: []*topodatapb.EndPoint{
|
||||
{
|
||||
Uid: 0,
|
||||
HealthMap: nil,
|
||||
Uid: 0,
|
||||
},
|
||||
},
|
||||
}, -1, nil
|
||||
|
@ -267,8 +92,7 @@ func (ft *fakeTopoRemoteMaster) GetEndPoints(ctx context.Context, cell, keyspace
|
|||
return &topodatapb.EndPoints{
|
||||
Entries: []*topodatapb.EndPoint{
|
||||
{
|
||||
Uid: 1,
|
||||
HealthMap: nil,
|
||||
Uid: 1,
|
||||
},
|
||||
},
|
||||
}, -1, nil
|
||||
|
|
|
@ -520,7 +520,7 @@ func (rtr *Router) handleGenerate(vcursor *requestContext, gen *engine.Generate)
|
|||
|
||||
func (rtr *Router) handlePrimary(vcursor *requestContext, vindexKey interface{}, colVindex *vindexes.ColVindex, bv map[string]interface{}) (ksid []byte, err error) {
|
||||
if vindexKey == nil {
|
||||
return nil, fmt.Errorf("value must be supplied for column %s", colVindex.Col)
|
||||
return nil, fmt.Errorf("value must be supplied for column %v", colVindex.Col)
|
||||
}
|
||||
mapper := colVindex.Vindex.(vindexes.Unique)
|
||||
ksids, err := mapper.Map(vcursor, []interface{}{vindexKey})
|
||||
|
@ -531,14 +531,14 @@ func (rtr *Router) handlePrimary(vcursor *requestContext, vindexKey interface{},
|
|||
if len(ksid) == 0 {
|
||||
return nil, fmt.Errorf("could not map %v to a keyspace id", vindexKey)
|
||||
}
|
||||
bv["_"+colVindex.Col] = vindexKey
|
||||
bv["_"+colVindex.Col.Original()] = vindexKey
|
||||
return ksid, nil
|
||||
}
|
||||
|
||||
func (rtr *Router) handleNonPrimary(vcursor *requestContext, vindexKey interface{}, colVindex *vindexes.ColVindex, bv map[string]interface{}, ksid []byte) error {
|
||||
if colVindex.Owned {
|
||||
if vindexKey == nil {
|
||||
return fmt.Errorf("value must be supplied for column %s", colVindex.Col)
|
||||
return fmt.Errorf("value must be supplied for column %v", colVindex.Col)
|
||||
}
|
||||
err := colVindex.Vindex.(vindexes.Lookup).Create(vcursor, vindexKey, ksid)
|
||||
if err != nil {
|
||||
|
@ -548,7 +548,7 @@ func (rtr *Router) handleNonPrimary(vcursor *requestContext, vindexKey interface
|
|||
if vindexKey == nil {
|
||||
reversible, ok := colVindex.Vindex.(vindexes.Reversible)
|
||||
if !ok {
|
||||
return fmt.Errorf("value must be supplied for column %s", colVindex.Col)
|
||||
return fmt.Errorf("value must be supplied for column %v", colVindex.Col)
|
||||
}
|
||||
var err error
|
||||
vindexKey, err = reversible.ReverseMap(vcursor, ksid)
|
||||
|
@ -564,11 +564,11 @@ func (rtr *Router) handleNonPrimary(vcursor *requestContext, vindexKey interface
|
|||
return err
|
||||
}
|
||||
if !ok {
|
||||
return fmt.Errorf("value %v for column %s does not map to keyspace id %v", vindexKey, colVindex.Col, hex.EncodeToString(ksid))
|
||||
return fmt.Errorf("value %v for column %v does not map to keyspace id %v", vindexKey, colVindex.Col, hex.EncodeToString(ksid))
|
||||
}
|
||||
}
|
||||
}
|
||||
bv["_"+colVindex.Col] = vindexKey
|
||||
bv["_"+colVindex.Col.Original()] = vindexKey
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -247,10 +247,10 @@ func TestInsertSharded(t *testing.T) {
|
|||
t.Error(err)
|
||||
}
|
||||
wantQueries := []querytypes.BoundQuery{{
|
||||
Sql: "insert into user(id, v, name) values (:_id, 2, :_name) /* vtgate:: keyspace_id:166b40b44aba4bd6 */",
|
||||
Sql: "insert into user(id, v, name) values (:_Id, 2, :_name) /* vtgate:: keyspace_id:166b40b44aba4bd6 */",
|
||||
BindVariables: map[string]interface{}{
|
||||
"keyspace_id": "\x16k@\xb4J\xbaK\xd6",
|
||||
"_id": int64(1),
|
||||
"_Id": int64(1),
|
||||
"_name": []byte("myname"),
|
||||
"__seq": int64(1),
|
||||
},
|
||||
|
@ -279,10 +279,10 @@ func TestInsertSharded(t *testing.T) {
|
|||
t.Error(err)
|
||||
}
|
||||
wantQueries = []querytypes.BoundQuery{{
|
||||
Sql: "insert into user(id, v, name) values (:_id, 2, :_name) /* vtgate:: keyspace_id:4eb190c9a2fa169c */",
|
||||
Sql: "insert into user(id, v, name) values (:_Id, 2, :_name) /* vtgate:: keyspace_id:4eb190c9a2fa169c */",
|
||||
BindVariables: map[string]interface{}{
|
||||
"keyspace_id": "N\xb1\x90ɢ\xfa\x16\x9c",
|
||||
"_id": int64(3),
|
||||
"_Id": int64(3),
|
||||
"__seq": int64(3),
|
||||
"_name": []byte("myname2"),
|
||||
},
|
||||
|
@ -320,10 +320,10 @@ func TestInsertGenerator(t *testing.T) {
|
|||
t.Error(err)
|
||||
}
|
||||
wantQueries := []querytypes.BoundQuery{{
|
||||
Sql: "insert into user(v, name, id) values (2, :_name, :_id) /* vtgate:: keyspace_id:166b40b44aba4bd6 */",
|
||||
Sql: "insert into user(v, name, Id) values (2, :_name, :_Id) /* vtgate:: keyspace_id:166b40b44aba4bd6 */",
|
||||
BindVariables: map[string]interface{}{
|
||||
"keyspace_id": "\x16k@\xb4J\xbaK\xd6",
|
||||
"_id": int64(1),
|
||||
"_Id": int64(1),
|
||||
"__seq": int64(1),
|
||||
"_name": []byte("myname"),
|
||||
},
|
||||
|
|
|
@ -54,7 +54,7 @@ var routerVSchema = `
|
|||
"user": {
|
||||
"ColVindexes": [
|
||||
{
|
||||
"Col": "id",
|
||||
"Col": "Id",
|
||||
"Name": "user_index"
|
||||
},
|
||||
{
|
||||
|
|
|
@ -267,6 +267,26 @@ func TestSelectEqual(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSelectCaseSensitivity(t *testing.T) {
|
||||
router, sbc1, sbc2, _ := createRouterEnv()
|
||||
|
||||
_, err := routerExec(router, "select Id from user where iD = 1", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
wantQueries := []querytypes.BoundQuery{{
|
||||
Sql: "select Id from user where iD = 1",
|
||||
BindVariables: map[string]interface{}{},
|
||||
}}
|
||||
if !reflect.DeepEqual(sbc1.Queries, wantQueries) {
|
||||
t.Errorf("sbc1.Queries: %+v, want %+v\n", sbc1.Queries, wantQueries)
|
||||
}
|
||||
if sbc2.Queries != nil {
|
||||
t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries)
|
||||
}
|
||||
sbc1.Queries = nil
|
||||
}
|
||||
|
||||
func TestSelectEqualNotFound(t *testing.T) {
|
||||
router, _, _, sbclookup := createRouterEnv()
|
||||
|
||||
|
|
|
@ -9,7 +9,8 @@ import (
|
|||
"fmt"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/youtube/vitess/go/cistring"
|
||||
)
|
||||
|
||||
// VSchema represents the denormalized version of VSchemaFormal,
|
||||
|
@ -38,7 +39,7 @@ type Keyspace struct {
|
|||
|
||||
// ColVindex contains the index info for each index of a table.
|
||||
type ColVindex struct {
|
||||
Col string
|
||||
Col cistring.CIString
|
||||
Type string
|
||||
Name string
|
||||
Owned bool
|
||||
|
@ -53,7 +54,7 @@ type KeyspaceSchema struct {
|
|||
|
||||
// Autoinc contains the auto-inc information for a table.
|
||||
type Autoinc struct {
|
||||
Col string
|
||||
Col cistring.CIString
|
||||
Sequence *Table
|
||||
// ColVindexNum is the index of the ColVindex
|
||||
// if the column is also a ColVindex. Otherwise, it's -1.
|
||||
|
@ -165,7 +166,7 @@ func buildTables(source *VSchemaFormal, vschema *VSchema) error {
|
|||
owned = true
|
||||
}
|
||||
columnVindex := &ColVindex{
|
||||
Col: strings.ToLower(ind.Col),
|
||||
Col: cistring.New(ind.Col),
|
||||
Type: vindexInfo.Type,
|
||||
Name: ind.Name,
|
||||
Owned: owned,
|
||||
|
@ -199,7 +200,7 @@ func resolveAutoinc(source *VSchemaFormal, vschema *VSchema) error {
|
|||
if table.Autoinc == nil {
|
||||
continue
|
||||
}
|
||||
t.Autoinc = &Autoinc{Col: table.Autoinc.Col, ColVindexNum: -1}
|
||||
t.Autoinc = &Autoinc{Col: cistring.New(table.Autoinc.Col), ColVindexNum: -1}
|
||||
seq := vschema.tables[table.Autoinc.Sequence]
|
||||
// TODO(sougou): improve this search.
|
||||
if seq == nil {
|
||||
|
@ -207,7 +208,7 @@ func resolveAutoinc(source *VSchemaFormal, vschema *VSchema) error {
|
|||
}
|
||||
t.Autoinc.Sequence = seq
|
||||
for i, cv := range t.ColVindexes {
|
||||
if t.Autoinc.Col == cv.Col {
|
||||
if t.Autoinc.Col.Equal(cv.Col) {
|
||||
t.Autoinc.ColVindexNum = i
|
||||
break
|
||||
}
|
||||
|
|
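For illustration only, a small sketch of the case-insensitive identifier pattern this commit converts column names to. It uses only the cistring methods that appear in the diff above (New, Equal, Original, Lowered); the printed values reflect what those methods are expected to return.

package main

import (
	"fmt"

	"github.com/youtube/vitess/go/cistring"
)

func main() {
	col := cistring.New("CamelCase")
	// Comparisons ignore case, while Original preserves what the user typed.
	fmt.Println(col.Equal(cistring.New("camelcase"))) // expected: true
	fmt.Println(col.Original())                       // expected: CamelCase
	fmt.Println(col.Lowered())                        // expected: camelcase
}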