Merge remote-tracking branch 'upstream/master'

Dean Yasuda 2015-08-20 12:45:05 -07:00
Parent c5eeb8d65c 2bc5ae6f96
Commit 5fa76ff6f5
231 changed files with 4550 additions and 3442 deletions


@ -86,8 +86,7 @@ site_integration_test_files = \
tabletmanager.py \
update_stream.py \
vtdb_test.py \
vtgatev2_test.py \
zkocc_test.py
vtgatev2_test.py
# These tests should be run by developers after making code changes.
# Integration tests are grouped into 3 suites.
@ -98,7 +97,6 @@ small_integration_targets = \
tablet_test \
sql_builder_test \
vertical_split \
vertical_split_vtgate \
schema \
keyspace_test \
keyrange_test \
@ -121,10 +119,7 @@ medium_integration_targets = \
client_test \
vtgate_utils_test \
rowcache_invalidator \
automation_horizontal_resharding
# TODO(mberlin): Remove -v option to worker.py when we found out what causes 10 minute Travis timeouts.
verbose_medium_integration_targets = \
automation_horizontal_resharding \
worker
large_integration_targets = \
@ -142,7 +137,6 @@ worker_integration_test_files = \
resharding.py \
resharding_bytes.py \
vertical_split.py \
vertical_split_vtgate.py \
initial_sharding.py \
initial_sharding_bytes.py \
worker.py
@ -152,19 +146,12 @@ SHELL = /bin/bash
# function to execute a list of integration test files
# exits on first failure
# TODO(mberlin): Remove special handling for worker.py when we found out what causes 10 minute Travis timeouts.
define run_integration_tests
cd test ; \
for t in $1 ; do \
echo $$(date): Running test/$$t... ; \
if [[ $$t == *worker.py* ]]; then \
time ./$$t $$VT_TEST_FLAGS 2>&1 ; \
rc=$$? ; \
else \
output=$$(time ./$$t $$VT_TEST_FLAGS 2>&1) ; \
rc=$$? ; \
fi ; \
if [[ $$rc != 0 ]]; then \
output=$$(time timeout 5m ./$$t $$VT_TEST_FLAGS 2>&1) ; \
if [[ $$? != 0 ]]; then \
echo "$$output" >&2 ; \
exit 1 ; \
fi ; \


@ -17,7 +17,6 @@ select A() from b#select a() from b
select A(B, C) from b#select a(b, c) from b
select A(distinct B, C) from b#select a(distinct b, c) from b
select IF(B, C) from b#select if(b, c) from b
select VALUES(B, C) from b#select values(b, c) from b
select * from b use index (A)#select * from b use index (a)
insert into A(A, B) values (1, 2)#insert into A(a, b) values (1, 2)
CREATE TABLE A#create table A


@ -1,16 +1,18 @@
select !8 from t#syntax error at position 9 near !
select $ from t#syntax error at position 9 near $
select : from t#syntax error at position 9 near :
select 078 from t#syntax error at position 11 near 078
select `1a` from t#syntax error at position 9 near 1
select `:table` from t#syntax error at position 9 near :
select `table:` from t#syntax error at position 14 near table
select 'aa\#syntax error at position 12 near aa
select 'aa#syntax error at position 12 near aa
select * from t where :1 = 2#syntax error at position 24 near :
select * from t where :. = 2#syntax error at position 24 near :
select * from t where ::1 = 2#syntax error at position 25 near ::
select * from t where ::. = 2#syntax error at position 25 near ::
select !8 from t#syntax error at position 9 near '!'
select $ from t#syntax error at position 9 near '$'
select : from t#syntax error at position 9 near ':'
select 078 from t#syntax error at position 11 near '078'
select `1a` from t#syntax error at position 9 near '1'
select `:table` from t#syntax error at position 9 near ':'
select `table:` from t#syntax error at position 14 near 'table'
select 'aa\#syntax error at position 12 near 'aa'
select 'aa#syntax error at position 12 near 'aa'
select * from t where :1 = 2#syntax error at position 24 near ':'
select * from t where :. = 2#syntax error at position 24 near ':'
select * from t where ::1 = 2#syntax error at position 25 near '::'
select * from t where ::. = 2#syntax error at position 25 near '::'
update a set c = values(1)#syntax error at position 24 near 'values'
update a set c = last_insert_id(1)#syntax error at position 32 near 'last_insert_id'
select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(#max nesting level reached at position 406
select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(#syntax error at position 405
select /* aa#syntax error at position 13 near /* aa
select /* aa#syntax error at position 13 near '/* aa'


@ -155,7 +155,7 @@ insert /* value expression list */ into a values (a + 1, 2 * 3)
insert /* column list */ into a(a, b) values (1, 2)
insert /* qualified column list */ into a(a, a.b) values (1, 2)
insert /* select */ into a select b, c from d
insert /* on duplicate */ into a values (1, 2) on duplicate key update b = values(a), c = d
insert /* on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = d
update /* simple */ a set b = 3
update /* a.b */ a.b set b = 3
update /* b.c */ a set b.c = 3


@ -8,6 +8,7 @@
"FullQuery": "select * from a union select * from b",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -28,6 +29,7 @@
"FullQuery": "select distinct * from a limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -48,6 +50,7 @@
"FullQuery": "select * from a group by b limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -68,6 +71,7 @@
"FullQuery": "select * from a having b = 1 limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -88,6 +92,7 @@
"FullQuery": "select * from a limit 5",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -113,6 +118,7 @@
"FullQuery": "select * from a.b limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -133,6 +139,7 @@
"FullQuery": "select * from a, b limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -153,6 +160,7 @@
"FullQuery": "select * from a join b limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -173,6 +181,7 @@
"FullQuery": "select * from a right join b limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -193,6 +202,7 @@
"FullQuery": "select * from b limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -213,6 +223,7 @@
"FullQuery": "select * from (b) limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -233,6 +244,7 @@
"FullQuery": "select :bv from a limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -253,6 +265,7 @@
"FullQuery": "select eid + 1 from a limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -273,6 +286,7 @@
"FullQuery": "select case when eid = 1 then 1 end from a limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -293,6 +307,7 @@
"FullQuery": "select eid from a limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0
@ -315,6 +330,7 @@
"FullQuery": "select eid as foo from a limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0
@ -337,6 +353,7 @@
"FullQuery": "select * from a limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -362,6 +379,7 @@
"FullQuery": "select c.eid from a as c limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0
@ -384,6 +402,7 @@
"FullQuery": "select (eid) from a limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -404,6 +423,7 @@
"FullQuery": "select eid from a limit :#maxLimit for update",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -424,6 +444,7 @@
"FullQuery": "select eid from a limit :#maxLimit lock in share mode",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -444,6 +465,7 @@
"FullQuery": "select * from a where eid = 1 and id in (1, 2) limit :#maxLimit",
"OuterQuery": "select eid, id, name, foo from a where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -469,6 +491,7 @@
"FullQuery": "select * from a where eid = :v1 and id in (:v2, :v3) limit :#maxLimit",
"OuterQuery": "select eid, id, name, foo from a where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -494,6 +517,7 @@
"FullQuery": "select * from a where name = 'foo' limit :#maxLimit",
"OuterQuery": "select eid, id, name, foo from a where :#pk",
"Subquery": "select eid, id from a use index (b_name) where name = 'foo' limit :#maxLimit",
"UpsertQuery": null,
"IndexUsed": "b_name",
"ColumnNumbers": [
0,
@ -519,6 +543,7 @@
"FullQuery": "select eid, name, id from a where name = 'foo' limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "b_name",
"ColumnNumbers": [
0,
@ -543,6 +568,7 @@
"FullQuery": "select * from d where id = 1 limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": "select name from d use index (d_id) where id = 1 limit :#maxLimit",
"UpsertQuery": null,
"IndexUsed": "d_id",
"ColumnNumbers": [
0,
@ -568,6 +594,7 @@
"FullQuery": "select * from d where id = 1 limit 1",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": "select name from d use index (d_id) where id = 1 limit 1",
"UpsertQuery": null,
"IndexUsed": "d_id",
"ColumnNumbers": [
0,
@ -593,6 +620,7 @@
"FullQuery": "select * from a where eid + 1 = 1 limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -618,6 +646,7 @@
"FullQuery": "select * from a where eid = id limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -643,6 +672,7 @@
"FullQuery": "select * from d where name between 'foo' and 'bar' limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -668,6 +698,7 @@
"FullQuery": "select * from a where (eid = 1) and (id = 2) limit :#maxLimit",
"OuterQuery": "select eid, id, name, foo from a where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -696,6 +727,7 @@
"FullQuery": "select * from a where eid = 1 and id = 1 limit :#maxLimit",
"OuterQuery": "select eid, id, name, foo from a where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -724,6 +756,7 @@
"FullQuery": "select * from d where bar = 'foo' and id = 1 limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -749,6 +782,7 @@
"FullQuery": "select * from d where name = 'foo' limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -776,6 +810,7 @@
"FullQuery": "select * from d where name = 'foo' limit 1",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -803,6 +838,7 @@
"FullQuery": "select * from d where name = 'foo' limit 0",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -830,6 +866,7 @@
"FullQuery": "select * from d where name = 'foo' limit :a",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -857,6 +894,7 @@
"FullQuery": "select * from d where name = 'foo' limit 1, 1",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -894,6 +932,7 @@
"FullQuery": "select * from d where 'foo' = name and eid = 1 limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -919,6 +958,7 @@
"FullQuery": "select * from d where name in ('foo', 'bar') limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -947,6 +987,7 @@
"FullQuery": "select * from d where name in (:a, :b) limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -975,6 +1016,7 @@
"FullQuery": "select * from d where name in ('foo') limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -1002,6 +1044,7 @@
"FullQuery": "select * from d where name in (:a) limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -1029,6 +1072,7 @@
"FullQuery": "select * from d where name in (:a) limit 1",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -1054,6 +1098,7 @@
"FullQuery": "select * from a where eid in (1) and id in (1, 2) limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1079,6 +1124,7 @@
"FullQuery": "select * from a where eid in (1, 2) and id in (1, 2) limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1104,6 +1150,7 @@
"FullQuery": "select * from a where (eid, id) in ((1, 1), (2, 2)) limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1129,6 +1176,7 @@
"FullQuery":"select * from d where foo = 'bar' limit :#maxLimit",
"OuterQuery": null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers": [
0,
@ -1154,6 +1202,7 @@
"FullQuery": "select * from d as c where c.name = 'foo' limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d as c where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
@ -1181,6 +1230,7 @@
"FullQuery": "select * from d where id \u003c 0 limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": "select name from d use index (d_id) where id \u003c 0 limit :#maxLimit",
"UpsertQuery": null,
"IndexUsed": "d_id",
"ColumnNumbers": [
0,
@ -1206,6 +1256,7 @@
"FullQuery": "select * from d where name in ('foo', id) limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1231,6 +1282,7 @@
"FullQuery": "select * from d where id between 1 and 2 limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": "select name from d use index (d_id) where id between 1 and 2 limit :#maxLimit",
"UpsertQuery": null,
"IndexUsed": "d_id",
"ColumnNumbers": [
0,
@ -1256,6 +1308,7 @@
"FullQuery": "select * from d where id not between 1 and 2 limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1281,6 +1334,7 @@
"FullQuery": "select * from d where 1 between 1 and 2 limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1306,6 +1360,7 @@
"FullQuery": "select * from d where name is not null limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1331,6 +1386,7 @@
"FullQuery": "select * from a where eid = 1 and id = 1 order by name asc limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1356,6 +1412,7 @@
"FullQuery": "select * from d where bar = 'foo' limit :#maxLimit",
"OuterQuery": "select name, id, foo, bar from d where :#pk",
"Subquery": "select name from d use index (d_bar) where bar = 'foo' limit :#maxLimit",
"UpsertQuery": null,
"IndexUsed": "d_bar",
"ColumnNumbers": [
0,
@ -1381,6 +1438,7 @@
"FullQuery": "select * from d use index (d_bar_never) where bar = 'foo' limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1406,6 +1464,7 @@
"FullQuery": "select * from d force index (d_bar_never) where bar = 'foo' limit :#maxLimit",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1435,6 +1494,7 @@
"FullQuery": "insert into b.a(eid, id) values (1, :a)",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1455,6 +1515,7 @@
"FullQuery": "insert into a(a.eid, id) values (1, 2)",
"OuterQuery": "insert into a(a.eid, id) values (1, 2)",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1482,6 +1543,7 @@
"FullQuery": "insert into a(eid, id) values (1, :a)",
"OuterQuery": "insert into a(eid, id) values (1, :a)",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1505,6 +1567,7 @@
"FullQuery": "insert into a(id) values (1)",
"OuterQuery": "insert into a(id) values (1)",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1528,6 +1591,7 @@
"FullQuery": "insert into d(id) values (1)",
"OuterQuery": "insert into d(id) values (1)",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1554,6 +1618,7 @@
"FullQuery": "insert into a(eid, id) values (-1, 2)",
"OuterQuery": "insert into a(eid, id) values (-1, 2)",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1577,6 +1642,7 @@
"FullQuery": "insert into a(eid, id) values (1, 2)",
"OuterQuery": "insert into a(eid, id) values (1, 2)",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1594,12 +1660,13 @@
"insert into a (eid, id) values (~1, 2)"
{
"PlanId": "PASS_DML",
"Reason": "DEFAULT",
"Reason": "COMPLEX_EXPR",
"TableName": "a",
"FieldQuery": null,
"FullQuery": "insert into a(eid, id) values (~1, 2)",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1614,12 +1681,13 @@
"insert into a (eid, id) values (1+1, 2)"
{
"PlanId": "PASS_DML",
"Reason": "DEFAULT",
"Reason": "COMPLEX_EXPR",
"TableName": "a",
"FieldQuery": null,
"FullQuery": "insert into a(eid, id) values (1 + 1, 2)",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1640,6 +1708,7 @@
"FullQuery": "insert into c(eid, id) values (1, 2)",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1660,6 +1729,7 @@
"FullQuery": "insert into a values (1, 2)",
"OuterQuery": "insert into a values (1, 2)",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1674,18 +1744,19 @@
}
# on dup
"insert into b (eid, id) values (1, 2) on duplicate key update name = values(a)"
"insert into b (eid, id) values (1, 2) on duplicate key update name = func(a)"
{
"PlanId": "PASS_DML",
"Reason": "UPSERT",
"PlanId": "UPSERT_PK",
"Reason": "DEFAULT",
"TableName": "b",
"FieldQuery": null,
"FullQuery": "insert into b(eid, id) values (1, 2) on duplicate key update name = values(a)",
"OuterQuery": null,
"FullQuery": "insert into b(eid, id) values (1, 2) on duplicate key update name = func(a)",
"OuterQuery": "insert into b(eid, id) values (1, 2)",
"Subquery": null,
"UpsertQuery": "update b set name = func(a) where :#pk",
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
"PKValues": [1, 2],
"Limit": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
@ -1696,16 +1767,38 @@
# on dup pk change
"insert into b (eid, id) values (1, 2) on duplicate key update eid = 2"
{
"PlanId": "PASS_DML",
"Reason": "UPSERT",
"PlanId": "UPSERT_PK",
"Reason": "DEFAULT",
"TableName": "b",
"FieldQuery": null,
"FullQuery": "insert into b(eid, id) values (1, 2) on duplicate key update eid = 2",
"OuterQuery": null,
"OuterQuery": "insert into b(eid, id) values (1, 2)",
"Subquery": null,
"UpsertQuery": "update b set eid = 2 where :#pk",
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
"PKValues": [1, 2],
"Limit": null,
"SecondaryPKValues": [2, null],
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# on dup complex pk change
"insert into b (id, eid) values (1, 2) on duplicate key update eid = func(a)"
{
"PlanId": "PASS_DML",
"Reason": "PK_CHANGE",
"TableName": "b",
"FieldQuery": null,
"FullQuery": "insert into b(id, eid) values (1, 2) on duplicate key update eid = func(a)",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [2, 1],
"Limit": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
@ -1713,16 +1806,38 @@
"SetValue": null
}
# on dup complex pk change
"insert into b (id, eid) values (1, 2) on duplicate key update eid = values(a)"
# on dup multi-row
"insert into b (id, eid) values (1, 2), (2, 3) on duplicate key update name = func(a)"
{
"PlanId": "PASS_DML",
"Reason": "UPSERT",
"TableName": "b",
"FieldQuery": null,
"FullQuery": "insert into b(id, eid) values (1, 2) on duplicate key update eid = values(a)",
"FullQuery": "insert into b(id, eid) values (1, 2), (2, 3) on duplicate key update name = func(a)",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [[2,3],[1,2]],
"Limit": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# on dup subquery
"insert into b (id, eid) select * from a on duplicate key update name = func(a)"
{
"PlanId": "PASS_DML",
"Reason": "UPSERT",
"TableName": "b",
"FieldQuery": null,
"FullQuery": "insert into b(id, eid) select * from a on duplicate key update name = func(a)",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1743,6 +1858,7 @@
"FullQuery": "insert into b(eid, id) select * from a",
"OuterQuery": "insert into b(eid, id) values :#values",
"Subquery": "select * from a limit :#maxLimit",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1769,6 +1885,7 @@
"FullQuery": "insert into b select * from a",
"OuterQuery": "insert into b values :#values",
"Subquery": "select * from a limit :#maxLimit",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
@ -1795,6 +1912,7 @@
"FullQuery": "insert into b(eid, id) values (1, 2), (3, 4)",
"OuterQuery": "insert into b(eid, id) values (1, 2), (3, 4)",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1824,6 +1942,7 @@
"FullQuery": "update b.a set name = 'foo' where eid = 1 and id = 1",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1844,6 +1963,7 @@
"FullQuery": "update b set eid = 1",
"OuterQuery": "update b set eid = 1 where :#pk",
"Subquery": "select eid, id from b limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1871,6 +1991,7 @@
"FullQuery": "update b set a.eid = 1",
"OuterQuery": "update b set a.eid = 1 where :#pk",
"Subquery": "select eid, id from b limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1894,6 +2015,7 @@
"FullQuery": "update b set eid = foo()",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1914,6 +2036,7 @@
"FullQuery": "update a set name = 'foo'",
"OuterQuery": "update a set name = 'foo' where :#pk",
"Subquery": "select eid, id from a limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1934,6 +2057,7 @@
"FullQuery": "update a set name = 'foo' where eid + 1 = 1",
"OuterQuery": "update a set name = 'foo' where :#pk",
"Subquery": "select eid, id from a where eid + 1 = 1 limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -1954,6 +2078,7 @@
"FullQuery": "update a set name = 'foo' where eid = 1 and id = 1",
"OuterQuery": "update a set name = 'foo' where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -1977,6 +2102,7 @@
"FullQuery": "update a set a.name = 'foo' where eid = 1 and id = 1",
"OuterQuery": "update a set a.name = 'foo' where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -2000,6 +2126,7 @@
"FullQuery": "update a set name = 'foo' where eid = 1",
"OuterQuery": "update a set name = 'foo' where :#pk",
"Subquery": "select eid, id from a where eid = 1 limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2020,6 +2147,7 @@
"FullQuery": "update a set name = 'foo' where eid = 1 limit 10",
"OuterQuery": "update a set name = 'foo' where :#pk",
"Subquery": "select eid, id from a where eid = 1 limit 10 for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2040,6 +2168,7 @@
"FullQuery": "update a set name = 'foo' where eid = 1 and name = 'foo'",
"OuterQuery": "update a set name = 'foo' where :#pk",
"Subquery": "select eid, id from a where eid = 1 and name = 'foo' limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2060,6 +2189,7 @@
"FullQuery": "update c set eid = 1",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2080,6 +2210,7 @@
"FullQuery": "delete from b.a where eid = 1 and id = 1",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2100,6 +2231,7 @@
"FullQuery": "delete from a",
"OuterQuery": "delete from a where :#pk",
"Subquery": "select eid, id from a limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2120,6 +2252,7 @@
"FullQuery": "delete from a where eid + 1 = 1",
"OuterQuery": "delete from a where :#pk",
"Subquery": "select eid, id from a where eid + 1 = 1 limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2140,6 +2273,7 @@
"FullQuery": "delete from a where eid = 1 and id = 1",
"OuterQuery": "delete from a where :#pk",
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": [
@ -2163,6 +2297,7 @@
"FullQuery": "delete from a where eid = 1",
"OuterQuery": "delete from a where :#pk",
"Subquery": "select eid, id from a where eid = 1 limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2183,6 +2318,7 @@
"FullQuery": "delete from a where eid = 1 and name = 'foo'",
"OuterQuery": "delete from a where :#pk",
"Subquery": "select eid, id from a where eid = 1 and name = 'foo' limit :#maxLimit for update",
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2203,6 +2339,7 @@
"FullQuery": "delete from c",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2223,6 +2360,7 @@
"FullQuery": "set a = 1",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2243,6 +2381,7 @@
"FullQuery": "set a = 1.2",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2263,6 +2402,7 @@
"FullQuery": "set a = 'b'",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2283,6 +2423,7 @@
"FullQuery": "set a = 1, b = 2",
"OuterQuery": null,
"Subquery": null,
"UpsertQuery": null,
"IndexUsed": "",
"ColumnNumbers": null,
"PKValues": null,
@ -2303,6 +2444,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2323,6 +2465,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2343,6 +2486,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2363,6 +2507,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2383,6 +2528,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2403,6 +2549,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2423,6 +2570,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2443,6 +2591,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2463,6 +2612,7 @@
"FullQuery":null,
"OuterQuery":null,
"Subquery":null,
"UpsertQuery": null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -2479,4 +2629,4 @@
# syntax error
"syntax error"
"syntax error at position 7 near syntax"
"syntax error at position 7 near 'syntax'"


@ -8,6 +8,7 @@
"FullQuery":"select * from a",
"OuterQuery":null,
"Subquery":null,
"UpsertQuery":null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -28,6 +29,7 @@
"FullQuery":"select * from a join b",
"OuterQuery":null,
"Subquery":null,
"UpsertQuery":null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -52,6 +54,7 @@
"FullQuery": "select * from a union select * from b",
"OuterQuery":null,
"Subquery":null,
"UpsertQuery":null,
"IndexUsed":"",
"ColumnNumbers":null,
"PKValues":null,
@ -68,4 +71,4 @@
# syntax error
"syntax error"
"syntax error at position 7 near syntax"
"syntax error at position 7 near 'syntax'"


@ -2,7 +2,7 @@
"the quick brown fox"
{
"ID":"NoPlan",
"Reason":"syntax error at position 4 near the",
"Reason":"syntax error at position 4 near 'the'",
"Table": "",
"Original":"the quick brown fox",
"Rewritten":"",


@ -85,16 +85,16 @@ func main() {
os.Exit(4)
}
fmt.Println("Operation was enqueued. Details:", enqueueResponse)
errWait := waitForClusterOp(client, enqueueResponse.Id)
resp, errWait := waitForClusterOp(client, enqueueResponse.Id)
if errWait != nil {
fmt.Println("ERROR:", errWait)
os.Exit(5)
}
fmt.Println("SUCCESS: ClusterOperation finished.")
fmt.Printf("SUCCESS: ClusterOperation finished.\n\nDetails:\n%v", proto.MarshalTextString(resp))
}
// waitForClusterOp polls and blocks until the ClusterOperation invocation specified by "id" has finished. If an error occurred, it will be returned.
func waitForClusterOp(client pbs.AutomationClient, id string) error {
func waitForClusterOp(client pbs.AutomationClient, id string) (*pb.GetClusterOperationDetailsResponse, error) {
for {
req := &pb.GetClusterOperationDetailsRequest{
Id: id,
@ -102,17 +102,17 @@ func waitForClusterOp(client pbs.AutomationClient, id string) error {
resp, err := client.GetClusterOperationDetails(context.Background(), req)
if err != nil {
return fmt.Errorf("Failed to get ClusterOperation Details. Request: %v Error: %v", req, err)
return nil, fmt.Errorf("Failed to get ClusterOperation Details. Request: %v Error: %v", req, err)
}
switch resp.ClusterOp.State {
case pb.ClusterOperationState_UNKNOWN_CLUSTER_OPERATION_STATE:
return fmt.Errorf("ClusterOperation is in an unknown state. Details: %v", resp)
return resp, fmt.Errorf("ClusterOperation is in an unknown state. Details: %v", resp)
case pb.ClusterOperationState_CLUSTER_OPERATION_DONE:
if resp.ClusterOp.Error != "" {
return fmt.Errorf("ClusterOperation failed. Details:\n%v", proto.MarshalTextString(resp))
return resp, fmt.Errorf("ClusterOperation failed. Details:\n%v", proto.MarshalTextString(resp))
}
return nil
return resp, nil
}
time.Sleep(50 * time.Millisecond)


@ -47,15 +47,15 @@ func main() {
toTS := topo.GetServerByName(*toTopo)
if *doKeyspaces {
helpers.CopyKeyspaces(ctx, fromTS, toTS)
helpers.CopyKeyspaces(ctx, fromTS.Impl, toTS.Impl)
}
if *doShards {
helpers.CopyShards(ctx, fromTS, toTS, *deleteKeyspaceShards)
helpers.CopyShards(ctx, fromTS.Impl, toTS.Impl, *deleteKeyspaceShards)
}
if *doShardReplications {
helpers.CopyShardReplications(ctx, fromTS, toTS)
helpers.CopyShardReplications(ctx, fromTS.Impl, toTS.Impl)
}
if *doTablets {
helpers.CopyTablets(ctx, fromTS, toTS)
helpers.CopyTablets(ctx, fromTS.Impl, toTS.Impl)
}
}


@ -14,6 +14,7 @@ import (
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/wrangler"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
@ -143,7 +144,7 @@ func (ar *ActionRepository) ApplyShardAction(ctx context.Context, actionName, ke
func (ar *ActionRepository) ApplyTabletAction(ctx context.Context, actionName string, tabletAlias *pb.TabletAlias, r *http.Request) *ActionResult {
result := &ActionResult{
Name: actionName,
Parameters: topo.TabletAliasString(tabletAlias),
Parameters: topoproto.TabletAliasString(tabletAlias),
}
action, ok := ar.tabletActions[actionName]


@ -12,6 +12,7 @@ import (
"github.com/youtube/vitess/go/vt/schemamanager"
"github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"golang.org/x/net/context"
)
@ -158,14 +159,14 @@ func initAPI(ctx context.Context, ts topo.Server, actions *ActionRepository) {
if shardRef != "" {
// Look up by keyspace/shard, and optionally cell.
keyspace, shard, err := topo.ParseKeyspaceShardString(shardRef)
keyspace, shard, err := topoproto.ParseKeyspaceShard(shardRef)
if err != nil {
return nil, err
}
if cell != "" {
return topo.FindAllTabletAliasesInShardByCell(ctx, ts, keyspace, shard, []string{cell})
return ts.FindAllTabletAliasesInShardByCell(ctx, keyspace, shard, []string{cell})
}
return topo.FindAllTabletAliasesInShard(ctx, ts, keyspace, shard)
return ts.FindAllTabletAliasesInShard(ctx, keyspace, shard)
}
// Get all tablets in a cell.
@ -177,14 +178,14 @@ func initAPI(ctx context.Context, ts topo.Server, actions *ActionRepository) {
// Get tablet health.
if parts := strings.Split(tabletPath, "/"); len(parts) == 2 && parts[1] == "health" {
tabletAlias, err := topo.ParseTabletAliasString(parts[0])
tabletAlias, err := topoproto.ParseTabletAlias(parts[0])
if err != nil {
return nil, err
}
return tabletHealthCache.Get(ctx, tabletAlias)
}
tabletAlias, err := topo.ParseTabletAliasString(tabletPath)
tabletAlias, err := topoproto.ParseTabletAlias(tabletPath)
if err != nil {
return nil, err
}
@ -219,7 +220,7 @@ func initAPI(ctx context.Context, ts topo.Server, actions *ActionRepository) {
return ts.GetSrvTabletTypesPerShard(ctx, parts[0], parts[1], parts[2])
}
tabletType, err := topo.ParseTabletType(parts[3])
tabletType, err := topoproto.ParseTabletType(parts[3])
if err != nil {
return nil, fmt.Errorf("invalid tablet type %v: %v", parts[3], err)
}
@ -247,12 +248,6 @@ func initAPI(ctx context.Context, ts topo.Server, actions *ActionRepository) {
// VSchema
http.HandleFunc(apiPrefix+"vschema/", func(w http.ResponseWriter, r *http.Request) {
schemafier, ok := ts.(topo.Schemafier)
if !ok {
httpErrorf(w, r, "%T doesn't support schemafier API", ts)
return
}
// Save VSchema
if r.Method == "POST" {
vschema, err := ioutil.ReadAll(r.Body)
@ -260,14 +255,14 @@ func initAPI(ctx context.Context, ts topo.Server, actions *ActionRepository) {
httpErrorf(w, r, "can't read request body: %v", err)
return
}
if err := schemafier.SaveVSchema(ctx, string(vschema)); err != nil {
if err := ts.SaveVSchema(ctx, string(vschema)); err != nil {
httpErrorf(w, r, "can't save vschema: %v", err)
}
return
}
// Get VSchema
vschema, err := schemafier.GetVSchema(ctx)
vschema, err := ts.GetVSchema(ctx)
if err != nil {
httpErrorf(w, r, "can't get vschema: %v", err)
return


@ -16,7 +16,6 @@ import (
"golang.org/x/net/context"
"github.com/youtube/vitess/go/vt/logutil"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topotools"
"github.com/youtube/vitess/go/vt/wrangler"
"github.com/youtube/vitess/go/vt/zktopo"
@ -42,16 +41,16 @@ func TestAPI(t *testing.T) {
// Populate topo.
ts.CreateKeyspace(ctx, "ks1", &pb.Keyspace{ShardingColumnName: "shardcol"})
ts.CreateShard(ctx, "ks1", "-80", &pb.Shard{
ts.Impl.CreateShard(ctx, "ks1", "-80", &pb.Shard{
Cells: cells,
KeyRange: &pb.KeyRange{Start: nil, End: []byte{0x80}},
})
ts.CreateShard(ctx, "ks1", "80-", &pb.Shard{
ts.Impl.CreateShard(ctx, "ks1", "80-", &pb.Shard{
Cells: cells,
KeyRange: &pb.KeyRange{Start: []byte{0x80}, End: nil},
})
topo.CreateTablet(ctx, ts, &pb.Tablet{
ts.CreateTablet(ctx, &pb.Tablet{
Alias: &pb.TabletAlias{Cell: "cell1", Uid: 100},
Keyspace: "ks1",
Shard: "-80",
@ -59,7 +58,7 @@ func TestAPI(t *testing.T) {
KeyRange: &pb.KeyRange{Start: nil, End: []byte{0x80}},
PortMap: map[string]int32{"vt": 100},
})
topo.CreateTablet(ctx, ts, &pb.Tablet{
ts.CreateTablet(ctx, &pb.Tablet{
Alias: &pb.TabletAlias{Cell: "cell2", Uid: 200},
Keyspace: "ks1",
Shard: "-80",


@ -10,7 +10,7 @@ import (
log "github.com/golang/glog"
"github.com/youtube/vitess/go/cmd/vtctld/proto"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
@ -134,7 +134,7 @@ func handleExplorerRedirect(r *http.Request) (string, error) {
if keyspace == "" || shard == "" || cell == "" || tabletType == "" {
return "", errors.New("keyspace, shard, cell, and tablet_type are required for this redirect")
}
tt, err := topo.ParseTabletType(tabletType)
tt, err := topoproto.ParseTabletType(tabletType)
if err != nil {
return "", fmt.Errorf("cannot parse tablet type %v: %v", tabletType, err)
}
@ -144,7 +144,7 @@ func handleExplorerRedirect(r *http.Request) (string, error) {
if alias == "" {
return "", errors.New("alias is required for this redirect")
}
tabletAlias, err := topo.ParseTabletAliasString(alias)
tabletAlias, err := topoproto.ParseTabletAlias(alias)
if err != nil {
return "", fmt.Errorf("bad tablet alias %q: %v", alias, err)
}


@ -15,7 +15,7 @@ import (
func init() {
// Wait until flags are parsed, so we can check which topo server is in use.
servenv.OnRun(func() {
if etcdServer, ok := topo.GetServer().(*etcdtopo.Server); ok {
if etcdServer, ok := topo.GetServer().Impl.(*etcdtopo.Server); ok {
HandleExplorer("etcd", "/etcd/", "etcd.html", etcdtopo.NewExplorer(etcdServer))
}
})


@ -19,6 +19,7 @@ import (
"github.com/youtube/vitess/go/netutil"
"github.com/youtube/vitess/go/vt/servenv"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/zktopo"
"github.com/youtube/vitess/go/zk"
@ -28,7 +29,7 @@ import (
func init() {
// Wait until flags are parsed, so we can check which topo server is in use.
servenv.OnRun(func() {
if zkServer, ok := topo.GetServer().(*zktopo.Server); ok {
if zkServer, ok := topo.GetServer().Impl.(*zktopo.Server); ok {
HandleExplorer("zk", "/zk/", "zk.html", NewZkExplorer(zkServer.GetZConn()))
}
})
@ -71,7 +72,7 @@ func (ex ZkExplorer) GetSrvTypePath(cell, keyspace, shard string, tabletType pb.
// GetTabletPath is part of the Explorer interface
func (ex ZkExplorer) GetTabletPath(alias *pb.TabletAlias) string {
return path.Join("/zk", alias.Cell, "vt/tablets", topo.TabletAliasUIDStr(alias))
return path.Join("/zk", alias.Cell, "vt/tablets", topoproto.TabletAliasUIDStr(alias))
}
// GetReplicationSlaves is part of the Explorer interface


@ -9,6 +9,7 @@ import (
"time"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"golang.org/x/net/context"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
@ -306,7 +307,7 @@ func newShardCache(ts topo.Server) *VersionedObjectCacheMap {
return NewVersionedObjectCacheMap(func(key string) *VersionedObjectCache {
return NewVersionedObjectCache(func(ctx context.Context) (VersionedObject, error) {
keyspace, shard, err := topo.ParseKeyspaceShardString(key)
keyspace, shard, err := topoproto.ParseKeyspaceShard(key)
if err != nil {
return nil, err
}


@ -225,12 +225,12 @@ func TestShardNamesCache(t *testing.T) {
}); err != nil {
t.Fatalf("CreateKeyspace failed: %v", err)
}
if err := ts.CreateShard(ctx, "ks1", "s1", &pb.Shard{
if err := ts.Impl.CreateShard(ctx, "ks1", "s1", &pb.Shard{
Cells: []string{"cell1", "cell2"},
}); err != nil {
t.Fatalf("CreateShard failed: %v", err)
}
if err := ts.CreateShard(ctx, "ks1", "s2", &pb.Shard{
if err := ts.Impl.CreateShard(ctx, "ks1", "s2", &pb.Shard{
MasterAlias: &pb.TabletAlias{
Cell: "cell1",
Uid: 12,
@ -256,12 +256,12 @@ func TestShardCache(t *testing.T) {
}); err != nil {
t.Fatalf("CreateKeyspace failed: %v", err)
}
if err := ts.CreateShard(ctx, "ks1", "s1", &pb.Shard{
if err := ts.Impl.CreateShard(ctx, "ks1", "s1", &pb.Shard{
Cells: []string{"cell1", "cell2"},
}); err != nil {
t.Fatalf("CreateShard failed: %v", err)
}
if err := ts.CreateShard(ctx, "ks1", "s2", &pb.Shard{
if err := ts.Impl.CreateShard(ctx, "ks1", "s2", &pb.Shard{
MasterAlias: &pb.TabletAlias{
Cell: "cell1",
Uid: 12,


@ -17,6 +17,7 @@ import (
"github.com/youtube/vitess/go/vt/servenv"
"github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/topotools"
"github.com/youtube/vitess/go/vt/wrangler"
@ -241,7 +242,7 @@ func main() {
http.Error(w, "no alias provided", http.StatusBadRequest)
return
}
tabletAlias, err := topo.ParseTabletAliasString(alias)
tabletAlias, err := topoproto.ParseTabletAlias(alias)
if err != nil {
http.Error(w, "bad alias provided", http.StatusBadRequest)
return
@ -327,10 +328,6 @@ func main() {
httpErrorf(w, r, "cannot parse form: %s", err)
return
}
schemafier, ok := ts.(topo.Schemafier)
if !ok {
httpErrorf(w, r, "%s", fmt.Errorf("%T doesn't support schemafier API", ts))
}
var data struct {
Error error
Input, Output string
@ -339,9 +336,9 @@ func main() {
switch r.Method {
case "POST":
data.Input = r.FormValue("vschema")
data.Error = schemafier.SaveVSchema(ctx, data.Input)
data.Error = ts.SaveVSchema(ctx, data.Input)
}
vschema, err := schemafier.GetVSchema(ctx)
vschema, err := ts.GetVSchema(ctx)
if err != nil {
if data.Error == nil {
data.Error = fmt.Errorf("Error fetching schema: %s", err)


@ -55,13 +55,8 @@ func main() {
}
log.Infof("v3 is enabled: loaded schema from file: %v", *schemaFile)
} else {
schemafier, ok := ts.(topo.Schemafier)
if !ok {
log.Infof("Skipping v3 initialization: topo does not suppurt schemafier interface")
goto startServer
}
ctx := context.Background()
schemaJSON, err := schemafier.GetVSchema(ctx)
schemaJSON, err := ts.GetVSchema(ctx)
if err != nil {
log.Warningf("Skipping v3 initialization: GetVSchema failed: %v", err)
goto startServer


@ -20,6 +20,7 @@ import (
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"golang.org/x/net/context"
// import mysql to register mysql connection function
@ -66,7 +67,7 @@ func main() {
log.Errorf("tabletPath required")
exit.Return(1)
}
tabletAlias, err := topo.ParseTabletAliasString(*tabletPath)
tabletAlias, err := topoproto.ParseTabletAlias(*tabletPath)
if err != nil {
log.Error(err)


@ -1,11 +0,0 @@
// Copyright 2014, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
// This plugin imports influxdbbackend to register the influxdbbackend stats backend.
import (
_ "github.com/youtube/vitess/go/stats/influxdbbackend"
)


@ -1,198 +0,0 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"os"
"runtime/pprof"
"sort"
"time"
"golang.org/x/net/context"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/exit"
"github.com/youtube/vitess/go/rpcplus"
"github.com/youtube/vitess/go/rpcwrap/bsonrpc"
"github.com/youtube/vitess/go/sync2"
"github.com/youtube/vitess/go/vt/logutil"
"github.com/youtube/vitess/go/vt/topo"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
var (
usage = `
Queries the topo server, for test purposes.
`
mode = flag.String("mode", "get", "which operation to run on the node (getSrvKeyspaceNames, getSrvKeyspace, getEndPoints, qps)")
server = flag.String("server", "localhost:3801", "topo server to dial")
timeout = flag.Duration("timeout", 5*time.Second, "connection timeout")
// flag can't overlap with servenv's cpu_profile
cpuProfile = flag.String("zkclient_cpu_profile", "", "write cpu profile to file")
)
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, usage)
}
}
func connect() *rpcplus.Client {
rpcClient, err := bsonrpc.DialHTTP("tcp", *server, *timeout)
if err != nil {
log.Fatalf("Can't connect to topo server: %v", err)
}
return rpcClient
}
func getSrvKeyspaceNames(ctx context.Context, rpcClient *rpcplus.Client, cell string, verbose bool) {
req := &topo.GetSrvKeyspaceNamesArgs{
Cell: cell,
}
reply := &topo.SrvKeyspaceNames{}
if err := rpcClient.Call(ctx, "TopoReader.GetSrvKeyspaceNames", req, reply); err != nil {
log.Fatalf("TopoReader.GetSrvKeyspaceNames error: %v", err)
}
if verbose {
for i, entry := range reply.Entries {
println(fmt.Sprintf("KeyspaceNames[%v] = %v", i, entry))
}
}
}
func getSrvKeyspace(ctx context.Context, rpcClient *rpcplus.Client, cell, keyspace string, verbose bool) {
req := &topo.GetSrvKeyspaceArgs{
Cell: cell,
Keyspace: keyspace,
}
reply := &topo.SrvKeyspace{}
if err := rpcClient.Call(ctx, "TopoReader.GetSrvKeyspace", req, reply); err != nil {
log.Fatalf("TopoReader.GetSrvKeyspace error: %v", err)
}
if verbose {
tabletTypes := make([]string, 0, len(reply.Partitions))
for t := range reply.Partitions {
tabletTypes = append(tabletTypes, string(t))
}
sort.Strings(tabletTypes)
for _, t := range tabletTypes {
println(fmt.Sprintf("Partitions[%v] =", t))
for i, s := range reply.Partitions[topo.TabletType(t)].ShardReferences {
println(fmt.Sprintf(" ShardReferences[%v]=%v", i, s.KeyRange.String()))
}
}
}
}
func getEndPoints(ctx context.Context, rpcClient *rpcplus.Client, cell, keyspace, shard, tabletType string, verbose bool) {
req := &topo.GetEndPointsArgs{
Cell: cell,
Keyspace: keyspace,
Shard: shard,
TabletType: topo.TabletType(tabletType),
}
reply := &pb.EndPoints{}
if err := rpcClient.Call(ctx, "TopoReader.GetEndPoints", req, reply); err != nil {
log.Fatalf("TopoReader.GetEndPoints error: %v", err)
}
if verbose {
for i, e := range reply.Entries {
println(fmt.Sprintf("Entries[%v] = %v %v", i, e.Uid, e.Host))
}
}
}
// qps is a function used by tests to run a vtgate load check.
// It will get the same srvKeyspaces as fast as possible and display the QPS.
func qps(ctx context.Context, cell string, keyspaces []string) {
var count sync2.AtomicInt32
for _, keyspace := range keyspaces {
for i := 0; i < 10; i++ {
go func() {
rpcClient := connect()
for true {
getSrvKeyspace(ctx, rpcClient, cell, keyspace, false)
count.Add(1)
}
}()
}
}
ticker := time.NewTicker(time.Second)
i := 0
for _ = range ticker.C {
c := count.Get()
count.Set(0)
println(fmt.Sprintf("QPS = %v", c))
i++
if i == 10 {
break
}
}
}
func main() {
defer exit.Recover()
defer logutil.Flush()
flag.Parse()
args := flag.Args()
if len(args) == 0 {
flag.Usage()
exit.Return(1)
}
if *cpuProfile != "" {
f, err := os.Create(*cpuProfile)
if err != nil {
log.Error(err)
exit.Return(1)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
ctx := context.Background()
if *mode == "getSrvKeyspaceNames" {
rpcClient := connect()
if len(args) == 1 {
getSrvKeyspaceNames(ctx, rpcClient, args[0], true)
} else {
log.Errorf("getSrvKeyspaceNames only takes one argument")
exit.Return(1)
}
} else if *mode == "getSrvKeyspace" {
rpcClient := connect()
if len(args) == 2 {
getSrvKeyspace(ctx, rpcClient, args[0], args[1], true)
} else {
log.Errorf("getSrvKeyspace only takes two arguments")
exit.Return(1)
}
} else if *mode == "getEndPoints" {
rpcClient := connect()
if len(args) == 4 {
getEndPoints(ctx, rpcClient, args[0], args[1], args[2], args[3], true)
} else {
log.Errorf("getEndPoints only takes four arguments")
exit.Return(1)
}
} else if *mode == "qps" {
qps(ctx, args[0], args[1:])
} else {
flag.Usage()
log.Errorf("Invalid mode: %v", *mode)
exit.Return(1)
}
}


@ -1,64 +0,0 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package db defines an alternate (and simplified)
// db api compared to go's database/sql.
package db
import (
"fmt"
)
// Driver is the interface that must be implemented by a database driver.
type Driver interface {
// Open returns a new connection to the database.
// The name is a string in a driver-specific format.
// The returned connection should only be used by one
// goroutine at a time.
Open(name string) (Conn, error)
}
var drivers = make(map[string]Driver)
// Register makes a database driver available by the provided name.
func Register(name string, driver Driver) {
drivers[name] = driver
}
// Open opens a database specified by its database driver name
// and a driver-specific data source name, usually consisting
// of at least a database name and connection information.
func Open(name, path string) (Conn, error) {
d := drivers[name]
if d == nil {
return nil, fmt.Errorf("Driver %s not found", name)
}
return d.Open(path)
}
// Conn is a connection to a database. It should not be used
// concurrently by multiple goroutines.
type Conn interface {
Exec(query string, args map[string]interface{}) (Result, error)
Begin() (Tx, error)
Close() error
}
// Tx is a transaction.
type Tx interface {
Commit() error
Rollback() error
}
// Result is an iterator over an executed query's results.
// It is also used to query for the results of a DML, in which
// case the iterator functions are not applicable.
type Result interface {
LastInsertId() (int64, error)
RowsAffected() (int64, error)
Columns() []string
Next() []interface{}
Err() error
Close() error
}


@ -1,46 +0,0 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jscfg implements a simple API for reading and writing JSON files.
package jscfg
import (
"encoding/json"
"fmt"
"io/ioutil"
"github.com/youtube/vitess/go/ioutil2"
)
// ToJSON converts a structure to JSON, or panics
func ToJSON(val interface{}) string {
data, err := json.MarshalIndent(val, "", " ")
// This is not strictly the spirit of panic. This is meant to be used
// where it would be a programming error to have json encoding fail.
if err != nil {
panic(err)
}
return string(data)
}
// WriteJSON atomically write a marshaled structure to disk.
func WriteJSON(filename string, val interface{}) error {
data, err := json.MarshalIndent(val, " ", " ")
if err != nil {
return fmt.Errorf("WriteJSON failed: %v %v", filename, err)
}
return ioutil2.WriteFileAtomic(filename, data, 0660)
}
// ReadJSON reads and unmarshals a JSON file
func ReadJSON(filename string, val interface{}) error {
data, err := ioutil.ReadFile(filename)
if err != nil {
return fmt.Errorf("ReadJSON failed: %T %v", val, err)
}
if err = json.Unmarshal(data, val); err != nil {
return fmt.Errorf("ReadJSON failed: %T %v %v", val, filename, err)
}
return nil
}


@ -50,6 +50,9 @@ const (
// ErrOptionPreventsStatement is C.ER_OPTION_PREVENTS_STATEMENT
ErrOptionPreventsStatement = C.ER_OPTION_PREVENTS_STATEMENT
// ErrDataTooLong is C.ER_DATA_TOO_LONG
ErrDataTooLong = C.ER_DATA_TOO_LONG
// ErrServerLost is C.CR_SERVER_LOST.
// It's hard-coded for now because it causes problems on import.
ErrServerLost = 2013


@ -104,6 +104,18 @@ func (v Value) ParseUint64() (val uint64, err error) {
return strconv.ParseUint(string(n.raw()), 10, 64)
}
// ParseFloat64 will parse a Fractional value into an float64
func (v Value) ParseFloat64() (val float64, err error) {
if v.Inner == nil {
return 0, fmt.Errorf("value is null")
}
n, ok := v.Inner.(Fractional)
if !ok {
return 0, fmt.Errorf("value is not Fractional")
}
return strconv.ParseFloat(string(n.raw()), 64)
}
// EncodeSql encodes the value into an SQL statement. Can be binary.
func (v Value) EncodeSql(b BinWriter) {
if v.Inner == nil {
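As an editorial aside (not part of the diff above), here is a minimal sketch of the new ParseFloat64 helper in use. It assumes the sqltypes package's MakeFractional constructor and the import path as of this revision; the literal value is a placeholder.
package main

import (
	"fmt"

	"github.com/youtube/vitess/go/sqltypes"
)

func main() {
	// Assumed constructor: build a Fractional value the way a DECIMAL column
	// would arrive in a query result.
	v := sqltypes.MakeFractional([]byte("3.14"))

	// New in this change: parse the value directly into a float64.
	f, err := v.ParseFloat64()
	if err != nil {
		// Returned when the value is NULL or not a Fractional.
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println("parsed:", f)
}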


@ -50,6 +50,19 @@ func (t *HorizontalReshardingTask) Run(parameters map[string]string) ([]*pb.Task
}
newTasks = append(newTasks, splitCloneTasks)
// TODO(mberlin): When the framework supports nesting tasks, these wait tasks should be run before each SplitDiff.
waitTasks := NewTaskContainer()
for _, destShard := range destShards {
AddTask(waitTasks, "WaitForFilteredReplicationTask", map[string]string{
"keyspace": keyspace,
"shard": destShard,
"max_delay": "30s",
"vtctld_endpoint": vtctldEndpoint,
})
}
newTasks = append(newTasks, waitTasks)
// TODO(mberlin): Run all SplitDiffTasks in parallel which do not use overlapping source shards for the comparison.
for _, destShard := range destShards {
splitDiffTask := NewTaskContainer()
AddTask(splitDiffTask, "SplitDiffTask", map[string]string{


@ -207,6 +207,8 @@ func defaultTaskCreator(taskName string) Task {
return &HorizontalReshardingTask{}
case "CopySchemaShardTask":
return &CopySchemaShardTask{}
case "WaitForFilteredReplicationTask":
return &WaitForFilteredReplicationTask{}
case "SplitCloneTask":
return &SplitCloneTask{}
case "SplitDiffTask":


@ -8,6 +8,8 @@ import (
"bytes"
"time"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/vt/logutil"
"github.com/youtube/vitess/go/vt/vtctl/vtctlclient"
"golang.org/x/net/context"
@ -17,6 +19,7 @@ import (
func ExecuteVtctl(ctx context.Context, server string, args []string) (string, error) {
var output bytes.Buffer
log.Infof("Executing remote vtctl command: %v server: %v", args, server)
err := vtctlclient.RunCommandAndWait(
ctx, server, args,
// TODO(mberlin): Should these values be configurable as flags?


@ -0,0 +1,35 @@
// Copyright 2015, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package automation
import (
"fmt"
pb "github.com/youtube/vitess/go/vt/proto/automation"
"golang.org/x/net/context"
)
// WaitForFilteredReplicationTask runs vtctl WaitForFilteredReplication to block until the destination master
// (i.e. the receiving side of the filtered replication) has caught up to within max_delay of the source shard.
type WaitForFilteredReplicationTask struct {
}
// Run is part of the Task interface.
func (t *WaitForFilteredReplicationTask) Run(parameters map[string]string) ([]*pb.TaskContainer, string, error) {
keyspaceAndShard := fmt.Sprintf("%v/%v", parameters["keyspace"], parameters["shard"])
output, err := ExecuteVtctl(context.TODO(), parameters["vtctld_endpoint"],
[]string{"WaitForFilteredReplication", "-max_delay", parameters["max_delay"], keyspaceAndShard})
return nil, output, err
}
// RequiredParameters is part of the Task interface.
func (t *WaitForFilteredReplicationTask) RequiredParameters() []string {
return []string{"keyspace", "shard", "max_delay", "vtctld_endpoint"}
}
// OptionalParameters is part of the Task interface.
func (t *WaitForFilteredReplicationTask) OptionalParameters() []string {
return nil
}
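
Tying it together, the task can be driven directly with a parameter map like the one built by the resharding task above; the keyspace, shard and vtctld endpoint below are placeholders, and the helper is only a sketch.

// runWaitExample blocks until the destination master's filtered replication
// lag drops below max_delay, then returns the vtctl command output.
func runWaitExample() (string, error) {
	task := &WaitForFilteredReplicationTask{}
	_, output, err := task.Run(map[string]string{
		"keyspace":        "test_keyspace",
		"shard":           "-80",
		"max_delay":       "30s",
		"vtctld_endpoint": "localhost:15999",
	})
	return output, err
}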


@ -13,7 +13,7 @@ import (
"time"
mproto "github.com/youtube/vitess/go/mysql/proto"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/vtgate/vtgateconn"
"golang.org/x/net/context"
@ -41,7 +41,7 @@ func (d drv) Open(name string) (driver.Conn, error) {
if err != nil {
return nil, err
}
c.tabletType, err = topo.ParseTabletType(c.TabletType)
c.tabletType, err = topoproto.ParseTabletType(c.TabletType)
if err != nil {
return nil, err
}


@ -10,12 +10,13 @@ package dbconfigs
// link with this library, so we should be safe.
import (
"encoding/json"
"errors"
"flag"
"io/ioutil"
"sync"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/jscfg"
)
var (
@ -74,10 +75,17 @@ func (fcs *FileCredentialsServer) GetUserAndPassword(user string) (string, strin
// read the json file only once
if fcs.dbCredentials == nil {
fcs.dbCredentials = make(map[string][]string)
if err := jscfg.ReadJSON(*dbCredentialsFile, &fcs.dbCredentials); err != nil {
data, err := ioutil.ReadFile(*dbCredentialsFile)
if err != nil {
log.Warningf("Failed to read dbCredentials file: %v", *dbCredentialsFile)
return "", "", err
}
if err = json.Unmarshal(data, &fcs.dbCredentials); err != nil {
log.Warningf("Failed to parse dbCredentials file: %v", *dbCredentialsFile)
return "", "", err
}
}
passwd, ok := fcs.dbCredentials[user]
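
The credentials file format is implied by the unmarshal target: a JSON object mapping each user to a list of passwords. A minimal sketch of writing such a file follows; the user names, passwords and permission bits are illustrative only.

// writeExampleCredentials produces a file in the shape FileCredentialsServer
// expects, i.e. map[user][]password serialized as JSON.
func writeExampleCredentials(path string) error {
	creds := map[string][]string{
		"vt_app": {"app_password"},
		"vt_dba": {"dba_password"},
	}
	data, err := json.MarshalIndent(creds, "", "  ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, data, 0600)
}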


@ -12,7 +12,7 @@ import (
"github.com/youtube/vitess/go/flagutil"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
)
const (
@ -70,7 +70,7 @@ func shardFilePath(keyspace, shard string) string {
}
func tabletDirPath(tabletAlias *pb.TabletAlias) string {
return path.Join(tabletsDirPath, topo.TabletAliasString(tabletAlias))
return path.Join(tabletsDirPath, topoproto.TabletAliasString(tabletAlias))
}
func tabletFilePath(tabletAlias *pb.TabletAlias) string {


@ -9,10 +9,8 @@ import (
"fmt"
"sync"
"github.com/youtube/vitess/go/event"
"github.com/youtube/vitess/go/vt/concurrency"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/events"
"golang.org/x/net/context"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
@ -26,36 +24,23 @@ func (s *Server) CreateKeyspace(ctx context.Context, keyspace string, value *pb.
}
global := s.getGlobal()
resp, err := global.Create(keyspaceFilePath(keyspace), string(data), 0 /* ttl */)
if err != nil {
if _, err = global.Create(keyspaceFilePath(keyspace), string(data), 0 /* ttl */); err != nil {
return convertError(err)
}
if err := initLockFile(global, keyspaceDirPath(keyspace)); err != nil {
if err = initLockFile(global, keyspaceDirPath(keyspace)); err != nil {
return err
}
// We don't return ErrBadResponse in this case because the Create() succeeded
// and we don't really need the version to satisfy our contract - we're only
// logging it.
version := int64(-1)
if resp.Node != nil {
version = int64(resp.Node.ModifiedIndex)
}
event.Dispatch(&events.KeyspaceChange{
KeyspaceInfo: *topo.NewKeyspaceInfo(keyspace, value, version),
Status: "created",
})
return nil
}
// UpdateKeyspace implements topo.Server.
func (s *Server) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) {
data, err := json.MarshalIndent(ki.Keyspace, "", " ")
func (s *Server) UpdateKeyspace(ctx context.Context, keyspace string, value *pb.Keyspace, existingVersion int64) (int64, error) {
data, err := json.MarshalIndent(value, "", " ")
if err != nil {
return -1, err
}
resp, err := s.getGlobal().CompareAndSwap(keyspaceFilePath(ki.KeyspaceName()),
resp, err := s.getGlobal().CompareAndSwap(keyspaceFilePath(keyspace),
string(data), 0 /* ttl */, "" /* prevValue */, uint64(existingVersion))
if err != nil {
return -1, convertError(err)
@ -63,30 +48,25 @@ func (s *Server) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, exis
if resp.Node == nil {
return -1, ErrBadResponse
}
event.Dispatch(&events.KeyspaceChange{
KeyspaceInfo: *ki,
Status: "updated",
})
return int64(resp.Node.ModifiedIndex), nil
}
// GetKeyspace implements topo.Server.
func (s *Server) GetKeyspace(ctx context.Context, keyspace string) (*topo.KeyspaceInfo, error) {
func (s *Server) GetKeyspace(ctx context.Context, keyspace string) (*pb.Keyspace, int64, error) {
resp, err := s.getGlobal().Get(keyspaceFilePath(keyspace), false /* sort */, false /* recursive */)
if err != nil {
return nil, convertError(err)
return nil, 0, convertError(err)
}
if resp.Node == nil {
return nil, ErrBadResponse
return nil, 0, ErrBadResponse
}
value := &pb.Keyspace{}
if err := json.Unmarshal([]byte(resp.Node.Value), value); err != nil {
return nil, fmt.Errorf("bad keyspace data (%v): %q", err, resp.Node.Value)
return nil, 0, fmt.Errorf("bad keyspace data (%v): %q", err, resp.Node.Value)
}
return topo.NewKeyspaceInfo(keyspace, value, int64(resp.Node.ModifiedIndex)), nil
return value, int64(resp.Node.ModifiedIndex), nil
}
// GetKeyspaces implements topo.Server.
@ -125,11 +105,6 @@ func (s *Server) DeleteKeyspaceShards(ctx context.Context, keyspace string) erro
if err = rec.Error(); err != nil {
return err
}
event.Dispatch(&events.KeyspaceChange{
KeyspaceInfo: *topo.NewKeyspaceInfo(keyspace, nil, -1),
Status: "deleted all shards",
})
return nil
}
@ -139,10 +114,5 @@ func (s *Server) DeleteKeyspace(ctx context.Context, keyspace string) error {
if err != nil {
return convertError(err)
}
event.Dispatch(&events.KeyspaceChange{
KeyspaceInfo: *topo.NewKeyspaceInfo(keyspace, nil, -1),
Status: "deleted",
})
return nil
}
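
With keyspace values and versions now passed around explicitly, callers do the read-modify-write themselves; the sketch below is hypothetical, with a placeholder keyspace name and field mutation.

// setShardingColumn reads the keyspace and its version, mutates it, and
// writes it back so UpdateKeyspace's CompareAndSwap can catch racing writers.
func setShardingColumn(ctx context.Context, s *Server) error {
	value, version, err := s.GetKeyspace(ctx, "test_keyspace")
	if err != nil {
		return err
	}
	value.ShardingColumnName = "keyspace_id"
	_, err = s.UpdateKeyspace(ctx, "test_keyspace", value, version)
	return err
}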


@ -65,11 +65,11 @@ func TestServingGraph(t *testing.T) {
test.CheckServingGraph(ctx, t, ts)
}
func TestWatchEndPoints(t *testing.T) {
func TestWatchSrvKeyspace(t *testing.T) {
ctx := context.Background()
ts := newTestServer(t, []string{"test"})
defer ts.Close()
test.CheckWatchEndPoints(ctx, t, ts)
test.CheckWatchSrvKeyspace(ctx, t, ts)
}
func TestKeyspaceLock(t *testing.T) {


@ -13,6 +13,7 @@ import (
"github.com/coreos/go-etcd/etcd"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"golang.org/x/net/context"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
@ -41,7 +42,7 @@ func (s *Server) GetSrvTabletTypesPerShard(ctx context.Context, cellName, keyspa
tabletTypes := make([]pb.TabletType, 0, len(resp.Node.Nodes))
for _, n := range resp.Node.Nodes {
strType := path.Base(n.Key)
if tt, err := topo.ParseTabletType(strType); err == nil {
if tt, err := topoproto.ParseTabletType(strType); err == nil {
tabletTypes = append(tabletTypes, tt)
}
}
@ -271,15 +272,15 @@ func (s *Server) GetSrvKeyspaceNames(ctx context.Context, cellName string) ([]st
return getNodeNames(resp)
}
// WatchEndPoints is part of the topo.Server interface
func (s *Server) WatchEndPoints(ctx context.Context, cellName, keyspace, shard string, tabletType pb.TabletType) (<-chan *pb.EndPoints, chan<- struct{}, error) {
// WatchSrvKeyspace is part of the topo.Server interface
func (s *Server) WatchSrvKeyspace(ctx context.Context, cellName, keyspace string) (<-chan *topo.SrvKeyspace, chan<- struct{}, error) {
cell, err := s.getCell(cellName)
if err != nil {
return nil, nil, fmt.Errorf("WatchEndPoints cannot get cell: %v", err)
return nil, nil, fmt.Errorf("WatchSrvKeyspace cannot get cell: %v", err)
}
filePath := endPointsFilePath(keyspace, shard, tabletType)
filePath := srvKeyspaceFilePath(keyspace)
notifications := make(chan *pb.EndPoints, 10)
notifications := make(chan *topo.SrvKeyspace, 10)
stopWatching := make(chan struct{})
// The watch go routine will stop if the 'stop' channel is closed.
@ -288,12 +289,22 @@ func (s *Server) WatchEndPoints(ctx context.Context, cellName, keyspace, shard s
watch := make(chan *etcd.Response)
stop := make(chan bool)
go func() {
// get the current version of the file
ep, modifiedVersion, err := s.GetEndPoints(ctx, cellName, keyspace, shard, tabletType)
if err != nil {
var srvKeyspace *topo.SrvKeyspace
var modifiedVersion int64
resp, err := cell.Get(filePath, false /* sort */, false /* recursive */)
if err != nil || resp.Node == nil {
// node doesn't exist
modifiedVersion = 0
ep = nil
} else {
if resp.Node.Value != "" {
sk := &pb.SrvKeyspace{}
if err := json.Unmarshal([]byte(resp.Node.Value), sk); err != nil {
log.Warningf("bad SrvKeyspace data (%v): %q", err, resp.Node.Value)
} else {
srvKeyspace = topo.ProtoToSrvKeyspace(sk)
modifiedVersion = int64(resp.Node.ModifiedIndex)
}
}
}
// re-check for stop here to be safe, in case the
@ -301,7 +312,7 @@ func (s *Server) WatchEndPoints(ctx context.Context, cellName, keyspace, shard s
select {
case <-stop:
return
case notifications <- ep:
case notifications <- srvKeyspace:
}
for {
@ -325,15 +336,16 @@ func (s *Server) WatchEndPoints(ctx context.Context, cellName, keyspace, shard s
for {
select {
case resp := <-watch:
var ep *pb.EndPoints
var srvKeyspace *topo.SrvKeyspace
if resp.Node != nil && resp.Node.Value != "" {
ep = &pb.EndPoints{}
if err := json.Unmarshal([]byte(resp.Node.Value), ep); err != nil {
sk := &pb.SrvKeyspace{}
if err := json.Unmarshal([]byte(resp.Node.Value), sk); err != nil {
log.Errorf("failed to Unmarshal SrvKeyspace for %v: %v", filePath, err)
continue
}
srvKeyspace = topo.ProtoToSrvKeyspace(sk)
}
notifications <- ep
notifications <- srvKeyspace
case <-stopWatching:
close(stop)
close(notifications)
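
From the consumer side, the watch is read as a channel of snapshots and cancelled by closing the stop channel; the loop below is only a sketch, with placeholder cell and keyspace names.

// watchExample logs every SrvKeyspace snapshot until the context is done.
func watchExample(ctx context.Context, s *Server) error {
	notifications, stopWatching, err := s.WatchSrvKeyspace(ctx, "cell1", "test_keyspace")
	if err != nil {
		return err
	}
	for {
		select {
		case sk, ok := <-notifications:
			if !ok {
				return nil
			}
			log.Infof("new SrvKeyspace: %v", sk)
		case <-ctx.Done():
			close(stopWatching)
			return ctx.Err()
		}
	}
}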


@ -8,9 +8,6 @@ import (
"encoding/json"
"fmt"
"github.com/youtube/vitess/go/event"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/events"
"golang.org/x/net/context"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
@ -24,36 +21,23 @@ func (s *Server) CreateShard(ctx context.Context, keyspace, shard string, value
}
global := s.getGlobal()
resp, err := global.Create(shardFilePath(keyspace, shard), string(data), 0 /* ttl */)
if err != nil {
if _, err = global.Create(shardFilePath(keyspace, shard), string(data), 0 /* ttl */); err != nil {
return convertError(err)
}
if err := initLockFile(global, shardDirPath(keyspace, shard)); err != nil {
if err = initLockFile(global, shardDirPath(keyspace, shard)); err != nil {
return err
}
// We don't return ErrBadResponse in this case because the Create() succeeded
// and we don't really need the version to satisfy our contract - we're only
// logging it.
version := int64(-1)
if resp.Node != nil {
version = int64(resp.Node.ModifiedIndex)
}
event.Dispatch(&events.ShardChange{
ShardInfo: *topo.NewShardInfo(keyspace, shard, value, version),
Status: "created",
})
return nil
}
// UpdateShard implements topo.Server.
func (s *Server) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVersion int64) (int64, error) {
data, err := json.MarshalIndent(si.Shard, "", " ")
func (s *Server) UpdateShard(ctx context.Context, keyspace, shard string, value *pb.Shard, existingVersion int64) (int64, error) {
data, err := json.MarshalIndent(value, "", " ")
if err != nil {
return -1, err
}
resp, err := s.getGlobal().CompareAndSwap(shardFilePath(si.Keyspace(), si.ShardName()),
resp, err := s.getGlobal().CompareAndSwap(shardFilePath(keyspace, shard),
string(data), 0 /* ttl */, "" /* prevValue */, uint64(existingVersion))
if err != nil {
return -1, convertError(err)
@ -61,36 +45,31 @@ func (s *Server) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVe
if resp.Node == nil {
return -1, ErrBadResponse
}
event.Dispatch(&events.ShardChange{
ShardInfo: *si,
Status: "updated",
})
return int64(resp.Node.ModifiedIndex), nil
}
// ValidateShard implements topo.Server.
func (s *Server) ValidateShard(ctx context.Context, keyspace, shard string) error {
_, err := s.GetShard(ctx, keyspace, shard)
_, _, err := s.GetShard(ctx, keyspace, shard)
return err
}
// GetShard implements topo.Server.
func (s *Server) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) {
func (s *Server) GetShard(ctx context.Context, keyspace, shard string) (*pb.Shard, int64, error) {
resp, err := s.getGlobal().Get(shardFilePath(keyspace, shard), false /* sort */, false /* recursive */)
if err != nil {
return nil, convertError(err)
return nil, 0, convertError(err)
}
if resp.Node == nil {
return nil, ErrBadResponse
return nil, 0, ErrBadResponse
}
value := &pb.Shard{}
if err := json.Unmarshal([]byte(resp.Node.Value), value); err != nil {
return nil, fmt.Errorf("bad shard data (%v): %q", err, resp.Node.Value)
return nil, 0, fmt.Errorf("bad shard data (%v): %q", err, resp.Node.Value)
}
return topo.NewShardInfo(keyspace, shard, value, int64(resp.Node.ModifiedIndex)), nil
return value, int64(resp.Node.ModifiedIndex), nil
}
// GetShardNames implements topo.Server.
@ -108,10 +87,5 @@ func (s *Server) DeleteShard(ctx context.Context, keyspace, shard string) error
if err != nil {
return convertError(err)
}
event.Dispatch(&events.ShardChange{
ShardInfo: *topo.NewShardInfo(keyspace, shard, nil, -1),
Status: "deleted",
})
return nil
}


@ -8,9 +8,8 @@ import (
"encoding/json"
"fmt"
"github.com/youtube/vitess/go/event"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/events"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"golang.org/x/net/context"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
@ -31,26 +30,21 @@ func (s *Server) CreateTablet(ctx context.Context, tablet *pb.Tablet) error {
if err != nil {
return convertError(err)
}
event.Dispatch(&events.TabletChange{
Tablet: *tablet,
Status: "created",
})
return nil
}
// UpdateTablet implements topo.Server.
func (s *Server) UpdateTablet(ctx context.Context, ti *topo.TabletInfo, existingVersion int64) (int64, error) {
cell, err := s.getCell(ti.Alias.Cell)
func (s *Server) UpdateTablet(ctx context.Context, tablet *pb.Tablet, existingVersion int64) (int64, error) {
cell, err := s.getCell(tablet.Alias.Cell)
if err != nil {
return -1, err
}
data, err := json.MarshalIndent(ti.Tablet, "", " ")
data, err := json.MarshalIndent(tablet, "", " ")
if err != nil {
return -1, err
}
resp, err := cell.CompareAndSwap(tabletFilePath(ti.Alias),
resp, err := cell.CompareAndSwap(tabletFilePath(tablet.Alias),
string(data), 0 /* ttl */, "" /* prevValue */, uint64(existingVersion))
if err != nil {
return -1, convertError(err)
@ -58,39 +52,30 @@ func (s *Server) UpdateTablet(ctx context.Context, ti *topo.TabletInfo, existing
if resp.Node == nil {
return -1, ErrBadResponse
}
event.Dispatch(&events.TabletChange{
Tablet: *ti.Tablet,
Status: "updated",
})
return int64(resp.Node.ModifiedIndex), nil
}
// UpdateTabletFields implements topo.Server.
func (s *Server) UpdateTabletFields(ctx context.Context, tabletAlias *pb.TabletAlias, updateFunc func(*pb.Tablet) error) error {
var ti *topo.TabletInfo
func (s *Server) UpdateTabletFields(ctx context.Context, tabletAlias *pb.TabletAlias, updateFunc func(*pb.Tablet) error) (*pb.Tablet, error) {
var tablet *pb.Tablet
var err error
for {
if ti, err = s.GetTablet(ctx, tabletAlias); err != nil {
return err
var version int64
if tablet, version, err = s.GetTablet(ctx, tabletAlias); err != nil {
return nil, err
}
if err = updateFunc(ti.Tablet); err != nil {
return err
if err = updateFunc(tablet); err != nil {
return nil, err
}
if _, err = s.UpdateTablet(ctx, ti, ti.Version()); err != topo.ErrBadVersion {
if _, err = s.UpdateTablet(ctx, tablet, version); err != topo.ErrBadVersion {
break
}
}
if err != nil {
return err
return nil, err
}
event.Dispatch(&events.TabletChange{
Tablet: *ti.Tablet,
Status: "updated",
})
return nil
return tablet, nil
}
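
A caller-side sketch of the new UpdateTabletFields signature, which now hands back the mutated tablet instead of forcing a re-read; the port name and value are placeholders.

// setVtPort bumps a single port in the tablet record using the CAS retry
// loop implemented by UpdateTabletFields above.
func setVtPort(ctx context.Context, s *Server, alias *pb.TabletAlias) (*pb.Tablet, error) {
	return s.UpdateTabletFields(ctx, alias, func(t *pb.Tablet) error {
		if t.PortMap == nil {
			t.PortMap = make(map[string]int32)
		}
		t.PortMap["vt"] = 15002
		return nil
	})
}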
// DeleteTablet implements topo.Server.
@ -100,56 +85,39 @@ func (s *Server) DeleteTablet(ctx context.Context, tabletAlias *pb.TabletAlias)
return err
}
// Get the keyspace and shard names for the TabletChange event.
ti, tiErr := s.GetTablet(ctx, tabletAlias)
_, err = cell.Delete(tabletDirPath(tabletAlias), true /* recursive */)
if err != nil {
if _, err = cell.Delete(tabletDirPath(tabletAlias), true /* recursive */); err != nil {
return convertError(err)
}
// Only try to log if we have the required info.
if tiErr == nil {
// Only copy the identity info for the tablet. The rest has been deleted.
event.Dispatch(&events.TabletChange{
Tablet: pb.Tablet{
Alias: ti.Tablet.Alias,
Keyspace: ti.Tablet.Keyspace,
Shard: ti.Tablet.Shard,
},
Status: "deleted",
})
}
return nil
}
// ValidateTablet implements topo.Server.
func (s *Server) ValidateTablet(ctx context.Context, tabletAlias *pb.TabletAlias) error {
_, err := s.GetTablet(ctx, tabletAlias)
_, _, err := s.GetTablet(ctx, tabletAlias)
return err
}
// GetTablet implements topo.Server.
func (s *Server) GetTablet(ctx context.Context, tabletAlias *pb.TabletAlias) (*topo.TabletInfo, error) {
func (s *Server) GetTablet(ctx context.Context, tabletAlias *pb.TabletAlias) (*pb.Tablet, int64, error) {
cell, err := s.getCell(tabletAlias.Cell)
if err != nil {
return nil, err
return nil, 0, err
}
resp, err := cell.Get(tabletFilePath(tabletAlias), false /* sort */, false /* recursive */)
if err != nil {
return nil, convertError(err)
return nil, 0, convertError(err)
}
if resp.Node == nil {
return nil, ErrBadResponse
return nil, 0, ErrBadResponse
}
value := &pb.Tablet{}
if err := json.Unmarshal([]byte(resp.Node.Value), value); err != nil {
return nil, fmt.Errorf("bad tablet data (%v): %q", err, resp.Node.Value)
return nil, 0, fmt.Errorf("bad tablet data (%v): %q", err, resp.Node.Value)
}
return topo.NewTabletInfo(value, int64(resp.Node.ModifiedIndex)), nil
return value, int64(resp.Node.ModifiedIndex), nil
}
// GetTabletsByCell implements topo.Server.
@ -171,7 +139,7 @@ func (s *Server) GetTabletsByCell(ctx context.Context, cellName string) ([]*pb.T
tablets := make([]*pb.TabletAlias, 0, len(nodes))
for _, node := range nodes {
tabletAlias, err := topo.ParseTabletAliasString(node)
tabletAlias, err := topoproto.ParseTabletAlias(node)
if err != nil {
return nil, err
}


@ -14,7 +14,6 @@ import (
"syscall"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/jscfg"
vtenv "github.com/youtube/vitess/go/vt/env"
)
@ -129,6 +128,30 @@ func (hook *Hook) ExecuteOptional() error {
return nil
}
// String returns a printable version of the HookResult
func (hr *HookResult) String() string {
return jscfg.ToJSON(hr)
result := "result: "
switch hr.ExitStatus {
case HOOK_SUCCESS:
result += "HOOK_SUCCESS"
case HOOK_DOES_NOT_EXIST:
result += "HOOK_DOES_NOT_EXIST"
case HOOK_STAT_FAILED:
result += "HOOK_STAT_FAILED"
case HOOK_CANNOT_GET_EXIT_STATUS:
result += "HOOK_CANNOT_GET_EXIT_STATUS"
case HOOK_INVALID_NAME:
result += "HOOK_INVALID_NAME"
case HOOK_VTROOT_ERROR:
result += "HOOK_VTROOT_ERROR"
default:
result += fmt.Sprintf("exit(%v)", hr.ExitStatus)
}
if hr.Stdout != "" {
result += "\nstdout:\n" + hr.Stdout
}
if hr.Stderr != "" {
result += "\nstderr:\n" + hr.Stderr
}
return result
}


@ -6,6 +6,7 @@ import (
"testing"
"time"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/wrangler"
)
@ -49,7 +50,7 @@ func TestJanitorInfo(t *testing.T) {
}
func TestRunJanitor(t *testing.T) {
scheduler, _ := New("a", "a", nil, nil, 0)
scheduler, _ := New("a", "a", topo.Server{}, nil, 0)
jan := newTestJanitor()
ji := newJanitorInfo(jan)
scheduler.janitors["test"] = ji


@ -11,7 +11,6 @@ import (
"regexp"
"strings"
"github.com/youtube/vitess/go/jscfg"
"github.com/youtube/vitess/go/vt/concurrency"
)
@ -59,10 +58,6 @@ type SchemaDefinition struct {
Version string
}
func (sd *SchemaDefinition) String() string {
return jscfg.ToJSON(sd)
}
// FilterTables returns a copy which includes only
// whitelisted tables (tables), no blacklisted tables (excludeTables) and optionally views (includeViews).
func (sd *SchemaDefinition) FilterTables(tables, excludeTables []string, includeViews bool) (*SchemaDefinition, error) {
@ -160,13 +155,13 @@ func (sd *SchemaDefinition) GetTable(table string) (td *TableDefinition, ok bool
// All SQL statements will have {{.DatabaseName}} in place of the actual db name.
func (sd *SchemaDefinition) ToSQLStrings() []string {
sqlStrings := make([]string, 0, len(sd.TableDefinitions)+1)
createViewSql := make([]string, 0, len(sd.TableDefinitions))
createViewSQL := make([]string, 0, len(sd.TableDefinitions))
sqlStrings = append(sqlStrings, sd.DatabaseSchema)
for _, td := range sd.TableDefinitions {
if td.Type == TableView {
createViewSql = append(createViewSql, td.Schema)
createViewSQL = append(createViewSQL, td.Schema)
} else {
lines := strings.Split(td.Schema, "\n")
for i, line := range lines {
@ -178,7 +173,7 @@ func (sd *SchemaDefinition) ToSQLStrings() []string {
}
}
return append(sqlStrings, createViewSql...)
return append(sqlStrings, createViewSQL...)
}
// DiffSchema generates a report on what's different between two SchemaDefinitions
@ -270,7 +265,3 @@ type SchemaChangeResult struct {
BeforeSchema *SchemaDefinition
AfterSchema *SchemaDefinition
}
func (scr *SchemaChangeResult) String() string {
return jscfg.ToJSON(scr)
}


@ -203,7 +203,7 @@ func TestSchemaDiff(t *testing.T) {
testDiff(t, nil, nil, "sd1", "sd2", nil)
testDiff(t, sd1, nil, "sd1", "sd2", []string{
fmt.Sprintf("sd1 and sd2 are different, sd1: %v, sd2: null", sd1),
fmt.Sprintf("sd1 and sd2 are different, sd1: %v, sd2: <nil>", sd1),
})
testDiff(t, sd1, sd3, "sd1", "sd3", []string{


@ -21,11 +21,11 @@ import (
)
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
var _ = proto.Marshal
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ context.Context
var _ grpc.ClientConn
// Client API for Automation service


@ -21,11 +21,11 @@ import (
)
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
var _ = proto.Marshal
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ context.Context
var _ grpc.ClientConn
// Client API for UpdateStream service


@ -25,10 +25,6 @@ import (
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -75,6 +71,10 @@ func (m *RunMysqlUpgradeResponse) Reset() { *m = RunMysqlUpgradeResponse
func (m *RunMysqlUpgradeResponse) String() string { return proto.CompactTextString(m) }
func (*RunMysqlUpgradeResponse) ProtoMessage() {}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// Client API for MysqlCtl service
type MysqlCtlClient interface {


@ -264,7 +264,7 @@ func (m *Target) String() string { return proto.CompactTextString(m) }
func (*Target) ProtoMessage() {}
// VTGateCallerID is sent by VTGate to VTTablet to describe the
// caller. If possible, this enformation is secure. For instance,
// caller. If possible, this information is secure. For instance,
// if using unique certificates that guarantee that VTGate->VTTablet
// traffic cannot be spoofed, then VTTablet can trust this information,
// and VTTablet will use it for tablet ACLs, for instance.
@ -799,12 +799,25 @@ type RealtimeStats struct {
// we do not send queries to servers that are not healthy.
HealthError string `protobuf:"bytes,1,opt,name=health_error" json:"health_error,omitempty"`
// seconds_behind_master is populated for slaves only. It indicates
// how far nehind on replication a slave currently is. It is used
// how far behind on (MySQL) replication a slave currently is. It is used
// by clients for subset selection (so we don't try to send traffic
// to tablets that are too far behind).
// NOTE: This field must not be evaluated if "health_error" is not empty.
// TODO(mberlin): Let's switch it to int64 instead?
SecondsBehindMaster uint32 `protobuf:"varint,2,opt,name=seconds_behind_master" json:"seconds_behind_master,omitempty"`
// bin_log_players_count is the number of currently running binlog players.
// if the value is 0, it means that filtered replication is currently not
// running on the tablet. If >0, filtered replication is running.
// NOTE: This field must not be evaluated if "health_error" is not empty.
BinlogPlayersCount int32 `protobuf:"varint,3,opt,name=binlog_players_count" json:"binlog_players_count,omitempty"`
// seconds_behind_master_filtered_replication is populated for the receiving
// master of an ongoing filtered replication only.
// It specifies how far the receiving master lags behind the sending master.
// NOTE: This field must not be evaluated if "health_error" is not empty.
// NOTE: This field must not be evaluated if "bin_log_players_count" is 0.
SecondsBehindMasterFilteredReplication int64 `protobuf:"varint,4,opt,name=seconds_behind_master_filtered_replication" json:"seconds_behind_master_filtered_replication,omitempty"`
// cpu_usage is used for load-based balancing
CpuUsage float64 `protobuf:"fixed64,3,opt,name=cpu_usage" json:"cpu_usage,omitempty"`
CpuUsage float64 `protobuf:"fixed64,5,opt,name=cpu_usage" json:"cpu_usage,omitempty"`
}
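
The NOTEs above imply an evaluation order on the client side: check health_error first, then use binlog_players_count to decide which lag figure applies. A hypothetical helper, not part of the generated code:

// replicationLag picks the relevant lag value from a RealtimeStats message.
func replicationLag(stats *RealtimeStats) (int64, error) {
	if stats.HealthError != "" {
		return 0, fmt.Errorf("tablet unhealthy: %v", stats.HealthError)
	}
	if stats.BinlogPlayersCount > 0 {
		// Filtered replication is running; report its lag.
		return stats.SecondsBehindMasterFilteredReplication, nil
	}
	return int64(stats.SecondsBehindMaster), nil
}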
func (m *RealtimeStats) Reset() { *m = RealtimeStats{} }


@ -21,11 +21,11 @@ import (
)
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
var _ = proto.Marshal
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ context.Context
var _ grpc.ClientConn
// Client API for Query service


@ -21,11 +21,11 @@ import (
)
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
var _ = proto.Marshal
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ context.Context
var _ grpc.ClientConn
// Client API for TabletManager service


@ -21,11 +21,11 @@ import (
)
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
var _ = proto.Marshal
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ context.Context
var _ grpc.ClientConn
// Client API for Vtctl service


@ -21,11 +21,11 @@ import (
)
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
var _ = proto.Marshal
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ context.Context
var _ grpc.ClientConn
// Client API for Vitess service


@ -21,11 +21,11 @@ import (
)
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
var _ = proto.Marshal
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ context.Context
var _ grpc.ClientConn
// Client API for Vtworker service


@ -269,10 +269,13 @@ func (client *fakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, ta
type fakeTopo struct {
faketopo.FakeTopo
WithEmptyMasterAlias bool
}
func newFakeTopo() *fakeTopo {
return &fakeTopo{}
func newFakeTopo() topo.Server {
return topo.Server{
Impl: &fakeTopo{},
}
}
func (topoServer *fakeTopo) GetShardNames(ctx context.Context, keyspace string) ([]string, error) {
@ -283,23 +286,25 @@ func (topoServer *fakeTopo) GetShardNames(ctx context.Context, keyspace string)
return []string{"0", "1", "2"}, nil
}
func (topoServer *fakeTopo) GetShard(ctx context.Context, keyspace string, shard string) (*topo.ShardInfo, error) {
value := &pb.Shard{
MasterAlias: &pb.TabletAlias{
func (topoServer *fakeTopo) GetShard(ctx context.Context, keyspace string, shard string) (*pb.Shard, int64, error) {
var masterAlias *pb.TabletAlias
if !topoServer.WithEmptyMasterAlias {
masterAlias = &pb.TabletAlias{
Cell: "test_cell",
Uid: 0,
},
}
}
return topo.NewShardInfo(keyspace, shard, value, 0), nil
value := &pb.Shard{
MasterAlias: masterAlias,
}
return value, 0, nil
}
func (topoServer *fakeTopo) GetTablet(ctx context.Context, tabletAlias *pb.TabletAlias) (*topo.TabletInfo, error) {
return &topo.TabletInfo{
Tablet: &pb.Tablet{
Alias: tabletAlias,
Keyspace: "test_keyspace",
},
}, nil
func (topoServer *fakeTopo) GetTablet(ctx context.Context, tabletAlias *pb.TabletAlias) (*pb.Tablet, int64, error) {
return &pb.Tablet{
Alias: tabletAlias,
Keyspace: "test_keyspace",
}, 0, nil
}
type fakeController struct {


@ -54,6 +54,10 @@ func (exec *TabletExecutor) Open(ctx context.Context, keyspace string) error {
if err != nil {
return fmt.Errorf("unable to get shard info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err)
}
if !shardInfo.HasMaster() {
log.Errorf("shard: %s does not have a master", shardName)
return fmt.Errorf("shard: %s does not have a master", shardName)
}
tabletInfo, err := exec.topoServer.GetTablet(ctx, shardInfo.MasterAlias)
if err != nil {
return fmt.Errorf("unable to get master tablet info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err)


@ -26,6 +26,20 @@ func TestTabletExecutorOpen(t *testing.T) {
}
}
func TestTabletExecutorOpenWithEmptyMasterAlias(t *testing.T) {
ft := newFakeTopo()
ft.Impl.(*fakeTopo).WithEmptyMasterAlias = true
executor := NewTabletExecutor(
newFakeTabletManagerClient(),
ft)
ctx := context.Background()
if err := executor.Open(ctx, "test_keyspace"); err == nil {
t.Fatalf("executor.Open() = nil, want error")
}
executor.Close()
}
func TestTabletExecutorValidate(t *testing.T) {
fakeTmc := newFakeTabletManagerClient()


@ -48,6 +48,13 @@ func String(node SQLNode) string {
return buf.String()
}
// GenerateParsedQuery returns a ParsedQuery of the ast.
func GenerateParsedQuery(node SQLNode) *ParsedQuery {
buf := NewTrackedBuffer(nil)
buf.Myprintf("%v", node)
return buf.ParsedQuery()
}
// Statement represents a statement.
type Statement interface {
IStatement()
@ -129,6 +136,7 @@ func (node *Union) Format(buf *TrackedBuffer) {
// Insert represents an INSERT statement.
type Insert struct {
Comments Comments
Ignore string
Table *TableName
Columns Columns
Rows InsertRows
@ -136,8 +144,8 @@ type Insert struct {
}
func (node *Insert) Format(buf *TrackedBuffer) {
buf.Myprintf("insert %vinto %v%v %v%v",
node.Comments,
buf.Myprintf("insert %v%sinto %v%v %v%v",
node.Comments, node.Ignore,
node.Table, node.Columns, node.Rows, node.OnDup)
}
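
A quick way to see the new Ignore field in action (and why the AST_IGNORE constant below carries a trailing space) is to round-trip a statement through the parser; the helper below is a sketch only.

// exampleInsertIgnore parses an INSERT IGNORE statement and formats it back.
func exampleInsertIgnore() (string, error) {
	stmt, err := Parse("insert ignore into a(b, c) values (1, 2)")
	if err != nil {
		return "", err
	}
	// String yields "insert ignore into a(b, c) values (1, 2)".
	return String(stmt), nil
}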
@ -397,13 +405,13 @@ type IndexHints struct {
}
const (
AST_USE = "use"
AST_IGNORE = "ignore"
AST_FORCE = "force"
AST_USE = "use "
AST_IGNORE = "ignore "
AST_FORCE = "force "
)
func (node *IndexHints) Format(buf *TrackedBuffer) {
buf.Myprintf(" %s index ", node.Type)
buf.Myprintf(" %sindex ", node.Type)
prefix := "("
for _, n := range node.Indexes {
buf.Myprintf("%s%v", prefix, n)

The diff for one file is not shown here because of its large size.

@ -73,7 +73,8 @@ func forceEOF(yylex interface{}) {
%token LEX_ERROR
%token <empty> SELECT INSERT UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT FOR
%token <empty> ALL DISTINCT AS EXISTS IN IS LIKE BETWEEN NULL ASC DESC VALUES INTO DUPLICATE KEY DEFAULT SET LOCK KEYRANGE
%token <empty> ALL DISTINCT AS EXISTS IN IS LIKE BETWEEN NULL ASC DESC INTO DUPLICATE KEY DEFAULT SET LOCK KEYRANGE
%token <empty> VALUES LAST_INSERT_ID
%token <bytes> ID STRING NUMBER VALUE_ARG LIST_ARG COMMENT
%token <empty> LE GE NE NULL_SAFE_EQUAL
%token <empty> '(' '=' '<' '>'
@ -148,7 +149,8 @@ func forceEOF(yylex interface{}) {
%type <updateExprs> on_dup_opt
%type <updateExprs> update_list
%type <updateExpr> update_expression
%type <empty> exists_opt not_exists_opt ignore_opt non_rename_operation to_opt constraint_opt using_opt
%type <str> ignore_opt
%type <empty> exists_opt not_exists_opt non_rename_operation to_opt constraint_opt using_opt
%type <sqlID> sql_id as_lower_opt
%type <sqlID> table_id as_opt
%type <empty> force_eof
@ -188,19 +190,19 @@ select_statement:
}
insert_statement:
INSERT comment_opt INTO dml_table_expression column_list_opt row_list on_dup_opt
INSERT comment_opt ignore_opt INTO dml_table_expression column_list_opt row_list on_dup_opt
{
$$ = &Insert{Comments: Comments($2), Table: $4, Columns: $5, Rows: $6, OnDup: OnDup($7)}
$$ = &Insert{Comments: Comments($2), Ignore: $3, Table: $5, Columns: $6, Rows: $7, OnDup: OnDup($8)}
}
| INSERT comment_opt INTO dml_table_expression SET update_list on_dup_opt
| INSERT comment_opt ignore_opt INTO dml_table_expression SET update_list on_dup_opt
{
cols := make(Columns, 0, len($6))
vals := make(ValTuple, 0, len($6))
for _, col := range $6 {
cols := make(Columns, 0, len($7))
vals := make(ValTuple, 0, len($7))
for _, col := range $7 {
cols = append(cols, &NonStarExpr{Expr: col.Name})
vals = append(vals, col.Expr)
}
$$ = &Insert{Comments: Comments($2), Table: $4, Columns: cols, Rows: Values{vals}, OnDup: OnDup($7)}
$$ = &Insert{Comments: Comments($2), Ignore: $3, Table: $5, Columns: cols, Rows: Values{vals}, OnDup: OnDup($8)}
}
update_statement:
@ -757,10 +759,6 @@ keyword_as_func:
{
$$ = "if"
}
| VALUES
{
$$ = "values"
}
case_expression:
CASE value_expression_opt when_expression_list else_expression_opt END
@ -1005,9 +1003,9 @@ not_exists_opt:
{ $$ = struct{}{} }
ignore_opt:
{ $$ = struct{}{} }
{ $$ = "" }
| IGNORE
{ $$ = struct{}{} }
{ $$ = AST_IGNORE }
non_rename_operation:
ALTER


@ -36,76 +36,77 @@ func NewStringTokenizer(sql string) *Tokenizer {
}
var keywords = map[string]int{
"all": ALL,
"alter": ALTER,
"analyze": ANALYZE,
"and": AND,
"as": AS,
"asc": ASC,
"between": BETWEEN,
"by": BY,
"case": CASE,
"create": CREATE,
"cross": CROSS,
"default": DEFAULT,
"delete": DELETE,
"desc": DESC,
"describe": DESCRIBE,
"distinct": DISTINCT,
"drop": DROP,
"duplicate": DUPLICATE,
"else": ELSE,
"end": END,
"except": EXCEPT,
"exists": EXISTS,
"explain": EXPLAIN,
"for": FOR,
"force": FORCE,
"from": FROM,
"group": GROUP,
"having": HAVING,
"if": IF,
"ignore": IGNORE,
"in": IN,
"index": INDEX,
"inner": INNER,
"insert": INSERT,
"intersect": INTERSECT,
"into": INTO,
"is": IS,
"join": JOIN,
"key": KEY,
"keyrange": KEYRANGE,
"left": LEFT,
"like": LIKE,
"limit": LIMIT,
"lock": LOCK,
"minus": MINUS,
"natural": NATURAL,
"not": NOT,
"null": NULL,
"on": ON,
"or": OR,
"order": ORDER,
"outer": OUTER,
"rename": RENAME,
"right": RIGHT,
"select": SELECT,
"set": SET,
"show": SHOW,
"straight_join": STRAIGHT_JOIN,
"table": TABLE,
"then": THEN,
"to": TO,
"union": UNION,
"unique": UNIQUE,
"update": UPDATE,
"use": USE,
"using": USING,
"values": VALUES,
"view": VIEW,
"when": WHEN,
"where": WHERE,
"all": ALL,
"alter": ALTER,
"analyze": ANALYZE,
"and": AND,
"as": AS,
"asc": ASC,
"between": BETWEEN,
"by": BY,
"case": CASE,
"create": CREATE,
"cross": CROSS,
"default": DEFAULT,
"delete": DELETE,
"desc": DESC,
"describe": DESCRIBE,
"distinct": DISTINCT,
"drop": DROP,
"duplicate": DUPLICATE,
"else": ELSE,
"end": END,
"except": EXCEPT,
"exists": EXISTS,
"explain": EXPLAIN,
"for": FOR,
"force": FORCE,
"from": FROM,
"group": GROUP,
"having": HAVING,
"if": IF,
"ignore": IGNORE,
"in": IN,
"index": INDEX,
"inner": INNER,
"insert": INSERT,
"intersect": INTERSECT,
"into": INTO,
"is": IS,
"join": JOIN,
"key": KEY,
"keyrange": KEYRANGE,
"last_insert_id": LAST_INSERT_ID,
"left": LEFT,
"like": LIKE,
"limit": LIMIT,
"lock": LOCK,
"minus": MINUS,
"natural": NATURAL,
"not": NOT,
"null": NULL,
"on": ON,
"or": OR,
"order": ORDER,
"outer": OUTER,
"rename": RENAME,
"right": RIGHT,
"select": SELECT,
"set": SET,
"show": SHOW,
"straight_join": STRAIGHT_JOIN,
"table": TABLE,
"then": THEN,
"to": TO,
"union": UNION,
"unique": UNIQUE,
"update": UPDATE,
"use": USE,
"using": USING,
"values": VALUES,
"view": VIEW,
"when": WHEN,
"where": WHERE,
}
// Lex returns the next token from the Tokenizer.
@ -130,7 +131,7 @@ func (tkn *Tokenizer) Lex(lval *yySymType) int {
func (tkn *Tokenizer) Error(err string) {
buf := bytes.NewBuffer(make([]byte, 0, 32))
if tkn.lastToken != nil {
fmt.Fprintf(buf, "%s at position %v near %s", err, tkn.Position, tkn.lastToken)
fmt.Fprintf(buf, "%s at position %v near '%s'", err, tkn.Position, tkn.lastToken)
} else {
fmt.Fprintf(buf, "%s at position %v", err, tkn.Position)
}


@ -10,12 +10,11 @@
package actionnode
import (
"encoding/json"
"fmt"
"os"
"os/user"
"time"
"github.com/youtube/vitess/go/jscfg"
)
const (
@ -264,19 +263,31 @@ type ActionNode struct {
}
// ToJSON returns a JSON representation of the object.
func (n *ActionNode) ToJSON() string {
result := jscfg.ToJSON(n) + "\n"
func (n *ActionNode) ToJSON() (string, error) {
data, err := json.MarshalIndent(n, "", " ")
if err != nil {
return "", fmt.Errorf("cannot JSON-marshal node: %v", err)
}
result := string(data) + "\n"
if n.Args == nil {
result += "{}\n"
} else {
result += jscfg.ToJSON(n.Args) + "\n"
data, err := json.MarshalIndent(n.Args, "", " ")
if err != nil {
return "", fmt.Errorf("cannot JSON-marshal node args: %v", err)
}
result += string(data) + "\n"
}
if n.Reply == nil {
result += "{}\n"
} else {
result += jscfg.ToJSON(n.Reply) + "\n"
data, err := json.MarshalIndent(n.Reply, "", " ")
if err != nil {
return "", fmt.Errorf("cannot JSON-marshal node reply: %v", err)
}
result += string(data) + "\n"
}
return result
return result, nil
}
// SetGuid will set the ActionGuid field for the action node


@ -2,11 +2,9 @@ package actionnode
import (
"encoding/json"
"strings"
"testing"
"github.com/youtube/vitess/go/bson"
"github.com/youtube/vitess/go/jscfg"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
@ -32,12 +30,13 @@ func TestMissingFieldsJson(t *testing.T) {
ExpectedMasterIPAddr: "i1",
ScrapStragglers: true,
}
data := jscfg.ToJSON(swra)
data, err := json.MarshalIndent(swra, "", " ")
if err != nil {
t.Fatalf("cannot marshal: %v", err)
}
output := &SlaveWasRestartedArgs{}
decoder := json.NewDecoder(strings.NewReader(data))
err := decoder.Decode(output)
if err != nil {
if err = json.Unmarshal(data, output); err != nil {
t.Errorf("Cannot re-decode struct without field: %v", err)
}
}
@ -49,12 +48,13 @@ func TestExtraFieldsJson(t *testing.T) {
Cell: "aa",
},
}
data := jscfg.ToJSON(swra)
data, err := json.MarshalIndent(swra, "", " ")
if err != nil {
t.Fatalf("cannot marshal: %v", err)
}
output := &slaveWasRestartedTestArgs{}
decoder := json.NewDecoder(strings.NewReader(data))
err := decoder.Decode(output)
if err != nil {
if err = json.Unmarshal(data, output); err != nil {
t.Errorf("Cannot re-decode struct without field: %v", err)
}
}


@ -33,7 +33,11 @@ func (n *ActionNode) LockKeyspace(ctx context.Context, ts topo.Server, keyspace
span.Annotate("keyspace", keyspace)
defer span.Finish()
return ts.LockKeyspaceForAction(ctx, keyspace, n.ToJSON())
j, err := n.ToJSON()
if err != nil {
return "", err
}
return ts.LockKeyspaceForAction(ctx, keyspace, j)
}
// UnlockKeyspace unlocks a previously locked keyspace.
@ -60,7 +64,16 @@ func (n *ActionNode) UnlockKeyspace(ctx context.Context, ts topo.Server, keyspac
n.Error = ""
n.State = ActionStateDone
}
err := ts.UnlockKeyspaceForAction(ctx, keyspace, lockPath, n.ToJSON())
j, err := n.ToJSON()
if err != nil {
if actionError != nil {
// this will be masked
log.Warningf("node.ToJSON failed: %v", err)
return actionError
}
return err
}
err = ts.UnlockKeyspaceForAction(ctx, keyspace, lockPath, j)
if actionError != nil {
if err != nil {
// this will be masked
@ -83,7 +96,11 @@ func (n *ActionNode) LockShard(ctx context.Context, ts topo.Server, keyspace, sh
span.Annotate("shard", shard)
defer span.Finish()
return ts.LockShardForAction(ctx, keyspace, shard, n.ToJSON())
j, err := n.ToJSON()
if err != nil {
return "", err
}
return ts.LockShardForAction(ctx, keyspace, shard, j)
}
// UnlockShard unlocks a previously locked shard.
@ -111,7 +128,16 @@ func (n *ActionNode) UnlockShard(ctx context.Context, ts topo.Server, keyspace,
n.Error = ""
n.State = ActionStateDone
}
err := ts.UnlockShardForAction(ctx, keyspace, shard, lockPath, n.ToJSON())
j, err := n.ToJSON()
if err != nil {
if actionError != nil {
// this will be masked
log.Warningf("node.ToJSON failed: %v", err)
return actionError
}
return err
}
err = ts.UnlockShardForAction(ctx, keyspace, shard, lockPath, j)
if actionError != nil {
if err != nil {
// this will be masked
@ -135,7 +161,11 @@ func (n *ActionNode) LockSrvShard(ctx context.Context, ts topo.Server, cell, key
span.Annotate("cell", cell)
defer span.Finish()
return ts.LockSrvShardForAction(ctx, cell, keyspace, shard, n.ToJSON())
j, err := n.ToJSON()
if err != nil {
return "", err
}
return ts.LockSrvShardForAction(ctx, cell, keyspace, shard, j)
}
// UnlockSrvShard unlocks a previously locked serving shard.
@ -164,7 +194,16 @@ func (n *ActionNode) UnlockSrvShard(ctx context.Context, ts topo.Server, cell, k
n.Error = ""
n.State = ActionStateDone
}
err := ts.UnlockSrvShardForAction(ctx, cell, keyspace, shard, lockPath, n.ToJSON())
j, err := n.ToJSON()
if err != nil {
if actionError != nil {
// this will be masked
log.Warningf("node.ToJSON failed: %v", err)
return actionError
}
return err
}
err = ts.UnlockSrvShardForAction(ctx, cell, keyspace, shard, lockPath, j)
if actionError != nil {
if err != nil {
// this will be masked


@ -23,6 +23,7 @@ import (
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/tabletserver/planbuilder"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
pb "github.com/youtube/vitess/go/vt/proto/query"
pbt "github.com/youtube/vitess/go/vt/proto/topodata"
@ -56,7 +57,7 @@ func (agent *ActionAgent) allowQueries(tablet *pbt.Tablet, blacklistedTables []s
if agent.DBConfigs != nil {
// Update our DB config to match the info we have in the tablet
if agent.DBConfigs.App.DbName == "" {
agent.DBConfigs.App.DbName = topo.TabletDbName(tablet)
agent.DBConfigs.App.DbName = topoproto.TabletDbName(tablet)
}
agent.DBConfigs.App.Keyspace = tablet.Keyspace
agent.DBConfigs.App.Shard = tablet.Shard
@ -116,7 +117,7 @@ func (agent *ActionAgent) loadKeyspaceAndBlacklistRules(tablet *pbt.Tablet, blac
blacklistRules := tabletserver.NewQueryRules()
if len(blacklistedTables) > 0 {
// tables, first resolve wildcards
tables, err := mysqlctl.ResolveTables(agent.MysqlDaemon, topo.TabletDbName(tablet), blacklistedTables)
tables, err := mysqlctl.ResolveTables(agent.MysqlDaemon, topoproto.TabletDbName(tablet), blacklistedTables)
if err != nil {
return err
}
@ -160,7 +161,7 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl
var blacklistedTables []string
var err error
if allowQuery {
shardInfo, err = topo.GetShard(ctx, agent.TopoServer, newTablet.Keyspace, newTablet.Shard)
shardInfo, err = agent.TopoServer.GetShard(ctx, newTablet.Keyspace, newTablet.Shard)
if err != nil {
log.Errorf("Cannot read shard for this tablet %v, might have inaccurate SourceShards and TabletControls: %v", newTablet.Alias, err)
} else {


@ -25,6 +25,7 @@ import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net"
"sync"
"time"
@ -34,7 +35,6 @@ import (
log "github.com/golang/glog"
"github.com/youtube/vitess/go/event"
"github.com/youtube/vitess/go/history"
"github.com/youtube/vitess/go/jscfg"
"github.com/youtube/vitess/go/netutil"
"github.com/youtube/vitess/go/stats"
"github.com/youtube/vitess/go/trace"
@ -44,6 +44,7 @@ import (
"github.com/youtube/vitess/go/vt/tabletmanager/events"
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/topotools"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
@ -112,12 +113,17 @@ func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride {
if overridesFile == "" {
return schemaOverrides
}
if err := jscfg.ReadJSON(overridesFile, &schemaOverrides); err != nil {
data, err := ioutil.ReadFile(overridesFile)
if err != nil {
log.Warningf("can't read overridesFile %v: %v", overridesFile, err)
} else {
data, _ := json.MarshalIndent(schemaOverrides, "", " ")
log.Infof("schemaOverrides: %s\n", data)
return schemaOverrides
}
if err = json.Unmarshal(data, &schemaOverrides); err != nil {
log.Warningf("can't parse overridesFile %v: %v", overridesFile, err)
return schemaOverrides
}
data, _ = json.MarshalIndent(schemaOverrides, "", " ")
log.Infof("schemaOverrides: %s\n", data)
return schemaOverrides
}
@ -253,7 +259,7 @@ func (agent *ActionAgent) updateState(ctx context.Context, oldTablet *pb.Tablet,
}
func (agent *ActionAgent) readTablet(ctx context.Context) (*topo.TabletInfo, error) {
tablet, err := topo.GetTablet(ctx, agent.TopoServer, agent.TabletAlias)
tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias)
if err != nil {
return nil, err
}
@ -467,7 +473,7 @@ func (agent *ActionAgent) Stop() {
// hookExtraEnv returns the map to pass to local hooks
func (agent *ActionAgent) hookExtraEnv() map[string]string {
return map[string]string{"TABLET_ALIAS": topo.TabletAliasString(agent.TabletAlias)}
return map[string]string{"TABLET_ALIAS": topoproto.TabletAliasString(agent.TabletAlias)}
}
// checkTabletMysqlPort will check the mysql port for the tablet is good,
@ -485,7 +491,7 @@ func (agent *ActionAgent) checkTabletMysqlPort(ctx context.Context, tablet *topo
log.Warningf("MySQL port has changed from %v to %v, updating it in tablet record", tablet.PortMap["mysql"], mport)
tablet.PortMap["mysql"] = mport
if err := topo.UpdateTablet(ctx, agent.TopoServer, tablet); err != nil {
if err := agent.TopoServer.UpdateTablet(ctx, tablet); err != nil {
log.Warningf("Failed to update tablet record, may use old mysql port")
return nil
}


@ -16,6 +16,7 @@ import (
myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto"
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/topotools"
"golang.org/x/net/context"
@ -411,7 +412,7 @@ func (agent *ActionAgent) InitMaster(ctx context.Context) (myproto.ReplicationPo
}
// Change our type to master if not already
if err := topo.UpdateTabletFields(ctx, agent.TopoServer, agent.TabletAlias, func(tablet *pb.Tablet) error {
if err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *pb.Tablet) error {
tablet.Type = pb.TabletType_MASTER
tablet.HealthMap = nil
return nil
@ -426,7 +427,7 @@ func (agent *ActionAgent) InitMaster(ctx context.Context) (myproto.ReplicationPo
// PopulateReparentJournal adds an entry into the reparent_journal table.
func (agent *ActionAgent) PopulateReparentJournal(ctx context.Context, timeCreatedNS int64, actionName string, masterAlias *pb.TabletAlias, pos myproto.ReplicationPosition) error {
cmds := mysqlctl.CreateReparentJournal()
cmds = append(cmds, mysqlctl.PopulateReparentJournal(timeCreatedNS, actionName, topo.TabletAliasString(masterAlias), pos))
cmds = append(cmds, mysqlctl.PopulateReparentJournal(timeCreatedNS, actionName, topoproto.TabletAliasString(masterAlias), pos))
return agent.MysqlDaemon.ExecuteSuperQueryList(cmds)
}
@ -570,7 +571,7 @@ func (agent *ActionAgent) SetMaster(ctx context.Context, parent *pb.TabletAlias,
if tablet.Type == pb.TabletType_MASTER {
tablet.Type = pb.TabletType_SPARE
tablet.HealthMap = nil
if err := topo.UpdateTablet(ctx, agent.TopoServer, tablet); err != nil {
if err := agent.TopoServer.UpdateTablet(ctx, tablet); err != nil {
return err
}
}
@ -595,7 +596,7 @@ func (agent *ActionAgent) SlaveWasRestarted(ctx context.Context, swrd *actionnod
if tablet.Type == pb.TabletType_MASTER {
tablet.Type = pb.TabletType_SPARE
}
err = topo.UpdateTablet(ctx, agent.TopoServer, tablet)
err = agent.TopoServer.UpdateTablet(ctx, tablet)
if err != nil {
return err
}
@ -659,7 +660,7 @@ func (agent *ActionAgent) updateReplicationGraphForPromotedSlave(ctx context.Con
// Update tablet regardless - trend towards consistency.
tablet.Type = pb.TabletType_MASTER
tablet.HealthMap = nil
err := topo.UpdateTablet(ctx, agent.TopoServer, tablet)
err := agent.TopoServer.UpdateTablet(ctx, tablet)
if err != nil {
return err
}
@ -708,7 +709,7 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo
// now we can run the backup
bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard)
name := fmt.Sprintf("%v.%v", topo.TabletAliasString(tablet.Alias), time.Now().UTC().Format("2006-01-02.150405"))
name := fmt.Sprintf("%v.%v", topoproto.TabletAliasString(tablet.Alias), time.Now().UTC().Format("2006-01-02.150405"))
returnErr := mysqlctl.Backup(ctx, agent.MysqlDaemon, l, bucket, name, concurrency, agent.hookExtraEnv())
// and change our type back to the appropriate value:


@ -27,6 +27,7 @@ import (
"github.com/youtube/vitess/go/vt/mysqlctl"
myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"golang.org/x/net/context"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
@ -93,7 +94,7 @@ func newBinlogPlayerController(ts topo.Server, dbConfig *sqldb.ConnParams, mysql
}
func (bpc *BinlogPlayerController) String() string {
return "BinlogPlayerController(" + topo.SourceShardString(bpc.sourceShard) + ")"
return "BinlogPlayerController(" + topoproto.SourceShardString(bpc.sourceShard) + ")"
}
// Start will start the player in the background and run forever.
@ -332,18 +333,15 @@ func NewBinlogPlayerMap(ts topo.Server, dbConfig *sqldb.ConnParams, mysqld mysql
// RegisterBinlogPlayerMap registers the varz for the players.
func RegisterBinlogPlayerMap(blm *BinlogPlayerMap) {
stats.Publish("BinlogPlayerMapSize", stats.IntFunc(blm.size))
stats.Publish("BinlogPlayerSecondsBehindMaster", stats.IntFunc(func() int64 {
sbm := int64(0)
stats.Publish("BinlogPlayerMapSize", stats.IntFunc(stats.IntFunc(func() int64 {
blm.mu.Lock()
for _, bpc := range blm.players {
psbm := bpc.binlogPlayerStats.SecondsBehindMaster.Get()
if psbm > sbm {
sbm = psbm
}
}
blm.mu.Unlock()
return sbm
defer blm.mu.Unlock()
return int64(len(blm.players))
})))
stats.Publish("BinlogPlayerSecondsBehindMaster", stats.IntFunc(func() int64 {
blm.mu.Lock()
defer blm.mu.Unlock()
return blm.maxSecondsBehindMasterUNGUARDED()
}))
stats.Publish("BinlogPlayerSecondsBehindMasterMap", stats.CountersFunc(func() map[string]int64 {
blm.mu.Lock()
@ -378,11 +376,24 @@ func RegisterBinlogPlayerMap(blm *BinlogPlayerMap) {
}))
}
func (blm *BinlogPlayerMap) size() int64 {
func (blm *BinlogPlayerMap) isRunningFilteredReplication() bool {
blm.mu.Lock()
result := len(blm.players)
blm.mu.Unlock()
return int64(result)
defer blm.mu.Unlock()
return len(blm.players) != 0
}
// maxSecondsBehindMasterUNGUARDED returns the maximum of the secondsBehindMaster
// value of all binlog players i.e. the highest seen filtered replication lag.
// NOTE: Caller must own a lock on blm.mu.
func (blm *BinlogPlayerMap) maxSecondsBehindMasterUNGUARDED() int64 {
sbm := int64(0)
for _, bpc := range blm.players {
psbm := bpc.binlogPlayerStats.SecondsBehindMaster.Get()
if psbm > sbm {
sbm = psbm
}
}
return sbm
}
// addPlayer adds a new player to the map. It assumes we have the lock.
@ -435,7 +446,7 @@ func (blm *BinlogPlayerMap) RefreshMap(ctx context.Context, tablet *pb.Tablet, k
blm.mu.Lock()
if blm.dbConfig.DbName == "" {
blm.dbConfig.DbName = topo.TabletDbName(tablet)
blm.dbConfig.DbName = topoproto.TabletDbName(tablet)
}
// get the existing sources and build a map of sources to remove
@ -448,7 +459,7 @@ func (blm *BinlogPlayerMap) RefreshMap(ctx context.Context, tablet *pb.Tablet, k
// for each source, add it if not there, and delete from toRemove
for _, sourceShard := range shardInfo.SourceShards {
blm.addPlayer(ctx, tablet.Alias.Cell, keyspaceInfo.ShardingColumnType, tablet.KeyRange, sourceShard, topo.TabletDbName(tablet))
blm.addPlayer(ctx, tablet.Alias.Cell, keyspaceInfo.ShardingColumnType, tablet.KeyRange, sourceShard, topoproto.TabletDbName(tablet))
delete(toRemove, sourceShard.Uid)
}
hasPlayers := len(shardInfo.SourceShards) > 0
@ -588,13 +599,13 @@ type BinlogPlayerControllerStatus struct {
// SourceShardAsHTML returns the SourceShard as HTML
func (bpcs *BinlogPlayerControllerStatus) SourceShardAsHTML() template.HTML {
return topo.SourceShardAsHTML(bpcs.SourceShard)
return topoproto.SourceShardAsHTML(bpcs.SourceShard)
}
// SourceTabletAlias returns the string version of the SourceTablet alias, if set
func (bpcs *BinlogPlayerControllerStatus) SourceTabletAlias() string {
if bpcs.SourceTablet != nil {
return topo.TabletAliasString(bpcs.SourceTablet)
return topoproto.TabletAliasString(bpcs.SourceTablet)
}
return ""
}
@ -624,6 +635,7 @@ type BinlogPlayerMapStatus struct {
}
// Status returns the BinlogPlayerMapStatus for the BinlogPlayerMap.
// It is used to display the complete status in the web interface.
func (blm *BinlogPlayerMap) Status() *BinlogPlayerMapStatus {
// Create the result, take care of the stopped state.
result := &BinlogPlayerMapStatus{}
@ -667,3 +679,14 @@ func (blm *BinlogPlayerMap) Status() *BinlogPlayerMapStatus {
return result
}
// StatusSummary returns aggregated health information e.g.
// the maximum replication delay across all binlog players.
// It is used by the QueryService.StreamHealth RPC.
func (blm *BinlogPlayerMap) StatusSummary() (maxSecondsBehindMaster int64, binlogPlayersCount int32) {
blm.mu.Lock()
defer blm.mu.Unlock()
maxSecondsBehindMaster = blm.maxSecondsBehindMasterUNGUARDED()
binlogPlayersCount = int32(len(blm.players))
return
}


@ -19,6 +19,7 @@ import (
"github.com/youtube/vitess/go/timer"
"github.com/youtube/vitess/go/vt/servenv"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/topotools"
pb "github.com/youtube/vitess/go/vt/proto/query"
@ -103,7 +104,7 @@ func (agent *ActionAgent) initHealthCheck() {
return
}
tt, err := topo.ParseTabletType(*targetTabletType)
tt, err := topoproto.ParseTabletType(*targetTabletType)
if err != nil {
log.Fatalf("Invalid target tablet type %v: %v", *targetTabletType, err)
}
@ -147,7 +148,7 @@ func (agent *ActionAgent) runHealthCheck(targetTabletType pbt.TabletType) {
// figure out if we should be running the query service
shouldQueryServiceBeRunning := false
var blacklistedTables []string
if topo.IsRunningQueryService(targetTabletType) && agent.BinlogPlayerMap.size() == 0 {
if topo.IsRunningQueryService(targetTabletType) && !agent.BinlogPlayerMap.isRunningFilteredReplication() {
shouldQueryServiceBeRunning = true
if tabletControl != nil {
blacklistedTables = tabletControl.BlacklistedTables
@ -259,6 +260,7 @@ func (agent *ActionAgent) runHealthCheck(targetTabletType pbt.TabletType) {
stats := &pb.RealtimeStats{
SecondsBehindMaster: uint32(replicationDelay.Seconds()),
}
stats.SecondsBehindMasterFilteredReplication, stats.BinlogPlayersCount = agent.BinlogPlayerMap.StatusSummary()
if err != nil {
stats.HealthError = err.Error()
}


@ -11,7 +11,6 @@ import (
"github.com/youtube/vitess/go/vt/mysqlctl"
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/zktopo"
"golang.org/x/net/context"
@ -120,7 +119,7 @@ func createTestAgent(ctx context.Context, t *testing.T) *ActionAgent {
t.Fatalf("CreateKeyspace failed: %v", err)
}
if err := topo.CreateShard(ctx, ts, keyspace, shard); err != nil {
if err := ts.CreateShard(ctx, keyspace, shard); err != nil {
t.Fatalf("CreateShard failed: %v", err)
}
@ -136,7 +135,7 @@ func createTestAgent(ctx context.Context, t *testing.T) *ActionAgent {
Shard: shard,
Type: pb.TabletType_SPARE,
}
if err := topo.CreateTablet(ctx, ts, tablet); err != nil {
if err := ts.CreateTablet(ctx, tablet); err != nil {
t.Fatalf("CreateTablet failed: %v", err)
}
@ -301,7 +300,7 @@ func TestTabletControl(t *testing.T) {
DisableQueryService: true,
},
}
if err := topo.UpdateShard(ctx, agent.TopoServer, si); err != nil {
if err := agent.TopoServer.UpdateShard(ctx, si); err != nil {
t.Fatalf("UpdateShard failed: %v", err)
}


@ -18,6 +18,7 @@ import (
"github.com/youtube/vitess/go/netutil"
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/topotools"
"golang.org/x/net/context"
@ -55,7 +56,7 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error {
// use the type specified on the command line
var err error
tabletType, err = topo.ParseTabletType(*initTabletType)
tabletType, err = topoproto.ParseTabletType(*initTabletType)
if err != nil {
log.Fatalf("Invalid init tablet type %v: %v", *initTabletType, err)
}
@ -102,7 +103,7 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error {
if err != nil {
return fmt.Errorf("InitTablet cannot GetOrCreateShard shard: %v", err)
}
if si.MasterAlias != nil && topo.TabletAliasEqual(si.MasterAlias, agent.TabletAlias) {
if si.MasterAlias != nil && topoproto.TabletAliasEqual(si.MasterAlias, agent.TabletAlias) {
// we are the current master for this shard (probably
// means the master tablet process was just restarted),
// so InitTablet as master.
@ -129,7 +130,7 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error {
si.Cells = append(si.Cells, agent.TabletAlias.Cell)
// write it back
if err := topo.UpdateShard(ctx, agent.TopoServer, si); err != nil {
if err := agent.TopoServer.UpdateShard(ctx, si); err != nil {
return actionNode.UnlockShard(ctx, agent.TopoServer, *initKeyspace, shard, lockPath, err)
}
}
@ -174,7 +175,7 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error {
}
// now try to create the record
err := topo.CreateTablet(ctx, agent.TopoServer, tablet)
err := agent.TopoServer.CreateTablet(ctx, tablet)
switch err {
case nil:
// it worked, we're good, can update the replication graph
@ -189,7 +190,7 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error {
// it. So we read it first.
oldTablet, err := agent.TopoServer.GetTablet(ctx, tablet.Alias)
if err != nil {
fmt.Errorf("InitTablet failed to read existing tablet record: %v", err)
return fmt.Errorf("InitTablet failed to read existing tablet record: %v", err)
}
// Sanity check the keyspace and shard
@ -199,7 +200,7 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error {
// And overwrite the rest
*(oldTablet.Tablet) = *tablet
if err := topo.UpdateTablet(ctx, agent.TopoServer, oldTablet); err != nil {
if err := agent.TopoServer.UpdateTablet(ctx, oldTablet); err != nil {
return fmt.Errorf("UpdateTablet failed: %v", err)
}
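The same migration recurs across this commit: topo's package-level helpers become methods on the topo server handle, and alias/type utilities move to the new topo/topoproto package. A condensed before/after view of the call sites changed in this file:

// Before -> after, using calls taken from this file:
//   topo.ParseTabletType(*initTabletType)                         -> topoproto.ParseTabletType(*initTabletType)
//   topo.TabletAliasEqual(si.MasterAlias, agent.TabletAlias)      -> topoproto.TabletAliasEqual(si.MasterAlias, agent.TabletAlias)
//   topo.UpdateShard(ctx, agent.TopoServer, si)                   -> agent.TopoServer.UpdateShard(ctx, si)
//   topo.CreateTablet(ctx, agent.TopoServer, tablet)              -> agent.TopoServer.CreateTablet(ctx, tablet)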


@ -13,7 +13,6 @@ import (
"github.com/youtube/vitess/go/history"
"github.com/youtube/vitess/go/stats"
"github.com/youtube/vitess/go/vt/mysqlctl"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/zktopo"
"golang.org/x/net/context"
@ -140,7 +139,7 @@ func TestInitTablet(t *testing.T) {
t.Fatalf("GetShard failed: %v", err)
}
si.MasterAlias = tabletAlias
if err := topo.UpdateShard(ctx, ts, si); err != nil {
if err := ts.UpdateShard(ctx, si); err != nil {
t.Fatalf("UpdateShard failed: %v", err)
}
if err := agent.InitTablet(port, gRPCPort); err != nil {


@ -17,6 +17,7 @@ import (
"github.com/youtube/vitess/go/vt/concurrency"
"github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/topotools"
"github.com/youtube/vitess/go/vt/topotools/events"
"golang.org/x/net/context"
@ -55,12 +56,12 @@ func (agent *ActionAgent) TabletExternallyReparented(ctx context.Context, extern
tablet := agent.Tablet()
// Check the global shard record.
si, err := topo.GetShard(ctx, agent.TopoServer, tablet.Keyspace, tablet.Shard)
si, err := agent.TopoServer.GetShard(ctx, tablet.Keyspace, tablet.Shard)
if err != nil {
log.Warningf("fastTabletExternallyReparented: failed to read global shard record for %v/%v: %v", tablet.Keyspace, tablet.Shard, err)
return err
}
if topo.TabletAliasEqual(si.MasterAlias, tablet.Alias) {
if topoproto.TabletAliasEqual(si.MasterAlias, tablet.Alias) {
// We may get called on the current master even when nothing has changed.
// If the global shard record is already updated, it means we successfully
// finished a previous reparent to this tablet.
@ -152,7 +153,7 @@ func (agent *ActionAgent) finalizeTabletExternallyReparented(ctx context.Context
defer wg.Done()
// Update our own record to master.
var updatedTablet *pb.Tablet
err := topo.UpdateTabletFields(ctx, agent.TopoServer, agent.TabletAlias,
err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias,
func(tablet *pb.Tablet) error {
tablet.Type = pb.TabletType_MASTER
tablet.HealthMap = nil
@ -171,12 +172,12 @@ func (agent *ActionAgent) finalizeTabletExternallyReparented(ctx context.Context
}
}()
if !topo.TabletAliasIsZero(oldMasterAlias) {
if !topoproto.TabletAliasIsZero(oldMasterAlias) {
wg.Add(1)
go func() {
// Force the old master to spare.
var oldMasterTablet *pb.Tablet
err := topo.UpdateTabletFields(ctx, agent.TopoServer, oldMasterAlias,
err := agent.TopoServer.UpdateTabletFields(ctx, oldMasterAlias,
func(tablet *pb.Tablet) error {
tablet.Type = pb.TabletType_SPARE
oldMasterTablet = tablet
@ -220,7 +221,7 @@ func (agent *ActionAgent) finalizeTabletExternallyReparented(ctx context.Context
// write it back. Now we use an update loop pattern to do that instead.
event.DispatchUpdate(ev, "updating global shard record")
log.Infof("finalizeTabletExternallyReparented: updating global shard record")
si, err = topo.UpdateShardFields(ctx, agent.TopoServer, tablet.Keyspace, tablet.Shard, func(shard *pb.Shard) error {
si, err = agent.TopoServer.UpdateShardFields(ctx, tablet.Keyspace, tablet.Shard, func(shard *pb.Shard) error {
shard.MasterAlias = tablet.Alias
return nil
})


@ -11,7 +11,7 @@ import (
log "github.com/golang/glog"
"github.com/youtube/vitess/go/tb"
"github.com/youtube/vitess/go/vt/callinfo"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"golang.org/x/net/context"
)
@ -33,7 +33,7 @@ const rpcTimeout = time.Second * 30
func (agent *ActionAgent) rpcWrapper(ctx context.Context, name string, args, reply interface{}, verbose bool, f func() error, lock, runAfterAction bool) (err error) {
defer func() {
if x := recover(); x != nil {
log.Errorf("TabletManager.%v(%v) on %v panic: %v\n%s", name, args, topo.TabletAliasString(agent.TabletAlias), x, tb.Stack(4))
log.Errorf("TabletManager.%v(%v) on %v panic: %v\n%s", name, args, topoproto.TabletAliasString(agent.TabletAlias), x, tb.Stack(4))
err = fmt.Errorf("caught panic during %v: %v", name, x)
}
}()
@ -54,11 +54,11 @@ func (agent *ActionAgent) rpcWrapper(ctx context.Context, name string, args, rep
}
if err = f(); err != nil {
log.Warningf("TabletManager.%v(%v)(on %v from %v) error: %v", name, args, topo.TabletAliasString(agent.TabletAlias), from, err.Error())
return fmt.Errorf("TabletManager.%v on %v error: %v", name, topo.TabletAliasString(agent.TabletAlias), err)
log.Warningf("TabletManager.%v(%v)(on %v from %v) error: %v", name, args, topoproto.TabletAliasString(agent.TabletAlias), from, err.Error())
return fmt.Errorf("TabletManager.%v on %v error: %v", name, topoproto.TabletAliasString(agent.TabletAlias), err)
}
if verbose {
log.Infof("TabletManager.%v(%v)(on %v from %v): %#v", name, args, topo.TabletAliasString(agent.TabletAlias), from, reply)
log.Infof("TabletManager.%v(%v)(on %v from %v): %#v", name, args, topoproto.TabletAliasString(agent.TabletAlias), from, reply)
}
if runAfterAction {
err = agent.refreshTablet(ctx, "RPC("+name+")")


@ -84,7 +84,7 @@ func TestDBConnKill(t *testing.T) {
}
newKillQuery := fmt.Sprintf("kill %d", dbConn.ID())
// Kill failed because "kill query_id" failed
db.AddRejectedQuery(newKillQuery)
db.AddRejectedQuery(newKillQuery, errRejected)
err = dbConn.Kill()
testUtils.checkTabletError(t, err, ErrFail, "Could not kill query")


@ -388,14 +388,13 @@ func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan,
pkColumnNumbers := getInsertPKColumns(ins.Columns, tableInfo)
if ins.OnDup != nil {
// Upserts are not safe for statement based replication:
// http://bugs.mysql.com/bug.php?id=58637
plan.Reason = REASON_UPSERT
return plan, nil
}
if sel, ok := ins.Rows.(sqlparser.SelectStatement); ok {
if ins.OnDup != nil {
// Upserts not allowed for subqueries.
// http://bugs.mysql.com/bug.php?id=58637
plan.Reason = REASON_UPSERT
return plan, nil
}
plan.PlanId = PLAN_INSERT_SUBQUERY
plan.OuterQuery = GenerateInsertOuterQuery(ins)
plan.Subquery = GenerateSelectLimitQuery(sel)
@ -422,11 +421,40 @@ func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan,
if err != nil {
return nil, err
}
if pkValues != nil {
plan.PlanId = PLAN_INSERT_PK
plan.OuterQuery = plan.FullQuery
plan.PKValues = pkValues
if pkValues == nil {
plan.Reason = REASON_COMPLEX_EXPR
return plan, nil
}
plan.PKValues = pkValues
if ins.OnDup == nil {
plan.PlanId = PLAN_INSERT_PK
plan.OuterQuery = sqlparser.GenerateParsedQuery(ins)
return plan, nil
}
if len(rowList) > 1 {
// Upsert supported only for single row inserts.
plan.Reason = REASON_UPSERT
return plan, nil
}
plan.SecondaryPKValues, err = analyzeUpdateExpressions(sqlparser.UpdateExprs(ins.OnDup), tableInfo.Indexes[0])
if err != nil {
if err == ErrTooComplex {
plan.Reason = REASON_PK_CHANGE
return plan, nil
}
return nil, err
}
plan.PlanId = PLAN_UPSERT_PK
newins := *ins
newins.Ignore = ""
newins.OnDup = nil
plan.OuterQuery = sqlparser.GenerateParsedQuery(&newins)
upd := &sqlparser.Update{
Comments: ins.Comments,
Table: ins.Table,
Exprs: sqlparser.UpdateExprs(ins.OnDup),
}
plan.UpsertQuery = GenerateUpdateOuterQuery(upd)
return plan, nil
}
@ -467,7 +495,6 @@ func getInsertPKValues(pkColumnNumbers []int, rowList sqlparser.Values, tableInf
}
node := row[columnNumber]
if !sqlparser.IsValue(node) {
log.Warningf("insert is too complex %v", node)
return nil, nil
}
var err error
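To make the new upsert planning concrete, here is a hedged sketch of how a few statements would be classified by the rules above (plan and reason names are the constants used in this file; the schema is assumed to be test_table(pk primary key, val)):

// insert into test_table values (1) on duplicate key update val = 1
//   -> PLAN_UPSERT_PK: OuterQuery is the insert with ON DUPLICATE KEY stripped,
//      UpsertQuery is the generated "update test_table set val = 1 where ..." fallback.
//
// insert into test_table values (1), (2) on duplicate key update val = 1
//   -> Reason = REASON_UPSERT: multi-row upserts stay on the pass-through path.
//
// insert into test_table values (f(1)) on duplicate key update val = 1
//   -> Reason = REASON_COMPLEX_EXPR: the PK expression is not a plain value,
//      so getInsertPKValues returns nil and no PK plan is built.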


@ -54,6 +54,8 @@ const (
PLAN_SELECT_STREAM
// PLAN_OTHER is for SHOW, DESCRIBE & EXPLAIN statements
PLAN_OTHER
// PLAN_UPSERT_PK is for insert ... on duplicate key constructs
PLAN_UPSERT_PK
// NumPlans stores the total number of plans
NumPlans
)
@ -73,6 +75,7 @@ var planName = []string{
"DDL",
"SELECT_STREAM",
"OTHER",
"UPSERT_PK",
}
func (pt PlanType) String() string {
@ -120,6 +123,7 @@ var tableAclRoles = map[PlanType]tableacl.Role{
PLAN_DDL: tableacl.ADMIN,
PLAN_SELECT_STREAM: tableacl.READER,
PLAN_OTHER: tableacl.ADMIN,
PLAN_UPSERT_PK: tableacl.WRITER,
}
// ReasonType indicates why a query plan fails to build
@ -141,6 +145,7 @@ const (
REASON_TABLE_NOINDEX
REASON_PK_CHANGE
REASON_HAS_HINTS
REASON_COMPLEX_EXPR
REASON_UPSERT
)
@ -161,6 +166,7 @@ var reasonName = []string{
"TABLE_NOINDEX",
"PK_CHANGE",
"HAS_HINTS",
"COMPLEX_EXPR",
"UPSERT",
}
@ -193,9 +199,10 @@ type ExecPlan struct {
// For PK plans, only OuterQuery is set.
// For SUBQUERY plans, Subquery is also set.
// IndexUsed is set only for PLAN_SELECT_SUBQUERY
OuterQuery *sqlparser.ParsedQuery
Subquery *sqlparser.ParsedQuery
IndexUsed string
OuterQuery *sqlparser.ParsedQuery
Subquery *sqlparser.ParsedQuery
UpsertQuery *sqlparser.ParsedQuery
IndexUsed string
// For selects, columns to be returned
// For PLAN_INSERT_SUBQUERY, columns to be inserted


@ -78,12 +78,12 @@ func GenerateSelectOuterQuery(sel *sqlparser.Select, tableInfo *schema.Table) *s
// GenerateInsertOuterQuery generates the outer query for inserts.
func GenerateInsertOuterQuery(ins *sqlparser.Insert) *sqlparser.ParsedQuery {
buf := sqlparser.NewTrackedBuffer(nil)
buf.Myprintf("insert %vinto %v%v values %a%v",
buf.Myprintf("insert %v%sinto %v%v values %a",
ins.Comments,
ins.Ignore,
ins.Table,
ins.Columns,
":#values",
ins.OnDup,
)
return buf.ParsedQuery()
}
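The reshuffled format string moves the optional IGNORE ahead of INTO and no longer appends the ON DUPLICATE KEY clause, which the upsert plan now carries separately as UpsertQuery. Roughly, for a table t(a, b) the generated outer query becomes (assuming Ignore renders with its usual trailing space):

// insert into t(a, b) values :#values
// insert ignore into t(a, b) values :#values   // when IGNORE was specified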


@ -6,10 +6,12 @@ package tabletserver
import (
"fmt"
"strings"
"time"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/hack"
"github.com/youtube/vitess/go/mysql"
mproto "github.com/youtube/vitess/go/mysql/proto"
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/vt/callinfo"
@ -88,6 +90,8 @@ func (qre *QueryExecutor) Execute() (reply *mproto.QueryResult, err error) {
reply, err = qre.execDMLSubquery(conn, invalidator)
case planbuilder.PLAN_OTHER:
reply, err = qre.execSQL(conn, qre.query, true)
case planbuilder.PLAN_UPSERT_PK:
reply, err = qre.execUpsertPK(conn, invalidator)
default: // select or set in a transaction, just count as select
reply, err = qre.execDirect(conn)
}
@ -180,6 +184,8 @@ func (qre *QueryExecutor) execDmlAutoCommit() (reply *mproto.QueryResult, err er
reply, err = qre.execDMLPK(conn, invalidator)
case planbuilder.PLAN_DML_SUBQUERY:
reply, err = qre.execDMLSubquery(conn, invalidator)
case planbuilder.PLAN_UPSERT_PK:
reply, err = qre.execUpsertPK(conn, invalidator)
default:
return nil, NewTabletError(ErrFatal, "unsupported query: %s", qre.query)
}
@ -466,12 +472,42 @@ func (qre *QueryExecutor) execInsertSubquery(conn poolConn) (*mproto.QueryResult
}
func (qre *QueryExecutor) execInsertPKRows(conn poolConn, pkRows [][]sqltypes.Value) (*mproto.QueryResult, error) {
secondaryList, err := buildSecondaryList(qre.plan.TableInfo, pkRows, qre.plan.SecondaryPKValues, qre.bindVars)
bsc := buildStreamComment(qre.plan.TableInfo, pkRows, nil)
return qre.directFetch(conn, qre.plan.OuterQuery, qre.bindVars, bsc)
}
func (qre *QueryExecutor) execUpsertPK(conn poolConn, invalidator CacheInvalidator) (*mproto.QueryResult, error) {
pkRows, err := buildValueList(qre.plan.TableInfo, qre.plan.PKValues, qre.bindVars)
if err != nil {
return nil, err
}
bsc := buildStreamComment(qre.plan.TableInfo, pkRows, secondaryList)
return qre.directFetch(conn, qre.plan.OuterQuery, qre.bindVars, bsc)
bsc := buildStreamComment(qre.plan.TableInfo, pkRows, nil)
result, err := qre.directFetch(conn, qre.plan.OuterQuery, qre.bindVars, bsc)
if err == nil {
return result, nil
}
terr, ok := err.(*TabletError)
if !ok {
return result, err
}
if terr.SqlError != mysql.ErrDupEntry {
return nil, err
}
// If the duplicate is not on the primary key, just return the error without updating.
if !strings.Contains(terr.Message, "'PRIMARY'") {
return nil, err
}
// At this point, we know the insert failed due to a duplicate pk row.
// So, we just update the row.
result, err = qre.execDMLPKRows(conn, qre.plan.UpsertQuery, pkRows, invalidator)
if err != nil {
return nil, err
}
// Follow MySQL convention. RowsAffected must be 2 if a row was updated.
if result.RowsAffected == 1 {
result.RowsAffected = 2
}
return result, err
}
func (qre *QueryExecutor) execDMLPK(conn poolConn, invalidator CacheInvalidator) (*mproto.QueryResult, error) {
@ -479,7 +515,7 @@ func (qre *QueryExecutor) execDMLPK(conn poolConn, invalidator CacheInvalidator)
if err != nil {
return nil, err
}
return qre.execDMLPKRows(conn, pkRows, invalidator)
return qre.execDMLPKRows(conn, qre.plan.OuterQuery, pkRows, invalidator)
}
func (qre *QueryExecutor) execDMLSubquery(conn poolConn, invalidator CacheInvalidator) (*mproto.QueryResult, error) {
@ -487,10 +523,10 @@ func (qre *QueryExecutor) execDMLSubquery(conn poolConn, invalidator CacheInvali
if err != nil {
return nil, err
}
return qre.execDMLPKRows(conn, innerResult.Rows, invalidator)
return qre.execDMLPKRows(conn, qre.plan.OuterQuery, innerResult.Rows, invalidator)
}
func (qre *QueryExecutor) execDMLPKRows(conn poolConn, pkRows [][]sqltypes.Value, invalidator CacheInvalidator) (*mproto.QueryResult, error) {
func (qre *QueryExecutor) execDMLPKRows(conn poolConn, query *sqlparser.ParsedQuery, pkRows [][]sqltypes.Value, invalidator CacheInvalidator) (*mproto.QueryResult, error) {
if len(pkRows) == 0 {
return &mproto.QueryResult{RowsAffected: 0}, nil
}
@ -516,7 +552,7 @@ func (qre *QueryExecutor) execDMLPKRows(conn poolConn, pkRows [][]sqltypes.Value
Columns: qre.plan.TableInfo.Indexes[0].Columns,
Rows: pkRows,
}
r, err := qre.directFetch(conn, qre.plan.OuterQuery, qre.bindVars, bsc)
r, err := qre.directFetch(conn, query, qre.bindVars, bsc)
if err != nil {
return nil, err
}
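Summarizing the runtime behaviour of the new upsert path as a hedged trace (the statement texts match the unit test that follows; the duplicate-key message format is MySQL's):

// 1. Execute the rewritten insert:
//      insert into test_table values (1) /* _stream test_table (pk ) (1 ); */
// 2. On mysql.ErrDupEntry whose message names key 'PRIMARY', fall back to the
//    planner-generated update, routed through execDMLPKRows:
//      update test_table set val = 1 where pk in (1) /* _stream test_table (pk ) (1 ); */
// 3. If the update reports RowsAffected == 1, it is bumped to 2, matching
//    MySQL's own convention for INSERT ... ON DUPLICATE KEY UPDATE.
// Any other failure (different key, non-TabletError) is returned as-is.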


@ -12,7 +12,9 @@ import (
"testing"
"time"
"github.com/youtube/vitess/go/mysql"
mproto "github.com/youtube/vitess/go/mysql/proto"
"github.com/youtube/vitess/go/sqldb"
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/vt/callinfo"
tableaclpb "github.com/youtube/vitess/go/vt/proto/tableacl"
@ -214,6 +216,64 @@ func TestQueryExecutorPlanInsertSubQuery(t *testing.T) {
}
}
func TestQueryExecutorPlanUpsertPk(t *testing.T) {
db := setUpQueryExecutorTest()
db.AddQuery("insert into test_table values (1) /* _stream test_table (pk ) (1 ); */", &mproto.QueryResult{})
want := &mproto.QueryResult{
Rows: make([][]sqltypes.Value, 0),
}
query := "insert into test_table values(1) on duplicate key update val=1"
ctx := context.Background()
sqlQuery := newTestSQLQuery(ctx, enableRowCache|enableStrict)
qre := newTestQueryExecutor(ctx, sqlQuery, query, 0)
defer sqlQuery.disallowQueries()
checkPlanID(t, planbuilder.PLAN_UPSERT_PK, qre.plan.PlanId)
got, err := qre.Execute()
if err != nil {
t.Fatalf("qre.Execute() = %v, want nil", err)
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got: %v, want: %v", got, want)
}
db.AddRejectedQuery("insert into test_table values (1) /* _stream test_table (pk ) (1 ); */", errRejected)
_, err = qre.Execute()
wantErr := "error: rejected"
if err == nil || err.Error() != wantErr {
t.Fatalf("qre.Execute() = %v, want %v", err, wantErr)
}
db.AddRejectedQuery(
"insert into test_table values (1) /* _stream test_table (pk ) (1 ); */",
sqldb.NewSqlError(mysql.ErrDupEntry, "err"),
)
db.AddQuery("update test_table set val = 1 where pk in (1) /* _stream test_table (pk ) (1 ); */", &mproto.QueryResult{})
_, err = qre.Execute()
wantErr = "error: err (errno 1062)"
if err == nil || err.Error() != wantErr {
t.Fatalf("qre.Execute() = %v, want %v", err, wantErr)
}
db.AddRejectedQuery(
"insert into test_table values (1) /* _stream test_table (pk ) (1 ); */",
sqldb.NewSqlError(mysql.ErrDupEntry, "ERROR 1062 (23000): Duplicate entry '2' for key 'PRIMARY'"),
)
db.AddQuery(
"update test_table set val = 1 where pk in (1) /* _stream test_table (pk ) (1 ); */",
&mproto.QueryResult{RowsAffected: 1},
)
got, err = qre.Execute()
if err != nil {
t.Fatalf("qre.Execute() = %v, want nil", err)
}
want = &mproto.QueryResult{
RowsAffected: 2,
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got: %v, want: %v", got, want)
}
}
func TestQueryExecutorPlanDmlPk(t *testing.T) {
db := setUpQueryExecutorTest()
query := "update test_table set name = 2 where pk in (1) /* _stream test_table (pk ) (1 ); */"


@ -1,6 +1,7 @@
package tabletserver
import (
"encoding/binary"
"fmt"
"strconv"
@ -26,6 +27,11 @@ type QuerySplitter struct {
rowCount int64
}
const (
startBindVarName = "_splitquery_start"
endBindVarName = "_splitquery_end"
)
// NewQuerySplitter creates a new QuerySplitter. query is the original query
// to split and splitCount is the desired number of splits. splitCount must
// be a positive int; if not, it will be set to 1.
@ -95,8 +101,8 @@ func (qs *QuerySplitter) validateQuery() error {
// split splits the query into multiple queries. validateQuery() must return
// nil error before split() is called.
func (qs *QuerySplitter) split(pkMinMax *mproto.QueryResult) ([]proto.QuerySplit, error) {
boundaries, err := qs.splitBoundaries(pkMinMax)
func (qs *QuerySplitter) split(columnType int64, pkMinMax *mproto.QueryResult) ([]proto.QuerySplit, error) {
boundaries, err := qs.splitBoundaries(columnType, pkMinMax)
if err != nil {
return nil, err
}
@ -108,51 +114,59 @@ func (qs *QuerySplitter) split(pkMinMax *mproto.QueryResult) ([]proto.QuerySplit
}
splits = append(splits, *split)
} else {
boundaries = append(boundaries, sqltypes.Value{})
whereClause := qs.sel.Where
// Loop through the boundaries and generate modified where clauses
start := sqltypes.Value{}
clauses := []*sqlparser.Where{}
for _, end := range boundaries {
clauses = append(clauses, qs.getWhereClause(start, end))
start.Inner = end.Inner
}
clauses = append(clauses, qs.getWhereClause(start, sqltypes.Value{}))
// Generate one split per clause
for _, clause := range clauses {
sel := qs.sel
sel.Where = clause
bindVars := make(map[string]interface{}, len(qs.query.BindVariables))
for k, v := range qs.query.BindVariables {
bindVars[k] = v
}
qs.sel.Where = qs.getWhereClause(whereClause, bindVars, start, end)
q := &proto.BoundQuery{
Sql: sqlparser.String(sel),
BindVariables: qs.query.BindVariables,
Sql: sqlparser.String(qs.sel),
BindVariables: bindVars,
}
split := &proto.QuerySplit{
Query: *q,
RowCount: qs.rowCount,
}
splits = append(splits, *split)
start.Inner = end.Inner
}
qs.sel.Where = whereClause // reset where clause
}
return splits, err
}
// getWhereClause returns a whereClause based on desired upper and lower
// bounds for primary key.
func (qs *QuerySplitter) getWhereClause(start, end sqltypes.Value) *sqlparser.Where {
func (qs *QuerySplitter) getWhereClause(whereClause *sqlparser.Where, bindVars map[string]interface{}, start, end sqltypes.Value) *sqlparser.Where {
var startClause *sqlparser.ComparisonExpr
var endClause *sqlparser.ComparisonExpr
var clauses sqlparser.BoolExpr
// No upper or lower bound, just return the where clause of original query
if start.IsNull() && end.IsNull() {
return qs.sel.Where
return whereClause
}
pk := &sqlparser.ColName{
Name: sqlparser.SQLName(qs.splitColumn),
}
// splitColumn >= start
if !start.IsNull() {
startClause = &sqlparser.ComparisonExpr{
Operator: sqlparser.AST_GE,
Left: pk,
Right: sqlparser.NumVal((start).Raw()),
Right: sqlparser.ValArg([]byte(":" + startBindVarName)),
}
if start.IsNumeric() {
v, _ := start.ParseInt64()
bindVars[startBindVarName] = v
} else if start.IsString() {
bindVars[startBindVarName] = start.Raw()
} else if start.IsFractional() {
v, _ := start.ParseFloat64()
bindVars[startBindVarName] = v
}
}
// splitColumn < end
@ -160,7 +174,16 @@ func (qs *QuerySplitter) getWhereClause(start, end sqltypes.Value) *sqlparser.Wh
endClause = &sqlparser.ComparisonExpr{
Operator: sqlparser.AST_LT,
Left: pk,
Right: sqlparser.NumVal((end).Raw()),
Right: sqlparser.ValArg([]byte(":" + endBindVarName)),
}
if end.IsNumeric() {
v, _ := end.ParseInt64()
bindVars[endBindVarName] = v
} else if end.IsString() {
bindVars[endBindVarName] = end.Raw()
} else if end.IsFractional() {
v, _ := end.ParseFloat64()
bindVars[endBindVarName] = v
}
}
if startClause == nil {
@ -176,10 +199,10 @@ func (qs *QuerySplitter) getWhereClause(start, end sqltypes.Value) *sqlparser.Wh
}
}
}
if qs.sel.Where != nil {
if whereClause != nil {
clauses = &sqlparser.AndExpr{
Left: qs.sel.Where.Expr,
Right: clauses,
Left: &sqlparser.ParenBoolExpr{Expr: whereClause.Expr},
Right: &sqlparser.ParenBoolExpr{Expr: clauses},
}
}
return &sqlparser.Where{
@ -188,24 +211,23 @@ func (qs *QuerySplitter) getWhereClause(start, end sqltypes.Value) *sqlparser.Wh
}
}
func (qs *QuerySplitter) splitBoundaries(pkMinMax *mproto.QueryResult) ([]sqltypes.Value, error) {
boundaries := []sqltypes.Value{}
var err error
// If no min or max values were found, return empty list of boundaries
if len(pkMinMax.Rows) != 1 || pkMinMax.Rows[0][0].IsNull() || pkMinMax.Rows[0][1].IsNull() {
return boundaries, err
}
switch pkMinMax.Fields[0].Type {
func (qs *QuerySplitter) splitBoundaries(columnType int64, pkMinMax *mproto.QueryResult) ([]sqltypes.Value, error) {
switch columnType {
case mproto.VT_TINY, mproto.VT_SHORT, mproto.VT_LONG, mproto.VT_LONGLONG, mproto.VT_INT24:
boundaries, err = qs.parseInt(pkMinMax)
return qs.splitBoundariesIntColumn(pkMinMax)
case mproto.VT_FLOAT, mproto.VT_DOUBLE:
boundaries, err = qs.parseFloat(pkMinMax)
return qs.splitBoundariesFloatColumn(pkMinMax)
case mproto.VT_VARCHAR, mproto.VT_BIT, mproto.VT_VAR_STRING, mproto.VT_STRING:
return qs.splitBoundariesStringColumn()
}
return boundaries, err
return []sqltypes.Value{}, nil
}
func (qs *QuerySplitter) parseInt(pkMinMax *mproto.QueryResult) ([]sqltypes.Value, error) {
func (qs *QuerySplitter) splitBoundariesIntColumn(pkMinMax *mproto.QueryResult) ([]sqltypes.Value, error) {
boundaries := []sqltypes.Value{}
if pkMinMax == nil || len(pkMinMax.Rows) != 1 || pkMinMax.Rows[0][0].IsNull() || pkMinMax.Rows[0][1].IsNull() {
return boundaries, nil
}
minNumeric := sqltypes.MakeNumeric(pkMinMax.Rows[0][0].Raw())
maxNumeric := sqltypes.MakeNumeric(pkMinMax.Rows[0][1].Raw())
if pkMinMax.Rows[0][0].Raw()[0] == '-' {
@ -256,8 +278,11 @@ func (qs *QuerySplitter) parseInt(pkMinMax *mproto.QueryResult) ([]sqltypes.Valu
return boundaries, nil
}
func (qs *QuerySplitter) parseFloat(pkMinMax *mproto.QueryResult) ([]sqltypes.Value, error) {
func (qs *QuerySplitter) splitBoundariesFloatColumn(pkMinMax *mproto.QueryResult) ([]sqltypes.Value, error) {
boundaries := []sqltypes.Value{}
if pkMinMax == nil || len(pkMinMax.Rows) != 1 || pkMinMax.Rows[0][0].IsNull() || pkMinMax.Rows[0][1].IsNull() {
return boundaries, nil
}
min, err := strconv.ParseFloat(pkMinMax.Rows[0][0].String(), 64)
if err != nil {
return nil, err
@ -281,3 +306,27 @@ func (qs *QuerySplitter) parseFloat(pkMinMax *mproto.QueryResult) ([]sqltypes.Va
}
return boundaries, nil
}
// TODO(shengzhe): support split based on min, max from the string column.
func (qs *QuerySplitter) splitBoundariesStringColumn() ([]sqltypes.Value, error) {
firstRow := int64(0x0)
lastRow := int64(0xFFFFFFFFFFFFFF)
splitRange := lastRow - firstRow + 1
splitSize := splitRange / int64(qs.splitCount)
qs.rowCount = splitSize
var boundaries []sqltypes.Value
for i := 1; i < qs.splitCount; i++ {
buf := make([]byte, 8)
// encode the split point into bytes.
binary.BigEndian.PutUint64(buf, uint64(firstRow+splitSize*int64(i)))
// only converts the lower 4 bytes into hex because the upper 4 bytes are
// always 0x00000000 and mysql does byte comparison from the most significant
// bits.
val, err := sqltypes.BuildValue(buf[4:])
if err != nil {
return nil, err
}
boundaries = append(boundaries, val)
}
return boundaries, nil
}
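A worked example of the string-column boundary arithmetic above, for splitCount = 3 (the same numbers appear in TestSplitQueryStringColumn later in this commit):

// splitRange = 0xFFFFFFFFFFFFFF - 0x0 + 1 = 2^56
// splitSize  = 2^56 / 3 = 24019198012642645 (also becomes qs.rowCount)
// boundary 1 = 1 * splitSize = 0x0055555555555555 -> buf[4:] = 0x55 0x55 0x55 0x55
// boundary 2 = 2 * splitSize = 0x00AAAAAAAAAAAAAA -> buf[4:] = 0xAA 0xAA 0xAA 0xAA
// Each 4-byte slice is then bound as _splitquery_start / _splitquery_end in the
// generated WHERE clauses, just like the integer and float boundaries.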


@ -1,6 +1,7 @@
package tabletserver
import (
"encoding/binary"
"fmt"
"reflect"
"strings"
@ -102,6 +103,20 @@ func TestValidateQuery(t *testing.T) {
t.Errorf("valid query validation failed, got:%v, want:%v", got, want)
}
splitter = NewQuerySplitter(query, "id2", 0, schemaInfo)
query.Sql = "select * from test_table where count > :count"
got = splitter.validateQuery()
want = nil
if !reflect.DeepEqual(got, want) {
t.Errorf("valid query validation failed, got:%v, want:%v", got, want)
}
splitter = NewQuerySplitter(query, "id2", 0, schemaInfo)
query.Sql = "invalid select * from test_table where count > :count"
if err := splitter.validateQuery(); err == nil {
t.Fatalf("validateQuery() = %v, want: nil", err)
}
// column id2 is indexed
splitter = NewQuerySplitter(query, "id2", 3, schemaInfo)
query.Sql = "select * from test_table where count > :count"
@ -134,10 +149,10 @@ func TestGetWhereClause(t *testing.T) {
statement, _ := sqlparser.Parse(sql)
splitter.sel, _ = statement.(*sqlparser.Select)
splitter.splitColumn = "id"
bindVars := make(map[string]interface{})
// no boundary case, start = end = nil, should not change the where clause
nilValue := sqltypes.Value{}
clause := splitter.getWhereClause(nilValue, nilValue)
clause := splitter.getWhereClause(splitter.sel.Where, bindVars, nilValue, nilValue)
want := " where count > :count"
got := sqlparser.String(clause)
if !reflect.DeepEqual(got, want) {
@ -145,26 +160,45 @@ func TestGetWhereClause(t *testing.T) {
}
// Set lower bound, should add the lower bound condition to where clause
start, _ := sqltypes.BuildValue(20)
clause = splitter.getWhereClause(start, nilValue)
want = " where count > :count and id >= 20"
startVal := int64(20)
start, _ := sqltypes.BuildValue(startVal)
bindVars = make(map[string]interface{})
bindVars[":count"] = 300
clause = splitter.getWhereClause(splitter.sel.Where, bindVars, start, nilValue)
want = " where (count > :count) and (id >= :" + startBindVarName + ")"
got = sqlparser.String(clause)
if !reflect.DeepEqual(got, want) {
t.Errorf("incorrect where clause, got:%v, want:%v", got, want)
}
v, ok := bindVars[startBindVarName]
if !ok {
t.Fatalf("bind var: %s not found got: nil, want: %v", startBindVarName, startVal)
}
if v != startVal {
t.Fatalf("bind var: %s not found got: %v, want: %v", startBindVarName, v, startVal)
}
// Set upper bound, should add the upper bound condition to where clause
end, _ := sqltypes.BuildValue(40)
clause = splitter.getWhereClause(nilValue, end)
want = " where count > :count and id < 40"
endVal := int64(40)
end, _ := sqltypes.BuildValue(endVal)
bindVars = make(map[string]interface{})
clause = splitter.getWhereClause(splitter.sel.Where, bindVars, nilValue, end)
want = " where (count > :count) and (id < :" + endBindVarName + ")"
got = sqlparser.String(clause)
if !reflect.DeepEqual(got, want) {
t.Errorf("incorrect where clause, got:%v, want:%v", got, want)
}
v, ok = bindVars[endBindVarName]
if !ok {
t.Fatalf("bind var: %s not found got: nil, want: %v", endBindVarName, endVal)
}
if v != endVal {
t.Fatalf("bind var: %s not found got: %v, want: %v", endBindVarName, v, endVal)
}
// Set both bounds, should add two conditions to where clause
clause = splitter.getWhereClause(start, end)
want = " where count > :count and id >= 20 and id < 40"
bindVars = make(map[string]interface{})
clause = splitter.getWhereClause(splitter.sel.Where, bindVars, start, end)
want = fmt.Sprintf(" where (count > :count) and (id >= :%s and id < :%s)", startBindVarName, endBindVarName)
got = sqlparser.String(clause)
if !reflect.DeepEqual(got, want) {
t.Errorf("incorrect where clause, got:%v, want:%v", got, want)
@ -174,22 +208,36 @@ func TestGetWhereClause(t *testing.T) {
sql = "select * from test_table"
statement, _ = sqlparser.Parse(sql)
splitter.sel, _ = statement.(*sqlparser.Select)
bindVars = make(map[string]interface{})
// no boundary case, start = end = nil should return no where clause
clause = splitter.getWhereClause(nilValue, nilValue)
clause = splitter.getWhereClause(splitter.sel.Where, bindVars, nilValue, nilValue)
want = ""
got = sqlparser.String(clause)
if !reflect.DeepEqual(got, want) {
t.Errorf("incorrect where clause for nil ranges, got:%v, want:%v", got, want)
}
bindVars = make(map[string]interface{})
// Set both bounds, should add two conditions to where clause
clause = splitter.getWhereClause(start, end)
want = " where id >= 20 and id < 40"
clause = splitter.getWhereClause(splitter.sel.Where, bindVars, start, end)
want = fmt.Sprintf(" where id >= :%s and id < :%s", startBindVarName, endBindVarName)
got = sqlparser.String(clause)
if !reflect.DeepEqual(got, want) {
t.Errorf("incorrect where clause, got:%v, want:%v", got, want)
}
v, ok = bindVars[startBindVarName]
if !ok {
t.Fatalf("bind var: %s not found got: nil, want: %v", startBindVarName, startVal)
}
if v != startVal {
t.Fatalf("bind var: %s not found got: %v, want: %v", startBindVarName, v, startVal)
}
v, ok = bindVars[endBindVarName]
if !ok {
t.Fatalf("bind var: %s not found got: nil, want: %v", endBindVarName, endVal)
}
if v != endVal {
t.Fatalf("bind var: %s not found got: %v, want: %v", endBindVarName, v, endVal)
}
}
func TestSplitBoundaries(t *testing.T) {
@ -209,14 +257,14 @@ func TestSplitBoundaries(t *testing.T) {
splitter := &QuerySplitter{}
splitter.splitCount = 5
boundaries, err := splitter.splitBoundaries(pkMinMax)
boundaries, err := splitter.splitBoundaries(mproto.VT_LONGLONG, pkMinMax)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if len(boundaries) != splitter.splitCount-1 {
t.Errorf("wrong number of boundaries got: %v, want: %v", len(boundaries), splitter.splitCount-1)
}
got, err := splitter.splitBoundaries(pkMinMax)
got, err := splitter.splitBoundaries(mproto.VT_LONGLONG, pkMinMax)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -231,7 +279,7 @@ func TestSplitBoundaries(t *testing.T) {
row = []sqltypes.Value{min, max}
rows = [][]sqltypes.Value{row}
pkMinMax.Rows = rows
got, err = splitter.splitBoundaries(pkMinMax)
got, err = splitter.splitBoundaries(mproto.VT_LONGLONG, pkMinMax)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -250,7 +298,7 @@ func TestSplitBoundaries(t *testing.T) {
fields = []mproto.Field{minField, maxField}
pkMinMax.Rows = rows
pkMinMax.Fields = fields
got, err = splitter.splitBoundaries(pkMinMax)
got, err = splitter.splitBoundaries(mproto.VT_DOUBLE, pkMinMax)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -279,7 +327,7 @@ func TestSplitQuery(t *testing.T) {
Type: mproto.VT_LONGLONG,
}
maxField := mproto.Field{
Name: "min",
Name: "max",
Type: mproto.VT_LONGLONG,
}
fields := []mproto.Field{minField, maxField}
@ -288,29 +336,143 @@ func TestSplitQuery(t *testing.T) {
}
// Ensure that empty min max does not cause panic or return any error
splits, err := splitter.split(pkMinMax)
splits, err := splitter.split(mproto.VT_LONGLONG, pkMinMax)
if err != nil {
t.Errorf("unexpected error while splitting on empty pkMinMax, %s", err)
}
pkMinMax.Rows = [][]sqltypes.Value{[]sqltypes.Value{min, max}}
splits, err = splitter.split(pkMinMax)
splits, err = splitter.split(mproto.VT_LONGLONG, pkMinMax)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got := []string{}
got := []proto.BoundQuery{}
for _, split := range splits {
if split.RowCount != 100 {
t.Errorf("wrong RowCount, got: %v, want: %v", split.RowCount, 100)
}
got = append(got, split.Query.Sql)
got = append(got, split.Query)
}
want := []string{
"select * from test_table where count > :count and id < 100",
"select * from test_table where count > :count and id >= 100 and id < 200",
"select * from test_table where count > :count and id >= 200",
want := []proto.BoundQuery{
{
Sql: "select * from test_table where (count > :count) and (id < :" + endBindVarName + ")",
BindVariables: map[string]interface{}{endBindVarName: int64(100)},
},
{
Sql: fmt.Sprintf("select * from test_table where (count > :count) and (id >= :%s and id < :%s)", startBindVarName, endBindVarName),
BindVariables: map[string]interface{}{
startBindVarName: int64(100),
endBindVarName: int64(200),
},
},
{
Sql: "select * from test_table where (count > :count) and (id >= :" + startBindVarName + ")",
BindVariables: map[string]interface{}{startBindVarName: int64(200)},
},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("wrong splits, got: %v, want: %v", got, want)
}
}
func TestSplitQueryFractionalColumn(t *testing.T) {
schemaInfo := getSchemaInfo()
query := &proto.BoundQuery{
Sql: "select * from test_table where count > :count",
}
splitter := NewQuerySplitter(query, "", 3, schemaInfo)
splitter.validateQuery()
min, _ := sqltypes.BuildValue(10.5)
max, _ := sqltypes.BuildValue(490.5)
minField := mproto.Field{
Name: "min",
Type: mproto.VT_FLOAT,
}
maxField := mproto.Field{
Name: "max",
Type: mproto.VT_FLOAT,
}
fields := []mproto.Field{minField, maxField}
pkMinMax := &mproto.QueryResult{
Fields: fields,
Rows: [][]sqltypes.Value{[]sqltypes.Value{min, max}},
}
splits, err := splitter.split(mproto.VT_FLOAT, pkMinMax)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got := []proto.BoundQuery{}
for _, split := range splits {
if split.RowCount != 160 {
t.Errorf("wrong RowCount, got: %v, want: %v", split.RowCount, 160)
}
got = append(got, split.Query)
}
want := []proto.BoundQuery{
{
Sql: "select * from test_table where (count > :count) and (id < :" + endBindVarName + ")",
BindVariables: map[string]interface{}{endBindVarName: 170.5},
},
{
Sql: fmt.Sprintf("select * from test_table where (count > :count) and (id >= :%s and id < :%s)", startBindVarName, endBindVarName),
BindVariables: map[string]interface{}{
startBindVarName: 170.5,
endBindVarName: 330.5,
},
},
{
Sql: "select * from test_table where (count > :count) and (id >= :" + startBindVarName + ")",
BindVariables: map[string]interface{}{startBindVarName: 330.5},
},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("wrong splits, got: %v, want: %v", got, want)
}
}
func TestSplitQueryStringColumn(t *testing.T) {
schemaInfo := getSchemaInfo()
query := &proto.BoundQuery{
Sql: "select * from test_table where count > :count",
}
splitter := NewQuerySplitter(query, "", 3, schemaInfo)
splitter.validateQuery()
splits, err := splitter.split(mproto.VT_VAR_STRING, nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got := []proto.BoundQuery{}
for _, split := range splits {
if split.RowCount != 24019198012642645 {
t.Errorf("wrong RowCount, got: %v, want: %v", split.RowCount, 1431655765)
}
got = append(got, split.Query)
}
want := []proto.BoundQuery{
{
Sql: "select * from test_table where (count > :count) and (id < :" + endBindVarName + ")",
BindVariables: map[string]interface{}{endBindVarName: hexToByteUInt64(0x55555555)[4:]},
},
{
Sql: fmt.Sprintf("select * from test_table where (count > :count) and (id >= :%s and id < :%s)", startBindVarName, endBindVarName),
BindVariables: map[string]interface{}{
startBindVarName: hexToByteUInt64(0x55555555)[4:],
endBindVarName: hexToByteUInt64(0xAAAAAAAA)[4:],
},
},
{
Sql: "select * from test_table where (count > :count) and (id >= :" + startBindVarName + ")",
BindVariables: map[string]interface{}{startBindVarName: hexToByteUInt64(0xAAAAAAAA)[4:]},
},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("wrong splits, got: %v, want: %v", got, want)
}
}
func hexToByteUInt64(val uint64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, val)
return buf
}


@ -364,20 +364,33 @@ func (sq *SqlQuery) handleExecErrorNoPanic(query *proto.Query, err interface{},
return NewTabletError(ErrFail, "%v: uncaught panic for %v", err, query)
}
var myError error
if sq.config.TerseErrors && terr.SqlError != 0 {
if sq.config.TerseErrors && terr.SqlError != 0 && len(query.BindVariables) != 0 {
myError = fmt.Errorf("%s(errno %d) during query: %s", terr.Prefix(), terr.SqlError, query.Sql)
} else {
myError = terr
}
terr.RecordStats(sq.qe.queryServiceStats)
// suppress these errors in logs
if terr.ErrorType == ErrRetry || terr.ErrorType == ErrTxPoolFull || terr.SqlError == mysql.ErrDupEntry {
logMethod := log.Warningf
// Suppress or demote some errors in logs
switch terr.ErrorType {
case ErrRetry, ErrTxPoolFull:
return myError
case ErrFatal:
logMethod = log.Errorf
}
if terr.ErrorType == ErrFatal {
log.Errorf("%v: %v", terr, query)
// We want to suppress/demote some MySQL error codes (regardless of the ErrorType)
switch terr.SqlError {
case mysql.ErrDupEntry:
return myError
case mysql.ErrLockWaitTimeout, mysql.ErrLockDeadlock, mysql.ErrDataTooLong:
logMethod = log.Infof
case 0:
if strings.Contains(terr.Error(), "Row count exceeded") {
logMethod = log.Infof
}
}
log.Warningf("%v: %v", terr, query)
logMethod("%v: %v", terr, query)
return myError
}
@ -539,20 +552,19 @@ func (sq *SqlQuery) SplitQuery(ctx context.Context, target *pb.Target, req *prot
logStats: logStats,
qe: sq.qe,
}
conn, err := qre.getConn(sq.qe.connPool)
columnType, err := getColumnType(qre, splitter.splitColumn, splitter.tableName)
if err != nil {
return err
}
defer conn.Recycle()
// TODO: For fetching MinMax, include where clauses on the
// primary key, if any, in the original query which might give a narrower
// range of split column to work with.
minMaxSQL := fmt.Sprintf("SELECT MIN(%v), MAX(%v) FROM %v", splitter.splitColumn, splitter.splitColumn, splitter.tableName)
splitColumnMinMax, err := qre.execSQL(conn, minMaxSQL, true)
if err != nil {
return err
var pkMinMax *mproto.QueryResult
switch columnType {
case mproto.VT_TINY, mproto.VT_SHORT, mproto.VT_LONG, mproto.VT_LONGLONG, mproto.VT_INT24, mproto.VT_FLOAT, mproto.VT_DOUBLE:
pkMinMax, err = getColumnMinMax(qre, splitter.splitColumn, splitter.tableName)
if err != nil {
return err
}
}
reply.Queries, err = splitter.split(splitColumnMinMax)
reply.Queries, err = splitter.split(columnType, pkMinMax)
if err != nil {
return NewTabletError(ErrFail, "splitQuery: query split error: %s, request: %#v", err, req)
}
@ -673,3 +685,36 @@ func withTimeout(ctx context.Context, timeout time.Duration) (context.Context, c
}
return context.WithTimeout(ctx, timeout)
}
func getColumnType(qre *QueryExecutor, columnName, tableName string) (int64, error) {
conn, err := qre.getConn(qre.qe.connPool)
if err != nil {
return mproto.VT_NULL, err
}
defer conn.Recycle()
// TODO(shengzhe): use AST to represent the query to avoid sql injection.
// current code is safe because QuerySplitter.validateQuery is called before
// calling this.
query := fmt.Sprintf("SELECT %v FROM %v LIMIT 0", columnName, tableName)
result, err := qre.execSQL(conn, query, true)
if err != nil {
return mproto.VT_NULL, err
}
if result == nil || len(result.Fields) != 1 {
return mproto.VT_NULL, NewTabletError(ErrFail, "failed to get column type for column: %v, invalid result: %v", columnName, result)
}
return result.Fields[0].Type, nil
}
func getColumnMinMax(qre *QueryExecutor, columnName, tableName string) (*mproto.QueryResult, error) {
conn, err := qre.getConn(qre.qe.connPool)
if err != nil {
return nil, err
}
defer conn.Recycle()
// TODO(shengzhe): use AST to represent the query to avoid sql injection.
// current code is safe because QuerySplitter.validateQuery is called before
// calling this.
minMaxSQL := fmt.Sprintf("SELECT MIN(%v), MAX(%v) FROM %v", columnName, columnName, tableName)
return qre.execSQL(conn, minMaxSQL, true)
}
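Taken together, the SplitQuery handler in this file now issues up to two probe queries before splitting, both built from the already-validated column and table names:

// 1. Column type probe (always):
//      SELECT <splitColumn> FROM <table> LIMIT 0
//    Only the field metadata is inspected; the type picks the boundary strategy.
// 2. Min/max probe (integer and float columns only):
//      SELECT MIN(<splitColumn>), MAX(<splitColumn>) FROM <table>
//    String columns skip this probe and use the fixed 2^56 range instead.
// splitter.split(columnType, pkMinMax) then emits the per-split queries with
// _splitquery_start/_splitquery_end bind variables.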


@ -19,28 +19,28 @@ import (
)
func TestSqlQueryAllowQueriesFailBadConn(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
db.EnableConnFail()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
checkSqlQueryState(t, sqlQuery, "NOT_SERVING")
checkSQLQueryState(t, sqlQuery, "NOT_SERVING")
dbconfigs := testUtils.newDBConfigs()
err := sqlQuery.allowQueries(nil, &dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
if err == nil {
t.Fatalf("SqlQuery.allowQueries should fail")
}
checkSqlQueryState(t, sqlQuery, "NOT_SERVING")
checkSQLQueryState(t, sqlQuery, "NOT_SERVING")
}
func TestSqlQueryAllowQueriesFailStrictModeConflictWithRowCache(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
// disable strict mode
config.StrictMode = false
sqlQuery := NewSqlQuery(config)
checkSqlQueryState(t, sqlQuery, "NOT_SERVING")
checkSQLQueryState(t, sqlQuery, "NOT_SERVING")
dbconfigs := testUtils.newDBConfigs()
// enable rowcache
dbconfigs.App.EnableRowcache = true
@ -48,15 +48,15 @@ func TestSqlQueryAllowQueriesFailStrictModeConflictWithRowCache(t *testing.T) {
if err == nil {
t.Fatalf("SqlQuery.allowQueries should fail because strict mode is disabled while rowcache is enabled.")
}
checkSqlQueryState(t, sqlQuery, "NOT_SERVING")
checkSQLQueryState(t, sqlQuery, "NOT_SERVING")
}
func TestSqlQueryAllowQueries(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
checkSqlQueryState(t, sqlQuery, "NOT_SERVING")
checkSQLQueryState(t, sqlQuery, "NOT_SERVING")
dbconfigs := testUtils.newDBConfigs()
sqlQuery.setState(StateServing)
err := sqlQuery.allowQueries(nil, &dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
@ -73,7 +73,7 @@ func TestSqlQueryAllowQueries(t *testing.T) {
}
func TestSqlQueryCheckMysql(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -89,7 +89,7 @@ func TestSqlQueryCheckMysql(t *testing.T) {
}
func TestSqlQueryCheckMysqlFailInvalidConn(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -107,7 +107,7 @@ func TestSqlQueryCheckMysqlFailInvalidConn(t *testing.T) {
}
func TestSqlQueryCheckMysqlFailUninitializedQueryEngine(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -126,7 +126,7 @@ func TestSqlQueryCheckMysqlFailUninitializedQueryEngine(t *testing.T) {
}
func TestSqlQueryCheckMysqlInNotServingState(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
config.EnablePublishStats = true
@ -151,7 +151,7 @@ func TestSqlQueryCheckMysqlInNotServingState(t *testing.T) {
}
func TestSqlQueryGetSessionId(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -196,7 +196,7 @@ func TestSqlQueryGetSessionId(t *testing.T) {
}
func TestSqlQueryCommandFailUnMatchedSessionId(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -286,17 +286,17 @@ func TestSqlQueryCommandFailUnMatchedSessionId(t *testing.T) {
}
func TestSqlQueryCommitTransaciton(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
// sql that will be executed in this test
executeSql := "select * from test_table limit 1000"
executeSqlResult := &mproto.QueryResult{
executeSQL := "select * from test_table limit 1000"
executeSQLResult := &mproto.QueryResult{
RowsAffected: 1,
Rows: [][]sqltypes.Value{
[]sqltypes.Value{sqltypes.MakeString([]byte("row01"))},
},
}
db.AddQuery(executeSql, executeSqlResult)
db.AddQuery(executeSQL, executeSQLResult)
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
dbconfigs := testUtils.newDBConfigs()
@ -316,7 +316,7 @@ func TestSqlQueryCommitTransaciton(t *testing.T) {
}
session.TransactionId = txInfo.TransactionId
query := proto.Query{
Sql: executeSql,
Sql: executeSQL,
BindVariables: nil,
SessionId: session.SessionId,
TransactionId: session.TransactionId,
@ -331,17 +331,17 @@ func TestSqlQueryCommitTransaciton(t *testing.T) {
}
func TestSqlQueryRollback(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
// sql that will be executed in this test
executeSql := "select * from test_table limit 1000"
executeSqlResult := &mproto.QueryResult{
executeSQL := "select * from test_table limit 1000"
executeSQLResult := &mproto.QueryResult{
RowsAffected: 1,
Rows: [][]sqltypes.Value{
[]sqltypes.Value{sqltypes.MakeString([]byte("row01"))},
},
}
db.AddQuery(executeSql, executeSqlResult)
db.AddQuery(executeSQL, executeSQLResult)
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
dbconfigs := testUtils.newDBConfigs()
@ -361,7 +361,7 @@ func TestSqlQueryRollback(t *testing.T) {
}
session.TransactionId = txInfo.TransactionId
query := proto.Query{
Sql: executeSql,
Sql: executeSQL,
BindVariables: nil,
SessionId: session.SessionId,
TransactionId: session.TransactionId,
@ -376,17 +376,17 @@ func TestSqlQueryRollback(t *testing.T) {
}
func TestSqlQueryStreamExecute(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
// sql that will be executed in this test
executeSql := "select * from test_table limit 1000"
executeSqlResult := &mproto.QueryResult{
executeSQL := "select * from test_table limit 1000"
executeSQLResult := &mproto.QueryResult{
RowsAffected: 1,
Rows: [][]sqltypes.Value{
[]sqltypes.Value{sqltypes.MakeString([]byte("row01"))},
},
}
db.AddQuery(executeSql, executeSqlResult)
db.AddQuery(executeSQL, executeSQLResult)
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -407,7 +407,7 @@ func TestSqlQueryStreamExecute(t *testing.T) {
}
session.TransactionId = txInfo.TransactionId
query := proto.Query{
Sql: executeSql,
Sql: executeSQL,
BindVariables: nil,
SessionId: session.SessionId,
TransactionId: session.TransactionId,
@ -427,14 +427,14 @@ func TestSqlQueryStreamExecute(t *testing.T) {
}
func TestSqlQueryExecuteBatch(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
sql := "insert into test_table values (1, 2)"
sqlResult := &mproto.QueryResult{}
expanedSql := "insert into test_table values (1, 2) /* _stream test_table (pk ) (1 ); */"
expanedSQL := "insert into test_table values (1, 2) /* _stream test_table (pk ) (1 ); */"
db.AddQuery(sql, sqlResult)
db.AddQuery(expanedSql, sqlResult)
db.AddQuery(expanedSQL, sqlResult)
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
dbconfigs := testUtils.newDBConfigs()
@ -469,7 +469,7 @@ func TestSqlQueryExecuteBatch(t *testing.T) {
}
func TestSqlQueryExecuteBatchFailEmptyQueryList(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -493,7 +493,7 @@ func TestSqlQueryExecuteBatchFailEmptyQueryList(t *testing.T) {
}
func TestSqlQueryExecuteBatchFailAsTransaction(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -524,10 +524,10 @@ func TestSqlQueryExecuteBatchFailAsTransaction(t *testing.T) {
}
func TestSqlQueryExecuteBatchBeginFail(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
// make "begin" query fail
db.AddRejectedQuery("begin")
db.AddRejectedQuery("begin", errRejected)
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
dbconfigs := testUtils.newDBConfigs()
@ -558,10 +558,10 @@ func TestSqlQueryExecuteBatchBeginFail(t *testing.T) {
}
func TestSqlQueryExecuteBatchCommitFail(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
// make "commit" query fail
db.AddRejectedQuery("commit")
db.AddRejectedQuery("commit", errRejected)
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
dbconfigs := testUtils.newDBConfigs()
@ -597,18 +597,18 @@ func TestSqlQueryExecuteBatchCommitFail(t *testing.T) {
}
func TestSqlQueryExecuteBatchSqlExecFailInTransaction(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
sql := "insert into test_table values (1, 2)"
sqlResult := &mproto.QueryResult{}
expanedSql := "insert into test_table values (1, 2) /* _stream test_table (pk ) (1 ); */"
expanedSQL := "insert into test_table values (1, 2) /* _stream test_table (pk ) (1 ); */"
db.AddQuery(sql, sqlResult)
db.AddQuery(expanedSql, sqlResult)
db.AddQuery(expanedSQL, sqlResult)
// make this query fail
db.AddRejectedQuery(sql)
db.AddRejectedQuery(expanedSql)
db.AddRejectedQuery(sql, errRejected)
db.AddRejectedQuery(expanedSQL, errRejected)
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -652,17 +652,17 @@ func TestSqlQueryExecuteBatchSqlExecFailInTransaction(t *testing.T) {
}
func TestSqlQueryExecuteBatchSqlSucceedInTransaction(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
sql := "insert into test_table values (1, 2)"
sqlResult := &mproto.QueryResult{}
expanedSql := "insert into test_table values (1, 2) /* _stream test_table (pk ) (1 ); */"
expanedSQL := "insert into test_table values (1, 2) /* _stream test_table (pk ) (1 ); */"
db.AddQuery(sql, sqlResult)
db.AddQuery(expanedSql, sqlResult)
db.AddQuery(expanedSQL, sqlResult)
// cause execution error for this particular sql query
db.AddRejectedQuery(sql)
db.AddRejectedQuery(sql, errRejected)
config := testUtils.newQueryServiceConfig()
config.EnableAutoCommit = true
@ -695,7 +695,7 @@ func TestSqlQueryExecuteBatchSqlSucceedInTransaction(t *testing.T) {
}
func TestSqlQueryExecuteBatchCallCommitWithoutABegin(t *testing.T) {
setUpSqlQueryTest()
setUpSQLQueryTest()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -727,14 +727,14 @@ func TestSqlQueryExecuteBatchCallCommitWithoutABegin(t *testing.T) {
}
func TestExecuteBatchNestedTransaction(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
sql := "insert into test_table values (1, 2)"
sqlResult := &mproto.QueryResult{}
expanedSql := "insert into test_table values (1, 2) /* _stream test_table (pk ) (1 ); */"
expanedSQL := "insert into test_table values (1, 2) /* _stream test_table (pk ) (1 ); */"
db.AddQuery(sql, sqlResult)
db.AddQuery(expanedSql, sqlResult)
db.AddQuery(expanedSQL, sqlResult)
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
dbconfigs := testUtils.newDBConfigs()
@ -786,7 +786,7 @@ func TestExecuteBatchNestedTransaction(t *testing.T) {
}
func TestSqlQuerySplitQuery(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
db.AddQuery("SELECT MIN(pk), MAX(pk) FROM test_table", &mproto.QueryResult{
Fields: []mproto.Field{
mproto.Field{Name: "pk", Type: mproto.VT_LONG},
@ -799,7 +799,17 @@ func TestSqlQuerySplitQuery(t *testing.T) {
},
},
})
db.AddQuery("SELECT pk FROM test_table LIMIT 0", &mproto.QueryResult{
Fields: []mproto.Field{
mproto.Field{Name: "pk", Type: mproto.VT_LONG},
},
RowsAffected: 1,
Rows: [][]sqltypes.Value{
[]sqltypes.Value{
sqltypes.MakeNumeric([]byte("1")),
},
},
})
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -837,7 +847,30 @@ func TestSqlQuerySplitQuery(t *testing.T) {
}
func TestSqlQuerySplitQueryInvalidQuery(t *testing.T) {
setUpSqlQueryTest()
db := setUpSQLQueryTest()
db.AddQuery("SELECT MIN(pk), MAX(pk) FROM test_table", &mproto.QueryResult{
Fields: []mproto.Field{
mproto.Field{Name: "pk", Type: mproto.VT_LONG},
},
RowsAffected: 1,
Rows: [][]sqltypes.Value{
[]sqltypes.Value{
sqltypes.MakeNumeric([]byte("1")),
sqltypes.MakeNumeric([]byte("100")),
},
},
})
db.AddQuery("SELECT pk FROM test_table LIMIT 0", &mproto.QueryResult{
Fields: []mproto.Field{
mproto.Field{Name: "pk", Type: mproto.VT_LONG},
},
RowsAffected: 1,
Rows: [][]sqltypes.Value{
[]sqltypes.Value{
sqltypes.MakeNumeric([]byte("1")),
},
},
})
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
@ -875,7 +908,7 @@ func TestSqlQuerySplitQueryInvalidQuery(t *testing.T) {
}
func TestSqlQuerySplitQueryInvalidMinMax(t *testing.T) {
db := setUpSqlQueryTest()
db := setUpSQLQueryTest()
testUtils := newTestUtils()
pkMinMaxQuery := "SELECT MIN(pk), MAX(pk) FROM test_table"
pkMinMaxQueryResp := &mproto.QueryResult{
@ -891,6 +924,17 @@ func TestSqlQuerySplitQueryInvalidMinMax(t *testing.T) {
},
},
}
db.AddQuery("SELECT pk FROM test_table LIMIT 0", &mproto.QueryResult{
Fields: []mproto.Field{
mproto.Field{Name: "pk", Type: mproto.VT_LONG},
},
RowsAffected: 1,
Rows: [][]sqltypes.Value{
[]sqltypes.Value{
sqltypes.MakeNumeric([]byte("1")),
},
},
})
db.AddQuery(pkMinMaxQuery, pkMinMaxQueryResp)
config := testUtils.newQueryServiceConfig()
@ -990,7 +1034,7 @@ func TestTerseErrors2(t *testing.T) {
logStats := newSqlQueryStats("TestHandleExecError", ctx)
query := proto.Query{
Sql: "select * from test_table",
BindVariables: nil,
BindVariables: map[string]interface{}{"a": 1},
}
var err error
defer func() {
@ -1011,7 +1055,33 @@ func TestTerseErrors2(t *testing.T) {
})
}
func setUpSqlQueryTest() *fakesqldb.DB {
func TestTerseErrors3(t *testing.T) {
ctx := context.Background()
logStats := newSqlQueryStats("TestHandleExecError", ctx)
query := proto.Query{
Sql: "select * from test_table",
BindVariables: nil,
}
var err error
defer func() {
want := "error: msg"
if err == nil || err.Error() != want {
t.Errorf("Error: %v, want '%s'", err, want)
}
}()
testUtils := newTestUtils()
config := testUtils.newQueryServiceConfig()
sqlQuery := NewSqlQuery(config)
sqlQuery.config.TerseErrors = true
defer sqlQuery.handleExecError(&query, &err, logStats)
panic(&TabletError{
ErrorType: ErrFail,
Message: "msg",
SqlError: 10,
})
}
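Together with the handleExecErrorNoPanic change earlier in the commit, terse error reporting now applies only when the query actually carries bind variables; roughly:

// TerseErrors on, SqlError != 0, bind variables present (TestTerseErrors2):
//   something like "error: (errno <code>) during query: select * from test_table"  // bind values elided
// TerseErrors on, SqlError != 0, no bind variables (TestTerseErrors3):
//   "error: msg"  // the full TabletError message is kept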
func setUpSQLQueryTest() *fakesqldb.DB {
db := fakesqldb.Register()
for query, result := range getSupportedQueries() {
db.AddQuery(query, result)
@ -1019,7 +1089,7 @@ func setUpSqlQueryTest() *fakesqldb.DB {
return db
}
func checkSqlQueryState(t *testing.T, sqlQuery *SqlQuery, expectState string) {
func checkSQLQueryState(t *testing.T, sqlQuery *SqlQuery, expectState string) {
if sqlQuery.GetState() != expectState {
t.Fatalf("sqlquery should in state: %s, but get state: %s", expectState, sqlQuery.GetState())
}


@ -5,6 +5,7 @@
package tabletserver
import (
"errors"
"fmt"
"math/rand"
"testing"
@ -18,6 +19,8 @@ import (
"golang.org/x/net/context"
)
var errRejected = errors.New("rejected")
func TestTableInfoNew(t *testing.T) {
fakecacheservice.Register()
db := fakesqldb.Register()
@ -46,7 +49,7 @@ func TestTableInfoFailBecauseUnableToRetrieveTableIndex(t *testing.T) {
for query, result := range getTestTableInfoQueries() {
db.AddQuery(query, result)
}
db.AddRejectedQuery("show index from `test_table`")
db.AddRejectedQuery("show index from `test_table`", errRejected)
cachePool := newTestTableInfoCachePool()
cachePool.Open()
defer cachePool.Close()

View file

@ -193,13 +193,20 @@ func handleError(err *error, logStats *SQLQueryStats, queryServiceStats *QuerySe
}
*err = terr
terr.RecordStats(queryServiceStats)
if terr.ErrorType == ErrRetry { // Retry errors are too spammy
switch terr.ErrorType {
case ErrRetry: // Retry errors are too spammy
return
}
if terr.ErrorType == ErrTxPoolFull {
case ErrTxPoolFull:
logTxPoolFull.Errorf("%v", terr)
} else {
log.Errorf("%v", terr)
default:
switch terr.SqlError {
// MySQL deadlock errors are (usually) due to client behavior, not server
// behavior, and therefore logged at the INFO level.
case mysql.ErrLockWaitTimeout, mysql.ErrLockDeadlock:
log.Infof("%v", terr)
default:
log.Errorf("%v", terr)
}
}
}
if logStats != nil {

View file

@ -988,9 +988,11 @@ var testStreamHealthStreamHealthResponse = &pb.StreamHealthResponse{
},
TabletExternallyReparentedTimestamp: 1234589,
RealtimeStats: &pb.RealtimeStats{
HealthError: "random error",
SecondsBehindMaster: 234,
CpuUsage: 1.0,
HealthError: "random error",
SecondsBehindMaster: 234,
BinlogPlayersCount: 1,
SecondsBehindMasterFilteredReplication: 2,
CpuUsage: 1.0,
},
}
var testStreamHealthError = "to trigger a server error"

View file

@ -174,7 +174,7 @@ func TestTxPoolBeginWithPoolConnectionError(t *testing.T) {
func TestTxPoolBeginWithExecError(t *testing.T) {
db := fakesqldb.Register()
db.AddRejectedQuery("begin")
db.AddRejectedQuery("begin", errRejected)
txPool := newTxPool(false)
appParams := sqldb.ConnParams{}
dbaParams := sqldb.ConnParams{}
@ -190,7 +190,7 @@ func TestTxPoolSafeCommitFail(t *testing.T) {
sql := fmt.Sprintf("alter table test_table add test_column int")
db.AddQuery("begin", &proto.QueryResult{})
db.AddQuery(sql, &proto.QueryResult{})
db.AddRejectedQuery("commit")
db.AddRejectedQuery("commit", errRejected)
txPool := newTxPool(false)
appParams := sqldb.ConnParams{}
dbaParams := sqldb.ConnParams{}
@ -215,7 +215,7 @@ func TestTxPoolRollbackFail(t *testing.T) {
db := fakesqldb.Register()
db.AddQuery(sql, &proto.QueryResult{})
db.AddQuery("begin", &proto.QueryResult{})
db.AddRejectedQuery("rollback")
db.AddRejectedQuery("rollback", errRejected)
txPool := newTxPool(false)
appParams := sqldb.ConnParams{}

View file

@ -1,11 +1,12 @@
package events
import (
"github.com/youtube/vitess/go/vt/topo"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
// KeyspaceChange is an event that describes changes to a keyspace.
type KeyspaceChange struct {
KeyspaceInfo topo.KeyspaceInfo
KeyspaceName string
Keyspace *pb.Keyspace
Status string
}

View file

@ -9,8 +9,8 @@ import (
// Syslog writes the event to syslog.
func (kc *KeyspaceChange) Syslog() (syslog.Priority, string) {
return syslog.LOG_INFO, fmt.Sprintf("%s [keyspace] %s",
kc.KeyspaceInfo.KeyspaceName(), kc.Status)
return syslog.LOG_INFO, fmt.Sprintf("%s [keyspace] %s value: %s",
kc.KeyspaceName, kc.Status, kc.Keyspace.String())
}
var _ syslogger.Syslogger = (*KeyspaceChange)(nil) // compile-time interface check

View file

@ -4,14 +4,17 @@ import (
"log/syslog"
"testing"
"github.com/youtube/vitess/go/vt/topo"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
func TestKeyspaceChangeSyslog(t *testing.T) {
wantSev, wantMsg := syslog.LOG_INFO, "keyspace-123 [keyspace] status"
wantSev, wantMsg := syslog.LOG_INFO, "keyspace-123 [keyspace] status value: sharding_column_name:\"sharded_by_me\" "
kc := &KeyspaceChange{
KeyspaceInfo: *topo.NewKeyspaceInfo("keyspace-123", nil, -1),
Status: "status",
KeyspaceName: "keyspace-123",
Keyspace: &pb.Keyspace{
ShardingColumnName: "sharded_by_me",
},
Status: "status",
}
gotSev, gotMsg := kc.Syslog()

View file

@ -1,11 +1,13 @@
package events
import (
"github.com/youtube/vitess/go/vt/topo"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
// ShardChange is an event that describes changes to a shard.
type ShardChange struct {
ShardInfo topo.ShardInfo
Status string
KeyspaceName string
ShardName string
Shard *pb.Shard
Status string
}

View file

@ -9,8 +9,8 @@ import (
// Syslog writes the event to syslog.
func (sc *ShardChange) Syslog() (syslog.Priority, string) {
return syslog.LOG_INFO, fmt.Sprintf("%s/%s [shard] %s",
sc.ShardInfo.Keyspace(), sc.ShardInfo.ShardName(), sc.Status)
return syslog.LOG_INFO, fmt.Sprintf("%s/%s [shard] %s value: %s",
sc.KeyspaceName, sc.ShardName, sc.Status, sc.Shard.String())
}
var _ syslogger.Syslogger = (*ShardChange)(nil) // compile-time interface check

View file

@ -4,14 +4,21 @@ import (
"log/syslog"
"testing"
"github.com/youtube/vitess/go/vt/topo"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
func TestShardChangeSyslog(t *testing.T) {
wantSev, wantMsg := syslog.LOG_INFO, "keyspace-123/shard-123 [shard] status"
wantSev, wantMsg := syslog.LOG_INFO, "keyspace-123/shard-123 [shard] status value: master_alias:<cell:\"test\" uid:123 > "
sc := &ShardChange{
ShardInfo: *topo.NewShardInfo("keyspace-123", "shard-123", nil, -1),
Status: "status",
KeyspaceName: "keyspace-123",
ShardName: "shard-123",
Shard: &pb.Shard{
MasterAlias: &pb.TabletAlias{
Cell: "test",
Uid: 123,
},
},
Status: "status",
}
gotSev, gotMsg := sc.Syslog()

View file

@ -5,13 +5,13 @@ import (
"log/syslog"
"github.com/youtube/vitess/go/event/syslogger"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
)
// Syslog writes the event to syslog.
func (tc *TabletChange) Syslog() (syslog.Priority, string) {
return syslog.LOG_INFO, fmt.Sprintf("%s/%s/%s [tablet] %s",
tc.Tablet.Keyspace, tc.Tablet.Shard, topo.TabletAliasString(tc.Tablet.Alias), tc.Status)
tc.Tablet.Keyspace, tc.Tablet.Shard, topoproto.TabletAliasString(tc.Tablet.Alias), tc.Status)
}
var _ syslogger.Syslogger = (*TabletChange)(nil) // compile-time interface check

View file

@ -19,7 +19,7 @@ import (
)
// CopyKeyspaces will create the keyspaces in the destination topo
func CopyKeyspaces(ctx context.Context, fromTS, toTS topo.Server) {
func CopyKeyspaces(ctx context.Context, fromTS, toTS topo.Impl) {
keyspaces, err := fromTS.GetKeyspaces(ctx)
if err != nil {
log.Fatalf("GetKeyspaces: %v", err)
@ -32,13 +32,13 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS topo.Server) {
go func(keyspace string) {
defer wg.Done()
k, err := fromTS.GetKeyspace(ctx, keyspace)
k, _, err := fromTS.GetKeyspace(ctx, keyspace)
if err != nil {
rec.RecordError(fmt.Errorf("GetKeyspace(%v): %v", keyspace, err))
return
}
if err := toTS.CreateKeyspace(ctx, keyspace, k.Keyspace); err != nil {
if err := toTS.CreateKeyspace(ctx, keyspace, k); err != nil {
if err == topo.ErrNodeExists {
log.Warningf("keyspace %v already exists", keyspace)
} else {
@ -54,7 +54,7 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS topo.Server) {
}
// CopyShards will create the shards in the destination topo
func CopyShards(ctx context.Context, fromTS, toTS topo.Server, deleteKeyspaceShards bool) {
func CopyShards(ctx context.Context, fromTS, toTS topo.Impl, deleteKeyspaceShards bool) {
keyspaces, err := fromTS.GetKeyspaces(ctx)
if err != nil {
log.Fatalf("fromTS.GetKeyspaces: %v", err)
@ -83,7 +83,7 @@ func CopyShards(ctx context.Context, fromTS, toTS topo.Server, deleteKeyspaceSha
wg.Add(1)
go func(keyspace, shard string) {
defer wg.Done()
if err := topo.CreateShard(ctx, toTS, keyspace, shard); err != nil {
if err := toTS.CreateShard(ctx, keyspace, shard, &pb.Shard{}); err != nil {
if err == topo.ErrNodeExists {
log.Warningf("shard %v/%v already exists", keyspace, shard)
} else {
@ -92,19 +92,19 @@ func CopyShards(ctx context.Context, fromTS, toTS topo.Server, deleteKeyspaceSha
}
}
si, err := fromTS.GetShard(ctx, keyspace, shard)
s, _, err := fromTS.GetShard(ctx, keyspace, shard)
if err != nil {
rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err))
return
}
toSi, err := toTS.GetShard(ctx, keyspace, shard)
_, toV, err := toTS.GetShard(ctx, keyspace, shard)
if err != nil {
rec.RecordError(fmt.Errorf("toTS.GetShard(%v, %v): %v", keyspace, shard, err))
return
}
if _, err := toTS.UpdateShard(ctx, si, toSi.Version()); err != nil {
if _, err := toTS.UpdateShard(ctx, keyspace, shard, s, toV); err != nil {
rec.RecordError(fmt.Errorf("UpdateShard(%v, %v): %v", keyspace, shard, err))
}
}(keyspace, shard)
@ -118,7 +118,7 @@ func CopyShards(ctx context.Context, fromTS, toTS topo.Server, deleteKeyspaceSha
}
// CopyTablets will create the tablets in the destination topo
func CopyTablets(ctx context.Context, fromTS, toTS topo.Server) {
func CopyTablets(ctx context.Context, fromTS, toTS topo.Impl) {
cells, err := fromTS.GetKnownCells(ctx)
if err != nil {
log.Fatalf("fromTS.GetKnownCells: %v", err)
@ -140,19 +140,19 @@ func CopyTablets(ctx context.Context, fromTS, toTS topo.Server) {
defer wg.Done()
// read the source tablet
ti, err := fromTS.GetTablet(ctx, tabletAlias)
tablet, _, err := fromTS.GetTablet(ctx, tabletAlias)
if err != nil {
rec.RecordError(fmt.Errorf("GetTablet(%v): %v", tabletAlias, err))
return
}
// try to create the destination
err = toTS.CreateTablet(ctx, ti.Tablet)
err = toTS.CreateTablet(ctx, tablet)
if err == topo.ErrNodeExists {
// update the destination tablet
log.Warningf("tablet %v already exists, updating it", tabletAlias)
err = toTS.UpdateTabletFields(ctx, ti.Alias, func(t *pb.Tablet) error {
*t = *ti.Tablet
_, err = toTS.UpdateTabletFields(ctx, tablet.Alias, func(t *pb.Tablet) error {
*t = *tablet
return nil
})
}
@ -173,7 +173,7 @@ func CopyTablets(ctx context.Context, fromTS, toTS topo.Server) {
// CopyShardReplications will create the ShardReplication objects in
// the destination topo
func CopyShardReplications(ctx context.Context, fromTS, toTS topo.Server) {
func CopyShardReplications(ctx context.Context, fromTS, toTS topo.Impl) {
keyspaces, err := fromTS.GetKeyspaces(ctx)
if err != nil {
log.Fatalf("fromTS.GetKeyspaces: %v", err)
@ -197,13 +197,13 @@ func CopyShardReplications(ctx context.Context, fromTS, toTS topo.Server) {
defer wg.Done()
// read the source shard to get the cells
si, err := fromTS.GetShard(ctx, keyspace, shard)
s, _, err := fromTS.GetShard(ctx, keyspace, shard)
if err != nil {
rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err))
return
}
for _, cell := range si.Cells {
for _, cell := range s.Cells {
sri, err := fromTS.GetShardReplication(ctx, cell, keyspace, shard)
if err != nil {
rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v): %v", cell, keyspace, shard, err))

View file

@ -20,7 +20,7 @@ import (
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
func createSetup(ctx context.Context, t *testing.T) (topo.Server, topo.Server) {
func createSetup(ctx context.Context, t *testing.T) (topo.Impl, topo.Impl) {
fromConn := fakezk.NewConn()
fromTS := zktopo.NewServer(fromConn)
@ -40,7 +40,8 @@ func createSetup(ctx context.Context, t *testing.T) (topo.Server, topo.Server) {
if err := fromTS.CreateShard(ctx, "test_keyspace", "0", &pb.Shard{Cells: []string{"test_cell"}}); err != nil {
t.Fatalf("cannot create shard: %v", err)
}
if err := topo.CreateTablet(ctx, fromTS, &pb.Tablet{
tts := topo.Server{Impl: fromTS}
if err := tts.CreateTablet(ctx, &pb.Tablet{
Alias: &pb.TabletAlias{
Cell: "test_cell",
Uid: 123,
@ -60,7 +61,7 @@ func createSetup(ctx context.Context, t *testing.T) (topo.Server, topo.Server) {
}); err != nil {
t.Fatalf("cannot create master tablet: %v", err)
}
if err := topo.CreateTablet(ctx, fromTS, &pb.Tablet{
if err := tts.CreateTablet(ctx, &pb.Tablet{
Alias: &pb.TabletAlias{
Cell: "test_cell",
Uid: 234,
@ -117,12 +118,12 @@ func TestBasic(t *testing.T) {
t.Fatalf("unexpected shards: %v", shards)
}
CopyShards(ctx, fromTS, toTS, false)
si, err := toTS.GetShard(ctx, "test_keyspace", "0")
s, _, err := toTS.GetShard(ctx, "test_keyspace", "0")
if err != nil {
t.Fatalf("cannot read shard: %v", err)
}
if len(si.Cells) != 1 || si.Cells[0] != "test_cell" {
t.Fatalf("bad shard data: %v", *si)
if len(s.Cells) != 1 || s.Cells[0] != "test_cell" {
t.Fatalf("bad shard data: %v", *s)
}
// check ShardReplication copy

View file

@ -26,14 +26,14 @@ import (
// - we lock primary/secondary if reverseLockOrder is False,
// or secondary/primary if reverseLockOrder is True.
type Tee struct {
primary topo.Server
secondary topo.Server
primary topo.Impl
secondary topo.Impl
readFrom topo.Server
readFromSecond topo.Server
readFrom topo.Impl
readFromSecond topo.Impl
lockFirst topo.Server
lockSecond topo.Server
lockFirst topo.Impl
lockSecond topo.Impl
// protects the variables below this point
mu sync.Mutex
@ -55,8 +55,8 @@ type versionMapping struct {
readFromSecondVersion int64
}
// NewTee returns a new topo.Server object
func NewTee(primary, secondary topo.Server, reverseLockOrder bool) *Tee {
// NewTee returns a new topo.Impl object
func NewTee(primary, secondary topo.Impl, reverseLockOrder bool) *Tee {
lockFirst := primary
lockSecond := secondary
if reverseLockOrder {
@ -116,8 +116,8 @@ func (tee *Tee) CreateKeyspace(ctx context.Context, keyspace string, value *pb.K
}
// UpdateKeyspace is part of the topo.Server interface
func (tee *Tee) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (newVersion int64, err error) {
if newVersion, err = tee.primary.UpdateKeyspace(ctx, ki, existingVersion); err != nil {
func (tee *Tee) UpdateKeyspace(ctx context.Context, keyspace string, value *pb.Keyspace, existingVersion int64) (newVersion int64, err error) {
if newVersion, err = tee.primary.UpdateKeyspace(ctx, keyspace, value, existingVersion); err != nil {
// failed on primary, not updating secondary
return
}
@ -126,39 +126,39 @@ func (tee *Tee) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, exist
// and keyspace version in second topo, replace the version number.
// if not, this will probably fail and log.
tee.mu.Lock()
kvm, ok := tee.keyspaceVersionMapping[ki.KeyspaceName()]
kvm, ok := tee.keyspaceVersionMapping[keyspace]
if ok && kvm.readFromVersion == existingVersion {
existingVersion = kvm.readFromSecondVersion
delete(tee.keyspaceVersionMapping, ki.KeyspaceName())
delete(tee.keyspaceVersionMapping, keyspace)
}
tee.mu.Unlock()
if newVersion2, serr := tee.secondary.UpdateKeyspace(ctx, ki, existingVersion); serr != nil {
if newVersion2, serr := tee.secondary.UpdateKeyspace(ctx, keyspace, value, existingVersion); serr != nil {
// not critical enough to fail
if serr == topo.ErrNoNode {
// the keyspace doesn't exist on the secondary, let's
// just create it
if serr = tee.secondary.CreateKeyspace(ctx, ki.KeyspaceName(), ki.Keyspace); serr != nil {
log.Warningf("secondary.CreateKeyspace(%v) failed (after UpdateKeyspace returned ErrNoNode): %v", ki.KeyspaceName(), serr)
if serr = tee.secondary.CreateKeyspace(ctx, keyspace, value); serr != nil {
log.Warningf("secondary.CreateKeyspace(%v) failed (after UpdateKeyspace returned ErrNoNode): %v", keyspace, serr)
} else {
log.Infof("secondary.UpdateKeyspace(%v) failed with ErrNoNode, CreateKeyspace then worked.", ki.KeyspaceName())
ki, gerr := tee.secondary.GetKeyspace(ctx, ki.KeyspaceName())
log.Infof("secondary.UpdateKeyspace(%v) failed with ErrNoNode, CreateKeyspace then worked.", keyspace)
_, secondaryVersion, gerr := tee.secondary.GetKeyspace(ctx, keyspace)
if gerr != nil {
log.Warningf("Failed to re-read keyspace(%v) after creating it on secondary: %v", ki.KeyspaceName(), gerr)
log.Warningf("Failed to re-read keyspace(%v) after creating it on secondary: %v", keyspace, gerr)
} else {
tee.mu.Lock()
tee.keyspaceVersionMapping[ki.KeyspaceName()] = versionMapping{
tee.keyspaceVersionMapping[keyspace] = versionMapping{
readFromVersion: newVersion,
readFromSecondVersion: ki.Version(),
readFromSecondVersion: secondaryVersion,
}
tee.mu.Unlock()
}
}
} else {
log.Warningf("secondary.UpdateKeyspace(%v) failed: %v", ki.KeyspaceName(), serr)
log.Warningf("secondary.UpdateKeyspace(%v) failed: %v", keyspace, serr)
}
} else {
tee.mu.Lock()
tee.keyspaceVersionMapping[ki.KeyspaceName()] = versionMapping{
tee.keyspaceVersionMapping[keyspace] = versionMapping{
readFromVersion: newVersion,
readFromSecondVersion: newVersion2,
}
@ -182,25 +182,25 @@ func (tee *Tee) DeleteKeyspace(ctx context.Context, keyspace string) error {
}
// GetKeyspace is part of the topo.Server interface
func (tee *Tee) GetKeyspace(ctx context.Context, keyspace string) (*topo.KeyspaceInfo, error) {
ki, err := tee.readFrom.GetKeyspace(ctx, keyspace)
func (tee *Tee) GetKeyspace(ctx context.Context, keyspace string) (*pb.Keyspace, int64, error) {
k, version, err := tee.readFrom.GetKeyspace(ctx, keyspace)
if err != nil {
return nil, err
return nil, 0, err
}
ki2, err := tee.readFromSecond.GetKeyspace(ctx, keyspace)
_, version2, err := tee.readFromSecond.GetKeyspace(ctx, keyspace)
if err != nil {
// can't read from secondary, so we can't keep the version map
return ki, nil
return k, version, nil
}
tee.mu.Lock()
tee.keyspaceVersionMapping[keyspace] = versionMapping{
readFromVersion: ki.Version(),
readFromSecondVersion: ki2.Version(),
readFromVersion: version,
readFromSecondVersion: version2,
}
tee.mu.Unlock()
return ki, nil
return k, version, nil
}
// GetKeyspaces is part of the topo.Server interface
@ -241,8 +241,8 @@ func (tee *Tee) CreateShard(ctx context.Context, keyspace, shard string, value *
}
// UpdateShard is part of the topo.Server interface
func (tee *Tee) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVersion int64) (newVersion int64, err error) {
if newVersion, err = tee.primary.UpdateShard(ctx, si, existingVersion); err != nil {
func (tee *Tee) UpdateShard(ctx context.Context, keyspace, shard string, value *pb.Shard, existingVersion int64) (newVersion int64, err error) {
if newVersion, err = tee.primary.UpdateShard(ctx, keyspace, shard, value, existingVersion); err != nil {
// failed on primary, not updating secondary
return
}
@ -251,39 +251,39 @@ func (tee *Tee) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVer
// and shard version in second topo, replace the version number.
// if not, this will probably fail and log.
tee.mu.Lock()
svm, ok := tee.shardVersionMapping[si.Keyspace()+"/"+si.ShardName()]
svm, ok := tee.shardVersionMapping[keyspace+"/"+shard]
if ok && svm.readFromVersion == existingVersion {
existingVersion = svm.readFromSecondVersion
delete(tee.shardVersionMapping, si.Keyspace()+"/"+si.ShardName())
delete(tee.shardVersionMapping, keyspace+"/"+shard)
}
tee.mu.Unlock()
if newVersion2, serr := tee.secondary.UpdateShard(ctx, si, existingVersion); serr != nil {
if newVersion2, serr := tee.secondary.UpdateShard(ctx, keyspace, shard, value, existingVersion); serr != nil {
// not critical enough to fail
if serr == topo.ErrNoNode {
// the shard doesn't exist on the secondary, let's
// just create it
if serr = tee.secondary.CreateShard(ctx, si.Keyspace(), si.ShardName(), si.Shard); serr != nil {
log.Warningf("secondary.CreateShard(%v,%v) failed (after UpdateShard returned ErrNoNode): %v", si.Keyspace(), si.ShardName(), serr)
if serr = tee.secondary.CreateShard(ctx, keyspace, shard, value); serr != nil {
log.Warningf("secondary.CreateShard(%v,%v) failed (after UpdateShard returned ErrNoNode): %v", keyspace, shard, serr)
} else {
log.Infof("secondary.UpdateShard(%v, %v) failed with ErrNoNode, CreateShard then worked.", si.Keyspace(), si.ShardName())
si, gerr := tee.secondary.GetShard(ctx, si.Keyspace(), si.ShardName())
log.Infof("secondary.UpdateShard(%v, %v) failed with ErrNoNode, CreateShard then worked.", keyspace, shard)
_, v, gerr := tee.secondary.GetShard(ctx, keyspace, shard)
if gerr != nil {
log.Warningf("Failed to re-read shard(%v, %v) after creating it on secondary: %v", si.Keyspace(), si.ShardName(), gerr)
log.Warningf("Failed to re-read shard(%v, %v) after creating it on secondary: %v", keyspace, shard, gerr)
} else {
tee.mu.Lock()
tee.shardVersionMapping[si.Keyspace()+"/"+si.ShardName()] = versionMapping{
tee.shardVersionMapping[keyspace+"/"+shard] = versionMapping{
readFromVersion: newVersion,
readFromSecondVersion: si.Version(),
readFromSecondVersion: v,
}
tee.mu.Unlock()
}
}
} else {
log.Warningf("secondary.UpdateShard(%v, %v) failed: %v", si.Keyspace(), si.ShardName(), serr)
log.Warningf("secondary.UpdateShard(%v, %v) failed: %v", keyspace, shard, serr)
}
} else {
tee.mu.Lock()
tee.shardVersionMapping[si.Keyspace()+"/"+si.ShardName()] = versionMapping{
tee.shardVersionMapping[keyspace+"/"+shard] = versionMapping{
readFromVersion: newVersion,
readFromSecondVersion: newVersion2,
}
@ -307,25 +307,25 @@ func (tee *Tee) ValidateShard(ctx context.Context, keyspace, shard string) error
}
// GetShard is part of the topo.Server interface
func (tee *Tee) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) {
si, err := tee.readFrom.GetShard(ctx, keyspace, shard)
func (tee *Tee) GetShard(ctx context.Context, keyspace, shard string) (*pb.Shard, int64, error) {
s, v, err := tee.readFrom.GetShard(ctx, keyspace, shard)
if err != nil {
return nil, err
return nil, 0, err
}
si2, err := tee.readFromSecond.GetShard(ctx, keyspace, shard)
_, v2, err := tee.readFromSecond.GetShard(ctx, keyspace, shard)
if err != nil {
// can't read from secondary, so we can't keep the version map
return si, nil
return s, v, nil
}
tee.mu.Lock()
tee.shardVersionMapping[keyspace+"/"+shard] = versionMapping{
readFromVersion: si.Version(),
readFromSecondVersion: si2.Version(),
readFromVersion: v,
readFromSecondVersion: v2,
}
tee.mu.Unlock()
return si, nil
return s, v, nil
}
// GetShardNames is part of the topo.Server interface
@ -366,7 +366,7 @@ func (tee *Tee) CreateTablet(ctx context.Context, tablet *pb.Tablet) error {
}
// UpdateTablet is part of the topo.Server interface
func (tee *Tee) UpdateTablet(ctx context.Context, tablet *topo.TabletInfo, existingVersion int64) (newVersion int64, err error) {
func (tee *Tee) UpdateTablet(ctx context.Context, tablet *pb.Tablet, existingVersion int64) (newVersion int64, err error) {
if newVersion, err = tee.primary.UpdateTablet(ctx, tablet, existingVersion); err != nil {
// failed on primary, not updating secondary
return
@ -387,18 +387,18 @@ func (tee *Tee) UpdateTablet(ctx context.Context, tablet *topo.TabletInfo, exist
if serr == topo.ErrNoNode {
// the tablet doesn't exist on the secondary, let's
// just create it
if serr = tee.secondary.CreateTablet(ctx, tablet.Tablet); serr != nil {
if serr = tee.secondary.CreateTablet(ctx, tablet); serr != nil {
log.Warningf("secondary.CreateTablet(%v) failed (after UpdateTablet returned ErrNoNode): %v", tablet.Alias, serr)
} else {
log.Infof("secondary.UpdateTablet(%v) failed with ErrNoNode, CreateTablet then worked.", tablet.Alias)
ti, gerr := tee.secondary.GetTablet(ctx, tablet.Alias)
_, v, gerr := tee.secondary.GetTablet(ctx, tablet.Alias)
if gerr != nil {
log.Warningf("Failed to re-read tablet(%v) after creating it on secondary: %v", tablet.Alias, gerr)
} else {
tee.mu.Lock()
tee.tabletVersionMapping[*tablet.Alias] = versionMapping{
readFromVersion: newVersion,
readFromSecondVersion: ti.Version(),
readFromSecondVersion: v,
}
tee.mu.Unlock()
}
@ -418,17 +418,18 @@ func (tee *Tee) UpdateTablet(ctx context.Context, tablet *topo.TabletInfo, exist
}
// UpdateTabletFields is part of the topo.Server interface
func (tee *Tee) UpdateTabletFields(ctx context.Context, tabletAlias *pb.TabletAlias, update func(*pb.Tablet) error) error {
if err := tee.primary.UpdateTabletFields(ctx, tabletAlias, update); err != nil {
func (tee *Tee) UpdateTabletFields(ctx context.Context, tabletAlias *pb.TabletAlias, update func(*pb.Tablet) error) (*pb.Tablet, error) {
tablet, err := tee.primary.UpdateTabletFields(ctx, tabletAlias, update)
if err != nil {
// failed on primary, not updating secondary
return err
return nil, err
}
if err := tee.secondary.UpdateTabletFields(ctx, tabletAlias, update); err != nil {
if _, err := tee.secondary.UpdateTabletFields(ctx, tabletAlias, update); err != nil {
// not critical enough to fail
log.Warningf("secondary.UpdateTabletFields(%v) failed: %v", tabletAlias, err)
}
return nil
return tablet, nil
}
// DeleteTablet is part of the topo.Server interface
@ -445,25 +446,25 @@ func (tee *Tee) DeleteTablet(ctx context.Context, alias *pb.TabletAlias) error {
}
// GetTablet is part of the topo.Server interface
func (tee *Tee) GetTablet(ctx context.Context, alias *pb.TabletAlias) (*topo.TabletInfo, error) {
ti, err := tee.readFrom.GetTablet(ctx, alias)
func (tee *Tee) GetTablet(ctx context.Context, alias *pb.TabletAlias) (*pb.Tablet, int64, error) {
t, v, err := tee.readFrom.GetTablet(ctx, alias)
if err != nil {
return nil, err
return nil, 0, err
}
ti2, err := tee.readFromSecond.GetTablet(ctx, alias)
_, v2, err := tee.readFromSecond.GetTablet(ctx, alias)
if err != nil {
// can't read from secondary, so we can't keep the version map
return ti, nil
return t, v, nil
}
tee.mu.Lock()
tee.tabletVersionMapping[*alias] = versionMapping{
readFromVersion: ti.Version(),
readFromSecondVersion: ti2.Version(),
readFromVersion: v,
readFromSecondVersion: v2,
}
tee.mu.Unlock()
return ti, nil
return t, v, nil
}
// GetTabletsByCell is part of the topo.Server interface
@ -692,10 +693,10 @@ func (tee *Tee) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string,
return tee.readFrom.GetSrvKeyspaceNames(ctx, cell)
}
// WatchEndPoints is part of the topo.Server interface.
// WatchSrvKeyspace is part of the topo.Server interface.
// We only watch for changes on the primary.
func (tee *Tee) WatchEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType pb.TabletType) (<-chan *pb.EndPoints, chan<- struct{}, error) {
return tee.primary.WatchEndPoints(ctx, cell, keyspace, shard, tabletType)
func (tee *Tee) WatchSrvKeyspace(ctx context.Context, cell, keyspace string) (<-chan *topo.SrvKeyspace, chan<- struct{}, error) {
return tee.primary.WatchSrvKeyspace(ctx, cell, keyspace)
}
//
@ -799,3 +800,22 @@ func (tee *Tee) UnlockShardForAction(ctx context.Context, keyspace, shard, lockP
}
return perr
}
// SaveVSchema is part of the topo.Server interface
func (tee *Tee) SaveVSchema(ctx context.Context, contents string) error {
err := tee.primary.SaveVSchema(ctx, contents)
if err != nil {
return err
}
if err := tee.secondary.SaveVSchema(ctx, contents); err != nil {
// not critical enough to fail
log.Warningf("secondary.SaveVSchema() failed: %v", err)
}
return err
}
// GetVSchema is part of the topo.Server interface
func (tee *Tee) GetVSchema(ctx context.Context) (string, error) {
return tee.readFrom.GetVSchema(ctx)
}

View file

@ -25,7 +25,7 @@ func TestTee(t *testing.T) {
// create a tee and check it implements the interface
tee := NewTee(fromTS, toTS, true)
var _ topo.Server = tee
var _ topo.Impl = tee
// create a keyspace, make sure it is on both sides
if err := tee.CreateKeyspace(ctx, "keyspace2", &pb.Keyspace{}); err != nil {

View file

@ -20,7 +20,7 @@ import (
)
type fakeServer struct {
topo.Server
topo.Impl
localCells []string
}
@ -28,7 +28,7 @@ func (s fakeServer) GetKnownCells(ctx context.Context) ([]string, error) {
return s.localCells, nil
}
func newFakeTeeServer(t *testing.T) topo.Server {
func newFakeTeeServer(t *testing.T) topo.Impl {
cells := []string{"test", "global"} // global has to be last
zconn1 := fakezk.NewConn()
@ -42,8 +42,8 @@ func newFakeTeeServer(t *testing.T) topo.Server {
t.Fatalf("cannot init ZooKeeper: %v", err)
}
}
s1 := fakeServer{Server: zktopo.NewServer(zconn1), localCells: cells[:len(cells)-1]}
s2 := fakeServer{Server: zktopo.NewServer(zconn2), localCells: cells[:len(cells)-1]}
s1 := fakeServer{Impl: zktopo.NewServer(zconn1), localCells: cells[:len(cells)-1]}
s2 := fakeServer{Impl: zktopo.NewServer(zconn2), localCells: cells[:len(cells)-1]}
return NewTee(s1, s2, false)
}
@ -72,10 +72,10 @@ func TestServingGraph(t *testing.T) {
test.CheckServingGraph(ctx, t, ts)
}
func TestWatchEndPoints(t *testing.T) {
func TestWatchSrvKeyspace(t *testing.T) {
zktopo.WatchSleepDuration = 2 * time.Millisecond
ts := newFakeTeeServer(t)
test.CheckWatchEndPoints(context.Background(), t, ts)
test.CheckWatchSrvKeyspace(context.Background(), t, ts)
}
func TestShardReplication(t *testing.T) {

View file

@ -11,7 +11,9 @@ import (
log "github.com/golang/glog"
"golang.org/x/net/context"
"github.com/youtube/vitess/go/event"
"github.com/youtube/vitess/go/vt/concurrency"
"github.com/youtube/vitess/go/vt/topo/events"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
@ -32,22 +34,6 @@ func (ki *KeyspaceInfo) KeyspaceName() string {
return ki.keyspace
}
// Version returns the keyspace version from last time it was read or updated.
func (ki *KeyspaceInfo) Version() int64 {
return ki.version
}
// NewKeyspaceInfo returns a KeyspaceInfo based on the given keyspace
// value. This function should only be used by Server
// implementations.
func NewKeyspaceInfo(keyspace string, value *pb.Keyspace, version int64) *KeyspaceInfo {
return &KeyspaceInfo{
keyspace: keyspace,
version: version,
Keyspace: value,
}
}
// GetServedFrom returns a Keyspace_ServedFrom record if it exists.
func (ki *KeyspaceInfo) GetServedFrom(tabletType pb.TabletType) *pb.Keyspace_ServedFrom {
for _, ksf := range ki.ServedFroms {
@ -151,23 +137,59 @@ func (ki *KeyspaceInfo) ComputeCellServedFrom(cell string) map[TabletType]string
return result
}
// CreateKeyspace wraps the underlying Impl.CreateKeyspace
// and dispatches the event.
func (ts Server) CreateKeyspace(ctx context.Context, keyspace string, value *pb.Keyspace) error {
if err := ts.Impl.CreateKeyspace(ctx, keyspace, value); err != nil {
return err
}
event.Dispatch(&events.KeyspaceChange{
KeyspaceName: keyspace,
Keyspace: value,
Status: "created",
})
return nil
}
// GetKeyspace reads the given keyspace and returns it
func (ts Server) GetKeyspace(ctx context.Context, keyspace string) (*KeyspaceInfo, error) {
value, version, err := ts.Impl.GetKeyspace(ctx, keyspace)
if err != nil {
return nil, err
}
return &KeyspaceInfo{
keyspace: keyspace,
version: version,
Keyspace: value,
}, nil
}
// UpdateKeyspace updates the keyspace data, with the right version
func UpdateKeyspace(ctx context.Context, ts Server, ki *KeyspaceInfo) error {
func (ts Server) UpdateKeyspace(ctx context.Context, ki *KeyspaceInfo) error {
var version int64 = -1
if ki.version != 0 {
version = ki.version
}
newVersion, err := ts.UpdateKeyspace(ctx, ki, version)
if err == nil {
ki.version = newVersion
newVersion, err := ts.Impl.UpdateKeyspace(ctx, ki.keyspace, ki.Keyspace, version)
if err != nil {
return err
}
return err
ki.version = newVersion
event.Dispatch(&events.KeyspaceChange{
KeyspaceName: ki.keyspace,
Keyspace: ki.Keyspace,
Status: "updated",
})
return nil
}
// FindAllShardsInKeyspace reads and returns all the existing shards in
// a keyspace. It doesn't take any lock.
func FindAllShardsInKeyspace(ctx context.Context, ts Server, keyspace string) (map[string]*ShardInfo, error) {
func (ts Server) FindAllShardsInKeyspace(ctx context.Context, keyspace string) (map[string]*ShardInfo, error) {
shards, err := ts.GetShardNames(ctx, keyspace)
if err != nil {
return nil, err
@ -197,3 +219,31 @@ func FindAllShardsInKeyspace(ctx context.Context, ts Server, keyspace string) (m
}
return result, nil
}
// DeleteKeyspaceShards wraps the underlying Impl.DeleteKeyspaceShards
// and dispatches the event.
func (ts Server) DeleteKeyspaceShards(ctx context.Context, keyspace string) error {
if err := ts.Impl.DeleteKeyspaceShards(ctx, keyspace); err != nil {
return err
}
event.Dispatch(&events.KeyspaceChange{
KeyspaceName: keyspace,
Keyspace: nil,
Status: "deleted all shards",
})
return nil
}
// DeleteKeyspace wraps the underlying Impl.DeleteKeyspace
// and dispatches the event.
func (ts Server) DeleteKeyspace(ctx context.Context, keyspace string) error {
if err := ts.Impl.DeleteKeyspace(ctx, keyspace); err != nil {
return err
}
event.Dispatch(&events.KeyspaceChange{
KeyspaceName: keyspace,
Keyspace: nil,
Status: "deleted",
})
return nil
}

View file

@ -14,20 +14,24 @@ import (
// This file tests the keyspace-related object functionalities.
func TestUpdateServedFromMap(t *testing.T) {
ki := NewKeyspaceInfo("ks", &pb.Keyspace{
ServedFroms: []*pb.Keyspace_ServedFrom{
&pb.Keyspace_ServedFrom{
TabletType: pb.TabletType_RDONLY,
Cells: nil,
Keyspace: "source",
},
&pb.Keyspace_ServedFrom{
TabletType: pb.TabletType_MASTER,
Cells: nil,
Keyspace: "source",
ki := &KeyspaceInfo{
keyspace: "ks",
version: 1,
Keyspace: &pb.Keyspace{
ServedFroms: []*pb.Keyspace_ServedFrom{
&pb.Keyspace_ServedFrom{
TabletType: pb.TabletType_RDONLY,
Cells: nil,
Keyspace: "source",
},
&pb.Keyspace_ServedFrom{
TabletType: pb.TabletType_MASTER,
Cells: nil,
Keyspace: "source",
},
},
},
}, 1)
}
allCells := []string{"first", "second", "third"}
// migrate one cell
@ -115,20 +119,24 @@ func TestUpdateServedFromMap(t *testing.T) {
}
func TestComputeCellServedFrom(t *testing.T) {
ki := NewKeyspaceInfo("ks", &pb.Keyspace{
ServedFroms: []*pb.Keyspace_ServedFrom{
&pb.Keyspace_ServedFrom{
TabletType: pb.TabletType_MASTER,
Cells: nil,
Keyspace: "source",
},
&pb.Keyspace_ServedFrom{
TabletType: pb.TabletType_REPLICA,
Cells: []string{"c1", "c2"},
Keyspace: "source",
ki := &KeyspaceInfo{
keyspace: "ks",
version: 1,
Keyspace: &pb.Keyspace{
ServedFroms: []*pb.Keyspace_ServedFrom{
&pb.Keyspace_ServedFrom{
TabletType: pb.TabletType_MASTER,
Cells: nil,
Keyspace: "source",
},
&pb.Keyspace_ServedFrom{
TabletType: pb.TabletType_REPLICA,
Cells: []string{"c1", "c2"},
Keyspace: "source",
},
},
},
}, 1)
}
m := ki.ComputeCellServedFrom("c3")
if !reflect.DeepEqual(m, map[TabletType]string{

View file

@ -9,6 +9,7 @@ import (
"strings"
"github.com/youtube/vitess/go/vt/key"
"github.com/youtube/vitess/go/vt/topo/topoproto"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
@ -18,7 +19,7 @@ import (
// TabletTypeToProto turns a TabletType into a proto
func TabletTypeToProto(t TabletType) pb.TabletType {
if result, err := ParseTabletType(string(t)); err != nil {
if result, err := topoproto.ParseTabletType(string(t)); err != nil {
panic(fmt.Errorf("unknown tablet type: %v", t))
} else {
return result

Some files were not shown because too many files changed in this diff