Mirror of https://github.com/github/vitess-gh.git
Commit b167cd5f3b
@@ -30,9 +30,10 @@ class TestAutomationHorizontalResharding(worker.TestBaseSplitClone):
worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj'],
auto_log=True)
vtworker_endpoint = "localhost:" + str(worker_rpc_port)
vtworker_endpoint = 'localhost:' + str(worker_rpc_port)

automation_server_proc, automation_server_port = utils.run_automation_server()
automation_server_proc, automation_server_port = (
utils.run_automation_server())

keyspace = 'test_keyspace'
source_shard_list = '0'
@@ -3,13 +3,9 @@
import warnings
# Dropping a table inexplicably produces a warning despite
# the "IF EXISTS" clause. Squelch these warnings.
warnings.simplefilter("ignore")
warnings.simplefilter('ignore')

import gzip
import logging
import os
import shutil
from subprocess import call
import unittest

import environment
@@ -24,6 +20,7 @@ tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld)

setup_procs = []


def setUpModule():
try:
environment.topo_server().setup()
@@ -45,6 +42,7 @@ def setUpModule():
tearDownModule()
raise


def tearDownModule():
if utils.options.skip_teardown:
return
@@ -72,6 +70,7 @@ def tearDownModule():


class TestBackup(unittest.TestCase):

def tearDown(self):
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
@@ -86,8 +85,10 @@ class TestBackup(unittest.TestCase):
) Engine=InnoDB'''

def _insert_master(self, index):
tablet_master.mquery('vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" % index, write=True)
tablet_master.mquery(
'vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" %
index, write=True)

def test_backup(self):
"""test_backup will:
@@ -115,7 +116,8 @@ class TestBackup(unittest.TestCase):
self._insert_master(1)
timeout = 10
while True:
result = tablet_replica1.mquery('vt_test_keyspace', 'select count(*) from vt_insert_test')
result = tablet_replica1.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == 1:
break
timeout = utils.wait_step('slave tablet getting data', timeout)
@@ -136,7 +138,8 @@ class TestBackup(unittest.TestCase):
# check the new slave has the data
timeout = 10
while True:
result = tablet_replica2.mquery('vt_test_keyspace', 'select count(*) from vt_insert_test')
result = tablet_replica2.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == 2:
break
timeout = utils.wait_step('new slave tablet getting data', timeout)
@@ -146,7 +149,7 @@ class TestBackup(unittest.TestCase):
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
backups = backups.splitlines()
logging.debug("list of backups: %s", backups)
logging.debug('list of backups: %s', backups)
self.assertEqual(len(backups), 1)
self.assertTrue(backups[0].startswith(tablet_replica1.tablet_alias))

@@ -160,7 +163,7 @@ class TestBackup(unittest.TestCase):
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
backups = backups.splitlines()
logging.debug("list of backups after remove: %s", backups)
logging.debug('list of backups after remove: %s', backups)
self.assertEqual(len(backups), 0)

for t in tablet_master, tablet_replica1, tablet_replica2:
@@ -70,7 +70,7 @@ def setUpModule():
src_master.tablet_alias], auto_log=True)

# Create schema
logging.debug("Creating schema...")
logging.debug('Creating schema...')
create_table = '''create table test_table(
id bigint auto_increment,
keyspace_id bigint(20) unsigned,
@@ -99,18 +99,18 @@ def setUpModule():

# run the clone worked (this is a degenerate case, source and destination
# both have the full keyrange. Happens to work correctly).
logging.debug("Running the clone worker to start binlog stream...")
logging.debug('Running the clone worker to start binlog stream...')
utils.run_vtworker(['--cell', 'test_nj',
'SplitClone',
'--strategy=-populate_blp_checkpoint',
'--source_reader_count', '10',
'--min_table_size_for_split', '1',
'test_keyspace/0'],
auto_log=True)
auto_log=True)
dst_master.wait_for_binlog_player_count(1)

# Wait for dst_replica to be ready.
dst_replica.wait_for_binlog_server_state("Enabled")
dst_replica.wait_for_binlog_server_state('Enabled')
except:
tearDownModule()
raise
@@ -162,8 +162,10 @@ class TestBinlog(unittest.TestCase):
# Vitess tablets default to using utf8, so we insert something crazy and
# pretend it's latin1. If the binlog player doesn't also pretend it's
# latin1, it will be inserted as utf8, which will change its value.
src_master.mquery("vt_test_keyspace",
"INSERT INTO test_table (id, keyspace_id, msg) VALUES (41523, 1, 'Šṛ́rỏé') /* EMD keyspace_id:1 */",
src_master.mquery(
'vt_test_keyspace',
"INSERT INTO test_table (id, keyspace_id, msg) "
"VALUES (41523, 1, 'Šṛ́rỏé') /* EMD keyspace_id:1 */",
conn_params={'charset': 'latin1'}, write=True)

# Wait for it to replicate.
@@ -173,11 +175,13 @@ class TestBinlog(unittest.TestCase):
break

# Check the value.
data = dst_master.mquery("vt_test_keyspace",
"SELECT id, keyspace_id, msg FROM test_table WHERE id=41523 LIMIT 1")
data = dst_master.mquery(
'vt_test_keyspace',
'SELECT id, keyspace_id, msg FROM test_table WHERE id=41523 LIMIT 1')
self.assertEqual(len(data), 1, 'No data replicated.')
self.assertEqual(len(data[0]), 3, 'Wrong number of columns.')
self.assertEqual(data[0][2], 'Šṛ́rỏé', 'Data corrupted due to wrong charset.')
self.assertEqual(data[0][2], 'Šṛ́rỏé',
'Data corrupted due to wrong charset.')

def test_checksum_enabled(self):
start_position = mysql_flavor().master_position(dst_replica)
@@ -186,14 +190,15 @@ class TestBinlog(unittest.TestCase):
# Enable binlog_checksum, which will also force a log rotation that should
# cause binlog streamer to notice the new checksum setting.
if not mysql_flavor().enable_binlog_checksum(dst_replica):
logging.debug('skipping checksum test on flavor without binlog_checksum setting')
logging.debug(
'skipping checksum test on flavor without binlog_checksum setting')
return

# Insert something and make sure it comes through intact.
sql = "INSERT INTO test_table (id, keyspace_id, msg) VALUES (19283, 1, 'testing checksum enabled') /* EMD keyspace_id:1 */"
src_master.mquery("vt_test_keyspace",
sql,
write=True)
sql = (
"INSERT INTO test_table (id, keyspace_id, msg) "
"VALUES (19283, 1, 'testing checksum enabled') /* EMD keyspace_id:1 */")
src_master.mquery('vt_test_keyspace', sql, write=True)

# Look for it using update stream to see if binlog streamer can talk to
# dst_replica, which now has binlog_checksum enabled.
@@ -218,10 +223,12 @@ class TestBinlog(unittest.TestCase):
mysql_flavor().disable_binlog_checksum(dst_replica)

# Insert something and make sure it comes through intact.
sql = "INSERT INTO test_table (id, keyspace_id, msg) VALUES (58812, 1, 'testing checksum disabled') /* EMD keyspace_id:1 */"
src_master.mquery("vt_test_keyspace",
sql,
write=True)
sql = (
"INSERT INTO test_table (id, keyspace_id, msg) "
"VALUES (58812, 1, 'testing checksum disabled') "
"/* EMD keyspace_id:1 */")
src_master.mquery(
'vt_test_keyspace', sql, write=True)

# Look for it using update stream to see if binlog streamer can talk to
# dst_replica, which now has binlog_checksum disabled.
@@ -14,7 +14,7 @@ from checkers import checker

# Dropping a table inexplicably produces a warning despite
# the "IF EXISTS" clause. Squelch these warnings.
warnings.simplefilter("ignore")
warnings.simplefilter('ignore')


# I need this mostly for mysql
@@ -24,7 +24,8 @@ source_tablets = [tablet.Tablet(62044),
tablets = [destination_tablet] + source_tablets

db_configuration = {
"sources": [t.mysql_connection_parameters("test_checkers%i" % i) for i, t in enumerate(source_tablets)],
'sources': [t0.mysql_connection_parameters('test_checkers%i' % i0)
for i0, t0 in enumerate(source_tablets)],
}


@@ -51,31 +52,41 @@ class MockChecker(checker.Checker):
def handle_mismatch(self, mismatch):
self.mismatches.append(mismatch)

class TestCheckersBase(unittest.TestCase):
keyrange = {"end": 900}

def make_checker(self, destination_table_name="test", **kwargs):
class TestCheckersBase(unittest.TestCase):
keyrange = {'end': 900}

def make_checker(self, destination_table_name='test', **kwargs):
default = {'keyrange': TestCheckersBase.keyrange,
'batch_count': 20,
'logging_level': logging.WARNING,
'directory': tempfile.mkdtemp()}
default.update(kwargs)
source_addresses = ['vt_dba@localhost:%s/test_checkers%s?unix_socket=%s' % (s.mysql_port, i, s.mysql_connection_parameters('test_checkers')['unix_socket'])
for i, s in enumerate(source_tablets)]
destination_socket = destination_tablet.mysql_connection_parameters('test_checkers')['unix_socket']
return MockChecker('vt_dba@localhost/test_checkers?unix_socket=%s' % destination_socket, source_addresses, destination_table_name, **default)
source_addresses = [
'vt_dba@localhost:%s/test_checkers%s?unix_socket=%s' %
(s.mysql_port, i,
s.mysql_connection_parameters('test_checkers')['unix_socket'])
for i, s in enumerate(source_tablets)]
destination_socket = destination_tablet.mysql_connection_parameters(
'test_checkers')['unix_socket']
return MockChecker(
'vt_dba@localhost/test_checkers?unix_socket=%s' % destination_socket,
source_addresses, destination_table_name, **default)


class TestSortedRowListDifference(unittest.TestCase):

def test_sorted_row_list_difference(self):

expected = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0)]
actual = [(1, 0), (3, 0), (4, 0), (5, 0), (6, 1), (10, 0)]
missing, unexpected, different = checker.sorted_row_list_difference(expected, actual, 1)
missing, unexpected, different = checker.sorted_row_list_difference(
expected, actual, 1)
self.assertEqual(missing, [(2, 0)])
self.assertEqual(unexpected, [(10, 0)])
self.assertEqual(different, [((6, 1), (6, 0))])


class TestCheckers(TestCheckersBase):

@classmethod
@@ -84,54 +95,69 @@ class TestCheckers(TestCheckersBase):
cls.configuration = config

def setUp(self):
create_table = "create table test (pk1 bigint, pk2 bigint, pk3 bigint, keyspace_id bigint, msg varchar(64), primary key (pk1, pk2, pk3)) Engine=InnoDB"
destination_tablet.create_db("test_checkers")
destination_tablet.mquery("test_checkers", create_table, True)
create_table = (
'create table test (pk1 bigint, pk2 bigint, pk3 bigint, '
'keyspace_id bigint, msg varchar(64), primary key (pk1, pk2, pk3)) '
'Engine=InnoDB')
destination_tablet.create_db('test_checkers')
destination_tablet.mquery('test_checkers', create_table, True)
for i, t in enumerate(source_tablets):
t.create_db("test_checkers%s" % i)
t.mquery("test_checkers%s" % i, create_table, True)
t.create_db('test_checkers%s' % i)
t.mquery('test_checkers%s' % i, create_table, True)

destination_queries = []
source_queries = [[] for t in source_tablets]
for i in range(1, 400):
query = "insert into test (pk1, pk2, pk3, msg, keyspace_id) values (%s, %s, %s, 'message %s', %s)" % (i/100+1, i/10+1, i, i, i)
query = (
'insert into test (pk1, pk2, pk3, msg, keyspace_id) '
"values (%s, %s, %s, 'message %s', %s)" % (i/100+1, i/10+1, i, i, i))
destination_queries.append(query)
source_queries[i % 2].append(query)
for i in range(1100, 1110):
query = "insert into test (pk1, pk2, pk3, msg, keyspace_id) values (%s, %s, %s, 'message %s', %s)" % (i/100+1, i/10+1, i, i, i)
query = (
'insert into test (pk1, pk2, pk3, msg, keyspace_id) '
"values (%s, %s, %s, 'message %s', %s)" % (i/100+1, i/10+1, i, i, i))
source_queries[0].append(query)

destination_tablet.mquery("test_checkers", destination_queries, write=True)
destination_tablet.mquery('test_checkers', destination_queries, write=True)
for i, (tablet, queries) in enumerate(zip(source_tablets, source_queries)):
tablet.mquery("test_checkers%s" % i, queries, write=True)
tablet.mquery('test_checkers%s' % i, queries, write=True)
self.c = self.make_checker()

def tearDown(self):
destination_tablet.mquery("test_checkers", "drop table test", True)
destination_tablet.mquery('test_checkers', 'drop table test', True)
for i, t in enumerate(source_tablets):
t.mquery("test_checkers%s" % i, "drop table test", True)
t.mquery('test_checkers%s' % i, 'drop table test', True)

def query_all(self, sql, write=False):
return [t.mquery("test_checkers", sql, write=write) for t in tablets]

return [t.mquery('test_checkers', sql, write=write) for t in tablets]

def test_ok(self):
self.c._run()
self.assertFalse(self.c.mismatches)

def test_different_value(self):
destination_tablet.mquery("test_checkers", "update test set msg='something else' where pk2 = 29 and pk3 = 280 and pk1 = 3", write=True)
destination_tablet.mquery(
'test_checkers',
"update test set msg='something else' where pk2 = 29 and "
"pk3 = 280 and pk1 = 3", write=True)
self.c._run()
self.assertTrue(self.c.mismatches)

def test_additional_value(self):
destination_tablet.mquery("test_checkers", "insert into test (pk1, pk2, pk3) values (1, 1, 900)", write=True)
destination_tablet.mquery(
'test_checkers',
'insert into test (pk1, pk2, pk3) values (1, 1, 900)', write=True)
self.c._run()
self.assertTrue(self.c.mismatches)

def test_more_mismatches(self):
destination_tablet.mquery("test_checkers", "insert into test (pk1, pk2, pk3) values (1, 1, 900)", write=True)
destination_tablet.mquery("test_checkers", "insert into test (pk1, pk2, pk3) values (1000, 1000, 1000)", write=True)
destination_tablet.mquery(
'test_checkers',
'insert into test (pk1, pk2, pk3) values (1, 1, 900)', write=True)
destination_tablet.mquery(
'test_checkers', 'insert into test (pk1, pk2, pk3) '
'values (1000, 1000, 1000)', write=True)
self.c._run()
self.assertEqual(len(self.c.mismatches), 2)

@@ -143,81 +169,112 @@ class TestCheckers(TestCheckersBase):


class TestDifferentEncoding(TestCheckersBase):

@classmethod
def setUpClass(cls):
config = dict(db_configuration)
cls.configuration = config

def setUp(self):
create_table = "create table test (pk1 bigint, pk2 bigint, pk3 bigint, keyspace_id bigint, msg varchar(64), primary key (pk1, pk2, pk3)) Engine=InnoDB"
destination_tablet.create_db("test_checkers")
destination_tablet.mquery("test_checkers", create_table + "default character set = utf8", True)
create_table = (
'create table test (pk1 bigint, pk2 bigint, pk3 bigint, '
'keyspace_id bigint, msg varchar(64), primary key (pk1, pk2, pk3)) '
'Engine=InnoDB')
destination_tablet.create_db('test_checkers')
destination_tablet.mquery(
'test_checkers', create_table + 'default character set = utf8', True)
for i, t in enumerate(source_tablets):
t.create_db("test_checkers%s" % i)
t.mquery("test_checkers%s" % i, create_table + "default character set = latin2", True)
t.create_db('test_checkers%s' % i)
t.mquery('test_checkers%s' % i,
create_table + 'default character set = latin2', True)

destination_queries = []
source_queries = [[] for t in source_tablets]
source_connections = [t.connect('test_checkers%s' % i) for i, t in enumerate(source_tablets)]
source_connections = [
t.connect('test_checkers%s' % i) for i, t in enumerate(source_tablets)]
for c, _ in source_connections:
c.set_character_set('latin2')
c.begin()
for i in range(1, 400):
query = u"insert into test (pk1, pk2, pk3, keyspace_id, msg) values (%s, %s, %s, %s, '\xb1 %s')" % (i/100+1, i/10+1, i, i, i)
query = (
'insert into test (pk1, pk2, pk3, keyspace_id, msg) '
u"values (%s, %s, %s, %s, '\xb1 %s')" % (i/100+1, i/10+1, i, i, i))
destination_queries.append(query)
#source_queries[i % 2].append(query.encode('utf-8').decode('iso-8859-2'))
source_connections[i % 2][1].execute(query.encode('utf-8').decode('iso-8859-2'))
# source_queries[i % 2].append(query.encode('utf-8').decode('iso-8859-2'))
source_connections[i % 2][1].execute(
query.encode('utf-8').decode('iso-8859-2'))
for c, _ in source_connections:
c.commit()

destination_tablet.mquery("test_checkers", destination_queries, write=True)
destination_tablet.mquery('test_checkers', destination_queries, write=True)
self.c = self.make_checker()

def test_problem(self):
self.c._run()
self.assertTrue(self.c.mismatches)


class TestRlookup(TestCheckersBase):

def setUp(self):
source_create_table = "create table test (pk1 bigint, k2 bigint, k3 bigint, keyspace_id bigint, msg varchar(64), primary key (pk1)) Engine=InnoDB"
destination_create_table = "create table test_lookup (pk1_lookup bigint, msg_lookup varchar(64), primary key (pk1_lookup)) Engine=InnoDB"
destination_tablet.create_db("test_checkers")
destination_tablet.mquery("test_checkers", destination_create_table, True)
source_create_table = (
'create table test (pk1 bigint, k2 bigint, k3 bigint, '
'keyspace_id bigint, msg varchar(64), primary key (pk1)) Engine=InnoDB')
destination_create_table = (
'create table test_lookup (pk1_lookup bigint, msg_lookup varchar(64), '
'primary key (pk1_lookup)) Engine=InnoDB')
destination_tablet.create_db('test_checkers')
destination_tablet.mquery('test_checkers', destination_create_table, True)

for i, t in enumerate(source_tablets):
t.create_db("test_checkers%s" % i)
t.mquery("test_checkers%s" % i, source_create_table, True)
t.create_db('test_checkers%s' % i)
t.mquery('test_checkers%s' % i, source_create_table, True)

destination_queries = []
source_queries = [[] for t in source_tablets]
for i in range(1, 400):
destination_queries.append("insert into test_lookup (pk1_lookup, msg_lookup) values (%s, 'message %s')" % (i, i))
source_queries[i % 2].append("insert into test (pk1, k2, k3, msg, keyspace_id) values (%s, %s, %s, 'message %s', %s)" % (i, i, i, i, i))
destination_queries.append(
'insert into test_lookup (pk1_lookup, msg_lookup) '
"values (%s, 'message %s')" % (i, i))
source_queries[i % 2].append(
'insert into test (pk1, k2, k3, msg, keyspace_id) '
"values (%s, %s, %s, 'message %s', %s)" % (i, i, i, i, i))
for i in range(1100, 1110):
query = "insert into test (pk1, k2, k3, msg, keyspace_id) values (%s, %s, %s, 'message %s', %s)" % (i, i, i, i, i)
query = (
'insert into test (pk1, k2, k3, msg, keyspace_id) '
"values (%s, %s, %s, 'message %s', %s)" % (i, i, i, i, i))
source_queries[0].append(query)

destination_tablet.mquery("test_checkers", destination_queries, write=True)
destination_tablet.mquery('test_checkers', destination_queries, write=True)
for i, (tablet, queries) in enumerate(zip(source_tablets, source_queries)):
tablet.mquery("test_checkers%s" % i, queries, write=True)
self.c = self.make_checker(destination_table_name="test_lookup", source_table_name="test", source_column_map={'pk1_lookup': 'pk1', 'msg_lookup': 'msg'})
tablet.mquery('test_checkers%s' % i, queries, write=True)
self.c = self.make_checker(

destination_table_name='test_lookup', source_table_name='test',
source_column_map={'pk1_lookup': 'pk1', 'msg_lookup': 'msg'})

def tearDown(self):
destination_tablet.mquery("test_checkers", "drop table test_lookup", True)
destination_tablet.mquery('test_checkers', 'drop table test_lookup', True)
for i, t in enumerate(source_tablets):
t.mquery("test_checkers%s" % i, "drop table test", True)
t.mquery('test_checkers%s' % i, 'drop table test', True)

def test_ok(self):
self.c._run()
self.assertFalse(self.c.mismatches)

def test_different_value(self):
destination_tablet.mquery("test_checkers", "update test_lookup set msg_lookup='something else' where pk1_lookup = 29", write=True)
destination_tablet.mquery(
'test_checkers',
"update test_lookup set msg_lookup='something else' "
'where pk1_lookup = 29', write=True)
self.c._run()
self.assertTrue(self.c.mismatches)

def test_additional_value(self):
destination_tablet.mquery("test_checkers", "insert into test_lookup (pk1_lookup, msg_lookup) values (11000, 'something new')", write=True)
destination_tablet.mquery(
'test_checkers',
'insert into test_lookup (pk1_lookup, msg_lookup) '
"values (11000, 'something new')", write=True)
self.c._run()
self.assertTrue(self.c.mismatches)

@@ -9,9 +9,6 @@ library test.
import hashlib
import random
import struct
import threading
import time
import traceback
import unittest

import environment
@@ -37,20 +34,22 @@ conn_class = vtgatev2
__tablets = None

shard_names = ['-80', '80-']
shard_kid_map = {'-80': [527875958493693904, 626750931627689502,
345387386794260318, 332484755310826578,
1842642426274125671, 1326307661227634652,
1761124146422844620, 1661669973250483744,
3361397649937244239, 2444880764308344533],
'80-': [9767889778372766922, 9742070682920810358,
10296850775085416642, 9537430901666854108,
10440455099304929791, 11454183276974683945,
11185910247776122031, 10460396697869122981,
13379616110062597001, 12826553979133932576],
}
shard_kid_map = {
'-80': [527875958493693904, 626750931627689502,
345387386794260318, 332484755310826578,
1842642426274125671, 1326307661227634652,
1761124146422844620, 1661669973250483744,
3361397649937244239, 2444880764308344533],
'80-': [9767889778372766922, 9742070682920810358,
10296850775085416642, 9537430901666854108,
10440455099304929791, 11454183276974683945,
11185910247776122031, 10460396697869122981,
13379616110062597001, 12826553979133932576],
}

pack_kid = struct.Struct('!Q').pack


def setUpModule():
try:
environment.topo_server().setup()
@@ -69,6 +68,7 @@ def setUpModule():
tearDownModule()
raise


def tearDownModule():
global __tablets
if utils.options.skip_teardown:
@@ -89,6 +89,7 @@ def tearDownModule():
for t in __tablets:
t.remove_tree()


def setup_topology():
global __tablets
if __tablets is None:
@@ -127,6 +128,7 @@ def create_db():
for table_tuple in topo_schema.keyspace_table_map[ks_name]:
t.mquery(t.dbname, table_tuple[1])


def start_tablets():
global __tablets
# start tablets
@@ -150,19 +152,20 @@ def start_tablets():
auto_log=True)
if ks_type == shard_constants.RANGE_SHARDED:
utils.check_srv_keyspace('test_nj', ks_name,
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n')


def get_connection(user=None, password=None):
timeout = 10.0
conn = None
vtgate_addrs = {"vt": [utils.vtgate.addr(),]}
vtgate_addrs = {'vt': [utils.vtgate.addr(),]}
conn = conn_class.connect(vtgate_addrs, timeout,
user=user, password=password)
return conn


def get_keyrange(shard_name):
kr = None
if shard_name == keyrange_constants.SHARD_ZERO:
@@ -177,42 +180,49 @@ def _delete_all(keyspace, shard_name, table_name):
# This write is to set up the test with fresh insert
# and hence performing it directly on the connection.
vtgate_conn.begin()
vtgate_conn._execute("delete from %s" % table_name, {},
vtgate_conn._execute('delete from %s' % table_name, {},
keyspace, 'master',
keyranges=[get_keyrange(shard_name)])
vtgate_conn.commit()


def restart_vtgate(extra_args={}):
def restart_vtgate(extra_args=None):
if extra_args is None:
extra_args = {}
port = utils.vtgate.port
utils.vtgate.kill()
utils.VtGate(port=port).start(extra_args=extra_args)


def populate_table():
keyspace = "KS_UNSHARDED"
keyspace = 'KS_UNSHARDED'
_delete_all(keyspace, keyrange_constants.SHARD_ZERO, 'vt_unsharded')
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(keyspace, 'master', keyranges=[get_keyrange(keyrange_constants.SHARD_ZERO),],writable=True)
cursor = vtgate_conn.cursor(
keyspace, 'master',
keyranges=[get_keyrange(keyrange_constants.SHARD_ZERO),], writable=True)
cursor.begin()
for x in xrange(10):
cursor.execute('insert into vt_unsharded (id, msg) values (%s, %s)' % (str(x), 'msg'), {})
cursor.execute(
'insert into vt_unsharded (id, msg) values (%s, %s)' %
(str(x), 'msg'), {})
cursor.commit()


class TestUnshardedTable(unittest.TestCase):

def setUp(self):
self.vtgate_addrs = {"vt": [utils.vtgate.addr(),]}
self.vtgate_addrs = {'vt': [utils.vtgate.addr(),]}
self.dc = database_context.DatabaseContext(self.vtgate_addrs)
self.all_ids = []
with database_context.WriteTransaction(self.dc) as context:
for x in xrange(20):
for _ in xrange(20):
ret_id = db_class_unsharded.VtUnsharded.insert(context.get_cursor(),
msg="test message")
msg='test message')
self.all_ids.append(ret_id)

def tearDown(self):
_delete_all("KS_UNSHARDED", "0", 'vt_unsharded')
_delete_all('KS_UNSHARDED', '0', 'vt_unsharded')

def test_read(self):
id_val = self.all_ids[0]
@@ -220,48 +230,58 @@ class TestUnshardedTable(unittest.TestCase):
rows = db_class_unsharded.VtUnsharded.select_by_id(
context.get_cursor(), id_val)
expected = 1
self.assertEqual(len(rows), expected, "wrong number of rows fetched %d, expected %d" % (len(rows), expected))
self.assertEqual(rows[0].id, id_val, "wrong row fetched")
self.assertEqual(
len(rows), expected,
'wrong number of rows fetched %d, expected %d' %
(len(rows), expected))
self.assertEqual(rows[0].id, id_val, 'wrong row fetched')

def test_update_and_read(self):
id_val = self.all_ids[0]
where_column_value_pairs = [('id', id_val)]
with database_context.WriteTransaction(self.dc) as context:
update_cols = [('msg', "test update"),]
db_class_unsharded.VtUnsharded.update_columns(context.get_cursor(),
where_column_value_pairs,
update_column_value_pairs=update_cols)
update_cols = [('msg', 'test update'),]
db_class_unsharded.VtUnsharded.update_columns(
context.get_cursor(),
where_column_value_pairs,
update_column_value_pairs=update_cols)

with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_unsharded.VtUnsharded.select_by_id(context.get_cursor(), id_val)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(rows[0].msg, "test update", "wrong row fetched")
rows = db_class_unsharded.VtUnsharded.select_by_id(
context.get_cursor(), id_val)
self.assertEqual(len(rows), 1, 'wrong number of rows fetched')
self.assertEqual(rows[0].msg, 'test update', 'wrong row fetched')

def test_delete_and_read(self):
id_val = self.all_ids[-1]
where_column_value_pairs = [('id', id_val)]
with database_context.WriteTransaction(self.dc) as context:
db_class_unsharded.VtUnsharded.delete_by_columns(context.get_cursor(),
where_column_value_pairs)
db_class_unsharded.VtUnsharded.delete_by_columns(
context.get_cursor(), where_column_value_pairs)

with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_unsharded.VtUnsharded.select_by_id(context.get_cursor(), id_val)
self.assertEqual(len(rows), 0, "wrong number of rows fetched")
rows = db_class_unsharded.VtUnsharded.select_by_id(
context.get_cursor(), id_val)
self.assertEqual(len(rows), 0, 'wrong number of rows fetched')
self.all_ids = self.all_ids[:-1]

def test_count(self):
with database_context.ReadFromMaster(self.dc) as context:
count = db_class_unsharded.VtUnsharded.get_count(
context.get_cursor(), msg="test message")
context.get_cursor(), msg='test message')
expected = len(self.all_ids)
self.assertEqual(count, expected, "wrong count fetched; expected %d got %d" % (expected, count))
self.assertEqual(
count, expected,
'wrong count fetched; expected %d got %d' % (expected, count))

def test_min_id(self):
with database_context.ReadFromMaster(self.dc) as context:
min_id = db_class_unsharded.VtUnsharded.get_min(
context.get_cursor())
expected = min(self.all_ids)
self.assertEqual(min_id, expected, "wrong min value fetched; expected %d got %d" % (expected, min_id))
self.assertEqual(
min_id, expected,
'wrong min value fetched; expected %d got %d' % (expected, min_id))

def test_max_id(self):
with database_context.ReadFromMaster(self.dc) as context:
@@ -269,10 +289,13 @@ class TestUnshardedTable(unittest.TestCase):
context.get_cursor())
self.all_ids.sort()
expected = max(self.all_ids)
self.assertEqual(max_id, expected, "wrong max value fetched; expected %d got %d" % (expected, max_id))
self.assertEqual(
max_id, expected,
'wrong max value fetched; expected %d got %d' % (expected, max_id))


class TestRangeSharded(unittest.TestCase):

def populate_tables(self):
self.user_id_list = []
self.song_id_list = []
@@ -282,8 +305,9 @@ class TestRangeSharded(unittest.TestCase):
with database_context.WriteTransaction(self.dc) as context:
for x in xrange(20):
# vt_user - EntityRangeSharded; creates username:user_id lookup
user_id = db_class_sharded.VtUser.insert(context.get_cursor(),
username="user%s" % x, msg="test message")
user_id = db_class_sharded.VtUser.insert(
context.get_cursor(),
username='user%s' % x, msg='test message')
self.user_id_list.append(user_id)

# vt_user_email - RangeSharded; references user_id:keyspace_id hash
@@ -291,7 +315,7 @@ class TestRangeSharded(unittest.TestCase):
m = hashlib.md5()
m.update(email)
email_hash = m.digest()
entity_id_map={'user_id':user_id}
entity_id_map = {'user_id': user_id}
db_class_sharded.VtUserEmail.insert(
context.get_cursor(entity_id_map=entity_id_map),
user_id=user_id, email=email,
@@ -299,22 +323,22 @@ class TestRangeSharded(unittest.TestCase):

# vt_song - EntityRangeSharded; creates song_id:user_id lookup
num_songs_for_user = r.randint(1, 5)
for i in xrange(num_songs_for_user):
song_id = db_class_sharded.VtSong.insert(context.get_cursor(),
user_id=user_id, title="Test Song")
for _ in xrange(num_songs_for_user):
song_id = db_class_sharded.VtSong.insert(
context.get_cursor(),
user_id=user_id, title='Test Song')
self.song_id_list.append(song_id)
self.user_song_map.setdefault(user_id, []).append(song_id)

# vt_song_detail - RangeSharded; references song_id:user_id lookup
entity_id_map = {'song_id':song_id}
db_class_sharded.VtSongDetail.insert(context.get_cursor(entity_id_map=entity_id_map),
song_id=song_id, album_name="Test album",
artist="Test artist")


entity_id_map = {'song_id': song_id}
db_class_sharded.VtSongDetail.insert(
context.get_cursor(entity_id_map=entity_id_map),
song_id=song_id, album_name='Test album',
artist='Test artist')

def setUp(self):
self.vtgate_addrs = {"vt": [utils.vtgate.addr(),]}
self.vtgate_addrs = {'vt': [utils.vtgate.addr(),]}
self.dc = database_context.DatabaseContext(self.vtgate_addrs)
self.populate_tables()

@@ -322,18 +346,22 @@ class TestRangeSharded(unittest.TestCase):
with database_context.WriteTransaction(self.dc) as context:
for uid in self.user_id_list:
try:
db_class_sharded.VtUser.delete_by_columns(context.get_cursor(entity_id_map={'id':uid}),
[('id', uid),])
db_class_sharded.VtUserEmail.delete_by_columns(context.get_cursor(entity_id_map={'user_id':uid}),
[('user_id', uid),])
db_class_sharded.VtSong.delete_by_columns(context.get_cursor(entity_id_map={'user_id':uid}),
[('user_id', uid),])
db_class_sharded.VtUser.delete_by_columns(
context.get_cursor(entity_id_map={'id': uid}),
[('id', uid),])
db_class_sharded.VtUserEmail.delete_by_columns(
context.get_cursor(entity_id_map={'user_id': uid}),
[('user_id', uid),])
db_class_sharded.VtSong.delete_by_columns(
context.get_cursor(entity_id_map={'user_id': uid}),
[('user_id', uid),])
song_id_list = self.user_song_map[uid]
for sid in song_id_list:
db_class_sharded.VtSongDetail.delete_by_columns(context.get_cursor(entity_id_map={'song_id':sid}),
[('song_id', sid),])
db_class_sharded.VtSongDetail.delete_by_columns(
context.get_cursor(entity_id_map={'song_id': sid}),
[('song_id', sid),])
except dbexceptions.DatabaseError as e:
if str(e) == "DB Row not found":
if str(e) == 'DB Row not found':
pass

def test_sharding_key_read(self):
@@ -344,21 +372,23 @@ class TestRangeSharded(unittest.TestCase):
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(len(rows), 1, 'wrong number of rows fetched')

where_column_value_pairs = [('user_id', user_id),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtUserEmail.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(len(rows), 1, 'wrong number of rows fetched')

where_column_value_pairs = [('user_id', user_id),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtSong.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), len(self.user_song_map[user_id]), "wrong number of rows fetched")
self.assertEqual(
len(rows), len(self.user_song_map[user_id]),
'wrong number of rows fetched')

def test_entity_id_read(self):
user_id = self.user_id_list[0]
@@ -367,21 +397,21 @@ class TestRangeSharded(unittest.TestCase):
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
[('id', user_id),])
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(len(rows), 1, 'wrong number of rows fetched')

where_column_value_pairs = [('id', self.user_song_map[user_id][0]),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtSong.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(len(rows), 1, 'wrong number of rows fetched')

where_column_value_pairs = [('song_id', self.user_song_map[user_id][0]),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtSongDetail.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(len(rows), 1, 'wrong number of rows fetched')

def test_in_clause_read(self):
with database_context.ReadFromMaster(self.dc) as context:
@@ -392,10 +422,12 @@ class TestRangeSharded(unittest.TestCase):
rows = db_class_sharded.VtUser.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 2, "wrong number of rows fetched")
self.assertEqual(len(rows), 2, 'wrong number of rows fetched')
got = [row.id for row in rows]
got.sort()
self.assertEqual(user_id_list, got, "wrong rows fetched; expected %s got %s" % (user_id_list, got))
self.assertEqual(
user_id_list, got, 'wrong rows fetched; expected %s got %s' %
(user_id_list, got))

username_list = [row.username for row in rows]
username_list.sort()
@@ -404,20 +436,24 @@ class TestRangeSharded(unittest.TestCase):
rows = db_class_sharded.VtUser.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 2, "wrong number of rows fetched")
self.assertEqual(len(rows), 2, 'wrong number of rows fetched')
got = [row.username for row in rows]
got.sort()
self.assertEqual(username_list, got, "wrong rows fetched; expected %s got %s" % (username_list, got))
self.assertEqual(
username_list, got, 'wrong rows fetched; expected %s got %s' %
(username_list, got))

where_column_value_pairs = (('user_id', user_id_list),)
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtUserEmail.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 2, "wrong number of rows fetched")
self.assertEqual(len(rows), 2, 'wrong number of rows fetched')
got = [row.user_id for row in rows]
got.sort()
self.assertEqual(user_id_list, got, "wrong rows fetched; expected %s got %s" % (user_id_list, got))
self.assertEqual(
user_id_list, got, 'wrong rows fetched; expected %s got %s' %
(user_id_list, got))

song_id_list = []
for user_id in user_id_list:
@@ -430,7 +466,9 @@ class TestRangeSharded(unittest.TestCase):
where_column_value_pairs)
got = [row.id for row in rows]
got.sort()
self.assertEqual(song_id_list, got, "wrong rows fetched %s got %s" % (song_id_list, got))
self.assertEqual(
song_id_list, got, 'wrong rows fetched %s got %s' %
(song_id_list, got))

where_column_value_pairs = [('song_id', song_id_list),]
entity_id_map = dict(where_column_value_pairs)
@@ -439,8 +477,9 @@ class TestRangeSharded(unittest.TestCase):
where_column_value_pairs)
got = [row.song_id for row in rows]
got.sort()
self.assertEqual(song_id_list, got, "wrong rows fetched %s got %s" % (song_id_list, got))

self.assertEqual(
song_id_list, got, 'wrong rows fetched %s got %s' %
(song_id_list, got))

def test_keyrange_read(self):
where_column_value_pairs = []
@@ -451,7 +490,10 @@ class TestRangeSharded(unittest.TestCase):
context.get_cursor(keyrange='80-'), where_column_value_pairs)
fetched_rows = len(rows1) + len(rows2)
expected = len(self.user_id_list)
self.assertEqual(fetched_rows, expected, "wrong number of rows fetched expected:%d got:%d" % (expected, fetched_rows))
self.assertEqual(
fetched_rows, expected,
'wrong number of rows fetched expected:%d got:%d' %
(expected, fetched_rows))

def test_scatter_read(self):
where_column_value_pairs = []
@@ -459,7 +501,10 @@ class TestRangeSharded(unittest.TestCase):
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE),
where_column_value_pairs)
self.assertEqual(len(rows), len(self.user_id_list), "wrong number of rows fetched, expecting %d got %d" % (len(self.user_id_list), len(rows)))
self.assertEqual(
len(rows), len(self.user_id_list),
'wrong number of rows fetched, expecting %d got %d' %
(len(self.user_id_list), len(rows)))

def test_streaming_read(self):
where_column_value_pairs = []
@@ -470,7 +515,9 @@ class TestRangeSharded(unittest.TestCase):
got_user_id_list = []
for r in rows:
got_user_id_list.append(r.id)
self.assertEqual(len(got_user_id_list), len(self.user_id_list), "wrong number of rows fetched")
self.assertEqual(
len(got_user_id_list), len(self.user_id_list),
'wrong number of rows fetched')

def update_columns(self):
with database_context.WriteTransaction(self.dc) as context:
@@ -479,15 +526,16 @@ class TestRangeSharded(unittest.TestCase):
entity_id_map = {'id': user_id}
new_username = 'new_user%s' % user_id
update_cols = [('username', new_username),]
db_class_sharded.VtUser.update_columns(context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs,
update_column_value_pairs=update_cols)
db_class_sharded.VtUser.update_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs,
update_column_value_pairs=update_cols)
# verify the updated value.
where_column_value_pairs = [('id', user_id),]
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(entity_id_map={'id': user_id}),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(len(rows), 1, 'wrong number of rows fetched')
self.assertEqual(new_username, rows[0].username)

where_column_value_pairs = [('user_id', user_id),]
@@ -497,9 +545,10 @@ class TestRangeSharded(unittest.TestCase):
m.update(new_email)
email_hash = m.digest()
update_cols = [('email', new_email), ('email_hash', email_hash)]
db_class_sharded.VtUserEmail.update_columns(context.get_cursor(entity_id_map={'user_id':user_id}),
where_column_value_pairs,
update_column_value_pairs=update_cols)
db_class_sharded.VtUserEmail.update_columns(
context.get_cursor(entity_id_map={'user_id': user_id}),
where_column_value_pairs,
update_column_value_pairs=update_cols)

# verify the updated value.
with database_context.ReadFromMaster(self.dc) as context:
@@ -508,7 +557,7 @@ class TestRangeSharded(unittest.TestCase):
rows = db_class_sharded.VtUserEmail.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(len(rows), 1, 'wrong number of rows fetched')
self.assertEqual(new_email, rows[0].email)
self.user_id_list.sort()

@@ -517,24 +566,26 @@ class TestRangeSharded(unittest.TestCase):
with database_context.WriteTransaction(self.dc) as context:
where_column_value_pairs = [('id', user_id),]
entity_id_map = {'id': user_id}
db_class_sharded.VtUser.delete_by_columns(context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
db_class_sharded.VtUser.delete_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)

where_column_value_pairs = [('user_id', user_id),]
entity_id_map = {'user_id': user_id}
db_class_sharded.VtUserEmail.delete_by_columns(context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
db_class_sharded.VtUserEmail.delete_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)

with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 0, "wrong number of rows fetched")
self.assertEqual(len(rows), 0, 'wrong number of rows fetched')

rows = db_class_sharded.VtUserEmail.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 0, "wrong number of rows fetched")
self.assertEqual(len(rows), 0, 'wrong number of rows fetched')
self.user_id_list = self.user_id_list[:-1]
self.user_id_list.sort()

@@ -542,9 +593,11 @@ class TestRangeSharded(unittest.TestCase):
with database_context.ReadFromMaster(self.dc) as context:
count = db_class_sharded.VtUser.get_count(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE),
msg="test message")
msg='test message')
expected = len(self.user_id_list)
self.assertEqual(count, expected, "wrong count fetched; expected %d got %d" % (expected, count))
self.assertEqual(
count, expected,
'wrong count fetched; expected %d got %d' % (expected, count))

def test_min_id(self):
with database_context.ReadFromMaster(self.dc) as context:
@@ -553,17 +606,21 @@ class TestRangeSharded(unittest.TestCase):
self.user_id_list.sort()
expected = min(self.user_id_list)
rows1 = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE), [])
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE),
[])
id_list = [row.id for row in rows1]
self.assertEqual(min_id, expected, "wrong min value fetched; expected %d got %d" % (expected, min_id))
self.assertEqual(
min_id, expected,
'wrong min value fetched; expected %d got %d' % (expected, min_id))

def test_max_id(self):
with database_context.ReadFromMaster(self.dc) as context:
max_id = db_class_sharded.VtUser.get_max(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE))
expected = max(self.user_id_list)
self.assertEqual(max_id, expected, "wrong max value fetched; expected %d got %d" % (expected, max_id))

self.assertEqual(
max_id, expected,
'wrong max value fetched; expected %d got %d' % (expected, max_id))


if __name__ == '__main__':
@@ -17,6 +17,7 @@ shard_0_rdonly = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()


def setUpModule():
try:
environment.topo_server().setup()
@@ -34,6 +35,7 @@ def setUpModule():
tearDownModule()
raise


def tearDownModule():
if utils.options.skip_teardown:
return
@@ -55,15 +57,16 @@ def tearDownModule():
shard_1_master.remove_tree()
shard_1_rdonly.remove_tree()


class TestCustomSharding(unittest.TestCase):

def _insert_data(self, shard, start, count, table='data'):
sql = 'insert into %s(id, name) values (:id, :name)' % table
for x in xrange(count):
bindvars = {
'id': start+x,
'name': 'row %d' % (start+x),
}
'id': start+x,
'name': 'row %d' % (start+x),
}
utils.vtgate.execute_shard(sql, 'test_keyspace', shard,
bindvars=bindvars)

@@ -71,8 +74,8 @@ class TestCustomSharding(unittest.TestCase):
sql = 'select name from %s where id=:id' % table
for x in xrange(count):
bindvars = {
'id': start+x,
}
'id': start+x,
}
qr = utils.vtgate.execute_shard(sql, 'test_keyspace', shard,
bindvars=bindvars)
self.assertEqual(len(qr['Rows']), 1)
@@ -80,17 +83,18 @@ class TestCustomSharding(unittest.TestCase):
self.assertEqual(v, 'row %d' % (start+x))

def test_custom_end_to_end(self):
"""This test case runs through the common operations of a custom
sharded keyspace: creation with one shard, schema change, reading
/ writing data, adding one more shard, reading / writing data from
both shards, applying schema changes again, and reading / writing data from
both shards again.
"""Runs through the common operations of a custom sharded keyspace.

Tests creation with one shard, schema change, reading / writing
data, adding one more shard, reading / writing data from both
shards, applying schema changes again, and reading / writing data
from both shards again.
"""

utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

# start the first shard only for now
shard_0_master.init_tablet( 'master', 'test_keyspace', '0')
shard_0_master.init_tablet('master', 'test_keyspace', '0')
shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '0')
for t in [shard_0_master, shard_0_rdonly]:
t.create_db('vt_test_keyspace')
@@ -124,7 +128,7 @@ primary key (id)
self._check_data('0', 100, 10)

# create shard 1
shard_1_master.init_tablet( 'master', 'test_keyspace', '1')
shard_1_master.init_tablet('master', 'test_keyspace', '1')
shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '1')
for t in [shard_1_master, shard_1_rdonly]:
t.start_vttablet(wait_for_state=None)
@@ -183,26 +187,27 @@ primary key (id)
shard1count = 0
for q in s:
if q['QueryShard']['Shards'][0] == '0':
shard0count+=1
shard0count += 1
if q['QueryShard']['Shards'][0] == '1':
shard1count+=1
shard1count += 1
self.assertEqual(shard0count, 2)
self.assertEqual(shard1count, 2)

# run the queries, aggregate the results, make sure we have all rows
rows = {}
for q in s:
qr = utils.vtgate.execute_shard(q['QueryShard']['Sql'],
'test_keyspace', ",".join(q['QueryShard']['Shards']),
tablet_type='master', bindvars=q['QueryShard']['BindVariables'])
qr = utils.vtgate.execute_shard(
q['QueryShard']['Sql'],
'test_keyspace', ','.join(q['QueryShard']['Shards']),
tablet_type='master', bindvars=q['QueryShard']['BindVariables'])
for r in qr['Rows']:
id = int(r[0])
rows[id] = r[1]
self.assertEqual(len(rows), 20)
expected = {}
for i in xrange(10):
expected[100+i] = 'row %d' % (100+i)
expected[200+i] = 'row %d' % (200+i)
expected[100 + i] = 'row %d' % (100 + i)
expected[200 + i] = 'row %d' % (200 + i)
self.assertEqual(rows, expected)

if __name__ == '__main__':
test/demo.py
@@ -4,9 +4,7 @@
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""
This program launches and shuts down a demo vitess cluster.
"""
"""This program launches and shuts down a demo vitess cluster."""

import json
import optparse
@@ -118,8 +116,9 @@ vschema = '''{
# Verify valid json
json.loads(vschema)


def main():
parser = optparse.OptionParser(usage="usage: %prog [options]")
parser = optparse.OptionParser(usage='usage: %prog [options]')
utils.add_options(parser)
(options, args) = parser.parse_args()
options.debug = True
@@ -128,29 +127,36 @@ def main():
try:
environment.topo_server().setup()
env.launch(
"user",
shards=["-80", "80-"],
'user',
shards=['-80', '80-'],
ddls=[
'create table user(user_id bigint, name varchar(128), primary key(user_id))',
'create table user_extra(user_id bigint, extra varchar(128), primary key(user_id))',
'create table music(user_id bigint, music_id bigint, primary key(user_id, music_id))',
'create table music_extra(music_id bigint, keyspace_id bigint unsigned, primary key(music_id))',
'create table user(user_id bigint, name varchar(128), '
'primary key(user_id))',
'create table user_extra(user_id bigint, extra varchar(128), '
'primary key(user_id))',
'create table music(user_id bigint, music_id bigint, '
'primary key(user_id, music_id))',
'create table music_extra(music_id bigint, '
'keyspace_id bigint unsigned, primary key(music_id))',
],
)
env.launch(
"lookup",
'lookup',
ddls=[
'create table user_idx(user_id bigint not null auto_increment, primary key(user_id))',
'create table name_user_idx(name varchar(128), user_id bigint, primary key(name, user_id))',
'create table music_user_idx(music_id bigint not null auto_increment, user_id bigint, primary key(music_id))',
'create table user_idx(user_id bigint not null auto_increment, '
'primary key(user_id))',
'create table name_user_idx(name varchar(128), user_id bigint, '
'primary key(name, user_id))',
'create table music_user_idx(music_id bigint not null '
'auto_increment, user_id bigint, primary key(music_id))',
],
)
utils.apply_vschema(vschema)
utils.VtGate().start(cache_ttl='500s')
utils.Vtctld().start()
print "vtgate:", utils.vtgate.port
print "vtctld:", utils.vtctld.port
utils.pause("the cluster is up, press enter to shut it down...")
print 'vtgate:', utils.vtgate.port
print 'vtctld:', utils.vtctld.port
utils.pause('the cluster is up, press enter to shut it down...')
finally:
env.teardown()
utils.kill_sub_processes()
@ -20,10 +20,14 @@ import grpc_protocols_flavor

# sanity check the environment
if os.environ['USER'] == 'root':
sys.stderr.write('ERROR: Vitess and its dependencies (mysqld and memcached) should not be run as root.\n')
sys.stderr.write(
'ERROR: Vitess and its dependencies (mysqld and memcached) '
'should not be run as root.\n')
sys.exit(1)
if 'VTTOP' not in os.environ:
sys.stderr.write('ERROR: Vitess environment not set up. Please run "source dev.env" first.\n')
sys.stderr.write(
'ERROR: Vitess environment not set up. '
'Please run "source dev.env" first.\n')
sys.exit(1)

# vttop is the toplevel of the vitess source tree
@ -36,7 +40,8 @@ vtroot = os.environ['VTROOT']
vtdataroot = os.environ.get('VTDATAROOT', '/vt')

# vt_mysql_root is where MySQL is installed
vt_mysql_root = os.environ.get('VT_MYSQL_ROOT', os.path.join(vtroot, 'dist', 'mysql'))
vt_mysql_root = os.environ.get(
'VT_MYSQL_ROOT', os.path.join(vtroot, 'dist', 'mysql'))

# tmproot is the temporary place to put all test files
tmproot = os.path.join(vtdataroot, 'tmp')
@ -56,6 +61,7 @@ curl_bin = '/usr/bin/curl'
# if set, we will not build the binaries
skip_build = False


def memcached_bin():
in_vt = os.path.join(vtroot, 'bin', 'memcached')
if os.path.exists(in_vt):
@ -65,6 +71,7 @@ def memcached_bin():
# url to hit to force the logs to flush.
flush_logs_url = '/debug/flushlogs'


def setup():
global tmproot
try:
@ -73,6 +80,7 @@ def setup():
# directory already exists
pass


# port management: reserve count consecutive ports, returns the first one
def reserve_ports(count):
global vtportstart
@ -80,10 +88,13 @@ def reserve_ports(count):
vtportstart += count
return result


# simple run command, cannot use utils.run to avoid circular dependencies
def run(args, raise_on_error=True, **kargs):
try:
logging.debug("run: %s %s", str(args), ', '.join('%s=%s' % x for x in kargs.iteritems()))
logging.debug(
'run: %s %s', str(args),
', '.join('%s=%s' % x for x in kargs.iteritems()))
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
@ -100,8 +111,11 @@ def run(args, raise_on_error=True, **kargs):
logging.error('Command failed: %s:\n%s%s', ' '.join(args), stdout, stderr)
return stdout, stderr


# compile command line programs, only once
compiled_progs = []


def prog_compile(name):
if skip_build or name in compiled_progs:
return
@ -109,33 +123,39 @@ def prog_compile(name):
logging.debug('Compiling %s', name)
run(['godep', 'go', 'install'], cwd=os.path.join(vttop, 'go', 'cmd', name))

# binary management: returns the full path for a binary
# this should typically not be used outside this file, unless you want to bypass

# binary management: returns the full path for a binary this should
# typically not be used outside this file, unless you want to bypass
# global flag injection (see binary_args)
def binary_path(name):
prog_compile(name)
return os.path.join(vtroot, 'bin', name)


# returns flags specific to a given binary
# use this to globally inject flags any time a given command runs
# e.g. - if name == 'vtctl': return ['-extra_arg', 'value']
def binary_flags(name):
return []


# returns binary_path + binary_flags as a list
# this should be used instead of binary_path whenever possible
def binary_args(name):
return [binary_path(name)] + binary_flags(name)


# returns binary_path + binary_flags as a string
# this should be used instead of binary_path whenever possible
def binary_argstr(name):
return ' '.join(binary_args(name))
return ' '.join(binary_args(name))


# binary management for the MySQL distribution.
def mysql_binary_path(name):
return os.path.join(vt_mysql_root, 'bin', name)


# add environment-specific command-line options
def add_options(parser):
pass
@ -10,7 +10,9 @@ import unittest

import utils


class TestCase(unittest.TestCase):

@classmethod
def setenv(cls, env):
cls.env = env
@ -18,26 +20,30 @@ class TestCase(unittest.TestCase):
def assertContains(self, b, a):
self.assertIn(a, b)


class MultiDict(dict):

def __getattr__(self, name):
v = self[name]
if type(v)==dict:
v=MultiDict(v)
if type(v) == dict:
v = MultiDict(v)
return v

def mget(self, mkey, default=None):
keys = mkey.split(".")
keys = mkey.split('.')
try:
v = self
for key in keys:
v = v[key]
except KeyError:
v = default
if type(v)==dict:
if type(v) == dict:
v = MultiDict(v)
return v


class Tailer(object):

def __init__(self, filepath, flush=None, sleep=0, timeout=10.0):
self.filepath = filepath
self.flush = flush
@ -75,7 +81,7 @@ class Tailer(object):
self.f.seek(0, os.SEEK_END)
newpos = self.f.tell()
if newpos < self.pos:
return ""
return ''
self.f.seek(self.pos, os.SEEK_SET)
size = newpos-self.pos
self.pos = newpos

@ -85,6 +91,7 @@ class Tailer(object):
"""Returns a list of read lines."""
return self.read().splitlines()


# FIXME: Hijacked from go/vt/tabletserver/test.py
# Reuse when things come together
def execute(cmd, trap_output=False, verbose=False, **kargs):

@ -93,7 +100,7 @@ def execute(cmd, trap_output=False, verbose=False, **kargs):
kargs['stdout'] = PIPE
kargs['stderr'] = PIPE
if verbose:
print "Execute:", cmd, ', '.join('%s=%s' % x for x in kargs.iteritems())
print 'Execute:', cmd, ', '.join('%s=%s' % x for x in kargs.iteritems())
proc = Popen(args, **kargs)
proc.args = args
stdout, stderr = proc.communicate()
@ -2,8 +2,9 @@

import protocols_flavor


class GoRpcProtocolsFlavor(protocols_flavor.ProtocolsFlavor):
"""Overrides to use go rpc everywhere"""
"""Overrides to use go rpc everywhere."""

def binlog_player_protocol(self):
return 'gorpc'
@ -2,9 +2,12 @@

import protocols_flavor


class GRpcProtocolsFlavor(protocols_flavor.ProtocolsFlavor):
"""Overrides to use gRPC everywhere where it is supported.
If not supported yet, use GoRPC."""

If not supported yet, use GoRPC.
"""

def binlog_player_protocol(self):
return 'grpc'
@ -36,11 +36,11 @@ shard_replica = tablet.Tablet()
|
|||
shard_rdonly1 = tablet.Tablet()
|
||||
|
||||
# split shards
|
||||
# range "" - 80
|
||||
# range '' - 80
|
||||
shard_0_master = tablet.Tablet()
|
||||
shard_0_replica = tablet.Tablet()
|
||||
shard_0_rdonly1 = tablet.Tablet()
|
||||
# range 80 - ""
|
||||
# range 80 - ''
|
||||
shard_1_master = tablet.Tablet()
|
||||
shard_1_replica = tablet.Tablet()
|
||||
shard_1_rdonly1 = tablet.Tablet()
|
||||
|
@ -111,7 +111,7 @@ index by_msg (msg)
|
|||
) Engine=InnoDB'''
|
||||
|
||||
utils.run_vtctl(['ApplySchema',
|
||||
'-sql=' + create_table_template % ("resharding1"),
|
||||
'-sql=' + create_table_template % ('resharding1'),
|
||||
'test_keyspace'],
|
||||
auto_log=True)
|
||||
|
||||
|
@ -122,7 +122,7 @@ index by_msg (msg)
|
|||
t = 'bigint(20) unsigned'
|
||||
sql = 'alter table %s add keyspace_id ' + t
|
||||
utils.run_vtctl(['ApplySchema',
|
||||
'-sql=' + sql % ("resharding1"),
|
||||
'-sql=' + sql % ('resharding1'),
|
||||
'test_keyspace'],
|
||||
auto_log=True)
|
||||
|
||||
|
@ -133,7 +133,7 @@ index by_msg (msg)
|
|||
t = 'bigint(20) unsigned'
|
||||
sql = 'alter table %s modify keyspace_id ' + t + ' not null'
|
||||
utils.run_vtctl(['ApplySchema',
|
||||
'-sql=' + sql % ("resharding1"),
|
||||
'-sql=' + sql % ('resharding1'),
|
||||
'test_keyspace'],
|
||||
auto_log=True)
|
||||
|
||||
|
@ -166,33 +166,38 @@ index by_msg (msg)
|
|||
if keyspace_id_type == keyrange_constants.KIT_BYTES:
|
||||
k = base64.b64encode(pack_keyspace_id(keyspace_id))
|
||||
else:
|
||||
k = "%d" % keyspace_id
|
||||
tablet.mquery('vt_test_keyspace', [
|
||||
'begin',
|
||||
'insert into %s(id, msg, keyspace_id) values(%d, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%d */' % (table, id, msg, keyspace_id, k, id),
|
||||
'commit'
|
||||
], write=True)
|
||||
k = '%d' % keyspace_id
|
||||
tablet.mquery(
|
||||
'vt_test_keyspace',
|
||||
['begin',
|
||||
'insert into %s(id, msg, keyspace_id) '
|
||||
'values(%d, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%d */' %
|
||||
(table, id, msg, keyspace_id, k, id),
|
||||
'commit'],
|
||||
write=True)
|
||||
|
||||
def _get_value(self, tablet, table, id):
|
||||
return tablet.mquery('vt_test_keyspace', 'select id, msg, keyspace_id from %s where id=%d' % (table, id))
|
||||
return tablet.mquery(
|
||||
'vt_test_keyspace',
|
||||
'select id, msg, keyspace_id from %s where id=%d' % (table, id))
|
||||
|
||||
def _check_value(self, tablet, table, id, msg, keyspace_id,
|
||||
should_be_here=True):
|
||||
result = self._get_value(tablet, table, id)
|
||||
if keyspace_id_type == keyrange_constants.KIT_BYTES:
|
||||
fmt = "%s"
|
||||
fmt = '%s'
|
||||
keyspace_id = pack_keyspace_id(keyspace_id)
|
||||
else:
|
||||
fmt = "%x"
|
||||
fmt = '%x'
|
||||
if should_be_here:
|
||||
self.assertEqual(result, ((id, msg, keyspace_id),),
|
||||
("Bad row in tablet %s for id=%d, keyspace_id=" +
|
||||
fmt + ", row=%s") % (tablet.tablet_alias, id,
|
||||
('Bad row in tablet %s for id=%d, keyspace_id=' +
|
||||
fmt + ', row=%s') % (tablet.tablet_alias, id,
|
||||
keyspace_id, str(result)))
|
||||
else:
|
||||
self.assertEqual(len(result), 0,
|
||||
("Extra row in tablet %s for id=%d, keyspace_id=" +
|
||||
fmt + ": %s") % (tablet.tablet_alias, id, keyspace_id,
|
||||
('Extra row in tablet %s for id=%d, keyspace_id=' +
|
||||
fmt + ': %s') % (tablet.tablet_alias, id, keyspace_id,
|
||||
str(result)))
|
||||
|
||||
# _is_value_present_and_correct tries to read a value.
|
||||
|
@ -204,12 +209,12 @@ index by_msg (msg)
|
|||
if len(result) == 0:
|
||||
return False
|
||||
if keyspace_id_type == keyrange_constants.KIT_BYTES:
|
||||
fmt = "%s"
|
||||
fmt = '%s'
|
||||
keyspace_id = pack_keyspace_id(keyspace_id)
|
||||
else:
|
||||
fmt = "%x"
|
||||
fmt = '%x'
|
||||
self.assertEqual(result, ((id, msg, keyspace_id),),
|
||||
("Bad row in tablet %s for id=%d, keyspace_id=" + fmt) % (
|
||||
('Bad row in tablet %s for id=%d, keyspace_id=' + fmt) % (
|
||||
tablet.tablet_alias, id, keyspace_id))
|
||||
return True
|
||||
|
||||
|
@ -255,7 +260,7 @@ index by_msg (msg)
|
|||
i, 0xE000000000000000 + base + i):
|
||||
found += 1
|
||||
percent = found * 100 / count / 2
|
||||
logging.debug("I have %d%% of the data", percent)
|
||||
logging.debug('I have %d%% of the data', percent)
|
||||
return percent
|
||||
|
||||
def _check_lots_timeout(self, count, threshold, timeout, base=0):
|
||||
|
@ -267,7 +272,6 @@ index by_msg (msg)
|
|||
|
||||
# _check_lots_not_present makes sure no data is in the wrong shard
|
||||
def _check_lots_not_present(self, count, base=0):
|
||||
found = 0
|
||||
for i in xrange(count):
|
||||
self._check_value(shard_0_replica, 'resharding1', 10000 + base + i,
|
||||
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
|
||||
|
@ -283,9 +287,9 @@ index by_msg (msg)
|
|||
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', 'test_keyspace',
|
||||
'keyspace_id', keyspace_id_type])
|
||||
|
||||
shard_master.init_tablet( 'master', 'test_keyspace', '0')
|
||||
shard_master.init_tablet('master', 'test_keyspace', '0')
|
||||
shard_replica.init_tablet('replica', 'test_keyspace', '0')
|
||||
shard_rdonly1.init_tablet( 'rdonly', 'test_keyspace', '0')
|
||||
shard_rdonly1.init_tablet('rdonly', 'test_keyspace', '0')
|
||||
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
|
||||
|
||||
|
@ -313,12 +317,12 @@ index by_msg (msg)
|
|||
self._mark_sharding_key_not_null()
|
||||
|
||||
# create the split shards
|
||||
shard_0_master.init_tablet( 'master', 'test_keyspace', '-80')
|
||||
shard_0_master.init_tablet('master', 'test_keyspace', '-80')
|
||||
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
|
||||
shard_0_rdonly1.init_tablet( 'rdonly', 'test_keyspace', '-80')
|
||||
shard_1_master.init_tablet( 'master', 'test_keyspace', '80-')
|
||||
shard_0_rdonly1.init_tablet('rdonly', 'test_keyspace', '-80')
|
||||
shard_1_master.init_tablet('master', 'test_keyspace', '80-')
|
||||
shard_1_replica.init_tablet('replica', 'test_keyspace', '80-')
|
||||
shard_1_rdonly1.init_tablet( 'rdonly', 'test_keyspace', '80-')
|
||||
shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')
|
||||
|
||||
# start vttablet on the split shards (no db created,
|
||||
# so they're all not serving)
|
||||
|
@ -337,8 +341,8 @@ index by_msg (msg)
|
|||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
|
||||
auto_log=True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -\n' +
|
||||
'Partitions(rdonly): -\n' +
|
||||
'Partitions(master): -\n'
|
||||
'Partitions(rdonly): -\n'
|
||||
'Partitions(replica): -\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
|
||||
|
@ -353,14 +357,14 @@ index by_msg (msg)
|
|||
utils.run_vtworker(['--cell', 'test_nj',
|
||||
'--command_display_interval', '10ms',
|
||||
'SplitClone',
|
||||
'--exclude_tables' ,'unrelated',
|
||||
'--exclude_tables', 'unrelated',
|
||||
'--strategy=-populate_blp_checkpoint',
|
||||
'--source_reader_count', '10',
|
||||
'--min_table_size_for_split', '1',
|
||||
'test_keyspace/0'],
|
||||
auto_log=True)
|
||||
utils.run_vtctl(['ChangeSlaveType', shard_rdonly1.tablet_alias, 'rdonly'],
|
||||
auto_log=True)
|
||||
auto_log=True)
|
||||
|
||||
# check the startup values are in the right place
|
||||
self._check_startup_values()
|
||||
|
@ -369,24 +373,24 @@ index by_msg (msg)
|
|||
utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
|
||||
|
||||
# check the binlog players are running
|
||||
logging.debug("Waiting for binlog players to start on new masters...")
|
||||
logging.debug('Waiting for binlog players to start on new masters...')
|
||||
shard_0_master.wait_for_binlog_player_count(1)
|
||||
shard_1_master.wait_for_binlog_player_count(1)
|
||||
|
||||
# testing filtered replication: insert a bunch of data on shard 1,
|
||||
# check we get most of it after a few seconds, wait for binlog server
|
||||
# timeout, check we get all of it.
|
||||
logging.debug("Inserting lots of data on source shard")
|
||||
logging.debug('Inserting lots of data on source shard')
|
||||
self._insert_lots(1000)
|
||||
logging.debug("Checking 80 percent of data is sent quickly")
|
||||
logging.debug('Checking 80 percent of data is sent quickly')
|
||||
self._check_lots_timeout(1000, 80, 5)
|
||||
logging.debug("Checking all data goes through eventually")
|
||||
logging.debug('Checking all data goes through eventually')
|
||||
self._check_lots_timeout(1000, 100, 20)
|
||||
logging.debug("Checking no data was sent the wrong way")
|
||||
logging.debug('Checking no data was sent the wrong way')
|
||||
self._check_lots_not_present(1000)
|
||||
|
||||
# use vtworker to compare the data
|
||||
logging.debug("Running vtworker SplitDiff for -80")
|
||||
logging.debug('Running vtworker SplitDiff for -80')
|
||||
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', 'test_keyspace/-80'],
|
||||
auto_log=True)
|
||||
utils.run_vtctl(['ChangeSlaveType', shard_rdonly1.tablet_alias, 'rdonly'],
|
||||
|
@ -394,7 +398,7 @@ index by_msg (msg)
|
|||
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly1.tablet_alias, 'rdonly'],
|
||||
auto_log=True)
|
||||
|
||||
logging.debug("Running vtworker SplitDiff for 80-")
|
||||
logging.debug('Running vtworker SplitDiff for 80-')
|
||||
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', 'test_keyspace/80-'],
|
||||
auto_log=True)
|
||||
utils.run_vtctl(['ChangeSlaveType', shard_rdonly1.tablet_alias, 'rdonly'],
|
||||
|
@ -402,7 +406,7 @@ index by_msg (msg)
|
|||
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
|
||||
auto_log=True)
|
||||
|
||||
utils.pause("Good time to test vtworker for diffs")
|
||||
utils.pause('Good time to test vtworker for diffs')
|
||||
|
||||
# check we can't migrate the master just yet
|
||||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
|
||||
|
@ -412,8 +416,8 @@ index by_msg (msg)
|
|||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'rdonly'],
|
||||
auto_log=True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -\n' +
|
||||
'Partitions(rdonly): -80 80-\n' +
|
||||
'Partitions(master): -\n'
|
||||
'Partitions(rdonly): -80 80-\n'
|
||||
'Partitions(replica): -\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
|
||||
|
@ -421,44 +425,46 @@ index by_msg (msg)
|
|||
source_tablet = shard_replica
|
||||
destination_tablets = [shard_0_replica, shard_1_replica]
|
||||
|
||||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
|
||||
auto_log=True)
|
||||
utils.run_vtctl(
|
||||
['MigrateServedTypes', 'test_keyspace/0', 'replica'], auto_log=True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -\n' +
|
||||
'Partitions(rdonly): -80 80-\n' +
|
||||
'Partitions(master): -\n'
|
||||
'Partitions(rdonly): -80 80-\n'
|
||||
'Partitions(replica): -80 80-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
|
||||
# move replica back and forth
|
||||
utils.run_vtctl(['MigrateServedTypes', '-reverse', 'test_keyspace/0', 'replica'],
|
||||
auto_log=True)
|
||||
# After a backwards migration, queryservice should be enabled on source and disabled on destinations
|
||||
utils.run_vtctl(
|
||||
['MigrateServedTypes', '-reverse', 'test_keyspace/0', 'replica'],
|
||||
auto_log=True)
|
||||
# After a backwards migration, queryservice should be enabled on
|
||||
# source and disabled on destinations
|
||||
utils.check_tablet_query_service(self, source_tablet, True, False)
|
||||
utils.check_tablet_query_services(self, destination_tablets, False, True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -\n' +
|
||||
'Partitions(rdonly): -80 80-\n' +
|
||||
'Partitions(master): -\n'
|
||||
'Partitions(rdonly): -80 80-\n'
|
||||
'Partitions(replica): -\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
|
||||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
|
||||
auto_log=True)
|
||||
# After a forwards migration, queryservice should be disabled on source and enabled on destinations
|
||||
# After a forwards migration, queryservice should be disabled on
|
||||
# source and enabled on destinations
|
||||
utils.check_tablet_query_service(self, source_tablet, False, True)
|
||||
utils.check_tablet_query_services(self, destination_tablets, True, False)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -\n' +
|
||||
'Partitions(rdonly): -80 80-\n' +
|
||||
'Partitions(master): -\n'
|
||||
'Partitions(rdonly): -80 80-\n'
|
||||
'Partitions(replica): -80 80-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
|
||||
|
||||
# then serve master from the split shards
|
||||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
|
||||
auto_log=True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-\n'
|
||||
'Partitions(replica): -80 80-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
|
||||
|
|
|
@ -13,8 +13,12 @@ Start up steps include:
|
|||
- start VtGate instance
|
||||
|
||||
Usage example:
|
||||
java_vtgate_test_helper.py --shards=-80,80- --tablet-config='{"rdonly":1, "replica":1}' --keyspace=test_keyspace --vtgate-port=11111
|
||||
starts 1 VtGate on the specified port and 6 vttablets - 1 master, replica and rdonly each per shard.
|
||||
java_vtgate_test_helper.py --shards=-80,80- \
|
||||
--tablet-config='{"rdonly":1, "replica":1}' --keyspace=test_keyspace \
|
||||
--vtgate-port=11111
|
||||
|
||||
starts 1 VtGate on the specified port and 6 vttablets - 1 master,
|
||||
replica and rdonly each per shard.
|
||||
"""
|
||||
|
||||
import utils
|
||||
|
@ -43,9 +47,9 @@ class TestEnv(object):
|
|||
self.tablets = []
|
||||
tablet_config = json.loads(options.tablet_config)
|
||||
for shard in options.shards.split(','):
|
||||
self.tablets.append(Tablet(shard, "master"))
|
||||
self.tablets.append(Tablet(shard, 'master'))
|
||||
for tablet_type, count in tablet_config.iteritems():
|
||||
for i in range(count):
|
||||
for _ in range(count):
|
||||
self.tablets.append(Tablet(shard, tablet_type))
|
||||
|
||||
def set_up(self):
|
||||
|
@ -53,7 +57,9 @@ class TestEnv(object):
|
|||
environment.topo_server().setup()
|
||||
utils.wait_procs([t.init_mysql() for t in self.tablets])
|
||||
utils.run_vtctl(['CreateKeyspace', self.keyspace])
|
||||
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', self.keyspace, 'keyspace_id', 'uint64'])
|
||||
utils.run_vtctl(
|
||||
['SetKeyspaceShardingInfo', '-force', self.keyspace, 'keyspace_id',
|
||||
'uint64'])
|
||||
for t in self.tablets:
|
||||
t.init_tablet(t.type, keyspace=self.keyspace, shard=t.shard)
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', self.keyspace], auto_log=True)
|
||||
|
@ -65,31 +71,35 @@ class TestEnv(object):
|
|||
|
||||
t.create_db(dbname)
|
||||
t.start_vttablet(
|
||||
wait_for_state=None,
|
||||
extra_args=['-queryserver-config-schema-reload-time', '1',
|
||||
'-init_db_name_override', dbname],
|
||||
wait_for_state=None,
|
||||
extra_args=['-queryserver-config-schema-reload-time', '1',
|
||||
'-init_db_name_override', dbname],
|
||||
)
|
||||
for t in self.tablets:
|
||||
t.wait_for_vttablet_state('SERVING')
|
||||
for t in self.tablets:
|
||||
if t.type == "master":
|
||||
utils.run_vtctl(['InitShardMaster', self.keyspace+'/'+t.shard, t.tablet_alias], auto_log=True)
|
||||
if t.type == 'master':
|
||||
utils.run_vtctl(
|
||||
['InitShardMaster', self.keyspace+'/'+t.shard, t.tablet_alias],
|
||||
auto_log=True)
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', self.keyspace], auto_log=True)
|
||||
if self.schema:
|
||||
utils.run_vtctl(['ApplySchema', '-sql', self.schema, self.keyspace])
|
||||
if self.vschema:
|
||||
if self.vschema[0] == '{':
|
||||
utils.run_vtctl(['ApplyVSchema', "-vschema", self.vschema])
|
||||
utils.run_vtctl(['ApplyVSchema', '-vschema', self.vschema])
|
||||
else:
|
||||
utils.run_vtctl(['ApplyVSchema', "-vschema_file", self.vschema])
|
||||
utils.run_vtctl(['ApplyVSchema', '-vschema_file', self.vschema])
|
||||
utils.VtGate(port=self.vtgate_port).start(cache_ttl='500s')
|
||||
except:
|
||||
self.shutdown()
|
||||
raise
|
||||
|
||||
def shutdown(self):
|
||||
# Explicitly kill vtgate first because StreamingServerShutdownIT.java expects an EOF from the vtgate client
|
||||
# and not an error that vttablet killed the query (which is seen when vtgate is killed last).
|
||||
# Explicitly kill vtgate first because
|
||||
# StreamingServerShutdownIT.java expects an EOF from the vtgate
|
||||
# client and not an error that vttablet killed the query (which is
|
||||
# seen when vtgate is killed last).
|
||||
utils.vtgate.kill()
|
||||
tablet.kill_tablets(self.tablets)
|
||||
teardown_procs = [t.teardown_mysql() for t in self.tablets]
|
||||
|
@ -103,26 +113,29 @@ class TestEnv(object):
|
|||
|
||||
def parse_args():
|
||||
global options, args
|
||||
parser = optparse.OptionParser(usage="usage: %prog [options]")
|
||||
parser.add_option("--shards", action="store", type="string",
|
||||
parser = optparse.OptionParser(usage='usage: %prog [options]')
|
||||
parser.add_option('--shards', action='store', type='string',
|
||||
help="comma separated list of shard names, e.g: '-80,80-'")
|
||||
parser.add_option("--tablet-config", action="store", type="string",
|
||||
help="json config for for non-master tablets. e.g {'replica':2, 'rdonly':1}")
|
||||
parser.add_option("--keyspace", action="store", type="string")
|
||||
parser.add_option("--dbname-override", action="store", type="string")
|
||||
parser.add_option("--schema", action="store", type="string")
|
||||
parser.add_option("--vschema", action="store", type="string")
|
||||
parser.add_option("--vtgate-port", action="store", type="int")
|
||||
parser.add_option(
|
||||
'--tablet-config', action='store', type='string',
|
||||
help='json config for for non-master tablets. e.g '
|
||||
"{'replica':2, 'rdonly':1}")
|
||||
parser.add_option('--keyspace', action='store', type='string')
|
||||
parser.add_option('--dbname-override', action='store', type='string')
|
||||
parser.add_option('--schema', action='store', type='string')
|
||||
parser.add_option('--vschema', action='store', type='string')
|
||||
parser.add_option('--vtgate-port', action='store', type='int')
|
||||
utils.add_options(parser)
|
||||
(options, args) = parser.parse_args()
|
||||
utils.set_options(options)
|
||||
|
||||
|
||||
def main():
|
||||
env = TestEnv(options)
|
||||
env.set_up()
|
||||
sys.stdout.write(json.dumps({
|
||||
"port": utils.vtgate.port,
|
||||
}) + "\n")
|
||||
'port': utils.vtgate.port,
|
||||
}) + '\n')
|
||||
sys.stdout.flush()
|
||||
raw_input()
|
||||
env.shutdown()
|
||||
|
|
|
@ -14,27 +14,47 @@ from vtdb import vtrouting
|
|||
# and where clauses for streaming queries.
|
||||
|
||||
pkid_pack = struct.Struct('!Q').pack
|
||||
int_shard_kid_map = {'-10':[1, 100, 1000, 100000, 527875958493693904, 626750931627689502, 345387386794260318, 332484755310826578],
|
||||
'10-20':[1842642426274125671, 1326307661227634652, 1761124146422844620, 1661669973250483744],
|
||||
'20-30':[3361397649937244239, 3303511690915522723, 2444880764308344533, 2973657788686139039],
|
||||
'30-40':[3821005920507858605, 4575089859165626432, 3607090456016432961, 3979558375123453425],
|
||||
'40-50':[5129057445097465905, 5464969577815708398, 5190676584475132364, 5762096070688827561],
|
||||
'50-60':[6419540613918919447, 6867152356089593986, 6601838130703675400, 6132605084892127391],
|
||||
'60-70':[7251511061270371980, 7395364497868053835, 7814586147633440734, 7968977924086033834],
|
||||
'70-80':[8653665459643609079, 8419099072545971426, 9020726671664230611, 9064594986161620444],
|
||||
'80-90':[9767889778372766922, 9742070682920810358, 10296850775085416642, 9537430901666854108],
|
||||
'90-a0':[10440455099304929791, 11454183276974683945, 11185910247776122031, 10460396697869122981],
|
||||
'a0-b0':[11935085245138597119, 12115696589214223782, 12639360876311033978, 12548906240535188165],
|
||||
'b0-c0':[13379616110062597001, 12826553979133932576, 13288572810772383281, 13471801046560785347],
|
||||
'c0-d0':[14394342688314745188, 14639660031570920207, 14646353412066152016, 14186650213447467187],
|
||||
'd0-e0':[15397348460895960623, 16014223083986915239, 15058390871463382185, 15811857963302932363],
|
||||
'e0-f0':[17275711019497396001, 16979796627403646478, 16635982235308289704, 16906674090344806032],
|
||||
'f0-':[18229242992218358675, 17623451135465171527, 18333015752598164958, 17775908119782706671],
|
||||
}
|
||||
int_shard_kid_map = {
|
||||
'-10': [1, 100, 1000, 100000, 527875958493693904, 626750931627689502,
|
||||
345387386794260318, 332484755310826578],
|
||||
'10-20': [1842642426274125671, 1326307661227634652, 1761124146422844620,
|
||||
1661669973250483744],
|
||||
'20-30': [3361397649937244239, 3303511690915522723, 2444880764308344533,
|
||||
2973657788686139039],
|
||||
'30-40': [3821005920507858605, 4575089859165626432, 3607090456016432961,
|
||||
3979558375123453425],
|
||||
'40-50': [5129057445097465905, 5464969577815708398, 5190676584475132364,
|
||||
5762096070688827561],
|
||||
'50-60': [6419540613918919447, 6867152356089593986, 6601838130703675400,
|
||||
6132605084892127391],
|
||||
'60-70': [7251511061270371980, 7395364497868053835, 7814586147633440734,
|
||||
7968977924086033834],
|
||||
'70-80': [8653665459643609079, 8419099072545971426, 9020726671664230611,
|
||||
9064594986161620444],
|
||||
'80-90': [9767889778372766922, 9742070682920810358, 10296850775085416642,
|
||||
9537430901666854108],
|
||||
'90-a0': [10440455099304929791, 11454183276974683945, 11185910247776122031,
|
||||
10460396697869122981],
|
||||
'a0-b0': [11935085245138597119, 12115696589214223782, 12639360876311033978,
|
||||
12548906240535188165],
|
||||
'b0-c0': [13379616110062597001, 12826553979133932576, 13288572810772383281,
|
||||
13471801046560785347],
|
||||
'c0-d0': [14394342688314745188, 14639660031570920207, 14646353412066152016,
|
||||
14186650213447467187],
|
||||
'd0-e0': [15397348460895960623, 16014223083986915239, 15058390871463382185,
|
||||
15811857963302932363],
|
||||
'e0-f0': [17275711019497396001, 16979796627403646478, 16635982235308289704,
|
||||
16906674090344806032],
|
||||
'f0-': [18229242992218358675, 17623451135465171527, 18333015752598164958,
|
||||
17775908119782706671],
|
||||
}
|
||||
|
||||
# str_shard_kid_map is derived from int_shard_kid_map
|
||||
# by generating bin-packed strings from the int keyspace_id values.
|
||||
str_shard_kid_map = dict([(shard_name, [pkid_pack(kid) for kid in kid_list]) for shard_name, kid_list in int_shard_kid_map.iteritems()])
|
||||
str_shard_kid_map = dict(
|
||||
[(shard_name0, [pkid_pack(kid0) for kid0 in kid_list0])
|
||||
for shard_name0, kid_list0 in int_shard_kid_map.iteritems()])
|
||||
|
||||
|
||||
class TestKeyRange(unittest.TestCase):
|
||||
|
||||
|
@ -49,7 +69,7 @@ class TestKeyRange(unittest.TestCase):
|
|||
self.assertEqual(kr.End, keyrange_constants.MAX_KEY)
|
||||
self.assertEqual(str(kr), keyrange_constants.NON_PARTIAL_KEYRANGE)
|
||||
|
||||
for kr_str in int_shard_kid_map.keys():
|
||||
for kr_str in int_shard_kid_map:
|
||||
Start_raw, End_raw = kr_str.split('-')
|
||||
kr = keyrange.KeyRange(kr_str)
|
||||
self.assertEqual(kr.Start, Start_raw.strip().decode('hex'))
|
||||
|
@ -62,16 +82,17 @@ class TestKeyRange(unittest.TestCase):
|
|||
stm = vtrouting.create_parallel_task_keyrange_map(4, global_shard_count)
|
||||
|
||||
def test_keyranges_for_tasks(self):
|
||||
for shard_count in (16,32,64):
|
||||
for shard_count in (16, 32, 64):
|
||||
for num_tasks in (shard_count, shard_count*2, shard_count*4):
|
||||
stm = vtrouting.create_parallel_task_keyrange_map(num_tasks, shard_count)
|
||||
stm = vtrouting.create_parallel_task_keyrange_map(
|
||||
num_tasks, shard_count)
|
||||
self.assertEqual(len(stm.keyrange_list), num_tasks)
|
||||
|
||||
# This tests that the where clause and bind_vars generated for each shard
|
||||
# against a few sample values where keyspace_id is an int column.
|
||||
def test_bind_values_for_int_keyspace(self):
|
||||
stm = vtrouting.create_parallel_task_keyrange_map(16, 16)
|
||||
for i, kr in enumerate(stm.keyrange_list):
|
||||
for _, kr in enumerate(stm.keyrange_list):
|
||||
kr_parts = kr.split('-')
|
||||
where_clause, bind_vars = vtrouting._create_where_clause_for_keyrange(kr)
|
||||
if len(bind_vars.keys()) == 1:
|
||||
|
@ -80,9 +101,9 @@ class TestKeyRange(unittest.TestCase):
|
|||
else:
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
else:
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
self.assertNotEqual(where_clause.find('AND'), -1)
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
self.assertNotEqual(where_clause.find('AND'), -1)
|
||||
kid_list = int_shard_kid_map[kr]
|
||||
for keyspace_id in kid_list:
|
||||
if len(bind_vars.keys()) == 1:
|
||||
|
@ -94,7 +115,6 @@ class TestKeyRange(unittest.TestCase):
|
|||
self.assertGreaterEqual(keyspace_id, bind_vars['keyspace_id0'])
|
||||
self.assertLess(keyspace_id, bind_vars['keyspace_id1'])
|
||||
|
||||
|
||||
# This tests that the where clause and bind_vars generated for each shard
|
||||
# against a few sample values where keyspace_id is a str column.
|
||||
# mysql will use the hex function on string keyspace column
|
||||
|
@ -102,33 +122,38 @@ class TestKeyRange(unittest.TestCase):
|
|||
# the test emulates that by using keyspace_id.encode('hex').
|
||||
def test_bind_values_for_str_keyspace(self):
|
||||
stm = vtrouting.create_parallel_task_keyrange_map(16, 16)
|
||||
for i, kr in enumerate(stm.keyrange_list):
|
||||
for _, kr in enumerate(stm.keyrange_list):
|
||||
kr_parts = kr.split('-')
|
||||
where_clause, bind_vars = vtrouting._create_where_clause_for_keyrange(kr, keyspace_col_type=keyrange_constants.KIT_BYTES)
|
||||
where_clause, bind_vars = vtrouting._create_where_clause_for_keyrange(
|
||||
kr, keyspace_col_type=keyrange_constants.KIT_BYTES)
|
||||
if len(bind_vars.keys()) == 1:
|
||||
if kr_parts[0] == '':
|
||||
self.assertNotEqual(where_clause.find('<'), -1)
|
||||
else:
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
else:
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
self.assertNotEqual(where_clause.find('AND'), -1)
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
self.assertNotEqual(where_clause.find('>='), -1)
|
||||
self.assertNotEqual(where_clause.find('AND'), -1)
|
||||
kid_list = str_shard_kid_map[kr]
|
||||
for keyspace_id in kid_list:
|
||||
if len(bind_vars.keys()) == 1:
|
||||
if kr_parts[0] == '':
|
||||
self.assertLess(keyspace_id.encode('hex'), bind_vars['keyspace_id0'])
|
||||
self.assertLess(
|
||||
keyspace_id.encode('hex'), bind_vars['keyspace_id0'])
|
||||
else:
|
||||
self.assertGreaterEqual(keyspace_id.encode('hex'), bind_vars['keyspace_id0'])
|
||||
self.assertGreaterEqual(
|
||||
keyspace_id.encode('hex'), bind_vars['keyspace_id0'])
|
||||
else:
|
||||
self.assertGreaterEqual(keyspace_id.encode('hex'), bind_vars['keyspace_id0'])
|
||||
self.assertGreaterEqual(
|
||||
keyspace_id.encode('hex'), bind_vars['keyspace_id0'])
|
||||
self.assertLess(keyspace_id.encode('hex'), bind_vars['keyspace_id1'])
|
||||
|
||||
def test_bind_values_for_unsharded_keyspace(self):
|
||||
stm = vtrouting.create_parallel_task_keyrange_map(1, 1)
|
||||
self.assertEqual(len(stm.keyrange_list), 1)
|
||||
where_clause, bind_vars = vtrouting._create_where_clause_for_keyrange(stm.keyrange_list[0])
|
||||
where_clause, bind_vars = vtrouting._create_where_clause_for_keyrange(
|
||||
stm.keyrange_list[0])
|
||||
self.assertEqual(where_clause, "")
|
||||
self.assertEqual(bind_vars, {})
|
||||
|
||||
|
|
|
@ -13,14 +13,14 @@ import utils
|
|||
import tablet
|
||||
from protocols_flavor import protocols_flavor
|
||||
|
||||
SHARDED_KEYSPACE = "TEST_KEYSPACE_SHARDED"
|
||||
UNSHARDED_KEYSPACE = "TEST_KEYSPACE_UNSHARDED"
|
||||
SHARDED_KEYSPACE = 'TEST_KEYSPACE_SHARDED'
|
||||
UNSHARDED_KEYSPACE = 'TEST_KEYSPACE_UNSHARDED'
|
||||
|
||||
# shards for SHARDED_KEYSPACE
|
||||
# range "" - 80
|
||||
# range '' - 80
|
||||
shard_0_master = tablet.Tablet()
|
||||
shard_0_replica = tablet.Tablet()
|
||||
# range 80 - ""
|
||||
# range 80 - ''
|
||||
shard_1_master = tablet.Tablet()
|
||||
shard_1_replica = tablet.Tablet()
|
||||
|
||||
|
@ -29,17 +29,18 @@ unsharded_master = tablet.Tablet()
|
|||
unsharded_replica = tablet.Tablet()
|
||||
|
||||
shard_names = ['-80', '80-']
|
||||
shard_kid_map = {'-80': [527875958493693904, 626750931627689502,
|
||||
345387386794260318, 332484755310826578,
|
||||
1842642426274125671, 1326307661227634652,
|
||||
1761124146422844620, 1661669973250483744,
|
||||
3361397649937244239, 2444880764308344533],
|
||||
'80-': [9767889778372766922, 9742070682920810358,
|
||||
10296850775085416642, 9537430901666854108,
|
||||
10440455099304929791, 11454183276974683945,
|
||||
11185910247776122031, 10460396697869122981,
|
||||
13379616110062597001, 12826553979133932576],
|
||||
}
|
||||
shard_kid_map = {
|
||||
'-80': [527875958493693904, 626750931627689502,
|
||||
345387386794260318, 332484755310826578,
|
||||
1842642426274125671, 1326307661227634652,
|
||||
1761124146422844620, 1661669973250483744,
|
||||
3361397649937244239, 2444880764308344533],
|
||||
'80-': [9767889778372766922, 9742070682920810358,
|
||||
10296850775085416642, 9537430901666854108,
|
||||
10440455099304929791, 11454183276974683945,
|
||||
11185910247776122031, 10460396697869122981,
|
||||
13379616110062597001, 12826553979133932576],
|
||||
}
|
||||
|
||||
create_vt_insert_test = '''create table vt_insert_test (
|
||||
id bigint auto_increment,
|
||||
|
@ -73,7 +74,7 @@ def tearDownModule():
|
|||
return
|
||||
|
||||
tablet.kill_tablets([shard_0_master, shard_0_replica,
|
||||
shard_1_master, shard_1_replica])
|
||||
shard_1_master, shard_1_replica])
|
||||
teardown_procs = [
|
||||
shard_0_master.teardown_mysql(),
|
||||
shard_0_replica.teardown_mysql(),
|
||||
|
@ -95,6 +96,7 @@ def tearDownModule():
|
|||
unsharded_master.remove_tree()
|
||||
unsharded_replica.remove_tree()
|
||||
|
||||
|
||||
def setup_tablets():
|
||||
setup_sharded_keyspace()
|
||||
setup_unsharded_keyspace()
|
||||
|
@ -106,9 +108,11 @@ def setup_sharded_keyspace():
|
|||
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', SHARDED_KEYSPACE,
|
||||
'keyspace_id', 'uint64'])
|
||||
shard_0_master.init_tablet('master', keyspace=SHARDED_KEYSPACE, shard='-80')
|
||||
shard_0_replica.init_tablet('replica', keyspace=SHARDED_KEYSPACE, shard='-80')
|
||||
shard_0_replica.init_tablet(
|
||||
'replica', keyspace=SHARDED_KEYSPACE, shard='-80')
|
||||
shard_1_master.init_tablet('master', keyspace=SHARDED_KEYSPACE, shard='80-')
|
||||
shard_1_replica.init_tablet('replica', keyspace=SHARDED_KEYSPACE, shard='80-')
|
||||
shard_1_replica.init_tablet(
|
||||
'replica', keyspace=SHARDED_KEYSPACE, shard='80-')
|
||||
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', SHARDED_KEYSPACE,], auto_log=True)
|
||||
|
||||
|
@ -126,11 +130,11 @@ def setup_sharded_keyspace():
|
|||
shard_1_master.tablet_alias], auto_log=True)
|
||||
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', SHARDED_KEYSPACE],
|
||||
auto_log=True)
|
||||
auto_log=True)
|
||||
|
||||
utils.check_srv_keyspace('test_nj', SHARDED_KEYSPACE,
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-\n'
|
||||
'Partitions(replica): -80 80-\n')
|
||||
|
||||
|
||||
|
@ -138,8 +142,10 @@ def setup_unsharded_keyspace():
|
|||
utils.run_vtctl(['CreateKeyspace', UNSHARDED_KEYSPACE])
|
||||
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', UNSHARDED_KEYSPACE,
|
||||
'keyspace_id', 'uint64'])
|
||||
unsharded_master.init_tablet('master', keyspace=UNSHARDED_KEYSPACE, shard='0')
|
||||
unsharded_replica.init_tablet('replica', keyspace=UNSHARDED_KEYSPACE, shard='0')
|
||||
unsharded_master.init_tablet(
|
||||
'master', keyspace=UNSHARDED_KEYSPACE, shard='0')
|
||||
unsharded_replica.init_tablet(
|
||||
'replica', keyspace=UNSHARDED_KEYSPACE, shard='0')
|
||||
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', UNSHARDED_KEYSPACE,], auto_log=True)
|
||||
|
||||
|
@ -155,17 +161,19 @@ def setup_unsharded_keyspace():
|
|||
unsharded_master.tablet_alias], auto_log=True)
|
||||
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', UNSHARDED_KEYSPACE],
|
||||
auto_log=True)
|
||||
auto_log=True)
|
||||
|
||||
utils.check_srv_keyspace('test_nj', UNSHARDED_KEYSPACE,
|
||||
'Partitions(master): -\n' +
|
||||
'Partitions(rdonly): -\n' +
|
||||
'Partitions(master): -\n'
|
||||
'Partitions(rdonly): -\n'
|
||||
'Partitions(replica): -\n')
|
||||
|
||||
|
||||
ALL_DB_TYPES = ['master', 'rdonly', 'replica']
|
||||
|
||||
|
||||
class TestKeyspace(unittest.TestCase):
|
||||
|
||||
def _read_srv_keyspace(self, keyspace_name):
|
||||
addr = utils.vtgate.rpc_endpoint()
|
||||
protocol = protocols_flavor().vtgate_python_protocol()
|
||||
|
@ -182,10 +190,13 @@ class TestKeyspace(unittest.TestCase):
|
|||
def test_delete_keyspace(self):
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['CreateShard', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['InitTablet', '-keyspace=test_delete_keyspace', '-shard=0', 'test_nj-0000000100', 'master'])
|
||||
utils.run_vtctl(
|
||||
['InitTablet', '-keyspace=test_delete_keyspace', '-shard=0',
|
||||
'test_nj-0000000100', 'master'])
|
||||
|
||||
# Can't delete keyspace if there are shards present.
|
||||
utils.run_vtctl(['DeleteKeyspace', 'test_delete_keyspace'], expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['DeleteKeyspace', 'test_delete_keyspace'], expect_fail=True)
|
||||
# Can't delete shard if there are tablets present.
|
||||
utils.run_vtctl(['DeleteShard', 'test_delete_keyspace/0'], expect_fail=True)
|
||||
|
||||
|
@ -197,16 +208,20 @@ class TestKeyspace(unittest.TestCase):
|
|||
# Start over and this time use recursive DeleteKeyspace to do everything.
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['CreateShard', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace', '-shard=0', 'test_nj-0000000100', 'master'])
|
||||
utils.run_vtctl(
|
||||
['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace',
|
||||
'-shard=0', 'test_nj-0000000100', 'master'])
|
||||
|
||||
# Create the serving/replication entries and check that they exist,
|
||||
# so we can later check they're deleted.
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'master'])
|
||||
utils.run_vtctl(
|
||||
['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'master'])
|
||||
|
||||
# Recursive DeleteKeyspace
|
||||
utils.run_vtctl(['DeleteKeyspace', '-recursive', 'test_delete_keyspace'])
|
||||
|
@ -215,35 +230,53 @@ class TestKeyspace(unittest.TestCase):
|
|||
utils.run_vtctl(['GetKeyspace', 'test_delete_keyspace'], expect_fail=True)
|
||||
utils.run_vtctl(['GetShard', 'test_delete_keyspace/0'], expect_fail=True)
|
||||
utils.run_vtctl(['GetTablet', 'test_nj-0000000100'], expect_fail=True)
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'], expect_fail=True)
|
||||
utils.run_vtctl(['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace'], expect_fail=True)
|
||||
utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/0'], expect_fail=True)
|
||||
utils.run_vtctl(['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'master'], expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'],
|
||||
expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace'],
|
||||
expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['GetSrvShard', 'test_nj', 'test_delete_keyspace/0'], expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'master'],
|
||||
expect_fail=True)
|
||||
|
||||
def test_remove_keyspace_cell(self):
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['CreateShard', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['CreateShard', 'test_delete_keyspace/1'])
|
||||
utils.run_vtctl(['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace', '-shard=0', 'test_ca-0000000100', 'master'])
|
||||
utils.run_vtctl(['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace', '-shard=0', 'test_nj-0000000100', 'replica'])
|
||||
utils.run_vtctl(['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace', '-shard=1', 'test_nj-0000000101', 'replica'])
|
||||
utils.run_vtctl(
|
||||
['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace',
|
||||
'-shard=0', 'test_ca-0000000100', 'master'])
|
||||
utils.run_vtctl(
|
||||
['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace',
|
||||
'-shard=0', 'test_nj-0000000100', 'replica'])
|
||||
utils.run_vtctl(
|
||||
['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace',
|
||||
'-shard=1', 'test_nj-0000000101', 'replica'])
|
||||
|
||||
# Create the serving/replication entries and check that they exist,
|
||||
# so we can later check they're deleted.
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_delete_keyspace/1'])
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/1'])
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/1'])
|
||||
utils.run_vtctl(['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/1'])
|
||||
utils.run_vtctl(['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'replica'])
|
||||
utils.run_vtctl(['GetEndPoints', 'test_nj', 'test_delete_keyspace/1', 'replica'])
|
||||
utils.run_vtctl(
|
||||
['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'replica'])
|
||||
utils.run_vtctl(
|
||||
['GetEndPoints', 'test_nj', 'test_delete_keyspace/1', 'replica'])
|
||||
|
||||
# Just remove the shard from one cell (including tablets),
|
||||
# but leaving the global records and other cells/shards alone.
|
||||
utils.run_vtctl(['RemoveShardCell', '-recursive', 'test_delete_keyspace/0', 'test_nj'])
|
||||
utils.run_vtctl(
|
||||
['RemoveShardCell', '-recursive', 'test_delete_keyspace/0', 'test_nj'])
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_delete_keyspace/0'])
|
||||
|
||||
|
@ -252,29 +285,46 @@ class TestKeyspace(unittest.TestCase):
|
|||
utils.run_vtctl(['GetTablet', 'test_ca-0000000100'])
|
||||
utils.run_vtctl(['GetTablet', 'test_nj-0000000100'], expect_fail=True)
|
||||
utils.run_vtctl(['GetTablet', 'test_nj-0000000101'])
|
||||
utils.run_vtctl(['GetShardReplication', 'test_ca', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'], expect_fail=True)
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/1'])
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_ca', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'],
|
||||
expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/1'])
|
||||
utils.run_vtctl(['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetEndPoints', 'test_nj', 'test_delete_keyspace/1', 'replica'])
|
||||
utils.run_vtctl(['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'replica'], expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['GetEndPoints', 'test_nj', 'test_delete_keyspace/1', 'replica'])
|
||||
utils.run_vtctl(
|
||||
['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'replica'],
|
||||
expect_fail=True)
|
||||
|
||||
# Add it back to do another test.
|
||||
utils.run_vtctl(['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace', '-shard=0', 'test_nj-0000000100', 'replica'])
|
||||
utils.run_vtctl(
|
||||
['InitTablet', '-port=1234', '-keyspace=test_delete_keyspace',
|
||||
'-shard=0', 'test_nj-0000000100', 'replica'])
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'])
|
||||
|
||||
# Now use RemoveKeyspaceCell to remove all shards.
|
||||
utils.run_vtctl(['RemoveKeyspaceCell', '-recursive', 'test_delete_keyspace', 'test_nj'])
|
||||
utils.run_vtctl(
|
||||
['RemoveKeyspaceCell', '-recursive', 'test_delete_keyspace',
|
||||
'test_nj'])
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_delete_keyspace'])
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_delete_keyspace/1'])
|
||||
|
||||
utils.run_vtctl(['GetShardReplication', 'test_ca', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'], expect_fail=True)
|
||||
utils.run_vtctl(['GetShardReplication', 'test_nj', 'test_delete_keyspace/1'], expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_ca', 'test_delete_keyspace/0'])
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/0'],
|
||||
expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['GetShardReplication', 'test_nj', 'test_delete_keyspace/1'],
|
||||
expect_fail=True)
|
||||
|
||||
# Clean up.
|
||||
utils.run_vtctl(['DeleteKeyspace', '-recursive', 'test_delete_keyspace'])
|
||||
|
@ -299,15 +349,21 @@ class TestKeyspace(unittest.TestCase):
|
|||
sharded_ks = self._read_srv_keyspace(SHARDED_KEYSPACE)
|
||||
for _, sn in enumerate(shard_names):
|
||||
for keyspace_id in shard_kid_map[sn]:
|
||||
self.assertEqual(sharded_ks.keyspace_id_to_shard_name_for_db_type(keyspace_id, 'master'), sn)
|
||||
self.assertEqual(
|
||||
sharded_ks.keyspace_id_to_shard_name_for_db_type(keyspace_id,
|
||||
'master'), sn)
|
||||
unsharded_ks = self._read_srv_keyspace(UNSHARDED_KEYSPACE)
|
||||
for keyspace_id in shard_kid_map[sn]:
|
||||
self.assertEqual(unsharded_ks.keyspace_id_to_shard_name_for_db_type(keyspace_id, 'master'), '0')
|
||||
self.assertEqual(
|
||||
unsharded_ks.keyspace_id_to_shard_name_for_db_type(
|
||||
keyspace_id, 'master'),
|
||||
'0')
|
||||
|
||||
def test_get_srv_keyspace_names(self):
|
||||
stdout, _ = utils.run_vtctl(['GetSrvKeyspaceNames', 'test_nj'],
|
||||
trap_output=True)
|
||||
self.assertEqual(set(stdout.splitlines()), {SHARDED_KEYSPACE, UNSHARDED_KEYSPACE})
|
||||
self.assertEqual(
|
||||
set(stdout.splitlines()), {SHARDED_KEYSPACE, UNSHARDED_KEYSPACE})
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -14,25 +14,30 @@ import environment
|
|||
import tablet
|
||||
import utils
|
||||
|
||||
|
||||
class TestEnv(object):
|
||||
|
||||
def __init__(self):
|
||||
self.tablet_map={}
|
||||
|
||||
def launch(self, keyspace, shards=None, replica_count=0, rdonly_count=0, ddls=None):
|
||||
self.tablets=[]
|
||||
def launch(
|
||||
self, keyspace, shards=None, replica_count=0, rdonly_count=0, ddls=None):
|
||||
self.tablets = []
|
||||
utils.run_vtctl(['CreateKeyspace', keyspace])
|
||||
if not shards or shards[0] == "0":
|
||||
shards = ["0"]
|
||||
if not shards or shards[0] == '0':
|
||||
shards = ['0']
|
||||
else:
|
||||
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', keyspace, 'keyspace_id', 'uint64'])
|
||||
utils.run_vtctl(
|
||||
['SetKeyspaceShardingInfo', '-force', keyspace, 'keyspace_id',
|
||||
'uint64'])
|
||||
|
||||
for shard in shards:
|
||||
procs = []
|
||||
procs.append(self._start_tablet(keyspace, shard, "master", None))
|
||||
procs.append(self._start_tablet(keyspace, shard, 'master', None))
|
||||
for i in xrange(replica_count):
|
||||
procs.append(self._start_tablet(keyspace, shard, "replica", i))
|
||||
procs.append(self._start_tablet(keyspace, shard, 'replica', i))
|
||||
for i in xrange(rdonly_count):
|
||||
procs.append(self._start_tablet(keyspace, shard, "rdonly", i))
|
||||
procs.append(self._start_tablet(keyspace, shard, 'rdonly', i))
|
||||
utils.wait_procs(procs)
|
||||
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', keyspace], auto_log=True)
|
||||
|
@ -40,13 +45,13 @@ class TestEnv(object):
|
|||
for t in self.tablets:
|
||||
t.create_db('vt_' + keyspace)
|
||||
t.start_vttablet(
|
||||
wait_for_state=None,
|
||||
extra_args=['-queryserver-config-schema-reload-time', '1'],
|
||||
wait_for_state=None,
|
||||
extra_args=['-queryserver-config-schema-reload-time', '1'],
|
||||
)
|
||||
for t in self.tablets:
|
||||
t.wait_for_vttablet_state('SERVING')
|
||||
for t in self.tablets:
|
||||
if t.tablet_type == "master":
|
||||
if t.tablet_type == 'master':
|
||||
utils.run_vtctl(['InitShardMaster', keyspace+'/'+t.shard,
|
||||
t.tablet_alias], auto_log=True)
|
||||
# Force read-write even if there are no replicas.
|
||||
|
@ -55,8 +60,8 @@ class TestEnv(object):
|
|||
utils.run_vtctl(['RebuildKeyspaceGraph', keyspace], auto_log=True)
|
||||
|
||||
for ddl in ddls:
|
||||
fname = os.path.join(environment.tmproot, "ddl.sql")
|
||||
with open(fname, "w") as f:
|
||||
fname = os.path.join(environment.tmproot, 'ddl.sql')
|
||||
with open(fname, 'w') as f:
|
||||
f.write(ddl)
|
||||
utils.run_vtctl(['ApplySchema', '-sql-file', fname, keyspace])
|
||||
|
||||
|
@ -71,10 +76,10 @@ class TestEnv(object):
|
|||
def _start_tablet(self, keyspace, shard, tablet_type, index):
|
||||
t = tablet.Tablet()
|
||||
self.tablets.append(t)
|
||||
if tablet_type == "master":
|
||||
key = "%s.%s.%s" %(keyspace, shard, tablet_type)
|
||||
if tablet_type == 'master':
|
||||
key = '%s.%s.%s' %(keyspace, shard, tablet_type)
|
||||
else:
|
||||
key = "%s.%s.%s.%s" %(keyspace, shard, tablet_type, index)
|
||||
key = '%s.%s.%s.%s' % (keyspace, shard, tablet_type, index)
|
||||
self.tablet_map[key] = t
|
||||
proc = t.init_mysql()
|
||||
t.init_tablet(tablet_type, keyspace=keyspace, shard=shard)
|
||||
|
|
|
@ -7,7 +7,7 @@ import subprocess

class MysqlFlavor(object):
"""Base class with default SQL statements"""
"""Base class with default SQL statements."""

def promote_slave_commands(self):
"""Returns commands to convert a slave to a master."""

@ -33,15 +33,18 @@ class MysqlFlavor(object):
return None

def bootstrap_archive(self):
"""Returns the name of the bootstrap archive for mysqlctl, relative to vitess/data/bootstrap/"""
"""Returns the name of the bootstrap archive for mysqlctl.

Name is relative to vitess/data/bootstrap/.
"""
return "mysql-db-dir.tbz"

def master_position(self, tablet):
"""Returns the position from SHOW MASTER STATUS as a string"""
"""Returns the position from SHOW MASTER STATUS as a string."""
raise NotImplementedError()

def position_equal(self, a, b):
"""Returns true if position 'a' is equal to 'b'"""
"""Returns true if position 'a' is equal to 'b'."""
raise NotImplementedError()

def position_at_least(self, a, b):

@ -49,7 +52,7 @@ class MysqlFlavor(object):
raise NotImplementedError()

def position_after(self, a, b):
"""Returns true if position 'a' is after 'b'"""
"""Returns true if position 'a' is after 'b.'"""
return self.position_at_least(a, b) and not self.position_equal(a, b)

def position_append(self, pos, gtid):

@ -58,7 +61,9 @@ class MysqlFlavor(object):

def enable_binlog_checksum(self, tablet):
"""Enables binlog_checksum and returns True if the flavor supports it.
Returns False if the flavor doesn't support binlog_checksum."""

Returns False if the flavor doesn't support binlog_checksum.
"""
tablet.mquery("", "SET @@global.binlog_checksum=1")
return True

@ -68,7 +73,7 @@ class MysqlFlavor(object):

class MariaDB(MysqlFlavor):
"""Overrides specific to MariaDB"""
"""Overrides specific to MariaDB."""

def reset_replication_commands(self):
return [

@ -3,10 +3,9 @@
import warnings
# Dropping a table inexplicably produces a warning despite
# the "IF EXISTS" clause. Squelch these warnings.
warnings.simplefilter("ignore")
warnings.simplefilter('ignore')

import os
import logging
import unittest

import environment

@ -16,6 +15,7 @@ import tablet
master_tablet = tablet.Tablet()
replica_tablet = tablet.Tablet()


def setUpModule():
try:
environment.topo_server().setup()

@ -40,6 +40,7 @@ def setUpModule():
tearDownModule()
raise


def tearDownModule():
if utils.options.skip_teardown:
return

@ -59,7 +60,9 @@ def tearDownModule():
master_tablet.remove_tree()
replica_tablet.remove_tree()


class TestMysqlctl(unittest.TestCase):

def tearDown(self):
tablet.Tablet.check_vttablet_count()
for t in [master_tablet, replica_tablet]:

@ -76,7 +79,7 @@ class TestMysqlctl(unittest.TestCase):
master_tablet.start_vttablet(wait_for_state=None,
extra_env={'MYSQL_FLAVOR': ''})
replica_tablet.start_vttablet(wait_for_state=None,
extra_env={'MYSQL_FLAVOR': ''})
extra_env={'MYSQL_FLAVOR': ''})
master_tablet.wait_for_vttablet_state('SERVING')
replica_tablet.wait_for_vttablet_state('SERVING')

@ -2,6 +2,7 @@

import logging


class ProtocolsFlavor(object):
"""Base class for protocols"""

@ -59,9 +60,11 @@ class ProtocolsFlavor(object):
__knows_protocols_flavor_map = {}
__protocols_flavor = None


def protocols_flavor():
return __protocols_flavor


def set_protocols_flavor(flavor):
global __protocols_flavor

@ -121,7 +121,7 @@ class TestPythonClient(unittest.TestCase):
def _open_stream_keyranges_cursor(self):
kr = keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)
return self.conn.cursor(
'keyspace', 'master', keyranges=[kr],
'keyspace', 'master', keyranges=[kr],
cursorclass=vtgate_cursor.StreamVTGateCursor)

def _open_stream_keyspace_ids_cursor(self):

@ -263,7 +263,5 @@ class TestPythonClient(unittest.TestCase):
self._open_stream_keyranges_cursor(),
cursor_stream_execute_keyranges_method)


if __name__ == '__main__':
utils.main()

@ -22,11 +22,12 @@ from topo_flavor.server import set_topo_server_flavor


def main():
parser = optparse.OptionParser(usage="usage: %prog [options] [test_names]")
parser.add_option("-m", "--memcache", action="store_true", default=False,
help="starts a memcache d, and tests rowcache")
parser.add_option("-e", "--env", default='vttablet',
help="Environment that will be used. Valid options: vttablet, vtocc")
parser = optparse.OptionParser(usage='usage: %prog [options] [test_names]')
parser.add_option('-m', '--memcache', action='store_true', default=False,
help='starts a memcache d, and tests rowcache')
parser.add_option(
'-e', '--env', default='vttablet',
help='Environment that will be used. Valid options: vttablet, vtocc')
utils.add_options(parser)
(options, args) = parser.parse_args()

@ -35,6 +36,7 @@ def main():

run_tests(options, args)


def run_tests(options, args):
suite = unittest.TestSuite()
if args:

@ -51,7 +53,7 @@ def run_tests(options, args):
elif hasattr(cache_tests.TestWillNotBeCached, arg) and options.memcache:
suite.addTest(cache_tests.TestWillNotBeCached(arg))
else:
raise Exception(arg, "not found in tests")
raise Exception(arg, 'not found in tests')
else:
modules = [nocache_tests, stream_tests, status_tests]
if options.memcache:

@ -63,18 +65,19 @@ def run_tests(options, args):
try:
env.memcache = options.memcache
env.setUp()
print "Starting queryservice_test.py: %s" % options.env
print 'Starting queryservice_test.py: %s' % options.env
sys.stdout.flush()
framework.TestCase.setenv(env)
result = unittest.TextTestRunner(verbosity=options.verbose, failfast=True).run(suite)
result = unittest.TextTestRunner(
verbosity=options.verbose, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception("test failures")
raise Exception('test failures')
finally:
if not options.skip_teardown:
env.tearDown()
if options.keep_logs:
print("Leaving temporary files behind (--keep-logs), please "
"clean up before next run: " + os.environ["VTDATAROOT"])
print('Leaving temporary files behind (--keep-logs), please '
'clean up before next run: ' + os.environ['VTDATAROOT'])

if __name__ == "__main__":
if __name__ == '__main__':
main()

@ -2,7 +2,7 @@

import warnings
# Dropping a table inexplicably produces a warning despite
# the "IF EXISTS" clause. Squelch these warnings.
# the 'IF EXISTS' clause. Squelch these warnings.
warnings.simplefilter('ignore')

import logging

@ -78,8 +78,8 @@ class TestReparent(unittest.TestCase):
) Engine=InnoDB'''

def _populate_vt_insert_test(self, master_tablet, index):
q = "insert into vt_insert_test(id, msg) values (%d, 'test %d')" % \
(index, index)
q = ("insert into vt_insert_test(id, msg) values (%d, 'test %d')" %
(index, index))
master_tablet.mquery('vt_test_keyspace', q, write=True)

def _check_vt_insert_test(self, tablet, index):

@ -101,11 +101,11 @@ class TestReparent(unittest.TestCase):
self.assertEqual(
len(ep['entries']), 1, 'Wrong number of entries: %s' % str(ep))
port = ep['entries'][0]['port_map']['vt']
self.assertEqual(port, expected_port,
'Unexpected port: %d != %d from %s' % (port, expected_port,
str(ep)))
self.assertEqual(
port, expected_port,
'Unexpected port: %d != %d from %s' % (port, expected_port, str(ep)))
host = ep['entries'][0]['host']
# Hostname was set explicitly to "localhost" with -tablet_hostname flag.
# Hostname was set explicitly to 'localhost' with -tablet_hostname flag.
if not host.startswith('localhost'):
self.fail(
'Invalid hostname %s was expecting something starting with %s' %

@ -160,7 +160,8 @@ class TestReparent(unittest.TestCase):
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/0'])
utils.validate_topology()

# Force the slaves to reparent assuming that all the datasets are identical.
# Force the slaves to reparent assuming that all the datasets are
# identical.
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
t.reset_replication()
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',

@ -187,7 +188,8 @@ class TestReparent(unittest.TestCase):
tablet_62344.tablet_alias],
expect_fail=True)
logging.debug('Failed ScrapTablet output:\n' + stderr)
if 'connection refused' not in stderr and protocols_flavor().rpc_timeout_message() not in stderr:
if ('connection refused' not in stderr and
protocols_flavor().rpc_timeout_message() not in stderr):
self.fail("didn't find the right error strings in failed ScrapTablet: " +
stderr)

@ -254,7 +256,8 @@ class TestReparent(unittest.TestCase):
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/' + shard_id])
utils.validate_topology()

# Force the slaves to reparent assuming that all the datasets are identical.
# Force the slaves to reparent assuming that all the datasets are
# identical.
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
t.reset_replication()
utils.run_vtctl(['InitShardMaster', 'test_keyspace/' + shard_id,

@ -325,7 +328,8 @@ class TestReparent(unittest.TestCase):
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/' + shard_id])
utils.validate_topology()

# Force the slaves to reparent assuming that all the datasets are identical.
# Force the slaves to reparent assuming that all the datasets are
# identical.
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
t.reset_replication()
utils.run_vtctl(['InitShardMaster', 'test_keyspace/' + shard_id,

@ -339,8 +343,9 @@ class TestReparent(unittest.TestCase):
self._check_master_cell('test_nj', shard_id, 'test_nj')
self._check_master_cell('test_ny', shard_id, 'test_nj')

# Convert two replica to spare. That should leave only one node serving traffic,
# but still needs to appear in the replication graph.
# Convert two replica to spare. That should leave only one node
# serving traffic, but still needs to appear in the replication
# graph.
utils.run_vtctl(['ChangeSlaveType', tablet_41983.tablet_alias, 'spare'])
utils.run_vtctl(['ChangeSlaveType', tablet_31981.tablet_alias, 'spare'])
utils.validate_topology()

@ -417,7 +422,8 @@ class TestReparent(unittest.TestCase):
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/' + shard_id])
utils.validate_topology()

# Force the slaves to reparent assuming that all the datasets are identical.
# Force the slaves to reparent assuming that all the datasets are
# identical.
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
t.reset_replication()
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/' + shard_id,

@ -487,7 +493,8 @@ class TestReparent(unittest.TestCase):
tablet_62044.mquery('', mysql_flavor().promote_slave_commands())
new_pos = mysql_flavor().master_position(tablet_62044)
logging.debug('New master position: %s', str(new_pos))
# Use "localhost" as hostname because Travis CI worker hostnames are too long for MySQL replication.
# Use 'localhost' as hostname because Travis CI worker hostnames
# are too long for MySQL replication.
changeMasterCmds = mysql_flavor().change_master_commands(
'localhost',
tablet_62044.mysql_port,

@ -551,14 +558,15 @@ class TestReparent(unittest.TestCase):

# make sure the master health stream says it's the master too
# (health check is disabled on these servers, force it first)
utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "replica"])
utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias, 'replica'])
health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
tablet_62044.tablet_alias])
self.assertEqual(health['target']['tablet_type'],
tablet.Tablet.tablet_type_value['MASTER'])
# have to compare the int version, or the rounding errors can break
self.assertTrue(health['tablet_externally_reparented_timestamp'] >= int(base_time))
self.assertTrue(
health['tablet_externally_reparented_timestamp'] >= int(base_time))

# See if a missing slave can be safely reparented after the fact.
def test_reparent_with_down_slave(self, shard_id='0'):

@ -23,11 +23,11 @@ keyspace_id_type = keyrange_constants.KIT_UINT64
|
|||
pack_keyspace_id = struct.Struct('!Q').pack
|
||||
|
||||
# initial shards
|
||||
# range "" - 80
|
||||
# range '' - 80
|
||||
shard_0_master = tablet.Tablet()
|
||||
shard_0_replica = tablet.Tablet()
|
||||
shard_0_ny_rdonly = tablet.Tablet(cell='ny')
|
||||
# range 80 - ""
|
||||
# range 80 - ''
|
||||
shard_1_master = tablet.Tablet()
|
||||
shard_1_slave1 = tablet.Tablet()
|
||||
shard_1_slave2 = tablet.Tablet()
|
||||
|
@ -39,7 +39,7 @@ shard_1_rdonly1 = tablet.Tablet()
|
|||
shard_2_master = tablet.Tablet()
|
||||
shard_2_replica1 = tablet.Tablet()
|
||||
shard_2_replica2 = tablet.Tablet()
|
||||
# range c0 - ""
|
||||
# range c0 - ''
|
||||
shard_3_master = tablet.Tablet()
|
||||
shard_3_replica = tablet.Tablet()
|
||||
shard_3_rdonly1 = tablet.Tablet()
|
||||
|
@ -127,29 +127,35 @@ class InsertThread(threading.Thread):
|
|||
if keyspace_id_type == keyrange_constants.KIT_BYTES:
|
||||
self.str_keyspace_id = base64.b64encode(pack_keyspace_id(keyspace_id))
|
||||
else:
|
||||
self.str_keyspace_id = "%d" % keyspace_id
|
||||
self.str_keyspace_id = '%d' % keyspace_id
|
||||
self.done = False
|
||||
|
||||
self.tablet.mquery('vt_test_keyspace', [
|
||||
'begin',
|
||||
'insert into timestamps(name, time_milli, keyspace_id) values("%s", %d, 0x%x) /* EMD keyspace_id:%s user_id:%d */' %
|
||||
(self.object_name, long(time.time() * 1000), self.keyspace_id,
|
||||
self.str_keyspace_id, self.user_id),
|
||||
'commit'
|
||||
], write=True, user='vt_app')
|
||||
self.tablet.mquery(
|
||||
'vt_test_keyspace',
|
||||
['begin',
|
||||
'insert into timestamps(name, time_milli, keyspace_id) '
|
||||
"values('%s', %d, 0x%x) /* EMD keyspace_id:%s user_id:%d */" %
|
||||
(self.object_name, long(time.time() * 1000), self.keyspace_id,
|
||||
self.str_keyspace_id, self.user_id),
|
||||
'commit'],
|
||||
write=True, user='vt_app')
|
||||
self.start()
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
while not self.done:
|
||||
self.tablet.mquery('vt_test_keyspace', [
|
||||
'begin',
|
||||
'update timestamps set time_milli=%d where name="%s" /* EMD keyspace_id:%s user_id:%d */' % (long(time.time() * 1000), self.object_name, self.str_keyspace_id, self.user_id),
|
||||
'commit'
|
||||
], write=True, user='vt_app')
|
||||
self.tablet.mquery(
|
||||
'vt_test_keyspace',
|
||||
['begin',
|
||||
'update timestamps set time_milli=%d '
|
||||
'where name="%s" /* EMD keyspace_id:%s user_id:%d */' %
|
||||
(long(time.time() * 1000), self.object_name,
|
||||
self.str_keyspace_id, self.user_id),
|
||||
'commit'],
|
||||
write=True, user='vt_app')
|
||||
time.sleep(0.2)
|
||||
except Exception as e:
|
||||
logging.error("InsertThread got exception: %s", e)
|
||||
logging.error('InsertThread got exception: %s', e)
|
||||
|
||||
|
||||
# MonitorLagThread will get values from a database, and compare the timestamp
|
||||
|
@ -170,17 +176,20 @@ class MonitorLagThread(threading.Thread):
|
|||
def run(self):
|
||||
try:
|
||||
while not self.done:
|
||||
result = self.tablet.mquery('vt_test_keyspace', 'select time_milli from timestamps where name="%s"' % self.object_name)
|
||||
result = self.tablet.mquery(
|
||||
'vt_test_keyspace',
|
||||
'select time_milli from timestamps where name="%s"' %
|
||||
self.object_name)
|
||||
if result:
|
||||
lag = long(time.time() * 1000) - long(result[0][0])
|
||||
logging.debug("MonitorLagThread(%s) got %d", self.object_name, lag)
|
||||
logging.debug('MonitorLagThread(%s) got %d', self.object_name, lag)
|
||||
self.sample_count += 1
|
||||
self.lag_sum += lag
|
||||
if lag > self.max_lag:
|
||||
self.max_lag = lag
|
||||
time.sleep(1.0)
|
||||
except Exception as e:
|
||||
logging.error("MonitorLagThread got exception: %s", e)
|
||||
logging.error('MonitorLagThread got exception: %s', e)
|
||||
|
||||
|
||||
class TestResharding(unittest.TestCase):
|
||||
|
@ -212,15 +221,15 @@ primary key (name)
|
|||
) Engine=InnoDB'''
|
||||
|
||||
utils.run_vtctl(['ApplySchema',
|
||||
'-sql=' + create_table_template % ("resharding1"),
|
||||
'-sql=' + create_table_template % ('resharding1'),
|
||||
'test_keyspace'],
|
||||
auto_log=True)
|
||||
utils.run_vtctl(['ApplySchema',
|
||||
'-sql=' + create_table_template % ("resharding2"),
|
||||
'-sql=' + create_table_template % ('resharding2'),
|
||||
'test_keyspace'],
|
||||
auto_log=True)
|
||||
utils.run_vtctl(['ApplySchema',
|
||||
'-sql=' + create_view_template % ("view1", "resharding1"),
|
||||
'-sql=' + create_view_template % ('view1', 'resharding1'),
|
||||
'test_keyspace'],
|
||||
auto_log=True)
|
||||
utils.run_vtctl(['ApplySchema',
|
||||
|
@ -238,33 +247,38 @@ primary key (name)
|
|||
if keyspace_id_type == keyrange_constants.KIT_BYTES:
|
||||
k = base64.b64encode(pack_keyspace_id(keyspace_id))
|
||||
else:
|
||||
k = "%d" % keyspace_id
|
||||
tablet.mquery('vt_test_keyspace', [
|
||||
'begin',
|
||||
'insert into %s(id, msg, keyspace_id) values(%d, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%d */' % (table, id, msg, keyspace_id, k, id),
|
||||
'commit'
|
||||
], write=True)
|
||||
k = '%d' % keyspace_id
|
||||
tablet.mquery(
|
||||
'vt_test_keyspace',
|
||||
['begin',
|
||||
'insert into %s(id, msg, keyspace_id) '
|
||||
'values(%d, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%d */' %
|
||||
(table, id, msg, keyspace_id, k, id),
|
||||
'commit'],
|
||||
write=True)
|
||||
|
||||
def _get_value(self, tablet, table, id):
|
||||
return tablet.mquery('vt_test_keyspace', 'select id, msg, keyspace_id from %s where id=%d' % (table, id))
|
||||
return tablet.mquery(
|
||||
'vt_test_keyspace',
|
||||
'select id, msg, keyspace_id from %s where id=%d' % (table, id))
|
||||
|
||||
def _check_value(self, tablet, table, id, msg, keyspace_id,
|
||||
should_be_here=True):
|
||||
result = self._get_value(tablet, table, id)
|
||||
if keyspace_id_type == keyrange_constants.KIT_BYTES:
|
||||
fmt = "%s"
|
||||
fmt = '%s'
|
||||
keyspace_id = pack_keyspace_id(keyspace_id)
|
||||
else:
|
||||
fmt = "%x"
|
||||
fmt = '%x'
|
||||
if should_be_here:
|
||||
self.assertEqual(result, ((id, msg, keyspace_id),),
|
||||
("Bad row in tablet %s for id=%d, keyspace_id=" +
|
||||
fmt + ", row=%s") % (tablet.tablet_alias, id,
|
||||
('Bad row in tablet %s for id=%d, keyspace_id=' +
|
||||
fmt + ', row=%s') % (tablet.tablet_alias, id,
|
||||
keyspace_id, str(result)))
|
||||
else:
|
||||
self.assertEqual(len(result), 0,
|
||||
("Extra row in tablet %s for id=%d, keyspace_id=" +
|
||||
fmt + ": %s") % (tablet.tablet_alias, id, keyspace_id,
|
||||
('Extra row in tablet %s for id=%d, keyspace_id=' +
|
||||
fmt + ': %s') % (tablet.tablet_alias, id, keyspace_id,
|
||||
str(result)))
|
||||
|
||||
# _is_value_present_and_correct tries to read a value.
|
||||
|
@ -276,12 +290,12 @@ primary key (name)
|
|||
if len(result) == 0:
|
||||
return False
|
||||
if keyspace_id_type == keyrange_constants.KIT_BYTES:
|
||||
fmt = "%s"
|
||||
fmt = '%s'
|
||||
keyspace_id = pack_keyspace_id(keyspace_id)
|
||||
else:
|
||||
fmt = "%x"
|
||||
fmt = '%x'
|
||||
self.assertEqual(result, ((id, msg, keyspace_id),),
|
||||
("Bad row in tablet %s for id=%d, keyspace_id=" + fmt) % (
|
||||
('Bad row in tablet %s for id=%d, keyspace_id=' + fmt) % (
|
||||
tablet.tablet_alias, id, keyspace_id))
|
||||
return True
|
||||
|
||||
|
@ -352,7 +366,7 @@ primary key (name)
|
|||
i, 0xE000000000000000 + base + i):
|
||||
found += 1
|
||||
percent = found * 100 / count / 2
|
||||
logging.debug("I have %d%% of the data", percent)
|
||||
logging.debug('I have %d%% of the data', percent)
|
||||
return percent
|
||||
|
||||
def _check_lots_timeout(self, count, threshold, timeout, base=0):
|
||||
|
@ -361,8 +375,8 @@ primary key (name)
|
|||
if value >= threshold:
|
||||
return
|
||||
if timeout == 0:
|
||||
self.fail("timeout waiting for %d%% of the data" % threshold)
|
||||
logging.debug("sleeping until we get %d%%", threshold)
|
||||
self.fail('timeout waiting for %d%% of the data' % threshold)
|
||||
logging.debug('sleeping until we get %d%%', threshold)
|
||||
time.sleep(1)
|
||||
timeout -= 1
|
||||
|
||||
|
@ -382,37 +396,41 @@ primary key (name)
|
|||
self.assertIn('UpdateStreamKeyRangeStatements', v)
|
||||
self.assertIn('UpdateStreamKeyRangeTransactions', v)
|
||||
|
||||
def _check_binlog_player_vars(self, tablet, seconds_behind_master_max = 0):
|
||||
def _check_binlog_player_vars(self, tablet, seconds_behind_master_max=0):
|
||||
v = utils.get_vars(tablet.port)
|
||||
self.assertIn('BinlogPlayerMapSize', v)
|
||||
self.assertIn('BinlogPlayerSecondsBehindMaster', v)
|
||||
self.assertIn('BinlogPlayerSecondsBehindMasterMap', v)
|
||||
self.assertIn('BinlogPlayerSourceShardNameMap', v)
|
||||
self.assertIn('0', v['BinlogPlayerSourceShardNameMap'])
|
||||
self.assertEquals(v['BinlogPlayerSourceShardNameMap']['0'], 'test_keyspace/80-')
|
||||
self.assertEquals(
|
||||
v['BinlogPlayerSourceShardNameMap']['0'], 'test_keyspace/80-')
|
||||
self.assertIn('BinlogPlayerSourceTabletAliasMap', v)
|
||||
self.assertIn('0', v['BinlogPlayerSourceTabletAliasMap'])
|
||||
if seconds_behind_master_max != 0:
|
||||
self.assertTrue(v['BinlogPlayerSecondsBehindMaster'] <
|
||||
seconds_behind_master_max,
|
||||
'BinlogPlayerSecondsBehindMaster is too high: %d > %d' % (
|
||||
v['BinlogPlayerSecondsBehindMaster'],
|
||||
seconds_behind_master_max))
|
||||
self.assertTrue(v['BinlogPlayerSecondsBehindMasterMap']['0'] <
|
||||
seconds_behind_master_max,
|
||||
'BinlogPlayerSecondsBehindMasterMap is too high: %d > %d' % (
|
||||
v['BinlogPlayerSecondsBehindMasterMap']['0'],
|
||||
seconds_behind_master_max))
|
||||
self.assertTrue(
|
||||
v['BinlogPlayerSecondsBehindMaster'] <
|
||||
seconds_behind_master_max,
|
||||
'BinlogPlayerSecondsBehindMaster is too high: %d > %d' % (
|
||||
v['BinlogPlayerSecondsBehindMaster'],
|
||||
seconds_behind_master_max))
|
||||
self.assertTrue(
|
||||
v['BinlogPlayerSecondsBehindMasterMap']['0'] <
|
||||
seconds_behind_master_max,
|
||||
'BinlogPlayerSecondsBehindMasterMap is too high: %d > %d' % (
|
||||
v['BinlogPlayerSecondsBehindMasterMap']['0'],
|
||||
seconds_behind_master_max))
|
||||
|
||||
def _check_stream_health_equals_binlog_player_vars(self, tablet):
|
||||
blp_stats = utils.get_vars(tablet.port)
|
||||
# Enforce health check because it's not running by default as tablets are not started with it.
|
||||
utils.run_vtctl(["RunHealthCheck", tablet.tablet_alias, 'replica'])
|
||||
# Enforce health check because it's not running by default as
|
||||
# tablets are not started with it.
|
||||
utils.run_vtctl(['RunHealthCheck', tablet.tablet_alias, 'replica'])
|
||||
stream_health, _ = utils.run_vtctl(['VtTabletStreamHealth',
|
||||
'-count', '1',
|
||||
tablet.tablet_alias],
|
||||
trap_output=True, auto_log=True)
|
||||
logging.debug("Got health: %s", stream_health)
|
||||
logging.debug('Got health: %s', stream_health)
|
||||
data = json.loads(stream_health)
|
||||
self.assertIn('realtime_stats', data)
|
||||
self.assertNotIn('health_error', data['realtime_stats'])
|
||||
|
@ -425,23 +443,27 @@ primary key (name)
|
|||
'seconds_behind_master_filtered_replication', 0))
|
||||
|
||||
def _test_keyrange_constraints(self):
|
||||
with self.assertRaisesRegexp(dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
|
||||
with self.assertRaisesRegexp(
|
||||
dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
|
||||
self._exec_dml(
|
||||
shard_0_master,
|
||||
"insert into resharding1(id, msg, keyspace_id) values(1, 'msg', :keyspace_id)",
|
||||
{"keyspace_id": 0x9000000000000000},
|
||||
shard_0_master,
|
||||
"insert into resharding1(id, msg, keyspace_id) "
|
||||
" values(1, 'msg', :keyspace_id)",
|
||||
{'keyspace_id': 0x9000000000000000},
|
||||
)
|
||||
with self.assertRaisesRegexp(dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
|
||||
with self.assertRaisesRegexp(
|
||||
dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
|
||||
self._exec_dml(
|
||||
shard_0_master,
|
||||
"update resharding1 set msg = 'msg' where id = 1",
|
||||
{"keyspace_id": 0x9000000000000000},
|
||||
shard_0_master,
|
||||
"update resharding1 set msg = 'msg' where id = 1",
|
||||
{'keyspace_id': 0x9000000000000000},
|
||||
)
|
||||
with self.assertRaisesRegexp(dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
|
||||
with self.assertRaisesRegexp(
|
||||
dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
|
||||
self._exec_dml(
|
||||
shard_0_master,
|
||||
"delete from resharding1 where id = 1",
|
||||
{"keyspace_id": 0x9000000000000000},
|
||||
shard_0_master,
|
||||
'delete from resharding1 where id = 1',
|
||||
{'keyspace_id': 0x9000000000000000},
|
||||
)
|
||||
|
||||
def test_resharding(self):
|
||||
|
@ -456,10 +478,10 @@ primary key (name)
|
|||
'-force', '-split_shard_count', '4',
|
||||
'test_keyspace', 'keyspace_id', keyspace_id_type])
|
||||
|
||||
shard_0_master.init_tablet( 'master', 'test_keyspace', '-80')
|
||||
shard_0_master.init_tablet('master', 'test_keyspace', '-80')
|
||||
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
|
||||
shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
|
||||
shard_1_master.init_tablet( 'master', 'test_keyspace', '80-')
|
||||
shard_1_master.init_tablet('master', 'test_keyspace', '80-')
|
||||
shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-')
|
||||
shard_1_slave2.init_tablet('spare', 'test_keyspace', '80-')
|
||||
shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
|
||||
|
@ -486,7 +508,7 @@ primary key (name)
|
|||
shard_0_ny_rdonly.wait_for_vttablet_state('SERVING')
|
||||
shard_1_master.wait_for_vttablet_state('SERVING')
|
||||
shard_1_slave1.wait_for_vttablet_state('SERVING')
|
||||
shard_1_slave2.wait_for_vttablet_state('NOT_SERVING') # spare
|
||||
shard_1_slave2.wait_for_vttablet_state('NOT_SERVING') # spare
|
||||
shard_1_ny_rdonly.wait_for_vttablet_state('SERVING')
|
||||
shard_1_rdonly1.wait_for_vttablet_state('SERVING')
|
||||
|
||||
|
@ -502,12 +524,12 @@ primary key (name)
|
|||
self._test_keyrange_constraints()
|
||||
|
||||
# create the split shards
|
||||
shard_2_master.init_tablet( 'master', 'test_keyspace', '80-c0')
|
||||
shard_2_replica1.init_tablet('spare', 'test_keyspace', '80-c0')
|
||||
shard_2_replica2.init_tablet('spare', 'test_keyspace', '80-c0')
|
||||
shard_3_master.init_tablet( 'master', 'test_keyspace', 'c0-')
|
||||
shard_3_replica.init_tablet( 'spare', 'test_keyspace', 'c0-')
|
||||
shard_3_rdonly1.init_tablet( 'rdonly', 'test_keyspace', 'c0-')
|
||||
shard_2_master.init_tablet('master', 'test_keyspace', '80-c0')
|
||||
shard_2_replica1.init_tablet('spare', 'test_keyspace', '80-c0')
|
||||
shard_2_replica2.init_tablet('spare', 'test_keyspace', '80-c0')
|
||||
shard_3_master.init_tablet('master', 'test_keyspace', 'c0-')
|
||||
shard_3_replica.init_tablet('spare', 'test_keyspace', 'c0-')
|
||||
shard_3_rdonly1.init_tablet('rdonly', 'test_keyspace', 'c0-')
|
||||
|
||||
# start vttablet on the split shards (no db created,
|
||||
# so they're all not serving)
|
||||
|
@ -546,7 +568,7 @@ primary key (name)
|
|||
utils.run_vtworker(['--cell', 'test_nj',
|
||||
'--command_display_interval', '10ms',
|
||||
'SplitClone',
|
||||
'--exclude_tables' ,'unrelated',
|
||||
'--exclude_tables', 'unrelated',
|
||||
'--strategy=-populate_blp_checkpoint',
|
||||
'--source_reader_count', '10',
|
||||
'--min_table_size_for_split', '1',
|
||||
|
@ -579,19 +601,19 @@ primary key (name)
|
|||
# testing filtered replication: insert a bunch of data on shard 1,
|
||||
# check we get most of it after a few seconds, wait for binlog server
|
||||
# timeout, check we get all of it.
|
||||
logging.debug("Inserting lots of data on source shard")
|
||||
logging.debug('Inserting lots of data on source shard')
|
||||
self._insert_lots(1000)
|
||||
logging.debug("Checking 80 percent of data is sent quickly")
|
||||
logging.debug('Checking 80 percent of data is sent quickly')
|
||||
self._check_lots_timeout(1000, 80, 5)
|
||||
logging.debug("Checking all data goes through eventually")
|
||||
logging.debug('Checking all data goes through eventually')
|
||||
self._check_lots_timeout(1000, 100, 20)
|
||||
logging.debug("Checking no data was sent the wrong way")
|
||||
logging.debug('Checking no data was sent the wrong way')
|
||||
self._check_lots_not_present(1000)
|
||||
self._check_binlog_player_vars(shard_2_master, seconds_behind_master_max=30)
|
||||
self._check_binlog_player_vars(shard_3_master, seconds_behind_master_max=30)
|
||||
|
||||
# use vtworker to compare the data
|
||||
logging.debug("Running vtworker SplitDiff")
|
||||
logging.debug('Running vtworker SplitDiff')
|
||||
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', '--exclude_tables',
|
||||
'unrelated', 'test_keyspace/c0-'],
|
||||
auto_log=True)
|
||||
|
@ -600,7 +622,7 @@ primary key (name)
|
|||
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
|
||||
auto_log=True)
|
||||
|
||||
utils.pause("Good time to test vtworker for diffs")
|
||||
utils.pause('Good time to test vtworker for diffs')
|
||||
|
||||
# get status for a destination master tablet, make sure we have it all
|
||||
shard_2_master_status = shard_2_master.get_status()
|
||||
|
@ -610,12 +632,12 @@ primary key (name)
|
|||
|
||||
# start a thread to insert data into shard_1 in the background
|
||||
# with current time, and monitor the delay
|
||||
insert_thread_1 = InsertThread(shard_1_master, "insert_low", 10000,
|
||||
insert_thread_1 = InsertThread(shard_1_master, 'insert_low', 10000,
|
||||
0x9000000000000000)
|
||||
insert_thread_2 = InsertThread(shard_1_master, "insert_high", 10001,
|
||||
insert_thread_2 = InsertThread(shard_1_master, 'insert_high', 10001,
|
||||
0xD000000000000000)
|
||||
monitor_thread_1 = MonitorLagThread(shard_2_replica2, "insert_low")
|
||||
monitor_thread_2 = MonitorLagThread(shard_3_replica, "insert_high")
|
||||
monitor_thread_1 = MonitorLagThread(shard_2_replica2, 'insert_low')
|
||||
monitor_thread_2 = MonitorLagThread(shard_3_replica, 'insert_high')
|
||||
|
||||
# tests a failover switching serving to a different replica
|
||||
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica'])
|
||||
|
@ -624,9 +646,9 @@ primary key (name)
|
|||
shard_1_slave1.wait_for_vttablet_state('NOT_SERVING')
|
||||
|
||||
# test data goes through again
|
||||
logging.debug("Inserting lots of data on source shard")
|
||||
logging.debug('Inserting lots of data on source shard')
|
||||
self._insert_lots(1000, base=1000)
|
||||
logging.debug("Checking 80 percent of data was sent quickly")
|
||||
logging.debug('Checking 80 percent of data was sent quickly')
|
||||
self._check_lots_timeout(1000, 80, 5, base=1000)
|
||||
|
||||
# check we can't migrate the master just yet
|
||||
|
@ -649,13 +671,13 @@ primary key (name)
|
|||
utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
|
||||
'test_keyspace/80-', 'rdonly'], auto_log=True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n'
|
||||
'Partitions(replica): -80 80-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
utils.check_srv_keyspace('test_ny', 'test_keyspace',
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-\n'
|
||||
'Partitions(replica): -80 80-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
|
||||
|
@ -666,13 +688,13 @@ primary key (name)
|
|||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'],
|
||||
auto_log=True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n'
|
||||
'Partitions(replica): -80 80-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
utils.check_srv_keyspace('test_ny', 'test_keyspace',
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n'
|
||||
'Partitions(replica): -80 80-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
|
||||
|
@ -686,8 +708,8 @@ primary key (name)
|
|||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
|
||||
auto_log=True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n'
|
||||
'Partitions(replica): -80 80-c0 c0-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
|
||||
|
@ -703,37 +725,39 @@ primary key (name)
|
|||
tablet.Tablet.tablet_type_value['REPLICA'],
|
||||
False)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n'
|
||||
'Partitions(replica): -80 80-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
|
||||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
|
||||
auto_log=True)
|
||||
# After a forwards migration, queryservice should be disabled on source and enabled on destinations
|
||||
# After a forwards migration, queryservice should be disabled on
|
||||
# source and enabled on destinations
|
||||
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
|
||||
# Destination tablets would have query service disabled for other reasons than the migration,
|
||||
# so check the shard record instead of the tablets directly
|
||||
# Destination tablets would have query service disabled for other
|
||||
# reasons than the migration, so check the shard record instead of
|
||||
# the tablets directly
|
||||
utils.check_shard_query_services(self, destination_shards,
|
||||
tablet.Tablet.tablet_type_value['REPLICA'],
|
||||
True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -80 80-\n' +
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n' +
|
||||
'Partitions(master): -80 80-\n'
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n'
|
||||
'Partitions(replica): -80 80-c0 c0-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
|
||||
# reparent shard_2 to shard_2_replica1, then insert more data and
|
||||
# see it flow through still
|
||||
utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/80-c0',
|
||||
shard_2_replica1.tablet_alias])
|
||||
logging.debug("Inserting lots of data on source shard after reparenting")
|
||||
shard_2_replica1.tablet_alias])
|
||||
logging.debug('Inserting lots of data on source shard after reparenting')
|
||||
self._insert_lots(3000, base=2000)
|
||||
logging.debug("Checking 80 percent of data was sent fairly quickly")
|
||||
logging.debug('Checking 80 percent of data was sent fairly quickly')
|
||||
self._check_lots_timeout(3000, 80, 10, base=2000)
|
||||
|
||||
# use vtworker to compare the data again
|
||||
logging.debug("Running vtworker SplitDiff")
|
||||
logging.debug('Running vtworker SplitDiff')
|
||||
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', '--exclude_tables',
|
||||
'unrelated', 'test_keyspace/c0-'],
|
||||
auto_log=True)
|
||||
|
@ -747,11 +771,11 @@ primary key (name)
|
|||
monitor_thread_2.done = True
|
||||
insert_thread_1.done = True
|
||||
insert_thread_2.done = True
|
||||
logging.debug("DELAY 1: %s max_lag=%d avg_lag=%d",
|
||||
logging.debug('DELAY 1: %s max_lag=%d avg_lag=%d',
|
||||
monitor_thread_1.object_name,
|
||||
monitor_thread_1.max_lag,
|
||||
monitor_thread_1.lag_sum / monitor_thread_1.sample_count)
|
||||
logging.debug("DELAY 2: %s max_lag=%d avg_lag=%d",
|
||||
logging.debug('DELAY 2: %s max_lag=%d avg_lag=%d',
|
||||
monitor_thread_2.object_name,
|
||||
monitor_thread_2.max_lag,
|
||||
monitor_thread_2.lag_sum / monitor_thread_2.sample_count)
|
||||
|
@ -769,8 +793,8 @@ primary key (name)
|
|||
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
|
||||
auto_log=True)
|
||||
utils.check_srv_keyspace('test_nj', 'test_keyspace',
|
||||
'Partitions(master): -80 80-c0 c0-\n' +
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n' +
|
||||
'Partitions(master): -80 80-c0 c0-\n'
|
||||
'Partitions(rdonly): -80 80-c0 c0-\n'
|
||||
'Partitions(replica): -80 80-c0 c0-\n',
|
||||
keyspace_id_type=keyspace_id_type)
|
||||
utils.check_tablet_query_service(self, shard_1_master, False, True)
|
||||
|
@ -795,12 +819,17 @@ primary key (name)
|
|||
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
|
||||
|
||||
# rebuild the serving graph, all mentions of the old shards shoud be gone
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
|
||||
utils.run_vtctl(
|
||||
['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
|
||||
|
||||
# test RemoveShardCell
|
||||
utils.run_vtctl(['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True, expect_fail=True)
|
||||
utils.run_vtctl(['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
|
||||
utils.run_vtctl(['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
|
||||
utils.run_vtctl(
|
||||
['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True,
|
||||
expect_fail=True)
|
||||
utils.run_vtctl(
|
||||
['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
|
||||
utils.run_vtctl(
|
||||
['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
|
||||
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
|
||||
self.assertNotIn('cells', shard)
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
import warnings
|
||||
# Dropping a table inexplicably produces a warning despite
|
||||
# the "IF EXISTS" clause. Squelch these warnings.
|
||||
warnings.simplefilter("ignore")
|
||||
warnings.simplefilter('ignore')
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
@ -67,6 +67,7 @@ def setUpModule():
|
|||
tearDownModule()
|
||||
raise
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
|
@ -82,7 +83,9 @@ def tearDownModule():
|
|||
master_tablet.remove_tree()
|
||||
replica_tablet.remove_tree()
|
||||
|
||||
|
||||
class RowCacheInvalidator(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.perform_insert(400)
|
||||
|
||||
|
@ -99,8 +102,8 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
|
||||
def perform_insert(self, count):
|
||||
for i in xrange(count):
|
||||
self._exec_vt_txn("insert into vt_insert_test (msg) values ('test %s')" %
|
||||
i)
|
||||
self._exec_vt_txn(
|
||||
"insert into vt_insert_test (msg) values ('test %s')" % i)
|
||||
|
||||
def perform_delete(self):
|
||||
self._exec_vt_txn('delete from vt_insert_test')
|
||||
|
@ -117,10 +120,11 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
self._wait_for_replica()
|
||||
invalidations = self.replica_stats()['Totals']['Invalidations']
|
||||
invalidatorStats = self.replica_vars()
|
||||
logging.debug('Invalidations %d InvalidatorStats %s' %
|
||||
(invalidations,
|
||||
invalidatorStats['RowcacheInvalidatorPosition']))
|
||||
self.assertTrue(invalidations > 0, 'Invalidations are not flowing through.')
|
||||
logging.debug(
|
||||
'Invalidations %d InvalidatorStats %s',
|
||||
invalidations, invalidatorStats['RowcacheInvalidatorPosition'])
|
||||
self.assertTrue(
|
||||
invalidations > 0, 'Invalidations are not flowing through.')
|
||||
|
||||
res = replica_tablet.mquery('vt_test_keyspace',
|
||||
'select min(id) from vt_insert_test')
|
||||
|
@ -128,7 +132,7 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
'Cannot proceed, no rows in vt_insert_test')
|
||||
id = int(res[0][0])
|
||||
stats_dict = self.replica_stats()['vt_insert_test']
|
||||
logging.debug('vt_insert_test stats %s' % stats_dict)
|
||||
logging.debug('vt_insert_test stats %s', stats_dict)
|
||||
misses = stats_dict['Misses']
|
||||
hits = stats_dict['Hits']
|
||||
replica_tablet.execute('select * from vt_insert_test where id=:id',
|
||||
|
@ -141,7 +145,7 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
bindvars={'id': id})
|
||||
stats_dict = self.replica_stats()['vt_insert_test']
|
||||
self.assertEqual(stats_dict['Hits'] - hits, 1,
|
||||
"This should have hit the cache")
|
||||
'This should have hit the cache')
|
||||
|
||||
def _wait_for_value(self, expected_result):
|
||||
timeout = 10
|
||||
|
@ -150,20 +154,23 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
'select * from vt_insert_test where id = 1000000')
|
||||
if result == expected_result:
|
||||
return
|
||||
timeout = utils.wait_step('replica rowcache updated, got %s expected %s' %
|
||||
(str(result), str(expected_result)), timeout,
|
||||
sleep_time=0.1)
|
||||
timeout = utils.wait_step(
|
||||
'replica rowcache updated, got %s expected %s' %
|
||||
(str(result), str(expected_result)), timeout,
|
||||
sleep_time=0.1)
|
||||
|
||||
def test_outofband_statements(self):
|
||||
start = self.replica_vars()['InternalErrors'].get('Invalidation', 0)
|
||||
|
||||
# Test update statement
|
||||
self._exec_vt_txn("insert into vt_insert_test (id, msg) values (1000000, 'start')")
|
||||
self._exec_vt_txn(
|
||||
"insert into vt_insert_test (id, msg) values (1000000, 'start')")
|
||||
self._wait_for_replica()
|
||||
self._wait_for_value([['1000000', 'start']])
|
||||
utils.mysql_write_query(master_tablet.tablet_uid,
|
||||
'vt_test_keyspace',
|
||||
"update vt_insert_test set msg = 'foo' where id = 1000000")
|
||||
utils.mysql_write_query(
|
||||
master_tablet.tablet_uid,
|
||||
'vt_test_keyspace',
|
||||
"update vt_insert_test set msg = 'foo' where id = 1000000")
|
||||
self._wait_for_replica()
|
||||
self._wait_for_value([['1000000', 'foo']])
|
||||
end1 = self.replica_vars()['InternalErrors'].get('Invalidation', 0)
|
||||
|
@ -179,9 +186,10 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
self.assertEqual(end1, end2)
|
||||
|
||||
# Test insert statement
|
||||
utils.mysql_write_query(master_tablet.tablet_uid,
|
||||
'vt_test_keyspace',
|
||||
"insert into vt_insert_test (id, msg) values(1000000, 'bar')")
|
||||
utils.mysql_write_query(
|
||||
master_tablet.tablet_uid,
|
||||
'vt_test_keyspace',
|
||||
"insert into vt_insert_test (id, msg) values(1000000, 'bar')")
|
||||
self._wait_for_replica()
|
||||
self._wait_for_value([['1000000', 'bar']])
|
||||
end3 = self.replica_vars()['InternalErrors'].get('Invalidation', 0)
|
||||
|
@ -190,7 +198,7 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
# Test unrecognized statement
|
||||
utils.mysql_query(master_tablet.tablet_uid,
|
||||
'vt_test_keyspace',
|
||||
'truncate table vt_insert_test')
|
||||
'truncate table vt_insert_test')
|
||||
self._wait_for_replica()
|
||||
timeout = 10
|
||||
while True:
|
||||
|
@ -240,7 +248,7 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
|
||||
# check and display some stats
|
||||
invalidatorStats = self.replica_vars()
|
||||
logging.debug('invalidatorStats %s' %
|
||||
logging.debug('invalidatorStats %s',
|
||||
invalidatorStats['RowcacheInvalidatorPosition'])
|
||||
self.assertEqual(invalidatorStats['RowcacheInvalidatorState'], 'Running',
|
||||
'Row-cache invalidator should be enabled')
|
||||
|
@ -278,19 +286,26 @@ class RowCacheInvalidator(unittest.TestCase):
|
|||
invStats_after = self.replica_vars()
|
||||
if invStats_after['RowcacheInvalidatorState'] == 'Stopped':
|
||||
break
|
||||
timeout = utils.wait_step('RowcacheInvalidatorState, got %s expecting Stopped' % invStats_after['RowcacheInvalidatorState'], timeout, sleep_time=0.1)
|
||||
timeout = utils.wait_step(
|
||||
'RowcacheInvalidatorState, got %s expecting Stopped' %
|
||||
invStats_after['RowcacheInvalidatorState'], timeout, sleep_time=0.1)
|
||||
|
||||
# check all data is right
|
||||
inv_after = self.replica_stats()['Totals']['Invalidations']
|
||||
invStats_after = self.replica_vars()
|
||||
logging.debug('Tablet Replica->Spare\n\tBefore: Invalidations: %d InvalidatorStats %s\n\tAfter: Invalidations: %d InvalidatorStats %s' % (inv_before, invStats_before['RowcacheInvalidatorPosition'], inv_after, invStats_after['RowcacheInvalidatorPosition']))
|
||||
logging.debug(
|
||||
'Tablet Replica->Spare\n\tBefore: Invalidations: %d InvalidatorStats '
|
||||
'%s\n\tAfter: Invalidations: %d InvalidatorStats %s',
|
||||
inv_before, invStats_before['RowcacheInvalidatorPosition'],
|
||||
inv_after, invStats_after['RowcacheInvalidatorPosition'])
|
||||
self.assertEqual(inv_after, 0,
|
||||
'Row-cache invalid. should be disabled, no invalidations')
|
||||
self.assertEqual(invStats_after['RowcacheInvalidatorState'], 'Stopped',
|
||||
'Row-cache invalidator should be disabled')
|
||||
|
||||
# and restore the type
|
||||
utils.run_vtctl(['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
|
||||
utils.run_vtctl(
|
||||
['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
|
||||
|
||||
def _exec_vt_txn(self, query):
|
||||
master_tablet.execute(query, auto_log=False)
|
||||
|
|
|
@ -18,12 +18,13 @@ shard_1_replica1 = tablet.Tablet()
|
|||
shard_2_master = tablet.Tablet()
|
||||
shard_2_replica1 = tablet.Tablet()
|
||||
# shard_2 tablets are not used by all tests and not included by default.
|
||||
tablets = [shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly, shard_0_backup,
|
||||
shard_1_master, shard_1_replica1]
|
||||
tablets = [shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly,
|
||||
shard_0_backup, shard_1_master, shard_1_replica1]
|
||||
tablets_shard2 = [shard_2_master, shard_2_replica1]
|
||||
test_keyspace = 'test_keyspace'
|
||||
db_name = 'vt_' + test_keyspace
|
||||
|
||||
|
||||
def setUpModule():
|
||||
try:
|
||||
environment.topo_server().setup()
|
||||
|
@ -32,12 +33,12 @@ def setUpModule():
|
|||
|
||||
utils.run_vtctl(['CreateKeyspace', test_keyspace])
|
||||
|
||||
shard_0_master.init_tablet( 'master', test_keyspace, '0')
|
||||
shard_0_master.init_tablet('master', test_keyspace, '0')
|
||||
shard_0_replica1.init_tablet('replica', test_keyspace, '0')
|
||||
shard_0_replica2.init_tablet('replica', test_keyspace, '0')
|
||||
shard_0_rdonly.init_tablet( 'rdonly', test_keyspace, '0')
|
||||
shard_0_backup.init_tablet( 'backup', test_keyspace, '0')
|
||||
shard_1_master.init_tablet( 'master', test_keyspace, '1')
|
||||
shard_0_rdonly.init_tablet('rdonly', test_keyspace, '0')
|
||||
shard_0_backup.init_tablet('backup', test_keyspace, '0')
|
||||
shard_1_master.init_tablet('master', test_keyspace, '1')
|
||||
shard_1_replica1.init_tablet('replica', test_keyspace, '1')
|
||||
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', test_keyspace], auto_log=True)
|
||||
|
@ -77,15 +78,17 @@ def setUpModule():
|
|||
try:
|
||||
tearDownModule()
|
||||
except Exception as e:
|
||||
logging.exception("Tearing down a failed setUpModule() failed: %s", e)
|
||||
logging.exception('Tearing down a failed setUpModule() failed: %s', e)
|
||||
raise setup_exception
|
||||
|
||||
|
||||
def _init_mysql(tablets):
|
||||
setup_procs = []
|
||||
for t in tablets:
|
||||
setup_procs.append(t.init_mysql())
|
||||
utils.wait_procs(setup_procs)
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
|
@ -104,7 +107,9 @@ def tearDownModule():
|
|||
for t in tablets:
|
||||
t.remove_tree()
|
||||
|
||||
|
||||
class TestSchema(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
for t in tablets:
|
||||
t.create_db(db_name)
|
||||
|
@ -114,7 +119,8 @@ class TestSchema(unittest.TestCase):
|
|||
# databases without restarting the tablets.
|
||||
for t in tablets:
|
||||
t.clean_dbs()
|
||||
# Tablets from shard 2 are always started during the test. Shut them down now.
|
||||
# Tablets from shard 2 are always started during the test. Shut
|
||||
# them down now.
|
||||
if shard_2_master in tablets:
|
||||
for t in tablets_shard2:
|
||||
t.scrap(force=True, skip_rebuild=True)
|
||||
|
@ -130,9 +136,11 @@ class TestSchema(unittest.TestCase):
|
|||
(tablet.tablet_alias, expectedCount, str(tables)))
|
||||
|
||||
def _check_db_not_created(self, tablet):
|
||||
# Broadly catch all exceptions, since the exception being raised is internal to MySQL.
|
||||
# We're strictly checking the error message though, so should be fine.
|
||||
with self.assertRaisesRegexp(Exception, '(1049, "Unknown database \'%s\'")' % db_name):
|
||||
# Broadly catch all exceptions, since the exception being raised
|
||||
# is internal to MySQL. We're strictly checking the error message
|
||||
# though, so should be fine.
|
||||
with self.assertRaisesRegexp(
|
||||
Exception, '(1049, "Unknown database \'%s\'")' % db_name):
|
||||
tablet.mquery(db_name, 'show tables')
|
||||
|
||||
def _apply_schema(self, keyspace, sql):
|
||||
|
@ -145,26 +153,27 @@ class TestSchema(unittest.TestCase):
|
|||
tablet_alias])
|
||||
|
||||
def _create_test_table_sql(self, table):
|
||||
return 'CREATE TABLE %s ( \
|
||||
`id` BIGINT(20) not NULL, \
|
||||
`msg` varchar(64), \
|
||||
PRIMARY KEY (`id`) \
|
||||
) ENGINE=InnoDB' % table
|
||||
return (
|
||||
'CREATE TABLE %s (\n'
|
||||
'`id` BIGINT(20) not NULL,\n'
|
||||
'`msg` varchar(64),\n'
|
||||
'PRIMARY KEY (`id`)\n'
|
||||
') ENGINE=InnoDB') % table
|
||||
|
||||
def _alter_test_table_sql(self, table, index_column_name):
|
||||
return 'ALTER TABLE %s \
|
||||
ADD COLUMN new_id bigint(20) NOT NULL AUTO_INCREMENT FIRST, \
|
||||
DROP PRIMARY KEY, \
|
||||
ADD PRIMARY KEY (new_id), \
|
||||
ADD INDEX idx_column(%s) \
|
||||
' % (table, index_column_name)
|
||||
return (
|
||||
'ALTER TABLE %s\n'
|
||||
'ADD COLUMN new_id bigint(20) NOT NULL AUTO_INCREMENT FIRST,\n'
|
||||
'DROP PRIMARY KEY,\n'
|
||||
'ADD PRIMARY KEY (new_id),\n'
|
||||
'ADD INDEX idx_column(%s)\n') % (table, index_column_name)
|
||||
|
||||
def _apply_initial_schema(self):
|
||||
schema_changes = ';'.join([
|
||||
self._create_test_table_sql('vt_select_test01'),
|
||||
self._create_test_table_sql('vt_select_test02'),
|
||||
self._create_test_table_sql('vt_select_test03'),
|
||||
self._create_test_table_sql('vt_select_test04')])
|
||||
self._create_test_table_sql('vt_select_test01'),
|
||||
self._create_test_table_sql('vt_select_test02'),
|
||||
self._create_test_table_sql('vt_select_test03'),
|
||||
self._create_test_table_sql('vt_select_test04')])
|
||||
|
||||
# apply schema changes to the test keyspace
|
||||
self._apply_schema(test_keyspace, schema_changes)
|
||||
|
@ -183,7 +192,8 @@ class TestSchema(unittest.TestCase):
|
|||
def test_schema_changes(self):
|
||||
self._apply_initial_schema()
|
||||
|
||||
self._apply_schema(test_keyspace, self._alter_test_table_sql('vt_select_test03', 'msg'))
|
||||
self._apply_schema(
|
||||
test_keyspace, self._alter_test_table_sql('vt_select_test03', 'msg'))
|
||||
|
||||
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
|
||||
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
|
||||
|
@ -193,18 +203,19 @@ class TestSchema(unittest.TestCase):
|
|||
|
||||
# test schema changes
|
||||
os.makedirs(os.path.join(utils.vtctld.schema_change_dir, test_keyspace))
|
||||
input_path = os.path.join(utils.vtctld.schema_change_dir, test_keyspace, "input")
|
||||
input_path = os.path.join(
|
||||
utils.vtctld.schema_change_dir, test_keyspace, 'input')
|
||||
os.makedirs(input_path)
|
||||
sql_path = os.path.join(input_path, "create_test_table_x.sql")
|
||||
sql_path = os.path.join(input_path, 'create_test_table_x.sql')
|
||||
with open(sql_path, 'w') as handler:
|
||||
handler.write("create table test_table_x (id int)")
|
||||
handler.write('create table test_table_x (id int)')
|
||||
|
||||
timeout = 10
|
||||
# wait until this sql file being consumed by autoschema
|
||||
while os.path.isfile(sql_path):
|
||||
timeout = utils.wait_step('waiting for vtctld to pick up schema changes',
|
||||
timeout,
|
||||
sleep_time=0.2)
|
||||
timeout = utils.wait_step(
|
||||
'waiting for vtctld to pick up schema changes',
|
||||
timeout, sleep_time=0.2)
|
||||
|
||||
# check number of tables
|
||||
self._check_tables(shard_0_master, 5)
|
||||
|
@ -217,7 +228,7 @@ class TestSchema(unittest.TestCase):
|
|||
# Include shard2 tablets for tearDown.
|
||||
tablets.extend(tablets_shard2)
|
||||
|
||||
shard_2_master.init_tablet( 'master', 'test_keyspace', '2')
|
||||
shard_2_master.init_tablet('master', 'test_keyspace', '2')
|
||||
shard_2_replica1.init_tablet('replica', 'test_keyspace', '2')
|
||||
|
||||
# We intentionally don't want to create a db on these tablets.
|
||||
|
@ -244,8 +255,8 @@ class TestSchema(unittest.TestCase):
|
|||
|
||||
self._setUp_tablets_shard_2()
|
||||
|
||||
# CopySchemaShard is responsible for creating the db; one shouldn't exist before
|
||||
# the command is run.
|
||||
# CopySchemaShard is responsible for creating the db; one
|
||||
# shouldn't exist before the command is run.
|
||||
self._check_db_not_created(shard_2_master)
|
||||
self._check_db_not_created(shard_2_replica1)
|
||||
|
||||
|
|
|
@ -19,11 +19,13 @@ shard_0_slave = tablet.Tablet()
|
|||
|
||||
cert_dir = environment.tmproot + "/certs"
|
||||
|
||||
|
||||
def openssl(cmd):
|
||||
result = subprocess.call(["openssl"] + cmd, stderr=utils.devnull)
|
||||
if result != 0:
|
||||
raise utils.TestError("OpenSSL command failed: %s" % " ".join(cmd))
|
||||
|
||||
|
||||
def setUpModule():
|
||||
try:
|
||||
environment.topo_server().setup()
|
||||
|
@ -36,7 +38,7 @@ def setUpModule():
|
|||
ca_cert = cert_dir + "/ca-cert.pem"
|
||||
openssl(["genrsa", "-out", cert_dir + "/ca-key.pem"])
|
||||
ca_config = cert_dir + "/ca.config"
|
||||
with open(ca_config, 'w') as fd:
|
||||
with open(ca_config, "w") as fd:
|
||||
fd.write("""
|
||||
[ req ]
|
||||
default_bits = 1024
|
||||
|
@ -66,7 +68,7 @@ def setUpModule():
|
|||
server_cert = cert_dir + "/server-cert.pem"
|
||||
server_req = cert_dir + "/server-req.pem"
|
||||
server_config = cert_dir + "/server.config"
|
||||
with open(server_config, 'w') as fd:
|
||||
with open(server_config, "w") as fd:
|
||||
fd.write("""
|
||||
[ req ]
|
||||
default_bits = 1024
|
||||
|
@ -103,7 +105,7 @@ def setUpModule():
|
|||
client_cert = cert_dir + "/client-cert.pem"
|
||||
client_req = cert_dir + "/client-req.pem"
|
||||
client_config = cert_dir + "/client.config"
|
||||
with open(client_config, 'w') as fd:
|
||||
with open(client_config, "w") as fd:
|
||||
fd.write("""
|
||||
[ req ]
|
||||
default_bits = 1024
|
||||
|
@ -148,20 +150,21 @@ def setUpModule():
|
|||
]
|
||||
utils.wait_procs(setup_procs)
|
||||
|
||||
utils.run_vtctl('CreateKeyspace test_keyspace')
|
||||
utils.run_vtctl("CreateKeyspace test_keyspace")
|
||||
|
||||
shard_0_master.init_tablet('master', 'test_keyspace', '0')
|
||||
shard_0_slave.init_tablet('replica', 'test_keyspace', '0')
|
||||
shard_0_master.init_tablet("master", "test_keyspace", "0")
|
||||
shard_0_slave.init_tablet("replica", "test_keyspace", "0")
|
||||
|
||||
utils.run_vtctl('RebuildKeyspaceGraph test_keyspace', auto_log=True)
|
||||
utils.run_vtctl("RebuildKeyspaceGraph test_keyspace", auto_log=True)
|
||||
|
||||
# create databases so vttablet can start behaving normally
|
||||
shard_0_master.create_db('vt_test_keyspace')
|
||||
shard_0_slave.create_db('vt_test_keyspace')
|
||||
shard_0_master.create_db("vt_test_keyspace")
|
||||
shard_0_slave.create_db("vt_test_keyspace")
|
||||
except:
|
||||
tearDownModule()
|
||||
raise
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
|
@ -182,6 +185,7 @@ def tearDownModule():
|
|||
shard_0_master.remove_tree()
|
||||
shard_0_slave.remove_tree()
|
||||
|
||||
|
||||
class TestSecure(unittest.TestCase):
|
||||
"""This test makes sure that we can use SSL replication with Vitess.
|
||||
"""
|
||||
|
@ -190,17 +194,17 @@ class TestSecure(unittest.TestCase):
|
|||
# start the tablets
|
||||
shard_0_master.start_vttablet()
|
||||
shard_0_slave.start_vttablet(repl_extra_flags={
|
||||
'flags': "2048",
|
||||
'ssl-ca': cert_dir + "/ca-cert.pem",
|
||||
'ssl-cert': cert_dir + "/client-cert.pem",
|
||||
'ssl-key': cert_dir + "/client-key.pem",
|
||||
"flags": "2048",
|
||||
"ssl-ca": cert_dir + "/ca-cert.pem",
|
||||
"ssl-cert": cert_dir + "/client-cert.pem",
|
||||
"ssl-key": cert_dir + "/client-key.pem",
|
||||
})
|
||||
|
||||
# Reparent using SSL (this will also check replication works)
|
||||
for t in [shard_0_master, shard_0_slave]:
|
||||
t.reset_replication()
|
||||
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
|
||||
utils.run_vtctl(["InitShardMaster", "test_keyspace/0",
|
||||
shard_0_master.tablet_alias], auto_log=True)
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
utils.main()
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
"""Tests a sharded setup works and routes queries correctly.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import unittest
|
||||
|
||||
import environment
|
||||
|
@ -16,6 +15,7 @@ shard_0_replica = tablet.Tablet()
|
|||
shard_1_master = tablet.Tablet()
|
||||
shard_1_replica = tablet.Tablet()
|
||||
|
||||
|
||||
def setUpModule():
|
||||
try:
|
||||
environment.topo_server().setup()
|
||||
|
@ -31,6 +31,7 @@ def setUpModule():
|
|||
tearDownModule()
|
||||
raise
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
|
@ -67,13 +68,14 @@ id bigint not null,
|
|||
primary key (id)
|
||||
) Engine=InnoDB'''
|
||||
|
||||
|
||||
class TestSharded(unittest.TestCase):
|
||||
|
||||
def test_sharding(self):
|
||||
|
||||
shard_0_master.init_tablet( 'master', 'test_keyspace', '-80')
|
||||
shard_0_master.init_tablet('master', 'test_keyspace', '-80')
|
||||
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
|
||||
shard_1_master.init_tablet( 'master', 'test_keyspace', '80-')
|
||||
shard_1_master.init_tablet('master', 'test_keyspace', '80-')
|
||||
shard_1_replica.init_tablet('replica', 'test_keyspace', '80-')
|
||||
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
|
||||
|
@ -96,10 +98,12 @@ class TestSharded(unittest.TestCase):
|
|||
create_vt_select_test.replace('\n', ''), write=True)
|
||||
|
||||
# apply the schema on the second shard.
|
||||
shard_1_master.mquery('vt_test_keyspace',
|
||||
create_vt_select_test_reverse.replace('\n', ''), write=True)
|
||||
shard_1_replica.mquery('vt_test_keyspace',
|
||||
create_vt_select_test_reverse.replace('\n', ''), write=True)
|
||||
shard_1_master.mquery(
|
||||
'vt_test_keyspace',
|
||||
create_vt_select_test_reverse.replace('\n', ''), write=True)
|
||||
shard_1_replica.mquery(
|
||||
'vt_test_keyspace',
|
||||
create_vt_select_test_reverse.replace('\n', ''), write=True)
|
||||
|
||||
for t in [shard_0_master, shard_0_replica, shard_1_master, shard_1_replica]:
|
||||
utils.run_vtctl(['ReloadSchema', t.tablet_alias])
|
||||
|
@ -118,16 +122,23 @@ class TestSharded(unittest.TestCase):
|
|||
# FIXME(alainjobart) these values don't match the shard map
|
||||
utils.run_vtctl(['SetReadWrite', shard_0_master.tablet_alias])
|
||||
utils.run_vtctl(['SetReadWrite', shard_1_master.tablet_alias])
|
||||
shard_0_master.mquery('vt_test_keyspace', "insert into vt_select_test (id, msg) values (1, 'test 1')", write=True)
|
||||
shard_1_master.mquery('vt_test_keyspace', "insert into vt_select_test (id, msg) values (10, 'test 10')", write=True)
|
||||
shard_0_master.mquery(
|
||||
'vt_test_keyspace',
|
||||
"insert into vt_select_test (id, msg) values (1, 'test 1')",
|
||||
write=True)
|
||||
shard_1_master.mquery(
|
||||
'vt_test_keyspace',
|
||||
"insert into vt_select_test (id, msg) values (10, 'test 10')",
|
||||
write=True)
|
||||
|
||||
utils.validate_topology(ping_tablets=True)
|
||||
|
||||
utils.pause('Before the sql scatter query')
|
||||
|
||||
# make sure the '1' value was written on first shard
|
||||
rows = shard_0_master.mquery('vt_test_keyspace', 'select id, msg from vt_select_test order by id')
|
||||
self.assertEqual(rows, ((1, 'test 1'), ),
|
||||
rows = shard_0_master.mquery(
|
||||
'vt_test_keyspace', 'select id, msg from vt_select_test order by id')
|
||||
self.assertEqual(rows, ((1, 'test 1'),),
|
||||
'wrong mysql_query output: %s' % str(rows))
|
||||
|
||||
utils.pause('After db writes')
|
||||
|
@ -138,8 +149,10 @@ class TestSharded(unittest.TestCase):
|
|||
utils.run_vtctl(['ValidateSchemaShard', 'test_keyspace/80-'])
|
||||
out, err = utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'],
|
||||
trap_output=True, raise_on_error=False)
|
||||
if 'test_nj-0000062344 and test_nj-0000062346 disagree on schema for table vt_select_test:\nCREATE TABLE' not in err or \
|
||||
'test_nj-0000062344 and test_nj-0000062347 disagree on schema for table vt_select_test:\nCREATE TABLE' not in err:
|
||||
if ('test_nj-0000062344 and test_nj-0000062346 disagree on schema '
|
||||
'for table vt_select_test:\nCREATE TABLE' not in err or
|
||||
'test_nj-0000062344 and test_nj-0000062347 disagree on schema '
|
||||
'for table vt_select_test:\nCREATE TABLE' not in err):
|
||||
self.fail('wrong ValidateSchemaKeyspace output: ' + err)
|
||||
|
||||
# validate versions
|
||||
|
@ -155,14 +168,18 @@ class TestSharded(unittest.TestCase):
|
|||
auto_log=True)
|
||||
|
||||
if environment.topo_server().flavor() == 'zookeeper':
|
||||
# and create zkns on this complex keyspace, make sure a few files are created
|
||||
# and create zkns on this complex keyspace, make sure a few
|
||||
# files are created
|
||||
utils.run_vtctl(['ExportZknsForKeyspace', 'test_keyspace'])
|
||||
out, err = utils.run(environment.binary_argstr('zk')+' ls -R /zk/test_nj/zk?s/vt/test_keysp*', trap_output=True)
|
||||
out, err = utils.run(
|
||||
environment.binary_argstr('zk') +
|
||||
' ls -R /zk/test_nj/zk?s/vt/test_keysp*', trap_output=True)
|
||||
lines = out.splitlines()
|
||||
for base in ['-80', '80-']:
|
||||
for db_type in ['master', 'replica']:
|
||||
for sub_path in ['', '.vdns', '/0', '/vt.vdns']:
|
||||
expected = '/zk/test_nj/zkns/vt/test_keyspace/' + base + '/' + db_type + sub_path
|
||||
expected = ('/zk/test_nj/zkns/vt/test_keyspace/' + base + '/' +
|
||||
db_type + sub_path)
|
||||
if expected not in lines:
|
||||
self.fail('missing zkns part:\n%s\nin:%s' %(expected, out))
|
||||
|
||||
|
@ -171,10 +188,10 @@ class TestSharded(unittest.TestCase):
|
|||
sql = 'select id, msg from vt_select_test order by id'
|
||||
|
||||
qr = shard_0_master.execute(sql)
|
||||
self.assertEqual(qr['Rows'], [['1', 'test 1'], ])
|
||||
self.assertEqual(qr['Rows'], [['1', 'test 1'],])
|
||||
|
||||
qr = shard_1_master.execute(sql)
|
||||
self.assertEqual(qr['Rows'], [['10', 'test 10'], ])
|
||||
self.assertEqual(qr['Rows'], [['10', 'test 10'],])
|
||||
|
||||
_, stderr = utils.run_vtctl(['VtTabletExecute',
|
||||
'-keyspace', 'test_keyspace',
|
||||
|
|
198
test/tablet.py
198
test/tablet.py
|
@ -7,7 +7,7 @@ import time
|
|||
import urllib2
|
||||
import warnings
|
||||
# Dropping a table inexplicably produces a warning despite
|
||||
# the "IF EXISTS" clause. Squelch these warnings.
|
||||
# the 'IF EXISTS' clause. Squelch these warnings.
|
||||
warnings.simplefilter('ignore')
|
||||
|
||||
import MySQLdb
|
||||
|
@ -26,13 +26,15 @@ tablet_cell_map = {
|
|||
31981: 'ny',
|
||||
}
|
||||
|
||||
|
||||
def get_backup_storage_flags():
|
||||
return ['-backup_storage_implementation', 'file',
|
||||
'-file_backup_storage_root',
|
||||
os.path.join(environment.tmproot, 'backupstorage')]
|
||||
|
||||
|
||||
def get_all_extra_my_cnf(extra_my_cnf):
|
||||
all_extra_my_cnf = [environment.vttop + "/config/mycnf/default-fast.cnf"]
|
||||
all_extra_my_cnf = [environment.vttop + '/config/mycnf/default-fast.cnf']
|
||||
flavor_my_cnf = mysql_flavor().extra_my_cnf()
|
||||
if flavor_my_cnf:
|
||||
all_extra_my_cnf.append(flavor_my_cnf)
|
||||
|
@ -47,7 +49,8 @@ class Tablet(object):
|
|||
To use it for vttablet, you need to use init_tablet and/or
|
||||
start_vttablet. For vtocc, you can just call start_vtocc.
|
||||
If you use it to start as vtocc, many of the support functions
|
||||
that are meant for vttablet will not work."""
|
||||
that are meant for vttablet will not work.
|
||||
"""
|
||||
default_uid = 62344
|
||||
seq = 0
|
||||
tablets_running = 0
|
||||
|
@ -72,20 +75,20 @@ class Tablet(object):
|
|||
|
||||
# this will eventually be coming from the proto3
|
||||
tablet_type_value = {
|
||||
"UNKNOWN": 0,
|
||||
"IDLE": 1,
|
||||
"MASTER": 2,
|
||||
"REPLICA": 3,
|
||||
"RDONLY": 4,
|
||||
"BATCH": 4,
|
||||
"SPARE": 5,
|
||||
"EXPERIMENTAL": 6,
|
||||
"SCHEMA_UPGRADE": 7,
|
||||
"BACKUP": 8,
|
||||
"RESTORE": 9,
|
||||
"WORKER": 10,
|
||||
"SCRAP": 11,
|
||||
}
|
||||
'UNKNOWN': 0,
|
||||
'IDLE': 1,
|
||||
'MASTER': 2,
|
||||
'REPLICA': 3,
|
||||
'RDONLY': 4,
|
||||
'BATCH': 4,
|
||||
'SPARE': 5,
|
||||
'EXPERIMENTAL': 6,
|
||||
'SCHEMA_UPGRADE': 7,
|
||||
'BACKUP': 8,
|
||||
'RESTORE': 9,
|
||||
'WORKER': 10,
|
||||
'SCRAP': 11,
|
||||
}
|
||||
|
||||
def __init__(self, tablet_uid=None, port=None, mysql_port=None, cell=None,
|
||||
use_mysqlctld=False):
|
||||
|
@ -114,9 +117,9 @@ class Tablet(object):
|
|||
def update_stream_python_endpoint(self):
|
||||
protocol = protocols_flavor().binlog_player_python_protocol()
|
||||
port = self.port
|
||||
if protocol == "gorpc":
|
||||
if protocol == 'gorpc':
|
||||
from vtdb import gorpc_update_stream
|
||||
elif protocol == "grpc":
|
||||
elif protocol == 'grpc':
|
||||
# import the grpc update stream client implementation, change the port
|
||||
from vtdb import grpc_update_stream
|
||||
port = self.grpc_port
|
||||
|
@ -126,12 +129,13 @@ class Tablet(object):
|
|||
extra_env = {}
|
||||
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
|
||||
if all_extra_my_cnf:
|
||||
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
|
||||
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
|
||||
args = environment.binary_args('mysqlctl') + [
|
||||
'-log_dir', environment.vtlogroot,
|
||||
'-tablet_uid', str(self.tablet_uid)]
|
||||
if self.use_mysqlctld:
|
||||
args.extend(['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
|
||||
args.extend(
|
||||
['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
|
||||
if with_ports:
|
||||
args.extend(['-port', str(self.port),
|
||||
'-mysql_port', str(self.mysql_port)])
|
||||
|
@ -145,7 +149,7 @@ class Tablet(object):
|
|||
extra_env = {}
|
||||
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
|
||||
if all_extra_my_cnf:
|
||||
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
|
||||
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
|
||||
args = environment.binary_args('mysqlctld') + [
|
||||
'-log_dir', environment.vtlogroot,
|
||||
'-tablet_uid', str(self.tablet_uid),
|
||||
|
@ -203,7 +207,10 @@ class Tablet(object):
|
|||
return conn, MySQLdb.cursors.DictCursor(conn)
|
||||
|
||||
# Query the MySQL instance directly
|
||||
def mquery(self, dbname, query, write=False, user='vt_dba', conn_params={}):
|
||||
def mquery(
|
||||
self, dbname, query, write=False, user='vt_dba', conn_params=None):
|
||||
if conn_params is None:
|
||||
conn_params = {}
|
||||
conn, cursor = self.connect(dbname, user=user, **conn_params)
|
||||
if write:
|
||||
conn.begin()
|
||||
|
@ -211,7 +218,7 @@ class Tablet(object):
|
|||
query = [query]
|
||||
|
||||
for q in query:
|
||||
# logging.debug("mysql(%s,%s): %s", self.tablet_uid, dbname, q)
|
||||
# logging.debug('mysql(%s,%s): %s', self.tablet_uid, dbname, q)
|
||||
cursor.execute(q)
|
||||
|
||||
if write:
|
||||
|
@ -342,7 +349,8 @@ class Tablet(object):
|
|||
if start:
|
||||
if not wait_for_start:
|
||||
expected_state = None
|
||||
elif tablet_type == 'master' or tablet_type == 'replica' or tablet_type == 'rdonly' or tablet_type == 'batch':
|
||||
elif (tablet_type == 'master' or tablet_type == 'replica' or
|
||||
tablet_type == 'rdonly' or tablet_type == 'batch'):
|
||||
expected_state = 'SERVING'
|
||||
else:
|
||||
expected_state = 'NOT_SERVING'
|
||||
|
@ -360,21 +368,23 @@ class Tablet(object):
|
|||
return '%s/vt_%010d' % (environment.vtdataroot, self.tablet_uid)
|
||||
|
||||
def grpc_enabled(self):
|
||||
return protocols_flavor().tabletconn_protocol() == 'grpc' or \
|
||||
protocols_flavor().tablet_manager_protocol() == 'grpc' or \
|
||||
protocols_flavor().binlog_player_protocol() == 'grpc'
|
||||
return (
|
||||
protocols_flavor().tabletconn_protocol() == 'grpc' or
|
||||
protocols_flavor().tablet_manager_protocol() == 'grpc' or
|
||||
protocols_flavor().binlog_player_protocol() == 'grpc')
|
||||
|
||||
def flush(self):
|
||||
utils.curl('http://localhost:%s%s' %
|
||||
(self.port, environment.flush_logs_url),
|
||||
stderr=utils.devnull, stdout=utils.devnull)
|
||||
|
||||
def _start_prog(self, binary, port=None, auth=False, memcache=False,
|
||||
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
|
||||
schema_override=None,
|
||||
repl_extra_flags={}, table_acl_config=None,
|
||||
lameduck_period=None, security_policy=None,
|
||||
extra_args=None, extra_env=None):
|
||||
def _start_prog(
|
||||
self, binary, port=None, auth=False, memcache=False,
|
||||
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
|
||||
schema_override=None,
|
||||
repl_extra_flags={}, table_acl_config=None,
|
||||
lameduck_period=None, security_policy=None,
|
||||
extra_args=None, extra_env=None):
|
||||
environment.prog_compile(binary)
|
||||
args = environment.binary_args(binary)
|
||||
args.extend(['-port', '%s' % (port or self.port),
|
||||
|
@ -408,7 +418,7 @@ class Tablet(object):
|
|||
args.extend(['-queryserver-config-strict-table-acl'])
|
||||
|
||||
if protocols_flavor().service_map():
|
||||
args.extend(['-service_map', ",".join(protocols_flavor().service_map())])
|
||||
args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
|
||||
if self.grpc_enabled():
|
||||
args.extend(['-grpc_port', str(self.grpc_port)])
|
||||
if lameduck_period:
|
||||
|
@ -419,15 +429,21 @@ class Tablet(object):
|
|||
args.extend(extra_args)
|
||||
|
||||
args.extend(['-enable-autocommit'])
|
||||
stderr_fd = open(os.path.join(environment.vtlogroot, '%s-%d.stderr' % (binary, self.tablet_uid)), 'w')
|
||||
stderr_fd = open(
|
||||
os.path.join(environment.vtlogroot, '%s-%d.stderr' %
|
||||
(binary, self.tablet_uid)), 'w')
|
||||
# increment count only the first time
|
||||
if not self.proc:
|
||||
Tablet.tablets_running += 1
|
||||
self.proc = utils.run_bg(args, stderr=stderr_fd, extra_env=extra_env)
|
||||
|
||||
log_message = "Started vttablet: %s (%s) with pid: %s - Log files: %s/vttablet.*.{INFO,WARNING,ERROR,FATAL}.*.%s" % \
|
||||
(self.tablet_uid, self.tablet_alias, self.proc.pid, environment.vtlogroot, self.proc.pid)
|
||||
# This may race with the stderr output from the process (though that's usually empty).
|
||||
log_message = (
|
||||
'Started vttablet: %s (%s) with pid: %s - Log files: '
|
||||
'%s/vttablet.*.{INFO,WARNING,ERROR,FATAL}.*.%s' %
|
||||
(self.tablet_uid, self.tablet_alias, self.proc.pid,
|
||||
environment.vtlogroot, self.proc.pid))
|
||||
# This may race with the stderr output from the process (though
|
||||
# that's usually empty).
|
||||
stderr_fd.write(log_message + '\n')
|
||||
stderr_fd.close()
|
||||
logging.debug(log_message)
|
||||
|
@ -441,22 +457,26 @@ class Tablet(object):
|
|||
|
||||
return self.proc
|
||||
|
||||
def start_vttablet(self, port=None, auth=False, memcache=False,
|
||||
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
|
||||
schema_override=None,
|
||||
repl_extra_flags={}, table_acl_config=None,
|
||||
lameduck_period=None, security_policy=None,
|
||||
target_tablet_type=None, full_mycnf_args=False,
|
||||
extra_args=None, extra_env=None, include_mysql_port=True,
|
||||
init_tablet_type=None, init_keyspace=None,
|
||||
init_shard=None, init_db_name_override=None,
|
||||
supports_backups=False):
|
||||
def start_vttablet(
|
||||
self, port=None, auth=False, memcache=False,
|
||||
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
|
||||
schema_override=None,
|
||||
repl_extra_flags=None, table_acl_config=None,
|
||||
lameduck_period=None, security_policy=None,
|
||||
target_tablet_type=None, full_mycnf_args=False,
|
||||
extra_args=None, extra_env=None, include_mysql_port=True,
|
||||
init_tablet_type=None, init_keyspace=None,
|
||||
init_shard=None, init_db_name_override=None,
|
||||
supports_backups=False):
|
||||
"""Starts a vttablet process, and returns it.
|
||||
|
||||
The process is also saved in self.proc, so it's easy to kill as well.
|
||||
"""
|
||||
if repl_extra_flags is None:
|
||||
repl_extra_flags = {}
|
||||
args = []
|
||||
# Use "localhost" as hostname because Travis CI worker hostnames are too long for MySQL replication.
|
||||
# Use 'localhost' as hostname because Travis CI worker hostnames
|
||||
# are too long for MySQL replication.
|
||||
args.extend(['-tablet_hostname', 'localhost'])
|
||||
args.extend(['-tablet-path', self.tablet_alias])
|
||||
args.extend(environment.topo_server().flags())
|
||||
|
@ -466,7 +486,8 @@ class Tablet(object):
|
|||
protocols_flavor().tablet_manager_protocol()])
|
||||
args.extend(['-pid_file', os.path.join(self.tablet_dir, 'vttablet.pid')])
|
||||
if self.use_mysqlctld:
|
||||
args.extend(['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
|
||||
args.extend(
|
||||
['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
|
||||
|
||||
if full_mycnf_args:
|
||||
# this flag is used to specify all the mycnf_ flags, to make
|
||||
|
@ -489,8 +510,8 @@ class Tablet(object):
|
|||
'-mycnf_relay_log_info_path', os.path.join(self.tablet_dir,
|
||||
'relay-logs',
|
||||
'relay-log.info'),
|
||||
'-mycnf_bin_log_path', os.path.join(self.tablet_dir, 'bin-logs',
|
||||
'vt-%010d-bin' % self.tablet_uid),
|
||||
'-mycnf_bin_log_path', os.path.join(
|
||||
self.tablet_dir, 'bin-logs', 'vt-%010d-bin' % self.tablet_uid),
|
||||
'-mycnf_master_info_file', os.path.join(self.tablet_dir,
|
||||
'master.info'),
|
||||
'-mycnf_pid_file', os.path.join(self.tablet_dir, 'mysql.pid'),
|
||||
|
@ -529,15 +550,16 @@ class Tablet(object):
|
|||
if extra_args:
|
||||
args.extend(extra_args)
|
||||
|
||||
return self._start_prog(binary='vttablet', port=port, auth=auth,
|
||||
memcache=memcache, wait_for_state=wait_for_state,
|
||||
filecustomrules=filecustomrules,
|
||||
zkcustomrules=zkcustomrules,
|
||||
schema_override=schema_override,
|
||||
repl_extra_flags=repl_extra_flags,
|
||||
table_acl_config=table_acl_config,
|
||||
lameduck_period=lameduck_period, extra_args=args,
|
||||
security_policy=security_policy, extra_env=extra_env)
|
||||
return self._start_prog(
|
||||
binary='vttablet', port=port, auth=auth,
|
||||
memcache=memcache, wait_for_state=wait_for_state,
|
||||
filecustomrules=filecustomrules,
|
||||
zkcustomrules=zkcustomrules,
|
||||
schema_override=schema_override,
|
||||
repl_extra_flags=repl_extra_flags,
|
||||
table_acl_config=table_acl_config,
|
||||
lameduck_period=lameduck_period, extra_args=args,
|
||||
security_policy=security_policy, extra_env=extra_env)
|
||||
|
||||
def start_vtocc(self, port=None, auth=False, memcache=False,
|
||||
wait_for_state='SERVING', filecustomrules=None,
|
||||
|
@ -554,11 +576,11 @@ class Tablet(object):
|
|||
self.shard = shard
|
||||
self.dbname = 'vt_' + (self.keyspace or 'database')
|
||||
args = []
|
||||
args.extend(["-db-config-app-unixsocket", self.tablet_dir + '/mysql.sock'])
|
||||
args.extend(["-db-config-dba-unixsocket", self.tablet_dir + '/mysql.sock'])
|
||||
args.extend(["-db-config-app-keyspace", keyspace])
|
||||
args.extend(["-db-config-app-shard", shard])
|
||||
args.extend(["-binlog-path", "foo"])
|
||||
args.extend(['-db-config-app-unixsocket', self.tablet_dir + '/mysql.sock'])
|
||||
args.extend(['-db-config-dba-unixsocket', self.tablet_dir + '/mysql.sock'])
|
||||
args.extend(['-db-config-app-keyspace', keyspace])
|
||||
args.extend(['-db-config-app-shard', shard])
|
||||
args.extend(['-binlog-path', 'foo'])
|
||||
|
||||
if extra_args:
|
||||
args.extend(extra_args)
|
||||
|
@ -572,17 +594,17 @@ class Tablet(object):
|
|||
lameduck_period=lameduck_period, extra_args=args,
|
||||
security_policy=security_policy)
|
||||
|
||||
|
||||
def wait_for_vttablet_state(self, expected, timeout=60.0, port=None):
|
||||
self.wait_for_vtocc_state(expected, timeout=timeout, port=port)
|
||||
|
||||
def wait_for_vtocc_state(self, expected, timeout=60.0, port=None):
|
||||
while True:
|
||||
v = utils.get_vars(port or self.port)
|
||||
last_seen_state = "?"
|
||||
last_seen_state = '?'
|
||||
if v == None:
|
||||
if self.proc.poll() is not None:
|
||||
raise utils.TestError('vttablet died while test waiting for state %s' % expected)
|
||||
raise utils.TestError(
|
||||
'vttablet died while test waiting for state %s' % expected)
|
||||
logging.debug(
|
||||
' vttablet %s not answering at /debug/vars, waiting...',
|
||||
self.tablet_alias)
|
||||
|
@ -600,9 +622,10 @@ class Tablet(object):
|
|||
expected)
|
||||
else:
|
||||
break
|
||||
timeout = utils.wait_step('waiting for state %s (last seen state: %s)' % (expected, last_seen_state),
|
||||
timeout,
|
||||
sleep_time=0.1)
|
||||
timeout = utils.wait_step(
|
||||
'waiting for state %s (last seen state: %s)' %
|
||||
(expected, last_seen_state),
|
||||
timeout, sleep_time=0.1)
|
||||
|
||||
def wait_for_mysqlctl_socket(self, timeout=30.0):
|
||||
mysql_sock = os.path.join(self.tablet_dir, 'mysql.sock')
|
||||
|
@ -610,9 +633,13 @@ class Tablet(object):
|
|||
while True:
|
||||
if os.path.exists(mysql_sock) and os.path.exists(mysqlctl_sock):
|
||||
return
|
||||
timeout = utils.wait_step('waiting for mysql and mysqlctl socket files: %s %s' % (mysql_sock, mysqlctl_sock), timeout)
|
||||
timeout = utils.wait_step(
|
||||
'waiting for mysql and mysqlctl socket files: %s %s' %
|
||||
(mysql_sock, mysqlctl_sock), timeout)
|
||||
|
||||
def _add_dbconfigs(self, args, repl_extra_flags={}):
|
||||
def _add_dbconfigs(self, args, repl_extra_flags=None):
|
||||
if repl_extra_flags is None:
|
||||
repl_extra_flags = {}
|
||||
config = dict(self.default_db_config)
|
||||
if self.keyspace:
|
||||
config['app']['dbname'] = self.dbname
|
||||
|
@ -629,7 +656,8 @@ class Tablet(object):
|
|||
return urllib2.urlopen('http://localhost:%d/healthz' % self.port).read()
|
||||
|
||||
def kill_vttablet(self, wait=True):
|
||||
logging.debug('killing vttablet: %s, wait: %s', self.tablet_alias, str(wait))
|
||||
logging.debug('killing vttablet: %s, wait: %s', self.tablet_alias,
|
||||
str(wait))
|
||||
if self.proc is not None:
|
||||
Tablet.tablets_running -= 1
|
||||
if self.proc.poll() is None:
|
||||
|
@ -652,7 +680,9 @@ class Tablet(object):
|
|||
v = utils.get_vars(self.port)
|
||||
if v == None:
|
||||
if self.proc.poll() is not None:
|
||||
raise utils.TestError('vttablet died while test waiting for binlog state %s' % expected)
|
||||
raise utils.TestError(
|
||||
'vttablet died while test waiting for binlog state %s' %
|
||||
expected)
|
||||
logging.debug(' vttablet not answering at /debug/vars, waiting...')
|
||||
else:
|
||||
if 'UpdateStreamState' not in v:
|
||||
|
@ -665,8 +695,9 @@ class Tablet(object):
|
|||
expected)
|
||||
else:
|
||||
break
|
||||
timeout = utils.wait_step('waiting for binlog server state %s' % expected,
|
||||
timeout, sleep_time=0.5)
|
||||
timeout = utils.wait_step(
|
||||
'waiting for binlog server state %s' % expected,
|
||||
timeout, sleep_time=0.5)
|
||||
logging.debug('tablet %s binlog service is in state %s',
|
||||
self.tablet_alias, expected)
|
||||
|
||||
|
@ -675,7 +706,9 @@ class Tablet(object):
|
|||
v = utils.get_vars(self.port)
|
||||
if v == None:
|
||||
if self.proc.poll() is not None:
|
||||
raise utils.TestError('vttablet died while test waiting for binlog count %s' % expected)
|
||||
raise utils.TestError(
|
||||
'vttablet died while test waiting for binlog count %s' %
|
||||
expected)
|
||||
logging.debug(' vttablet not answering at /debug/vars, waiting...')
|
||||
else:
|
||||
if 'BinlogPlayerMapSize' not in v:
|
||||
|
@ -688,8 +721,9 @@ class Tablet(object):
|
|||
s, expected)
|
||||
else:
|
||||
break
|
||||
timeout = utils.wait_step('waiting for binlog player count %d' % expected,
|
||||
timeout, sleep_time=0.5)
|
||||
timeout = utils.wait_step(
|
||||
'waiting for binlog player count %d' % expected,
|
||||
timeout, sleep_time=0.5)
|
||||
logging.debug('tablet %s binlog player has %d players',
|
||||
self.tablet_alias, expected)
|
||||
|
||||
|
|
|
@ -14,36 +14,42 @@ from vtdb import tablet
|
|||
class TestRPCCallAndExtract(unittest.TestCase):
|
||||
"""Tests rpc_call_and_extract_error is tolerant to various responses."""
|
||||
|
||||
tablet_conn = tablet.TabletConnection('addr', 'type', 'keyspace', 'shard', 30, caller_id='dev')
|
||||
tablet_conn = tablet.TabletConnection(
|
||||
'addr', 'type', 'keyspace', 'shard', 30, caller_id='dev')
|
||||
|
||||
def test_reply_is_none(self):
|
||||
with mock.patch.object(self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
with mock.patch.object(
|
||||
self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
mock_client.call.return_value = gorpc.GoRpcResponse()
|
||||
self.tablet_conn.rpc_call_and_extract_error('method', 'req')
|
||||
|
||||
def test_reply_is_empty_string(self):
|
||||
with mock.patch.object(self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
with mock.patch.object(
|
||||
self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
response = gorpc.GoRpcResponse()
|
||||
response.reply = ''
|
||||
mock_client.call.return_value = response
|
||||
self.tablet_conn.rpc_call_and_extract_error('method', 'req')
|
||||
|
||||
def test_reply_is_string(self):
|
||||
with mock.patch.object(self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
with mock.patch.object(
|
||||
self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
response = gorpc.GoRpcResponse()
|
||||
response.reply = 'foo'
|
||||
mock_client.call.return_value = response
|
||||
self.tablet_conn.rpc_call_and_extract_error('method', 'req')
|
||||
|
||||
def test_reply_is_dict(self):
|
||||
with mock.patch.object(self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
with mock.patch.object(
|
||||
self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
response = gorpc.GoRpcResponse()
|
||||
response.reply = {'foo': 'bar'}
|
||||
mock_client.call.return_value = response
|
||||
self.tablet_conn.rpc_call_and_extract_error('method', 'req')
|
||||
|
||||
def test_reply_has_non_dict_err(self):
|
||||
with mock.patch.object(self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
with mock.patch.object(
|
||||
self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
response = gorpc.GoRpcResponse()
|
||||
response.reply = {'Err': 'foo'}
|
||||
mock_client.call.return_value = response
|
||||
|
@ -51,7 +57,8 @@ class TestRPCCallAndExtract(unittest.TestCase):
|
|||
self.tablet_conn.rpc_call_and_extract_error('method', 'req')
|
||||
|
||||
def test_reply_has_missing_err_message(self):
|
||||
with mock.patch.object(self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
with mock.patch.object(
|
||||
self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
response = gorpc.GoRpcResponse()
|
||||
response.reply = {'Err': {'foo': 'bar'}}
|
||||
mock_client.call.return_value = response
|
||||
|
@ -59,7 +66,8 @@ class TestRPCCallAndExtract(unittest.TestCase):
|
|||
self.tablet_conn.rpc_call_and_extract_error('method', 'req')
|
||||
|
||||
def test_reply_has_err_message(self):
|
||||
with mock.patch.object(self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
with mock.patch.object(
|
||||
self.tablet_conn, 'client', autospec=True) as mock_client:
|
||||
response = gorpc.GoRpcResponse()
|
||||
response.reply = {'Err': {'Message': 'bar'}}
|
||||
mock_client.call.return_value = response
|
||||
|
|
|
@ -2,8 +2,8 @@
|
|||
|
||||
import warnings
|
||||
# Dropping a table inexplicably produces a warning despite
|
||||
# the "IF EXISTS" clause. Squelch these warnings.
|
||||
warnings.simplefilter("ignore")
|
||||
# the 'IF EXISTS' clause. Squelch these warnings.
|
||||
warnings.simplefilter('ignore')
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
@ -21,6 +21,7 @@ from protocols_flavor import protocols_flavor
|
|||
tablet_62344 = tablet.Tablet(62344)
|
||||
tablet_62044 = tablet.Tablet(62044)
|
||||
|
||||
|
||||
def setUpModule():
|
||||
try:
|
||||
if environment.topo_server().flavor() == 'zookeeper':
|
||||
|
@ -41,6 +42,7 @@ def setUpModule():
|
|||
tearDownModule()
|
||||
raise
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
|
@ -58,7 +60,9 @@ def tearDownModule():
|
|||
tablet_62344.remove_tree()
|
||||
tablet_62044.remove_tree()
|
||||
|
||||
|
||||
class TestTabletManager(unittest.TestCase):
|
||||
|
||||
def tearDown(self):
|
||||
tablet.Tablet.check_vttablet_count()
|
||||
environment.topo_server().wipe()
|
||||
|
@ -81,7 +85,8 @@ class TestTabletManager(unittest.TestCase):
|
|||
utils.run_vtctl(['CreateKeyspace', '-force', 'test_keyspace'])
|
||||
utils.run_vtctl(['createshard', '-force', 'test_keyspace/0'])
|
||||
tablet_62344.init_tablet('master', 'test_keyspace', '0', parent=False)
|
||||
utils.run_vtctl(['RebuildKeyspaceGraph', '-rebuild_srv_shards', 'test_keyspace'])
|
||||
utils.run_vtctl(
|
||||
['RebuildKeyspaceGraph', '-rebuild_srv_shards', 'test_keyspace'])
|
||||
utils.validate_topology()
|
||||
self._check_srv_shard()
|
||||
|
||||
|
@ -96,12 +101,18 @@ class TestTabletManager(unittest.TestCase):
|
|||
# make sure the query service is started right away
|
||||
qr = tablet_62344.execute('select * from vt_select_test')
|
||||
self.assertEqual(len(qr['Rows']), 4,
|
||||
"expected 4 rows in vt_select_test: %s" % str(qr))
|
||||
'expected 4 rows in vt_select_test: %s' % str(qr))
|
||||
|
||||
# make sure direct dba queries work
|
||||
query_result = utils.run_vtctl_json(['ExecuteFetchAsDba', '-want_fields', tablet_62344.tablet_alias, 'select * from vt_test_keyspace.vt_select_test'])
|
||||
self.assertEqual(len(query_result['Rows']), 4, "expected 4 rows in vt_select_test: %s" % str(query_result))
|
||||
self.assertEqual(len(query_result['Fields']), 2, "expected 2 fields in vt_select_test: %s" % str(query_result))
|
||||
query_result = utils.run_vtctl_json(
|
||||
['ExecuteFetchAsDba', '-want_fields', tablet_62344.tablet_alias,
|
||||
'select * from vt_test_keyspace.vt_select_test'])
|
||||
self.assertEqual(
|
||||
len(query_result['Rows']), 4,
|
||||
'expected 4 rows in vt_select_test: %s' % str(query_result))
|
||||
self.assertEqual(
|
||||
len(query_result['Fields']), 2,
|
||||
'expected 2 fields in vt_select_test: %s' % str(query_result))
|
||||
|
||||
# check Ping / RefreshState
|
||||
utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
|
||||
|
@ -144,7 +155,6 @@ class TestTabletManager(unittest.TestCase):
|
|||
utils.validate_topology()
|
||||
self._check_srv_shard()
|
||||
|
||||
|
||||
_create_vt_select_test = '''create table vt_select_test (
|
||||
id bigint auto_increment,
|
||||
msg varchar(64),
|
||||
|
@ -155,7 +165,6 @@ class TestTabletManager(unittest.TestCase):
|
|||
"insert into vt_select_test (msg) values ('test %s')" % x
|
||||
for x in xrange(4)]
|
||||
|
||||
|
||||
def test_actions_and_timeouts(self):
|
||||
# Start up a master mysql and vttablet
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
|
@ -183,9 +192,9 @@ class TestTabletManager(unittest.TestCase):
|
|||
|
||||
# try a frontend RefreshState that should timeout as the tablet is busy
|
||||
# running the other one
|
||||
stdout, stderr = utils.run_vtctl(['-wait-time', '3s',
|
||||
'RefreshState', tablet_62344.tablet_alias],
|
||||
expect_fail=True)
|
||||
stdout, stderr = utils.run_vtctl(
|
||||
['-wait-time', '3s', 'RefreshState', tablet_62344.tablet_alias],
|
||||
expect_fail=True)
|
||||
self.assertIn(protocols_flavor().rpc_timeout_message(), stderr)
|
||||
|
||||
# wait for the background vtctl
|
||||
|
@ -196,7 +205,7 @@ class TestTabletManager(unittest.TestCase):
|
|||
# make sure they're accounted for properly
|
||||
# first the query engine States
|
||||
v = utils.get_vars(tablet_62344.port)
|
||||
logging.debug("vars: %s" % str(v))
|
||||
logging.debug('vars: %s', v)
|
||||
|
||||
# then the Zookeeper connections
|
||||
if v['ZkMetaConn']['test_nj']['Current'] != 'Connected':
|
||||
|
@ -213,7 +222,6 @@ class TestTabletManager(unittest.TestCase):
|
|||
|
||||
tablet_62344.kill_vttablet()
|
||||
|
||||
|
||||
def test_vttablet_authenticated(self):
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
tablet_62344.init_tablet('master', 'test_keyspace', '0')
|
||||
|
@ -228,9 +236,11 @@ class TestTabletManager(unittest.TestCase):
|
|||
|
||||
# make sure we can connect using secure connection
|
||||
conn = tablet_62344.conn(user='ala', password=r'ma kota')
|
||||
results, rowcount, lastrowid, fields = conn._execute('select * from vt_select_test', {})
|
||||
logging.debug("Got results: %s", str(results))
|
||||
self.assertEqual(len(results), 4, 'got wrong result length: %s' % str(results))
|
||||
results, rowcount, lastrowid, fields = conn._execute(
|
||||
'select * from vt_select_test', {})
|
||||
logging.debug('Got results: %s', str(results))
|
||||
self.assertEqual(
|
||||
len(results), 4, 'got wrong result length: %s' % str(results))
|
||||
conn.close()
|
||||
|
||||
tablet_62344.kill_vttablet()
|
||||
|
@ -243,7 +253,9 @@ class TestTabletManager(unittest.TestCase):
|
|||
self.assertEqual(hr['ExitStatus'], expectedStatus)
|
||||
if isinstance(expectedStdout, basestring):
|
||||
if expectedStdout[-1:] == '%':
|
||||
self.assertEqual(hr['Stdout'][:len(expectedStdout)-1], expectedStdout[:len(expectedStdout)-1])
|
||||
self.assertEqual(
|
||||
hr['Stdout'][:len(expectedStdout)-1],
|
||||
expectedStdout[:len(expectedStdout)-1])
|
||||
else:
|
||||
self.assertEqual(hr['Stdout'], expectedStdout)
|
||||
else:
|
||||
|
@ -253,7 +265,9 @@ class TestTabletManager(unittest.TestCase):
|
|||
found = True
|
||||
break
|
||||
if not found:
|
||||
self.assertFail('cannot find expected %s in %s' % (str(expectedStdout), hr['Stdout']))
|
||||
self.assertFail(
|
||||
'cannot find expected %s in %s' %
|
||||
(str(expectedStdout), hr['Stdout']))
|
||||
|
||||
def test_hook(self):
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
|
@ -313,10 +327,10 @@ class TestTabletManager(unittest.TestCase):
|
|||
at all).
|
||||
"""
|
||||
if environment.topo_server().flavor() != 'zookeeper':
|
||||
logging.info("Skipping this test in non-github tree")
|
||||
logging.info('Skipping this test in non-github tree')
|
||||
return
|
||||
if tablet_62344.grpc_enabled():
|
||||
logging.info("Skipping this test as second gRPC port interferes")
|
||||
logging.info('Skipping this test as second gRPC port interferes')
|
||||
return
|
||||
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
|
@ -328,13 +342,13 @@ class TestTabletManager(unittest.TestCase):
|
|||
proc1 = tablet_62344.start_vttablet()
|
||||
proc2 = tablet_62344.start_vttablet()
|
||||
for timeout in xrange(20):
|
||||
logging.debug("Sleeping waiting for first process to die")
|
||||
logging.debug('Sleeping waiting for first process to die')
|
||||
time.sleep(1.0)
|
||||
proc1.poll()
|
||||
if proc1.returncode is not None:
|
||||
break
|
||||
if proc1.returncode is None:
|
||||
self.fail("proc1 still running")
|
||||
self.fail('proc1 still running')
|
||||
tablet_62344.kill_vttablet()
|
||||
|
||||
def test_scrap_and_reinit(self):
|
||||
|
@ -369,21 +383,21 @@ class TestTabletManager(unittest.TestCase):
|
|||
utils.run_vtctl(['ShardReplicationAdd', 'test_keyspace/0',
|
||||
'test_nj-0000066666'], auto_log=True)
|
||||
with_bogus = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
|
||||
'test_keyspace/0'])
|
||||
'test_keyspace/0'])
|
||||
self.assertEqual(3, len(with_bogus['nodes']),
|
||||
'wrong shard replication nodes with bogus: %s' %
|
||||
str(with_bogus))
|
||||
utils.run_vtctl(['ShardReplicationFix', 'test_nj', 'test_keyspace/0'],
|
||||
auto_log=True)
|
||||
after_fix = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
|
||||
'test_keyspace/0'])
|
||||
'test_keyspace/0'])
|
||||
self.assertEqual(2, len(after_scrap['nodes']),
|
||||
'wrong shard replication nodes after fix: %s' %
|
||||
str(after_fix))
|
||||
|
||||
def check_healthz(self, tablet, expected):
|
||||
if expected:
|
||||
self.assertEqual("ok\n", tablet.get_healthz())
|
||||
self.assertEqual('ok\n', tablet.get_healthz())
|
||||
else:
|
||||
with self.assertRaises(urllib2.HTTPError):
|
||||
tablet.get_healthz()
|
||||
|
@ -394,7 +408,7 @@ class TestTabletManager(unittest.TestCase):
|
|||
while True:
|
||||
ti = utils.run_vtctl_json(['GetTablet', tablet_alias])
|
||||
if ti['type'] == t:
|
||||
logging.debug('Slave tablet went to %s, good' % expected_type)
|
||||
logging.debug('Slave tablet went to %s, good', expected_type)
|
||||
break
|
||||
timeout = utils.wait_step('slave becomes ' + expected_type, timeout)
|
||||
|
||||
|
@ -422,17 +436,17 @@ class TestTabletManager(unittest.TestCase):
|
|||
tablet_62344.tablet_alias])
|
||||
|
||||
# make sure the 'spare' slave goes to 'replica'
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "replica")
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, 'replica')
|
||||
self.check_healthz(tablet_62044, True)
|
||||
|
||||
# make sure the master is still master
|
||||
ti = utils.run_vtctl_json(['GetTablet', tablet_62344.tablet_alias])
|
||||
self.assertEqual(ti['type'], tablet.Tablet.tablet_type_value['MASTER'],
|
||||
"unexpected master type: %s" % ti['type'])
|
||||
'unexpected master type: %s' % ti['type'])
|
||||
|
||||
# stop replication, make sure we go unhealthy.
|
||||
utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "spare")
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, 'spare')
|
||||
self.check_healthz(tablet_62044, False)
|
||||
|
||||
# make sure the serving graph was updated
|
||||
|
@ -442,22 +456,28 @@ class TestTabletManager(unittest.TestCase):
|
|||
utils.run_vtctl_json(['GetEndPoints', 'test_nj', 'test_keyspace/0',
|
||||
'replica'])
|
||||
except:
|
||||
logging.debug("Tablet is gone from serving graph, good")
|
||||
logging.debug('Tablet is gone from serving graph, good')
|
||||
break
|
||||
timeout = utils.wait_step('Stopped replication didn\'t trigger removal from serving graph', timeout)
|
||||
timeout = utils.wait_step(
|
||||
'Stopped replication didn\'t trigger removal from serving graph',
|
||||
timeout)
|
||||
|
||||
# make sure status web page is unhappy
|
||||
self.assertIn('>unhealthy: replication_reporter: Replication is not running</span></div>', tablet_62044.get_status())
|
||||
self.assertIn(
|
||||
'>unhealthy: replication_reporter: '
|
||||
'Replication is not running</span></div>', tablet_62044.get_status())
|
||||
|
||||
# make sure the health stream is updated
|
||||
health = utils.run_vtctl_json(['VtTabletStreamHealth',
|
||||
'-count', '1',
|
||||
tablet_62044.tablet_alias])
|
||||
self.assertIn('replication_reporter: Replication is not running', health['realtime_stats']['health_error'])
|
||||
self.assertIn(
|
||||
'replication_reporter: Replication is not running',
|
||||
health['realtime_stats']['health_error'])
|
||||
|
||||
# then restart replication, and write data, make sure we go back to healthy
|
||||
utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "replica")
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, 'replica')
|
||||
|
||||
# make sure status web page is healthy
|
||||
self.assertIn('>healthy</span></div>', tablet_62044.get_status())
|
||||
|
@ -474,7 +494,7 @@ class TestTabletManager(unittest.TestCase):
|
|||
lines = stdout.splitlines()
|
||||
self.assertEqual(len(lines), 2)
|
||||
for line in lines:
|
||||
logging.debug("Got health: %s", line)
|
||||
logging.debug('Got health: %s', line)
|
||||
data = json.loads(line)
|
||||
self.assertIn('realtime_stats', data)
|
||||
self.assertNotIn('health_error', data['realtime_stats'])
|
||||
|
@ -489,13 +509,16 @@ class TestTabletManager(unittest.TestCase):
|
|||
# the replica was in lameduck for 5 seconds, should have been enough
|
||||
# to reset its state to spare
|
||||
ti = utils.run_vtctl_json(['GetTablet', tablet_62044.tablet_alias])
|
||||
self.assertEqual(ti['type'], tablet.Tablet.tablet_type_value['SPARE'],
|
||||
"tablet didn't go to spare while in lameduck mode: %s" % str(ti))
|
||||
self.assertEqual(
|
||||
ti['type'], tablet.Tablet.tablet_type_value['SPARE'],
|
||||
"tablet didn't go to spare while in lameduck mode: %s" % str(ti))
|
||||
|
||||
def test_health_check_worker_state_does_not_shutdown_query_service(self):
|
||||
# This test is similar to test_health_check, but has the following differences:
|
||||
# - the second tablet is an "rdonly" and not a "replica"
|
||||
# - the second tablet will be set to "worker" and we expect that the query service won't be shutdown
|
||||
# This test is similar to test_health_check, but has the following
|
||||
# differences:
|
||||
# - the second tablet is an 'rdonly' and not a 'replica'
|
||||
# - the second tablet will be set to 'worker' and we expect that
|
||||
# the query service won't be shutdown
|
||||
|
||||
# Setup master and rdonly tablets.
|
||||
tablet_62344.init_tablet('master', 'test_keyspace', '0')
|
||||
|
@ -518,30 +541,34 @@ class TestTabletManager(unittest.TestCase):
|
|||
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
|
||||
tablet_62344.tablet_alias])
|
||||
# Trigger healthcheck to save time waiting for the next interval.
|
||||
utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, 'rdonly'])
|
||||
utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias, 'rdonly'])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, 'rdonly')
|
||||
self.check_healthz(tablet_62044, True)
|
||||
tablet_62044.wait_for_vttablet_state('SERVING')
|
||||
|
||||
# Change from rdonly to worker and stop replication. (These actions are similar to the SplitClone vtworker command implementation.)
|
||||
# The tablet will become unhealthy, but the query service is still running.
|
||||
utils.run_vtctl(["ChangeSlaveType", tablet_62044.tablet_alias, "worker"])
|
||||
# Change from rdonly to worker and stop replication. (These
|
||||
# actions are similar to the SplitClone vtworker command
|
||||
# implementation.) The tablet will become unhealthy, but the
|
||||
# query service is still running.
|
||||
utils.run_vtctl(['ChangeSlaveType', tablet_62044.tablet_alias, 'worker'])
|
||||
utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
|
||||
# Trigger healthcheck explicitly to avoid waiting for the next interval.
|
||||
utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "rdonly"])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "worker")
|
||||
utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias, 'rdonly'])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, 'worker')
|
||||
self.check_healthz(tablet_62044, False)
|
||||
# Make sure that replication got disabled.
|
||||
self.assertIn('>unhealthy: replication_reporter: Replication is not running</span></div>', tablet_62044.get_status())
|
||||
self.assertIn(
|
||||
'>unhealthy: replication_reporter: '
|
||||
'Replication is not running</span></div>', tablet_62044.get_status())
|
||||
# Query service is still running.
|
||||
tablet_62044.wait_for_vttablet_state('SERVING')
|
||||
|
||||
# Restart replication. Tablet will become healthy again.
|
||||
utils.run_vtctl(["ChangeSlaveType", tablet_62044.tablet_alias, "spare"])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "spare")
|
||||
utils.run_vtctl(['ChangeSlaveType', tablet_62044.tablet_alias, 'spare'])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, 'spare')
|
||||
utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
|
||||
utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "rdonly"])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "rdonly")
|
||||
utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias, 'rdonly'])
|
||||
self.wait_for_tablet_type_change(tablet_62044.tablet_alias, 'rdonly')
|
||||
self.check_healthz(tablet_62044, True)
|
||||
tablet_62044.wait_for_vttablet_state('SERVING')
|
||||
|
||||
|
@ -557,14 +584,15 @@ class TestTabletManager(unittest.TestCase):
|
|||
for t in tablet_62344, tablet_62044:
|
||||
t.create_db('vt_test_keyspace')
|
||||
pos = mysql_flavor().master_position(tablet_62344)
|
||||
# Use "localhost" as hostname because Travis CI worker hostnames are too long for MySQL replication.
|
||||
# Use 'localhost' as hostname because Travis CI worker hostnames
|
||||
# are too long for MySQL replication.
|
||||
changeMasterCmds = mysql_flavor().change_master_commands(
|
||||
"localhost",
|
||||
tablet_62344.mysql_port,
|
||||
pos)
|
||||
tablet_62044.mquery('', ['RESET MASTER', 'RESET SLAVE'] +
|
||||
changeMasterCmds +
|
||||
['START SLAVE'])
|
||||
'localhost',
|
||||
tablet_62344.mysql_port,
|
||||
pos)
|
||||
tablet_62044.mquery(
|
||||
'', ['RESET MASTER', 'RESET SLAVE'] +
|
||||
changeMasterCmds + ['START SLAVE'])
|
||||
|
||||
# now shutdown all mysqld
|
||||
shutdown_procs = [
|
||||
|
@ -645,9 +673,9 @@ class TestTabletManager(unittest.TestCase):
|
|||
|
||||
# run health check on both, make sure they are both healthy
|
||||
for t in tablet_62344, tablet_62044:
|
||||
utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
|
||||
auto_log=True)
|
||||
self.check_healthz(t, True)
|
||||
utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
|
||||
auto_log=True)
|
||||
self.check_healthz(t, True)
|
||||
|
||||
# pick the other one as master, make sure they are still healthy
|
||||
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
|
||||
|
@ -655,9 +683,9 @@ class TestTabletManager(unittest.TestCase):
|
|||
|
||||
# run health check on both, make sure they are both healthy
|
||||
for t in tablet_62344, tablet_62044:
|
||||
utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
|
||||
auto_log=True)
|
||||
self.check_healthz(t, True)
|
||||
utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
|
||||
auto_log=True)
|
||||
self.check_healthz(t, True)
|
||||
|
||||
# and come back to the original guy
|
||||
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
|
||||
|
@ -665,9 +693,9 @@ class TestTabletManager(unittest.TestCase):
|
|||
|
||||
# run health check on both, make sure they are both healthy
|
||||
for t in tablet_62344, tablet_62044:
|
||||
utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
|
||||
auto_log=True)
|
||||
self.check_healthz(t, True)
|
||||
utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
|
||||
auto_log=True)
|
||||
self.check_healthz(t, True)
|
||||
|
||||
# and done
|
||||
tablet.kill_tablets([tablet_62344, tablet_62044])
|
||||
|
@ -675,7 +703,7 @@ class TestTabletManager(unittest.TestCase):
|
|||
def test_fallback_policy(self):
|
||||
tablet_62344.create_db('vt_test_keyspace')
|
||||
tablet_62344.init_tablet('master', 'test_keyspace', '0')
|
||||
proc1 = tablet_62344.start_vttablet(security_policy="bogus")
|
||||
proc1 = tablet_62344.start_vttablet(security_policy='bogus')
|
||||
f = urllib.urlopen('http://localhost:%d/queryz' % int(tablet_62344.port))
|
||||
response = f.read()
|
||||
f.close()
|
||||
|
|
|
@ -152,7 +152,8 @@ class TestUpdateStream(unittest.TestCase):
|
|||
self._exec_vt_txn(self._populate_vt_insert_test)
|
||||
self._exec_vt_txn(['delete from vt_insert_test'])
|
||||
utils.run_vtctl(['ChangeSlaveType', replica_tablet.tablet_alias, 'spare'])
|
||||
utils.wait_for_tablet_type(replica_tablet.tablet_alias, tablet.Tablet.tablet_type_value['SPARE'])
|
||||
utils.wait_for_tablet_type(
|
||||
replica_tablet.tablet_alias, tablet.Tablet.tablet_type_value['SPARE'])
|
||||
logging.debug('dialing replica update stream service')
|
||||
replica_conn = self._get_replica_stream_conn()
|
||||
try:
|
||||
|
@ -175,9 +176,12 @@ class TestUpdateStream(unittest.TestCase):
|
|||
def _test_service_enabled(self):
|
||||
start_position = _get_repl_current_position()
|
||||
logging.debug('_test_service_enabled starting @ %s', start_position)
|
||||
utils.run_vtctl(['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
|
||||
utils.run_vtctl(
|
||||
['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
|
||||
logging.debug('sleeping a bit for the replica action to complete')
|
||||
utils.wait_for_tablet_type(replica_tablet.tablet_alias, tablet.Tablet.tablet_type_value['REPLICA'], 30)
|
||||
utils.wait_for_tablet_type(
|
||||
replica_tablet.tablet_alias,
|
||||
tablet.Tablet.tablet_type_value['REPLICA'], 30)
|
||||
thd = threading.Thread(target=self.perform_writes, name='write_thd',
|
||||
args=(100,))
|
||||
thd.daemon = True
|
||||
|
@ -209,8 +213,11 @@ class TestUpdateStream(unittest.TestCase):
|
|||
try:
|
||||
for stream_event in replica_conn.stream_update(start_position):
|
||||
if first:
|
||||
utils.run_vtctl(['ChangeSlaveType', replica_tablet.tablet_alias, 'spare'])
|
||||
utils.wait_for_tablet_type(replica_tablet.tablet_alias, tablet.Tablet.tablet_type_value['SPARE'], 30)
|
||||
utils.run_vtctl(
|
||||
['ChangeSlaveType', replica_tablet.tablet_alias, 'spare'])
|
||||
utils.wait_for_tablet_type(
|
||||
replica_tablet.tablet_alias,
|
||||
tablet.Tablet.tablet_type_value['SPARE'], 30)
|
||||
first = False
|
||||
else:
|
||||
if stream_event.category == update_stream.StreamEvent.POS:
|
||||
|
@ -247,8 +254,8 @@ class TestUpdateStream(unittest.TestCase):
|
|||
if master_start_position == replica_start_position:
|
||||
break
|
||||
timeout = utils.wait_step(
|
||||
"%s == %s" % (master_start_position, replica_start_position),
|
||||
timeout
|
||||
'%s == %s' % (master_start_position, replica_start_position),
|
||||
timeout
|
||||
)
|
||||
logging.debug('run_test_stream_parity starting @ %s',
|
||||
master_start_position)
|
||||
|
@ -298,7 +305,8 @@ class TestUpdateStream(unittest.TestCase):
|
|||
|
||||
def test_set_insert_id(self):
|
||||
start_position = _get_master_current_position()
|
||||
self._exec_vt_txn(['SET INSERT_ID=1000000'] + self._populate_vt_insert_test)
|
||||
self._exec_vt_txn(
|
||||
['SET INSERT_ID=1000000'] + self._populate_vt_insert_test)
|
||||
logging.debug('test_set_insert_id: starting @ %s', start_position)
|
||||
master_conn = self._get_master_stream_conn()
|
||||
expected_id = 1000000
|
||||
|
@ -309,7 +317,7 @@ class TestUpdateStream(unittest.TestCase):
|
|||
self.assertEqual(stream_event.rows[0][0], expected_id)
|
||||
expected_id += 1
|
||||
if expected_id != 1000004:
|
||||
self.fail("did not get my four values!")
|
||||
self.fail('did not get my four values!')
|
||||
|
||||
def test_database_filter(self):
|
||||
start_position = _get_master_current_position()
|
||||
|
@ -325,12 +333,15 @@ class TestUpdateStream(unittest.TestCase):
|
|||
"query using other_database wasn't filted out")
|
||||
|
||||
def test_service_switch(self):
|
||||
"""tests the service switch from disable -> enable -> disable"""
|
||||
"""tests the service switch from disable -> enable -> disable."""
|
||||
self._test_service_disabled()
|
||||
self._test_service_enabled()
|
||||
# The above tests leaves the service in disabled state, hence enabling it.
|
||||
utils.run_vtctl(['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
|
||||
utils.wait_for_tablet_type(replica_tablet.tablet_alias, tablet.Tablet.tablet_type_value['REPLICA'], 30)
|
||||
utils.run_vtctl(
|
||||
['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
|
||||
utils.wait_for_tablet_type(
|
||||
replica_tablet.tablet_alias,
|
||||
tablet.Tablet.tablet_type_value['REPLICA'], 30)
|
||||
|
||||
def test_log_rotation(self):
|
||||
start_position = _get_master_current_position()
|
||||
|
@ -344,13 +355,14 @@ class TestUpdateStream(unittest.TestCase):
|
|||
for stream_event in master_conn.stream_update(start_position):
|
||||
if stream_event.category == update_stream.StreamEvent.POS:
|
||||
master_txn_count += 1
|
||||
position = mysql_flavor().position_append(position, stream_event.transaction_id)
|
||||
position = mysql_flavor(
|
||||
).position_append(position, stream_event.transaction_id)
|
||||
if mysql_flavor().position_after(position, start_position):
|
||||
logs_correct = True
|
||||
logging.debug('Log rotation correctly interpreted')
|
||||
break
|
||||
if master_txn_count == 2:
|
||||
self.fail("ran out of logs")
|
||||
self.fail('ran out of logs')
|
||||
if not logs_correct:
|
||||
self.fail("Flush logs didn't get properly interpreted")
|
||||
|
||||
|
|
341
test/utils.py
341
test/utils.py
|
@ -26,16 +26,20 @@ from topo_flavor.server import set_topo_server_flavor
|
|||
|
||||
options = None
|
||||
devnull = open('/dev/null', 'w')
|
||||
hostname = socket.getaddrinfo(socket.getfqdn(), None, 0, 0, 0, socket.AI_CANONNAME)[0][3]
|
||||
hostname = socket.getaddrinfo(
|
||||
socket.getfqdn(), None, 0, 0, 0, socket.AI_CANONNAME)[0][3]
|
||||
|
||||
|
||||
class TestError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Break(Exception):
|
||||
pass
|
||||
|
||||
environment.setup()
|
||||
|
||||
|
||||
class LoggingStream(object):
|
||||
def __init__(self):
|
||||
self.line = ''
|
||||
|
@ -57,22 +61,28 @@ class LoggingStream(object):
|
|||
def flush(self):
|
||||
pass
|
||||
|
||||
|
||||
def add_options(parser):
|
||||
environment.add_options(parser)
|
||||
parser.add_option('-d', '--debug', action='store_true',
|
||||
help='utils.pause() statements will wait for user input')
|
||||
parser.add_option('-k', '--keep-logs', action='store_true',
|
||||
help='Do not delete log files on teardown.')
|
||||
parser.add_option('-q', '--quiet', action='store_const', const=0, dest='verbose', default=1)
|
||||
parser.add_option('-v', '--verbose', action='store_const', const=2, dest='verbose', default=1)
|
||||
parser.add_option(
|
||||
'-q', '--quiet', action='store_const', const=0, dest='verbose', default=1)
|
||||
parser.add_option(
|
||||
'-v', '--verbose', action='store_const', const=2, dest='verbose',
|
||||
default=1)
|
||||
parser.add_option('--skip-build', action='store_true',
|
||||
help='Do not build the go binaries when running the test.')
|
||||
parser.add_option('--skip-teardown', action='store_true',
|
||||
help='Leave the global processes running after the test is done.')
|
||||
parser.add_option(
|
||||
'--skip-teardown', action='store_true',
|
||||
help='Leave the global processes running after the test is done.')
|
||||
parser.add_option('--mysql-flavor')
|
||||
parser.add_option('--protocols-flavor')
|
||||
parser.add_option('--topo-server-flavor', default='zookeeper')
|
||||
|
||||
|
||||
def set_options(opts):
|
||||
global options
|
||||
options = opts
|
||||
|
@ -82,13 +92,14 @@ def set_options(opts):
|
|||
set_topo_server_flavor(options.topo_server_flavor)
|
||||
environment.skip_build = options.skip_build
|
||||
|
||||
|
||||
# main executes the test classes contained in the passed module, or
|
||||
# __main__ if empty.
|
||||
def main(mod=None, test_options=None):
|
||||
"""The replacement main method, which parses args and runs tests.
|
||||
|
||||
Args:
|
||||
test_options - a function which adds OptionParser options that are specific
|
||||
test_options: a function which adds OptionParser options that are specific
|
||||
to a test file.
|
||||
"""
|
||||
if mod == None:
|
||||
|
@ -109,12 +120,14 @@ def main(mod=None, test_options=None):
|
|||
else:
|
||||
level = logging.DEBUG
|
||||
logging.getLogger().setLevel(level)
|
||||
logging.basicConfig(format='-- %(asctime)s %(module)s:%(lineno)d %(levelname)s %(message)s')
|
||||
logging.basicConfig(
|
||||
format='-- %(asctime)s %(module)s:%(lineno)d %(levelname)s %(message)s')
|
||||
|
||||
set_options(options)
|
||||
|
||||
run_tests(mod, args)
|
||||
|
||||
|
||||
def run_tests(mod, args):
|
||||
try:
|
||||
suite = unittest.TestSuite()
|
||||
|
@ -135,7 +148,8 @@ def run_tests(mod, args):
|
|||
|
||||
if suite.countTestCases() > 0:
|
||||
logger = LoggingStream()
|
||||
result = unittest.TextTestRunner(stream=logger, verbosity=options.verbose, failfast=True).run(suite)
|
||||
result = unittest.TextTestRunner(
|
||||
stream=logger, verbosity=options.verbose, failfast=True).run(suite)
|
||||
if not result.wasSuccessful():
|
||||
sys.exit(-1)
|
||||
except KeyboardInterrupt:
|
||||
|
@ -148,6 +162,7 @@ def run_tests(mod, args):
|
|||
logging.warning('Leaving temporary files behind (--keep-logs), please '
|
||||
'clean up before next run: ' + os.environ['VTDATAROOT'])
|
||||
|
||||
|
||||
def remove_tmp_files():
|
||||
if options.keep_logs:
|
||||
return
|
||||
|
@ -156,10 +171,12 @@ def remove_tmp_files():
|
|||
except OSError as e:
|
||||
logging.debug('remove_tmp_files: %s', str(e))
|
||||
|
||||
|
||||
def pause(prompt):
|
||||
if options.debug:
|
||||
raw_input(prompt)
|
||||
|
||||
|
||||
# sub-process management
|
||||
pid_map = {}
|
||||
already_killed = []
|
||||
|
@ -168,6 +185,7 @@ def _add_proc(proc):
|
|||
with open(environment.tmproot+'/test-pids', 'a') as f:
|
||||
print >> f, proc.pid, os.path.basename(proc.args[0])
|
||||
|
||||
|
||||
def kill_sub_processes():
|
||||
for proc in pid_map.values():
|
||||
if proc.pid and proc.returncode is None:
|
||||
|
@ -186,6 +204,7 @@ def kill_sub_processes():
|
|||
except OSError as e:
|
||||
logging.debug('kill_sub_processes: %s', str(e))
|
||||
|
||||
|
||||
def kill_sub_process(proc, soft=False):
|
||||
if proc is None:
|
||||
return
|
||||
|
@ -198,6 +217,7 @@ def kill_sub_process(proc, soft=False):
|
|||
del pid_map[pid]
|
||||
already_killed.append(pid)
|
||||
|
||||
|
||||
# run in foreground, possibly capturing output
|
||||
def run(cmd, trap_output=False, raise_on_error=True, **kargs):
|
||||
if isinstance(cmd, str):
|
||||
|
@ -207,7 +227,9 @@ def run(cmd, trap_output=False, raise_on_error=True, **kargs):
|
|||
if trap_output:
|
||||
kargs['stdout'] = PIPE
|
||||
kargs['stderr'] = PIPE
|
||||
logging.debug('run: %s %s', str(cmd), ', '.join('%s=%s' % x for x in kargs.iteritems()))
|
||||
logging.debug(
|
||||
'run: %s %s', str(cmd),
|
||||
', '.join('%s=%s' % x for x in kargs.iteritems()))
|
||||
proc = Popen(args, **kargs)
|
||||
proc.args = args
|
||||
stdout, stderr = proc.communicate()
|
||||
|
@ -220,6 +242,7 @@ def run(cmd, trap_output=False, raise_on_error=True, **kargs):
|
|||
str(args), proc.returncode, stdout, stderr)
|
||||
return stdout, stderr
|
||||
|
||||
|
||||
# run sub-process, expects failure
|
||||
def run_fail(cmd, **kargs):
|
||||
if isinstance(cmd, str):
|
||||
|
@ -229,7 +252,9 @@ def run_fail(cmd, **kargs):
|
|||
kargs['stdout'] = PIPE
|
||||
kargs['stderr'] = PIPE
|
||||
if options.verbose == 2:
|
||||
logging.debug('run: (expect fail) %s %s', cmd, ', '.join('%s=%s' % x for x in kargs.iteritems()))
|
||||
logging.debug(
|
||||
'run: (expect fail) %s %s', cmd,
|
||||
', '.join('%s=%s' % x for x in kargs.iteritems()))
|
||||
proc = Popen(args, **kargs)
|
||||
proc.args = args
|
||||
stdout, stderr = proc.communicate()
|
||||
|
@ -238,10 +263,12 @@ def run_fail(cmd, **kargs):
|
|||
raise TestError('expected fail:', args, stdout, stderr)
|
||||
return stdout, stderr
|
||||
|
||||
|
||||
# run a daemon - kill when this script exits
|
||||
def run_bg(cmd, **kargs):
|
||||
if options.verbose == 2:
|
||||
logging.debug('run: %s %s', cmd, ', '.join('%s=%s' % x for x in kargs.iteritems()))
|
||||
logging.debug(
|
||||
'run: %s %s', cmd, ', '.join('%s=%s' % x for x in kargs.iteritems()))
|
||||
if 'extra_env' in kargs:
|
||||
kargs['env'] = os.environ.copy()
|
||||
if kargs['extra_env']:
|
||||
|
@ -256,6 +283,7 @@ def run_bg(cmd, **kargs):
|
|||
_add_proc(proc)
|
||||
return proc
|
||||
|
||||
|
||||
def wait_procs(proc_list, raise_on_error=True):
|
||||
for proc in proc_list:
|
||||
pid = proc.pid
|
||||
|
@ -270,24 +298,29 @@ def wait_procs(proc_list, raise_on_error=True):
|
|||
if raise_on_error:
|
||||
raise CalledProcessError(proc.returncode, ' '.join(proc.args))
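For reference, a small sketch of the background-process pattern these helpers support; the sleep commands are placeholders, and the teardown code later in this commit uses the same wait_procs(..., raise_on_error=False) call:

# Hypothetical background processes; run_bg() registers them for cleanup.
proc_a = run_bg(['sleep', '1'])
proc_b = run_bg(['sleep', '1'])
# During teardown, wait for both without failing the test on a non-zero exit.
wait_procs([proc_a, proc_b], raise_on_error=False)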
|
||||
|
||||
|
||||
def validate_topology(ping_tablets=False):
|
||||
if ping_tablets:
|
||||
run_vtctl(['Validate', '-ping-tablets'])
|
||||
else:
|
||||
run_vtctl(['Validate'])
|
||||
|
||||
|
||||
def zk_ls(path):
|
||||
out, err = run(environment.binary_argstr('zk')+' ls '+path, trap_output=True)
|
||||
return sorted(out.splitlines())
|
||||
|
||||
|
||||
def zk_cat(path):
|
||||
out, err = run(environment.binary_argstr('zk')+' cat '+path, trap_output=True)
|
||||
return out
|
||||
|
||||
|
||||
def zk_cat_json(path):
|
||||
data = zk_cat(path)
|
||||
return json.loads(data)
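An illustrative sketch of the zk helpers above; the paths are only meaningful when the topo server flavor is zookeeper, and whether a given node holds JSON depends on the topology layout:

# Hypothetical zookeeper paths, valid only with the zookeeper topo flavor.
keyspaces = zk_ls('/zk/global/vt/keyspaces')
ks_data = zk_cat_json('/zk/global/vt/keyspaces/test_keyspace')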
|
||||
|
||||
|
||||
# wait_step is a helper for looping until a condition is true.
# use as follow:
# timeout = 10
@ -299,11 +332,12 @@ def wait_step(msg, timeout, sleep_time=1.0):
timeout -= sleep_time
if timeout <= 0:
raise TestError('timeout waiting for condition "%s"' % msg)
logging.debug('Sleeping for %f seconds waiting for condition "%s"' %
(sleep_time, msg))
logging.debug('Sleeping for %f seconds waiting for condition "%s"',
sleep_time, msg)
time.sleep(sleep_time)
return timeout
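Spelled out, the polling pattern from the comment above looks like this; some_condition() is a placeholder for a real check:

# Illustrative polling loop; wait_step() counts the timeout down and raises
# TestError when it reaches zero.
timeout = 10
while True:
  if some_condition():
    break
  timeout = wait_step('condition to become true', timeout)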
# vars helpers
|
||||
def get_vars(port):
|
||||
"""
|
||||
|
@ -323,6 +357,7 @@ def get_vars(port):
|
|||
print data
|
||||
raise
|
||||
|
||||
|
||||
# wait_for_vars will wait until we can actually get the vars from a process,
|
||||
# and if var is specified, will wait until that var is in vars
|
||||
def wait_for_vars(name, port, var=None):
|
||||
|
@ -333,7 +368,10 @@ def wait_for_vars(name, port, var=None):
|
|||
break
|
||||
timeout = wait_step('waiting for /debug/vars of %s' % name, timeout)
|
||||
|
||||
def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None, require_vars=False):
|
||||
|
||||
def poll_for_vars(
|
||||
name, port, condition_msg, timeout=60.0, condition_fn=None,
|
||||
require_vars=False):
|
||||
"""Polls for debug variables to exist, or match specific conditions, within a timeout.
|
||||
|
||||
This function polls in a tight loop, with no sleeps. This is useful for
|
||||
|
@ -341,17 +379,18 @@ def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None, re
|
|||
immediately before a process exits).
|
||||
|
||||
Args:
|
||||
name - the name of the process that we're trying to poll vars from.
|
||||
port - the port number that we should poll for variables.
|
||||
condition_msg - string describing the conditions that we're polling for,
|
||||
name: the name of the process that we're trying to poll vars from.
|
||||
port: the port number that we should poll for variables.
|
||||
condition_msg: string describing the conditions that we're polling for,
|
||||
used for error messaging.
|
||||
timeout - number of seconds that we should attempt to poll for.
|
||||
condition_fn - a function that takes the debug vars dict as input, and
|
||||
timeout: number of seconds that we should attempt to poll for.
|
||||
condition_fn: a function that takes the debug vars dict as input, and
|
||||
returns a truthy value if it matches the success conditions.
|
||||
require_vars - True iff we expect the vars to always exist. If True, and the
|
||||
vars don't exist, we'll raise a TestError. This can be used to differentiate
|
||||
between a timeout waiting for a particular condition vs if the process that
|
||||
you're polling has already exited.
|
||||
require_vars: True iff we expect the vars to always exist. If
|
||||
True, and the vars don't exist, we'll raise a TestError. This
|
||||
can be used to differentiate between a timeout waiting for a
|
||||
particular condition vs if the process that you're polling has
|
||||
already exited.
|
||||
|
||||
Raises:
|
||||
TestError, if the conditions aren't met within the given timeout
|
||||
|
@ -359,6 +398,7 @@ def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None, re
|
|||
|
||||
Returns:
|
||||
dict of debug variables
|
||||
|
||||
"""
|
||||
start_time = time.time()
|
||||
while True:
|
||||
|
@ -374,12 +414,14 @@ def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None, re
|
|||
elif condition_fn(_vars):
|
||||
return _vars
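A hedged usage sketch of poll_for_vars() based on its docstring; the process name, port variable and debug-variable name are placeholders, not values from this commit:

# Hypothetical call: poll until a debug variable appears and is non-zero.
# worker_port and 'SomeCounter' are placeholders.
_vars = poll_for_vars(
    'vtworker', worker_port,
    'worker counter to become non-zero',
    condition_fn=lambda v: v.get('SomeCounter', 0) > 0)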
|
||||
|
||||
|
||||
def apply_vschema(vschema):
|
||||
fname = os.path.join(environment.tmproot, 'vschema.json')
|
||||
with open(fname, 'w') as f:
|
||||
f.write(vschema)
|
||||
run_vtctl(['ApplyVSchema', '-vschema_file', fname])
|
||||
|
||||
|
||||
def wait_for_tablet_type(tablet_alias, expected_type, timeout=10):
|
||||
"""Waits for a given tablet's SlaveType to become the expected value.
|
||||
|
||||
|
@ -390,9 +432,9 @@ def wait_for_tablet_type(tablet_alias, expected_type, timeout=10):
|
|||
if run_vtctl_json(['GetTablet', tablet_alias])['type'] == expected_type:
|
||||
break
|
||||
timeout = wait_step(
|
||||
"%s's SlaveType to be %s" % (tablet_alias, expected_type),
|
||||
timeout
|
||||
)
|
||||
"%s's SlaveType to be %s" % (tablet_alias, expected_type),
|
||||
timeout)
|
||||
|
||||
|
||||
def wait_for_replication_pos(tablet_a, tablet_b, timeout=60.0):
|
||||
"""Waits for tablet B to catch up to the replication position of tablet A.
|
||||
|
@ -406,15 +448,17 @@ def wait_for_replication_pos(tablet_a, tablet_b, timeout=60.0):
|
|||
if mysql_flavor().position_at_least(replication_pos_b, replication_pos_a):
|
||||
break
|
||||
timeout = wait_step(
|
||||
"%s's replication position to catch up %s's; currently at: %s, waiting to catch up to: %s" % (
|
||||
tablet_b.tablet_alias, tablet_a.tablet_alias, replication_pos_b, replication_pos_a),
|
||||
timeout, sleep_time=0.1
|
||||
)
|
||||
"%s's replication position to catch up %s's; "
|
||||
'currently at: %s, waiting to catch up to: %s' % (
|
||||
tablet_b.tablet_alias, tablet_a.tablet_alias, replication_pos_b,
|
||||
replication_pos_a),
|
||||
timeout, sleep_time=0.1)
|
||||
|
||||
# Save the first running instance of vtgate. It is saved when 'start'
|
||||
# is called, and cleared when kill is called.
|
||||
vtgate = None
|
||||
|
||||
|
||||
class VtGate(object):
|
||||
"""VtGate object represents a vtgate process.
|
||||
"""
|
||||
|
@ -432,21 +476,23 @@ class VtGate(object):
|
|||
topo_impl=None, cache_ttl='1s',
|
||||
auth=False, timeout_total='4s', timeout_per_conn='2s',
|
||||
extra_args=None):
|
||||
"""Starts the process for thie vtgate instance. If no other instance has
|
||||
been started, saves it into the global vtgate variable.
|
||||
"""Starts the process for thie vtgate instance.
|
||||
|
||||
If no other instance has been started, saves it into the global
|
||||
vtgate variable.
|
||||
"""
|
||||
args = environment.binary_args('vtgate') + [
|
||||
'-port', str(self.port),
|
||||
'-cell', cell,
|
||||
'-retry-delay', '%ss' % (str(retry_delay)),
|
||||
'-retry-count', str(retry_count),
|
||||
'-log_dir', environment.vtlogroot,
|
||||
'-srv_topo_cache_ttl', cache_ttl,
|
||||
'-conn-timeout-total', timeout_total,
|
||||
'-conn-timeout-per-conn', timeout_per_conn,
|
||||
'-bsonrpc_timeout', '5s',
|
||||
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
|
||||
]
|
||||
'-port', str(self.port),
|
||||
'-cell', cell,
|
||||
'-retry-delay', '%ss' % (str(retry_delay)),
|
||||
'-retry-count', str(retry_count),
|
||||
'-log_dir', environment.vtlogroot,
|
||||
'-srv_topo_cache_ttl', cache_ttl,
|
||||
'-conn-timeout-total', timeout_total,
|
||||
'-conn-timeout-per-conn', timeout_per_conn,
|
||||
'-bsonrpc_timeout', '5s',
|
||||
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
|
||||
]
|
||||
if protocols_flavor().vtgate_protocol() == 'grpc':
|
||||
args.extend(['-grpc_port', str(self.grpc_port)])
|
||||
if protocols_flavor().service_map():
|
||||
|
@ -523,9 +569,9 @@ class VtGate(object):
|
|||
"""vtclient uses the vtclient binary to send a query to vtgate.
|
||||
"""
|
||||
args = environment.binary_args('vtclient') + [
|
||||
'-server', self.rpc_endpoint(),
|
||||
'-tablet_type', tablet_type,
|
||||
'-vtgate_protocol', protocols_flavor().vtgate_protocol()]
|
||||
'-server', self.rpc_endpoint(),
|
||||
'-tablet_type', tablet_type,
|
||||
'-vtgate_protocol', protocols_flavor().vtgate_protocol()]
|
||||
if bindvars:
|
||||
args.extend(['-bind_variables', json.dumps(bindvars)])
|
||||
if streaming:
|
||||
|
@ -581,10 +627,12 @@ class VtGate(object):
# The modes are not all equivalent, and we don't really thrive for it.
# If a client needs to rely on vtctl's command line behavior, make
# sure to use mode=utils.VTCTL_VTCTL
VTCTL_AUTO = 0
VTCTL_VTCTL = 1
VTCTL_AUTO = 0
VTCTL_VTCTL = 1
VTCTL_VTCTLCLIENT = 2
VTCTL_RPC = 3
VTCTL_RPC = 3


def run_vtctl(clargs, auto_log=False, expect_fail=False,
mode=VTCTL_AUTO, **kwargs):
if mode == VTCTL_AUTO:
@ -609,6 +657,7 @@ def run_vtctl(clargs, auto_log=False, expect_fail=False,
|
|||
|
||||
raise Exception('Unknown mode: %s', mode)
|
||||
|
||||
|
||||
def run_vtctl_vtctl(clargs, auto_log=False, expect_fail=False,
|
||||
**kwargs):
|
||||
args = environment.binary_args('vtctl') + ['-log_dir', environment.vtlogroot]
|
||||
|
@ -630,12 +679,14 @@ def run_vtctl_vtctl(clargs, auto_log=False, expect_fail=False,
|
|||
return run_fail(cmd, **kwargs)
|
||||
return run(cmd, **kwargs)
|
||||
|
||||
|
||||
# run_vtctl_json runs the provided vtctl command and returns the result
|
||||
# parsed as json
|
||||
def run_vtctl_json(clargs, auto_log=True):
|
||||
stdout, stderr = run_vtctl(clargs, trap_output=True, auto_log=auto_log)
|
||||
return json.loads(stdout)
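A short sketch of the calling styles for run_vtctl(); forcing mode=VTCTL_VTCTL pins the command-line behavior, as the comment above advises, and run_vtctl_json() parses the output. The keyspace name is the one the tests in this commit use:

# Plain invocation, letting the mode default to VTCTL_AUTO.
run_vtctl(['CreateKeyspace', 'test_keyspace'])
# Structured output: parse GetKeyspace as JSON.
keyspace_json = run_vtctl_json(['GetKeyspace', 'test_keyspace'])
# Pin the command-line client when its exact output format matters.
out, err = run_vtctl(['Validate'], mode=VTCTL_VTCTL, trap_output=True)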
|
||||
|
||||
|
||||
def get_log_level():
|
||||
if options.verbose == 2:
|
||||
return 'INFO'
|
||||
|
@ -644,14 +695,16 @@ def get_log_level():
|
|||
else:
|
||||
return 'ERROR'
|
||||
|
||||
|
||||
# vtworker helpers
|
||||
def run_vtworker(clargs, auto_log=False, expect_fail=False, **kwargs):
|
||||
"""Runs a vtworker process, returning the stdout and stderr"""
|
||||
"""Runs a vtworker process, returning the stdout and stderr."""
|
||||
cmd, _, _ = _get_vtworker_cmd(clargs, auto_log)
|
||||
if expect_fail:
|
||||
return run_fail(cmd, **kwargs)
|
||||
return run(cmd, **kwargs)
|
||||
|
||||
|
||||
def run_vtworker_bg(clargs, auto_log=False, **kwargs):
|
||||
"""Starts a background vtworker process.
|
||||
|
||||
|
@ -663,6 +716,7 @@ def run_vtworker_bg(clargs, auto_log=False, **kwargs):
|
|||
cmd, port, rpc_port = _get_vtworker_cmd(clargs, auto_log)
|
||||
return run_bg(cmd, **kwargs), port, rpc_port
|
||||
|
||||
|
||||
def _get_vtworker_cmd(clargs, auto_log=False):
|
||||
"""Assembles the command that is needed to run a vtworker.
|
||||
|
||||
|
@ -674,15 +728,15 @@ def _get_vtworker_cmd(clargs, auto_log=False):
|
|||
port = environment.reserve_ports(1)
|
||||
rpc_port = port
|
||||
args = environment.binary_args('vtworker') + [
|
||||
'-log_dir', environment.vtlogroot,
|
||||
'-min_healthy_rdonly_endpoints', '1',
|
||||
'-port', str(port),
|
||||
'-resolve_ttl', '2s',
|
||||
'-executefetch_retry_time', '1s',
|
||||
'-tablet_manager_protocol',
|
||||
protocols_flavor().tablet_manager_protocol(),
|
||||
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
|
||||
]
|
||||
'-log_dir', environment.vtlogroot,
|
||||
'-min_healthy_rdonly_endpoints', '1',
|
||||
'-port', str(port),
|
||||
'-resolve_ttl', '2s',
|
||||
'-executefetch_retry_time', '1s',
|
||||
'-tablet_manager_protocol',
|
||||
protocols_flavor().tablet_manager_protocol(),
|
||||
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
|
||||
]
|
||||
args.extend(environment.topo_server().flags())
|
||||
if protocols_flavor().service_map():
|
||||
args.extend(['-service_map',
|
||||
|
@ -697,6 +751,7 @@ def _get_vtworker_cmd(clargs, auto_log=False):
|
|||
cmd = args + clargs
|
||||
return cmd, port, rpc_port
|
||||
|
||||
|
||||
# vtworker client helpers
|
||||
def run_vtworker_client(args, rpc_port):
|
||||
"""Runs vtworkerclient to execute a command on a remote vtworker.
|
||||
|
@ -713,6 +768,7 @@ def run_vtworker_client(args, rpc_port):
|
|||
trap_output=True)
|
||||
return out, err
|
||||
|
||||
|
||||
def run_automation_server(auto_log=False):
|
||||
"""Starts a background automation_server process.
|
||||
|
||||
|
@ -721,21 +777,24 @@ def run_automation_server(auto_log=False):
|
|||
"""
|
||||
rpc_port = environment.reserve_ports(1)
|
||||
args = environment.binary_args('automation_server') + [
|
||||
'-log_dir', environment.vtlogroot,
|
||||
'-port', str(rpc_port),
|
||||
'-vtctl_client_protocol', protocols_flavor().vtctl_client_protocol(),
|
||||
'-vtworker_client_protocol', protocols_flavor().vtworker_client_protocol(),
|
||||
]
|
||||
'-log_dir', environment.vtlogroot,
|
||||
'-port', str(rpc_port),
|
||||
'-vtctl_client_protocol', protocols_flavor().vtctl_client_protocol(),
|
||||
'-vtworker_client_protocol',
|
||||
protocols_flavor().vtworker_client_protocol(),
|
||||
]
|
||||
if auto_log:
|
||||
args.append('--stderrthreshold=%s' % get_log_level())
|
||||
|
||||
return run_bg(args), rpc_port
|
||||
|
||||
|
||||
# mysql helpers
def mysql_query(uid, dbname, query):
conn = MySQLdb.Connect(user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
db=dbname)
conn = MySQLdb.Connect(
user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
db=dbname)
cursor = conn.cursor()
cursor.execute(query)
try:
|
@ -743,10 +802,12 @@ def mysql_query(uid, dbname, query):
|
|||
finally:
|
||||
conn.close()
|
||||
|
||||
|
||||
def mysql_write_query(uid, dbname, query):
|
||||
conn = MySQLdb.Connect(user='vt_dba',
|
||||
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
|
||||
db=dbname)
|
||||
conn = MySQLdb.Connect(
|
||||
user='vt_dba',
|
||||
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
|
||||
db=dbname)
|
||||
cursor = conn.cursor()
|
||||
conn.begin()
|
||||
cursor.execute(query)
|
||||
|
@ -756,9 +817,11 @@ def mysql_write_query(uid, dbname, query):
|
|||
finally:
|
||||
conn.close()
|
||||
|
||||
|
||||
def check_db_var(uid, name, value):
|
||||
conn = MySQLdb.Connect(user='vt_dba',
|
||||
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid))
|
||||
conn = MySQLdb.Connect(
|
||||
user='vt_dba',
|
||||
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid))
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("show variables like '%s'" % name)
|
||||
row = cursor.fetchone()
|
||||
|
@ -766,12 +829,15 @@ def check_db_var(uid, name, value):
|
|||
raise TestError('variable not set correctly', name, row)
|
||||
conn.close()
|
||||
|
||||
|
||||
def check_db_read_only(uid):
|
||||
return check_db_var(uid, 'read_only', 'ON')
|
||||
|
||||
|
||||
def check_db_read_write(uid):
|
||||
return check_db_var(uid, 'read_only', 'OFF')
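An illustrative sketch of the MySQL helpers above; uid and dbname stand for a tablet uid and its database name, and the query result is whatever the cursor fetched:

# uid and dbname are placeholders for the tablet uid and its database name.
rows = mysql_query(uid, dbname, 'select 1')
# Assert the instance is writable (read_only = OFF).
check_db_read_write(uid)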
|
||||
|
||||
|
||||
def wait_db_read_only(uid):
|
||||
for x in xrange(3):
|
||||
try:
|
||||
|
@ -782,6 +848,7 @@ def wait_db_read_only(uid):
|
|||
time.sleep(1.0)
|
||||
raise e
|
||||
|
||||
|
||||
def check_srv_keyspace(cell, keyspace, expected, keyspace_id_type='uint64'):
|
||||
ks = run_vtctl_json(['GetSrvKeyspace', cell, keyspace])
|
||||
result = ''
|
||||
|
@ -789,25 +856,31 @@ def check_srv_keyspace(cell, keyspace, expected, keyspace_id_type='uint64'):
|
|||
result += 'Partitions(%s):' % tablet_type
|
||||
partition = ks['Partitions'][tablet_type]
|
||||
for shard in partition['ShardReferences']:
|
||||
result = result + ' %s-%s' % (shard['KeyRange']['Start'],
|
||||
shard['KeyRange']['End'])
|
||||
result += ' %s-%s' % (shard['KeyRange']['Start'],
|
||||
shard['KeyRange']['End'])
|
||||
result += '\n'
|
||||
logging.debug('Cell %s keyspace %s has data:\n%s', cell, keyspace, result)
|
||||
if expected != result:
|
||||
raise Exception('Mismatch in srv keyspace for cell %s keyspace %s, expected:\n%s\ngot:\n%s' % (
|
||||
cell, keyspace, expected, result))
|
||||
raise Exception(
|
||||
'Mismatch in srv keyspace for cell %s keyspace %s, expected:\n%'
|
||||
's\ngot:\n%s' % (
|
||||
cell, keyspace, expected, result))
|
||||
if 'keyspace_id' != ks.get('ShardingColumnName'):
|
||||
raise Exception('Got wrong ShardingColumnName in SrvKeyspace: %s' %
|
||||
str(ks))
|
||||
str(ks))
|
||||
if keyspace_id_type != ks.get('ShardingColumnType'):
|
||||
raise Exception('Got wrong ShardingColumnType in SrvKeyspace: %s' %
|
||||
str(ks))
|
||||
str(ks))
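A usage sketch matching the expected-string format that check_srv_keyspace() builds above; the exact Partitions lines depend on which tablet types are serving, so this two-shard layout is an assumption borrowed from the tests later in this commit:

check_srv_keyspace('test_nj', 'test_keyspace',
                   'Partitions(master): -80 80-\n'
                   'Partitions(rdonly): -80 80-\n'
                   'Partitions(replica): -80 80-\n')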
|
||||
|
||||
def check_shard_query_service(testcase, shard_name, tablet_type, expected_state):
|
||||
|
||||
def check_shard_query_service(
|
||||
testcase, shard_name, tablet_type, expected_state):
|
||||
"""Makes assertions about the state of DisableQueryService in the shard record's TabletControlMap."""
|
||||
# We assume that query service should be enabled unless DisableQueryService is explicitly True
|
||||
# We assume that query service should be enabled unless
|
||||
# DisableQueryService is explicitly True
|
||||
query_service_enabled = True
|
||||
tablet_controls = run_vtctl_json(['GetShard', shard_name]).get('tablet_controls')
|
||||
tablet_controls = run_vtctl_json(
|
||||
['GetShard', shard_name]).get('tablet_controls')
|
||||
if tablet_controls:
|
||||
for tc in tablet_controls:
|
||||
if tc['tablet_type'] == tablet_type:
|
||||
|
@ -815,19 +888,27 @@ def check_shard_query_service(testcase, shard_name, tablet_type, expected_state)
|
|||
query_service_enabled = False
|
||||
|
||||
testcase.assertEqual(
|
||||
query_service_enabled,
|
||||
expected_state,
|
||||
'shard %s does not have the correct query service state: got %s but expected %s' % (shard_name, query_service_enabled, expected_state)
|
||||
query_service_enabled,
|
||||
expected_state,
|
||||
'shard %s does not have the correct query service state: '
|
||||
'got %s but expected %s' %
|
||||
(shard_name, query_service_enabled, expected_state)
|
||||
)
|
||||
|
||||
def check_shard_query_services(testcase, shard_names, tablet_type, expected_state):
|
||||
for shard_name in shard_names:
|
||||
check_shard_query_service(testcase, shard_name, tablet_type, expected_state)
|
||||
|
||||
def check_tablet_query_service(testcase, tablet, serving, tablet_control_disabled):
|
||||
"""check_tablet_query_service will check that the query service is enabled
|
||||
or disabled on the tablet. It will also check if the tablet control
|
||||
status is the reason for being enabled / disabled.
|
||||
def check_shard_query_services(
|
||||
testcase, shard_names, tablet_type, expected_state):
|
||||
for shard_name in shard_names:
|
||||
check_shard_query_service(
|
||||
testcase, shard_name, tablet_type, expected_state)
|
||||
|
||||
|
||||
def check_tablet_query_service(
|
||||
testcase, tablet, serving, tablet_control_disabled):
|
||||
"""Check that the query service is enabled or disabled on the tablet.
|
||||
|
||||
It will also check if the tablet control status is the reason for
|
||||
being enabled / disabled.
|
||||
|
||||
It will also run a remote RunHealthCheck to be sure it doesn't change
|
||||
the serving state.
|
||||
|
@ -837,7 +918,10 @@ def check_tablet_query_service(testcase, tablet, serving, tablet_control_disable
|
|||
expected_state = 'SERVING'
|
||||
else:
|
||||
expected_state = 'NOT_SERVING'
|
||||
testcase.assertEqual(tablet_vars['TabletStateName'], expected_state, 'tablet %s is not in the right serving state: got %s expected %s' % (tablet.tablet_alias, tablet_vars['TabletStateName'], expected_state))
|
||||
testcase.assertEqual(
|
||||
tablet_vars['TabletStateName'], expected_state,
|
||||
'tablet %s is not in the right serving state: got %s expected %s' %
|
||||
(tablet.tablet_alias, tablet_vars['TabletStateName'], expected_state))
|
||||
|
||||
status = tablet.get_status()
|
||||
if tablet_control_disabled:
|
||||
|
@ -847,19 +931,30 @@ def check_tablet_query_service(testcase, tablet, serving, tablet_control_disable
|
|||
|
||||
if tablet.tablet_type == 'rdonly':
|
||||
run_vtctl(['RunHealthCheck', tablet.tablet_alias, 'rdonly'],
|
||||
auto_log=True)
|
||||
auto_log=True)
|
||||
|
||||
tablet_vars = get_vars(tablet.port)
|
||||
testcase.assertEqual(tablet_vars['TabletStateName'], expected_state, 'tablet %s is not in the right serving state after health check: got %s expected %s' % (tablet.tablet_alias, tablet_vars['TabletStateName'], expected_state))
|
||||
testcase.assertEqual(
|
||||
tablet_vars['TabletStateName'], expected_state,
|
||||
'tablet %s is not in the right serving state after health check: '
|
||||
'got %s expected %s' %
|
||||
(tablet.tablet_alias, tablet_vars['TabletStateName'], expected_state))
|
||||
|
||||
def check_tablet_query_services(testcase, tablets, serving, tablet_control_disabled):
|
||||
|
||||
def check_tablet_query_services(
|
||||
testcase, tablets, serving, tablet_control_disabled):
|
||||
for tablet in tablets:
|
||||
check_tablet_query_service(testcase, tablet, serving, tablet_control_disabled)
|
||||
check_tablet_query_service(
|
||||
testcase, tablet, serving, tablet_control_disabled)
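An illustrative call of the helpers above from inside a test method; the tablet list is an assumption taken from the vertical split test in this commit:

# Hypothetical assertion that a group of tablets is serving, with no
# tablet control entry involved.
check_tablet_query_services(self, [source_replica, source_rdonly1],
                            serving=True, tablet_control_disabled=False)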
def get_status(port):
|
||||
return urllib2.urlopen('http://localhost:%d%s' % (port, environment.status_url)).read()
|
||||
return urllib2.urlopen(
|
||||
'http://localhost:%d%s' % (port, environment.status_url)).read()
|
||||
|
||||
def curl(url, request=None, data=None, background=False, retry_timeout=0, **kwargs):
|
||||
|
||||
def curl(url, request=None, data=None, background=False, retry_timeout=0,
|
||||
**kwargs):
|
||||
args = [environment.curl_bin, '--silent', '--no-buffer', '--location']
|
||||
if not background:
|
||||
args.append('--show-error')
|
||||
|
@ -877,10 +972,12 @@ def curl(url, request=None, data=None, background=False, retry_timeout=0, **kwar
|
|||
try:
|
||||
return run(args, trap_output=True, **kwargs)
|
||||
except TestError as e:
|
||||
retry_timeout = wait_step('cmd: %s, error: %s' % (str(args), str(e)), retry_timeout)
|
||||
retry_timeout = wait_step(
|
||||
'cmd: %s, error: %s' % (str(args), str(e)), retry_timeout)
|
||||
|
||||
return run(args, trap_output=True, **kwargs)
|
||||
|
||||
|
||||
class VtctldError(Exception): pass
|
||||
|
||||
# save the first running instance, and an RPC connection to it,
|
||||
|
@ -888,43 +985,47 @@ class VtctldError(Exception): pass
|
|||
vtctld = None
|
||||
vtctld_connection = None
|
||||
|
||||
|
||||
class Vtctld(object):
|
||||
|
||||
def __init__(self):
|
||||
self.port = environment.reserve_ports(1)
|
||||
self.schema_change_dir = os.path.join(environment.tmproot, 'schema_change_test')
|
||||
self.schema_change_dir = os.path.join(
|
||||
environment.tmproot, 'schema_change_test')
|
||||
if protocols_flavor().vtctl_client_protocol() == 'grpc':
|
||||
self.grpc_port = environment.reserve_ports(1)
|
||||
|
||||
def dbtopo(self):
|
||||
data = json.load(urllib2.urlopen('http://localhost:%d/dbtopo?format=json' %
|
||||
self.port))
|
||||
data = json.load(
|
||||
urllib2.urlopen('http://localhost:%d/dbtopo?format=json' % self.port))
|
||||
if data['Error']:
|
||||
raise VtctldError(data)
|
||||
return data['Topology']
|
||||
|
||||
def serving_graph(self):
|
||||
data = json.load(urllib2.urlopen('http://localhost:%d/serving_graph/test_nj?format=json' % self.port))
|
||||
data = json.load(
|
||||
urllib2.urlopen(
|
||||
'http://localhost:%d/serving_graph/test_nj?format=json' %
|
||||
self.port))
|
||||
if data['Errors']:
|
||||
raise VtctldError(data['Errors'])
|
||||
return data['Keyspaces']
|
||||
|
||||
def start(self):
|
||||
args = environment.binary_args('vtctld') + [
|
||||
'-debug',
|
||||
'-web_dir', environment.vttop + '/web/vtctld',
|
||||
'--templates', environment.vttop + '/go/cmd/vtctld/templates',
|
||||
'--log_dir', environment.vtlogroot,
|
||||
'--port', str(self.port),
|
||||
'--schema_change_dir', self.schema_change_dir,
|
||||
'--schema_change_controller', 'local',
|
||||
'--schema_change_check_interval', '1',
|
||||
'-tablet_manager_protocol',
|
||||
protocols_flavor().tablet_manager_protocol(),
|
||||
'-vtgate_protocol', protocols_flavor().vtgate_protocol(),
|
||||
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
|
||||
] + \
|
||||
environment.topo_server().flags()
|
||||
'-debug',
|
||||
'-web_dir', environment.vttop + '/web/vtctld',
|
||||
'--templates', environment.vttop + '/go/cmd/vtctld/templates',
|
||||
'--log_dir', environment.vtlogroot,
|
||||
'--port', str(self.port),
|
||||
'--schema_change_dir', self.schema_change_dir,
|
||||
'--schema_change_controller', 'local',
|
||||
'--schema_change_check_interval', '1',
|
||||
'-tablet_manager_protocol',
|
||||
protocols_flavor().tablet_manager_protocol(),
|
||||
'-vtgate_protocol', protocols_flavor().vtgate_protocol(),
|
||||
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
|
||||
] + environment.topo_server().flags()
|
||||
if protocols_flavor().service_map():
|
||||
args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
|
||||
if protocols_flavor().vtctl_client_protocol() == 'grpc':
|
||||
|
@ -978,11 +1079,11 @@ class Vtctld(object):
|
|||
|
||||
def vtctl_client(self, args):
|
||||
if options.verbose == 2:
|
||||
log_level='INFO'
|
||||
log_level = 'INFO'
|
||||
elif options.verbose == 1:
|
||||
log_level='WARNING'
|
||||
log_level = 'WARNING'
|
||||
else:
|
||||
log_level='ERROR'
|
||||
log_level = 'ERROR'
|
||||
|
||||
protocol, endpoint = self.rpc_endpoint()
|
||||
out, err = run(environment.binary_args('vtctlclient') +
@ -29,6 +29,7 @@ destination_replica = tablet.Tablet()
|
|||
destination_rdonly1 = tablet.Tablet()
|
||||
destination_rdonly2 = tablet.Tablet()
|
||||
|
||||
|
||||
def setUpModule():
|
||||
try:
|
||||
environment.topo_server().setup()
|
||||
|
@ -58,14 +59,14 @@ def tearDownModule():
|
|||
if utils.vtgate:
|
||||
utils.vtgate.kill()
|
||||
teardown_procs = [
|
||||
source_master.teardown_mysql(),
|
||||
source_replica.teardown_mysql(),
|
||||
source_rdonly1.teardown_mysql(),
|
||||
source_rdonly2.teardown_mysql(),
|
||||
destination_master.teardown_mysql(),
|
||||
destination_replica.teardown_mysql(),
|
||||
destination_rdonly1.teardown_mysql(),
|
||||
destination_rdonly2.teardown_mysql(),
|
||||
source_master.teardown_mysql(),
|
||||
source_replica.teardown_mysql(),
|
||||
source_rdonly1.teardown_mysql(),
|
||||
source_rdonly2.teardown_mysql(),
|
||||
destination_master.teardown_mysql(),
|
||||
destination_replica.teardown_mysql(),
|
||||
destination_rdonly1.teardown_mysql(),
|
||||
destination_rdonly2.teardown_mysql(),
|
||||
]
|
||||
utils.wait_procs(teardown_procs, raise_on_error=False)
|
||||
|
||||
|
@ -82,7 +83,9 @@ def tearDownModule():
|
|||
destination_rdonly1.remove_tree()
|
||||
destination_rdonly2.remove_tree()
|
||||
|
||||
|
||||
class TestVerticalSplit(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.insert_index = 0
|
||||
|
||||
|
@ -93,7 +96,7 @@ msg varchar(64),
|
|||
primary key (id),
|
||||
index by_msg (msg)
|
||||
) Engine=InnoDB'''
|
||||
create_view_template = '''create view %s(id, msg) as select id, msg from %s'''
|
||||
create_view_template = 'create view %s(id, msg) as select id, msg from %s'
|
||||
|
||||
for t in ['moving1', 'moving2', 'staying1', 'staying2']:
|
||||
utils.run_vtctl(['ApplySchema',
|
||||
|
@ -114,7 +117,10 @@ index by_msg (msg)
|
|||
def _insert_values(self, table, count):
|
||||
result = self.insert_index
|
||||
conn = self._vtdb_conn()
|
||||
cursor = conn.cursor('source_keyspace', 'master', keyranges=[keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)], writable=True)
|
||||
cursor = conn.cursor(
|
||||
'source_keyspace', 'master',
|
||||
keyranges=[keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)],
|
||||
writable=True)
|
||||
for i in xrange(count):
|
||||
conn.begin()
|
||||
cursor.execute("insert into %s (id, msg) values(%d, 'value %d')" % (
|
||||
|
@ -125,9 +131,12 @@ index by_msg (msg)
|
|||
return result
|
||||
|
||||
def _check_values(self, tablet, dbname, table, first, count):
|
||||
logging.debug('Checking %d values from %s/%s starting at %d', count, dbname,
|
||||
table, first)
|
||||
rows = tablet.mquery(dbname, 'select id, msg from %s where id>=%d order by id limit %d' % (table, first, count))
|
||||
logging.debug(
|
||||
'Checking %d values from %s/%s starting at %d', count, dbname,
|
||||
table, first)
|
||||
rows = tablet.mquery(
|
||||
dbname, 'select id, msg from %s where id>=%d order by id limit %d' %
|
||||
(table, first, count))
|
||||
self.assertEqual(count, len(rows), 'got wrong number of rows: %d != %d' %
|
||||
(len(rows), count))
|
||||
for i in xrange(count):
|
||||
|
@ -161,9 +170,11 @@ index by_msg (msg)
|
|||
result += 'ServedFrom(%s): %s\n' % (served_from,
|
||||
ks['ServedFrom'][served_from])
|
||||
logging.debug('Cell %s keyspace %s has data:\n%s', cell, keyspace, result)
|
||||
self.assertEqual(expected, result,
|
||||
'Mismatch in srv keyspace for cell %s keyspace %s, expected:\n%s\ngot:\n%s' % (
|
||||
cell, keyspace, expected, result))
|
||||
self.assertEqual(
|
||||
expected, result,
|
||||
'Mismatch in srv keyspace for cell %s keyspace %s, expected:\n'
|
||||
'%s\ngot:\n%s' % (
|
||||
cell, keyspace, expected, result))
|
||||
self.assertEqual('', ks.get('ShardingColumnName'),
|
||||
'Got wrong ShardingColumnName in SrvKeyspace: %s' %
|
||||
str(ks))
|
||||
|
@ -189,14 +200,18 @@ index by_msg (msg)
|
|||
tablet.tablet_alias,
|
||||
'select count(1) from %s' % t],
|
||||
expect_fail=True)
|
||||
self.assertIn('retry: Query disallowed due to rule: enforce blacklisted tables', stderr)
|
||||
self.assertIn(
|
||||
'retry: Query disallowed due to rule: enforce blacklisted tables',
|
||||
stderr)
|
||||
else:
|
||||
# table is not blacklisted, should just work
|
||||
qr = tablet.execute('select count(1) from %s' % t)
|
||||
logging.debug('Got %s rows from table %s on tablet %s',
|
||||
qr['Rows'][0][0], t, tablet.tablet_alias)
|
||||
|
||||
def _check_client_conn_redirection(self, source_ks, destination_ks, db_types, servedfrom_db_types, moved_tables=None):
|
||||
def _check_client_conn_redirection(
|
||||
self, source_ks, destination_ks, db_types, servedfrom_db_types,
|
||||
moved_tables=None):
|
||||
# check that the ServedFrom indirection worked correctly.
|
||||
if moved_tables is None:
|
||||
moved_tables = []
|
||||
|
@ -204,29 +219,51 @@ index by_msg (msg)
|
|||
for db_type in servedfrom_db_types:
|
||||
for tbl in moved_tables:
|
||||
try:
|
||||
rows = conn._execute("select * from %s" % tbl, {}, destination_ks, db_type, keyranges=[keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)])
|
||||
logging.debug("Select on %s.%s returned %d rows" % (db_type, tbl, len(rows)))
|
||||
rows = conn._execute(
|
||||
'select * from %s' % tbl, {}, destination_ks, db_type,
|
||||
keyranges=[keyrange.KeyRange(
|
||||
keyrange_constants.NON_PARTIAL_KEYRANGE)])
|
||||
logging.debug(
|
||||
'Select on %s.%s returned %d rows', db_type, tbl, len(rows))
|
||||
except Exception, e:
|
||||
self.fail("Execute failed w/ exception %s" % str(e))
|
||||
self.fail('Execute failed w/ exception %s' % str(e))
|
||||
|
||||
def _check_stats(self):
|
||||
v = utils.vtgate.get_vars()
|
||||
self.assertEqual(v['VttabletCall']['Histograms']['Execute.source_keyspace.0.replica']['Count'], 2, "unexpected value for VttabletCall(Execute.source_keyspace.0.replica) inside %s" % str(v))
|
||||
self.assertEqual(v['VtgateApi']['Histograms']['ExecuteKeyRanges.destination_keyspace.master']['Count'], 6, "unexpected value for VtgateApi(ExecuteKeyRanges.destination_keyspace.master) inside %s" % str(v))
|
||||
self.assertEqual(len(v['VtgateApiErrorCounts']), 0, "unexpected errors for VtgateApiErrorCounts inside %s" % str(v))
|
||||
self.assertEqual(
|
||||
v['ResilientSrvTopoServerEndPointsReturnedCount']['test_nj.source_keyspace.0.master'] /
|
||||
v['ResilientSrvTopoServerEndPointQueryCount']['test_nj.source_keyspace.0.master'],
|
||||
1, "unexpected EndPointsReturnedCount inside %s" % str(v))
|
||||
v['VttabletCall']['Histograms']['Execute.source_keyspace.0.replica'][
|
||||
'Count'],
|
||||
2
|
||||
, 'unexpected value for VttabletCall('
|
||||
'Execute.source_keyspace.0.replica) inside %s' % str(v))
|
||||
self.assertEqual(
|
||||
v['VtgateApi']['Histograms'][
|
||||
'ExecuteKeyRanges.destination_keyspace.master']['Count'],
|
||||
6,
|
||||
'unexpected value for VtgateApi('
|
||||
'ExecuteKeyRanges.destination_keyspace.master) inside %s' % str(v))
|
||||
self.assertEqual(
|
||||
len(v['VtgateApiErrorCounts']), 0,
|
||||
'unexpected errors for VtgateApiErrorCounts inside %s' % str(v))
|
||||
self.assertEqual(
|
||||
v['ResilientSrvTopoServerEndPointsReturnedCount'][
|
||||
'test_nj.source_keyspace.0.master'] /
|
||||
v['ResilientSrvTopoServerEndPointQueryCount'][
|
||||
'test_nj.source_keyspace.0.master'],
|
||||
1,
|
||||
'unexpected EndPointsReturnedCount inside %s' % str(v))
|
||||
self.assertNotIn(
|
||||
'test_nj.source_keyspace.0.master', v['ResilientSrvTopoServerEndPointDegradedResultCount'],
|
||||
"unexpected EndPointDegradedResultCount inside %s" % str(v))
|
||||
'test_nj.source_keyspace.0.master',
|
||||
v['ResilientSrvTopoServerEndPointDegradedResultCount'],
|
||||
'unexpected EndPointDegradedResultCount inside %s' % str(v))
|
||||
|
||||
def test_vertical_split(self):
|
||||
utils.run_vtctl(['CreateKeyspace', 'source_keyspace'])
|
||||
utils.run_vtctl(['CreateKeyspace',
|
||||
'--served_from', 'master:source_keyspace,replica:source_keyspace,rdonly:source_keyspace',
|
||||
'destination_keyspace'])
|
||||
utils.run_vtctl(
|
||||
['CreateKeyspace', '--served_from',
|
||||
'master:source_keyspace,replica:source_keyspace,rdonly:'
|
||||
'source_keyspace',
|
||||
'destination_keyspace'])
|
||||
source_master.init_tablet('master', 'source_keyspace', '0')
|
||||
source_replica.init_tablet('replica', 'source_keyspace', '0')
|
||||
source_rdonly1.init_tablet('rdonly', 'source_keyspace', '0')
|
||||
|
@ -353,7 +390,9 @@ index by_msg (msg)
|
|||
destination_master_status = destination_master.get_status()
|
||||
self.assertIn('Binlog player state: Running', destination_master_status)
|
||||
self.assertIn('moving.*', destination_master_status)
|
||||
self.assertIn('<td><b>All</b>: 1000<br><b>Query</b>: 700<br><b>Transaction</b>: 300<br></td>', destination_master_status)
|
||||
self.assertIn(
|
||||
'<td><b>All</b>: 1000<br><b>Query</b>: 700<br>'
|
||||
'<b>Transaction</b>: 300<br></td>', destination_master_status)
|
||||
self.assertIn('</html>', destination_master_status)
|
||||
|
||||
# check query service is off on destination master, as filtered
|
||||
|
@ -380,7 +419,8 @@ index by_msg (msg)
|
|||
|
||||
# migrate test_nj only, using command line manual fix command,
|
||||
# and restore it back.
|
||||
keyspace_json = utils.run_vtctl_json(['GetKeyspace', 'destination_keyspace'])
|
||||
keyspace_json = utils.run_vtctl_json(
|
||||
['GetKeyspace', 'destination_keyspace'])
|
||||
found = False
|
||||
for ksf in keyspace_json['served_froms']:
|
||||
if ksf['tablet_type'] == 4:
|
||||
|
@ -390,7 +430,8 @@ index by_msg (msg)
|
|||
utils.run_vtctl(['SetKeyspaceServedFrom', '-source=source_keyspace',
|
||||
'-remove', '-cells=test_nj', 'destination_keyspace',
|
||||
'rdonly'], auto_log=True)
|
||||
keyspace_json = utils.run_vtctl_json(['GetKeyspace', 'destination_keyspace'])
|
||||
keyspace_json = utils.run_vtctl_json(
|
||||
['GetKeyspace', 'destination_keyspace'])
|
||||
found = False
|
||||
for ksf in keyspace_json['served_froms']:
|
||||
if ksf['tablet_type'] == 4:
|
||||
|
@ -399,7 +440,8 @@ index by_msg (msg)
|
|||
utils.run_vtctl(['SetKeyspaceServedFrom', '-source=source_keyspace',
|
||||
'destination_keyspace', 'rdonly'],
|
||||
auto_log=True)
|
||||
keyspace_json = utils.run_vtctl_json(['GetKeyspace', 'destination_keyspace'])
|
||||
keyspace_json = utils.run_vtctl_json(
|
||||
['GetKeyspace', 'destination_keyspace'])
|
||||
found = False
|
||||
for ksf in keyspace_json['served_froms']:
|
||||
if ksf['tablet_type'] == 4:
|
||||
|
@ -428,7 +470,9 @@ index by_msg (msg)
|
|||
self._check_blacklisted_tables(source_replica, ['moving.*', 'view1'])
|
||||
self._check_blacklisted_tables(source_rdonly1, ['moving.*', 'view1'])
|
||||
self._check_blacklisted_tables(source_rdonly2, ['moving.*', 'view1'])
|
||||
self._check_client_conn_redirection('source_keyspace', 'destination_keyspace', ['replica', 'rdonly'], ['master'], ['moving1', 'moving2'])
|
||||
self._check_client_conn_redirection(
|
||||
'source_keyspace', 'destination_keyspace', ['replica', 'rdonly'],
|
||||
['master'], ['moving1', 'moving2'])
|
||||
|
||||
# move replica back and forth
|
||||
utils.run_vtctl(['MigrateServedFrom', '-reverse',
|
||||
@ -14,11 +14,11 @@ from zk import zkocc
from vtctl import vtctl_client


# range "" - 80
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_spare = tablet.Tablet()
# range 80 - ""
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
# not assigned
@ -62,14 +62,15 @@ class TestVtctld(unittest.TestCase):
|
|||
@classmethod
|
||||
def setUpClass(klass):
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
utils.run_vtctl(['CreateKeyspace',
|
||||
'--served_from', 'master:test_keyspace,replica:test_keyspace,rdonly:test_keyspace',
|
||||
'redirected_keyspace'])
|
||||
utils.run_vtctl(
|
||||
['CreateKeyspace', '--served_from',
|
||||
'master:test_keyspace,replica:test_keyspace,rdonly:test_keyspace',
|
||||
'redirected_keyspace'])
|
||||
|
||||
shard_0_master.init_tablet( 'master', 'test_keyspace', '-80')
|
||||
shard_0_replica.init_tablet('spare', 'test_keyspace', '-80')
|
||||
shard_0_spare.init_tablet( 'spare', 'test_keyspace', '-80')
|
||||
shard_1_master.init_tablet( 'master', 'test_keyspace', '80-')
|
||||
shard_0_master.init_tablet('master', 'test_keyspace', '-80')
|
||||
shard_0_replica.init_tablet('spare', 'test_keyspace', '-80')
|
||||
shard_0_spare.init_tablet('spare', 'test_keyspace', '-80')
|
||||
shard_1_master.init_tablet('master', 'test_keyspace', '80-')
|
||||
shard_1_replica.init_tablet('replica', 'test_keyspace', '80-')
|
||||
idle.init_tablet('idle')
|
||||
scrap.init_tablet('idle')
|
||||
|
@ -113,7 +114,8 @@ class TestVtctld(unittest.TestCase):
|
|||
utils.validate_topology()
|
||||
|
||||
# start a vtgate server too
|
||||
utils.VtGate().start(cache_ttl='0s', extra_args=utils.vtctld.process_args())
|
||||
utils.VtGate(
|
||||
).start(cache_ttl='0s', extra_args=utils.vtctld.process_args())
|
||||
|
||||
def setUp(self):
|
||||
self.data = utils.vtctld.dbtopo()
|
||||
|
@ -150,20 +152,22 @@ class TestVtctld(unittest.TestCase):
|
|||
self._check_all_tablets(out)
|
||||
|
||||
def test_assigned(self):
|
||||
logging.debug("test_assigned: %s", str(self.data))
|
||||
self.assertItemsEqual(self.data["Assigned"].keys(), ["test_keyspace"])
|
||||
s0 = self.data["Assigned"]["test_keyspace"]['ShardNodes'][0]
|
||||
self.assertItemsEqual(s0['Name'], "-80")
|
||||
s1 = self.data["Assigned"]["test_keyspace"]['ShardNodes'][1]
|
||||
self.assertItemsEqual(s1['Name'], "80-")
|
||||
logging.debug('test_assigned: %s', str(self.data))
|
||||
self.assertItemsEqual(self.data['Assigned'].keys(), ['test_keyspace'])
|
||||
s0 = self.data['Assigned']['test_keyspace']['ShardNodes'][0]
|
||||
self.assertItemsEqual(s0['Name'], '-80')
|
||||
s1 = self.data['Assigned']['test_keyspace']['ShardNodes'][1]
|
||||
self.assertItemsEqual(s1['Name'], '80-')
|
||||
|
||||
def test_not_assigned(self):
|
||||
self.assertEqual(len(self.data["Idle"]), 1)
|
||||
self.assertEqual(len(self.data["Scrap"]), 1)
|
||||
self.assertEqual(len(self.data['Idle']), 1)
|
||||
self.assertEqual(len(self.data['Scrap']), 1)
|
||||
|
||||
def test_partial(self):
|
||||
utils.pause("You can now run a browser and connect to http://%s:%d to manually check topology" % (socket.getfqdn(), utils.vtctld.port))
|
||||
self.assertEqual(self.data["Partial"], True)
|
||||
utils.pause(
|
||||
'You can now run a browser and connect to http://%s:%d to '
|
||||
'manually check topology' % (socket.getfqdn(), utils.vtctld.port))
|
||||
self.assertEqual(self.data['Partial'], True)
|
||||
|
||||
def test_explorer_redirects(self):
|
||||
if environment.topo_server().flavor() != 'zookeeper':
|
||||
|
@ -172,30 +176,52 @@ class TestVtctld(unittest.TestCase):
|
|||
return
|
||||
|
||||
base = 'http://localhost:%d' % utils.vtctld.port
|
||||
self.assertEqual(urllib2.urlopen(base + '/explorers/redirect?type=keyspace&explorer=zk&keyspace=test_keyspace').geturl(),
|
||||
base + '/zk/global/vt/keyspaces/test_keyspace')
|
||||
self.assertEqual(urllib2.urlopen(base + '/explorers/redirect?type=shard&explorer=zk&keyspace=test_keyspace&shard=-80').geturl(),
|
||||
base + '/zk/global/vt/keyspaces/test_keyspace/shards/-80')
|
||||
self.assertEqual(urllib2.urlopen(base + '/explorers/redirect?type=tablet&explorer=zk&alias=%s' % shard_0_replica.tablet_alias).geturl(),
|
||||
base + shard_0_replica.zk_tablet_path)
|
||||
self.assertEqual(
|
||||
urllib2.urlopen(
|
||||
base + '/explorers/redirect?'
|
||||
'type=keyspace&explorer=zk&keyspace=test_keyspace').geturl(),
|
||||
base + '/zk/global/vt/keyspaces/test_keyspace')
|
||||
self.assertEqual(
|
||||
urllib2.urlopen(
|
||||
base + '/explorers/redirect?type=shard&explorer=zk&'
|
||||
'keyspace=test_keyspace&shard=-80').geturl(),
|
||||
base + '/zk/global/vt/keyspaces/test_keyspace/shards/-80')
|
||||
self.assertEqual(
|
||||
urllib2.urlopen(
|
||||
base + '/explorers/redirect?type=tablet&explorer=zk&alias=%s' %
|
||||
shard_0_replica.tablet_alias).geturl(),
|
||||
base + shard_0_replica.zk_tablet_path)
|
||||
|
||||
self.assertEqual(urllib2.urlopen(base + '/explorers/redirect?type=srv_keyspace&explorer=zk&keyspace=test_keyspace&cell=test_nj').geturl(),
|
||||
base + '/zk/test_nj/vt/ns/test_keyspace')
|
||||
self.assertEqual(urllib2.urlopen(base + '/explorers/redirect?type=srv_shard&explorer=zk&keyspace=test_keyspace&shard=-80&cell=test_nj').geturl(),
|
||||
base + '/zk/test_nj/vt/ns/test_keyspace/-80')
|
||||
self.assertEqual(urllib2.urlopen(base + '/explorers/redirect?type=srv_type&explorer=zk&keyspace=test_keyspace&shard=-80&tablet_type=replica&cell=test_nj').geturl(),
|
||||
base + '/zk/test_nj/vt/ns/test_keyspace/-80/replica')
|
||||
self.assertEqual(
|
||||
urllib2.urlopen(
|
||||
base + '/explorers/redirect?type=srv_keyspace&explorer=zk&'
|
||||
'keyspace=test_keyspace&cell=test_nj').geturl(),
|
||||
base + '/zk/test_nj/vt/ns/test_keyspace')
|
||||
self.assertEqual(
|
||||
urllib2.urlopen(
|
||||
base + '/explorers/redirect?type=srv_shard&explorer=zk&'
|
||||
'keyspace=test_keyspace&shard=-80&cell=test_nj').geturl(),
|
||||
base + '/zk/test_nj/vt/ns/test_keyspace/-80')
|
||||
self.assertEqual(
|
||||
urllib2.urlopen(
|
||||
base + '/explorers/redirect?type=srv_type&explorer=zk&'
|
||||
'keyspace=test_keyspace&shard=-80&tablet_type=replica&'
|
||||
'cell=test_nj').geturl(),
|
||||
base + '/zk/test_nj/vt/ns/test_keyspace/-80/replica')
|
||||
|
||||
self.assertEqual(urllib2.urlopen(base + '/explorers/redirect?type=replication&explorer=zk&keyspace=test_keyspace&shard=-80&cell=test_nj').geturl(),
|
||||
base + '/zk/test_nj/vt/replication/test_keyspace/-80')
|
||||
self.assertEqual(
|
||||
urllib2.urlopen(
|
||||
base + '/explorers/redirect?type=replication&explorer=zk&'
|
||||
'keyspace=test_keyspace&shard=-80&cell=test_nj').geturl(),
|
||||
base + '/zk/test_nj/vt/replication/test_keyspace/-80')
|
||||
|
||||
def test_serving_graph(self):
|
||||
self.assertItemsEqual(sorted(self.serving_data.keys()),
|
||||
["redirected_keyspace", "test_keyspace"])
|
||||
s0 = self.serving_data["test_keyspace"]['ShardNodes'][0]
|
||||
self.assertItemsEqual(s0['Name'], "-80")
|
||||
s1 = self.serving_data["test_keyspace"]['ShardNodes'][1]
|
||||
self.assertItemsEqual(s1['Name'], "80-")
|
||||
['redirected_keyspace', 'test_keyspace'])
|
||||
s0 = self.serving_data['test_keyspace']['ShardNodes'][0]
|
||||
self.assertItemsEqual(s0['Name'], '-80')
|
||||
s1 = self.serving_data['test_keyspace']['ShardNodes'][1]
|
||||
self.assertItemsEqual(s1['Name'], '80-')
|
||||
types = []
|
||||
for tn in s0['TabletNodes']:
|
||||
tt = tn['TabletType']
|
||||
|
@ -205,30 +231,36 @@ class TestVtctld(unittest.TestCase):
|
|||
self.assertItemsEqual(sorted(types), [
|
||||
tablet.Tablet.tablet_type_value['MASTER'],
|
||||
tablet.Tablet.tablet_type_value['REPLICA']])
|
||||
self.assertEqual(self.serving_data["redirected_keyspace"]['ServedFrom']['master'],
|
||||
'test_keyspace')
|
||||
self.assertEqual(
|
||||
self.serving_data['redirected_keyspace']['ServedFrom']['master'],
|
||||
'test_keyspace')
|
||||
|
||||
def test_tablet_status(self):
|
||||
# the vttablet that has a health check has a bit more, so using it
|
||||
shard_0_replica_status = shard_0_replica.get_status()
|
||||
self.assertTrue(re.search(r'Polling health information from.+MySQLReplicationLag', shard_0_replica_status))
|
||||
self.assertTrue(
|
||||
re.search(r'Polling health information from.+MySQLReplicationLag',
|
||||
shard_0_replica_status))
|
||||
self.assertIn('Alias: <a href="http://localhost:', shard_0_replica_status)
|
||||
self.assertIn('</html>', shard_0_replica_status)
|
||||
|
||||
def test_vtgate(self):
|
||||
# do a few vtgate topology queries to prime the cache
|
||||
vtgate_client = zkocc.ZkOccConnection(utils.vtgate.addr(), "test_nj", 30.0)
|
||||
vtgate_client = zkocc.ZkOccConnection(utils.vtgate.addr(), 'test_nj', 30.0)
|
||||
vtgate_client.dial()
|
||||
vtgate_client.get_srv_keyspace_names("test_nj")
|
||||
vtgate_client.get_srv_keyspace("test_nj", "test_keyspace")
|
||||
vtgate_client.get_end_points("test_nj", "test_keyspace", "-80", "master")
|
||||
vtgate_client.get_srv_keyspace_names('test_nj')
|
||||
vtgate_client.get_srv_keyspace('test_nj', 'test_keyspace')
|
||||
vtgate_client.get_end_points('test_nj', 'test_keyspace', '-80', 'master')
|
||||
vtgate_client.close()
|
||||
|
||||
status = utils.vtgate.get_status()
|
||||
self.assertIn('</html>', status) # end of page
|
||||
self.assertIn('/serving_graph/test_nj">test_nj', status) # vtctld link
|
||||
self.assertIn('</html>', status) # end of page
|
||||
self.assertIn('/serving_graph/test_nj">test_nj', status) # vtctld link
|
||||
|
||||
utils.pause("You can now run a browser and connect to http://%s:%d%s to manually check vtgate status page" % (socket.getfqdn(), utils.vtgate.port, environment.status_url))
|
||||
utils.pause(
|
||||
'You can now run a browser and connect to http://%s:%d%s to '
|
||||
'manually check vtgate status page' %
|
||||
(socket.getfqdn(), utils.vtgate.port, environment.status_url))
|
||||
|
||||
if __name__ == '__main__':
|
||||
utils.main()
@ -37,17 +37,20 @@ shard_1_master = tablet.Tablet()
|
|||
shard_1_replica = tablet.Tablet()
|
||||
|
||||
shard_names = ['-80', '80-']
|
||||
shard_kid_map = {'-80': [527875958493693904, 626750931627689502,
|
||||
345387386794260318, 332484755310826578,
|
||||
1842642426274125671, 1326307661227634652,
|
||||
1761124146422844620, 1661669973250483744,
|
||||
3361397649937244239, 2444880764308344533],
|
||||
'80-': [9767889778372766922, 9742070682920810358,
|
||||
10296850775085416642, 9537430901666854108,
|
||||
10440455099304929791, 11454183276974683945,
|
||||
11185910247776122031, 10460396697869122981,
|
||||
13379616110062597001, 12826553979133932576],
|
||||
}
|
||||
shard_kid_map = {
|
||||
'-80': [
|
||||
527875958493693904, 626750931627689502,
|
||||
345387386794260318, 332484755310826578,
|
||||
1842642426274125671, 1326307661227634652,
|
||||
1761124146422844620, 1661669973250483744,
|
||||
3361397649937244239, 2444880764308344533],
|
||||
'80-': [
|
||||
9767889778372766922, 9742070682920810358,
|
||||
10296850775085416642, 9537430901666854108,
|
||||
10440455099304929791, 11454183276974683945,
|
||||
11185910247776122031, 10460396697869122981,
|
||||
13379616110062597001, 12826553979133932576],
|
||||
}
|
||||
|
||||
create_vt_insert_test = '''create table vt_insert_test (
|
||||
id bigint auto_increment,
|
||||
|
@ -65,7 +68,7 @@ primary key(eid, id)
|
|||
|
||||
|
||||
def setUpModule():
|
||||
logging.debug("in setUpModule")
|
||||
logging.debug('in setUpModule')
|
||||
try:
|
||||
environment.topo_server().setup()
|
||||
|
||||
|
@ -81,11 +84,12 @@ def setUpModule():
|
|||
tearDownModule()
|
||||
raise
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
logging.debug("in tearDownModule")
|
||||
logging.debug('in tearDownModule')
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
logging.debug("Tearing down the servers and setup")
|
||||
logging.debug('Tearing down the servers and setup')
|
||||
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_1_master,
|
||||
shard_1_replica])
|
||||
teardown_procs = [shard_0_master.teardown_mysql(),
|
||||
|
@ -105,9 +109,10 @@ def tearDownModule():
|
|||
shard_1_master.remove_tree()
|
||||
shard_1_replica.remove_tree()
|
||||
|
||||
|
||||
def setup_tablets():
|
||||
# Start up a master mysql and vttablet
|
||||
logging.debug("Setting up tablets")
|
||||
logging.debug('Setting up tablets')
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', 'test_keyspace',
|
||||
'keyspace_id', 'uint64'])
|
||||
|
@ -141,7 +146,7 @@ def setup_tablets():
|
|||
'Partitions(replica): -80 80-\n')
|
||||
|
||||
utils.VtGate().start()
|
||||
vtgate_client = zkocc.ZkOccConnection(utils.vtgate.addr(), "test_nj", 30.0)
|
||||
vtgate_client = zkocc.ZkOccConnection(utils.vtgate.addr(), 'test_nj', 30.0)
|
||||
topology.read_topology(vtgate_client)
|
||||
|
||||
|
||||
|
@ -150,8 +155,8 @@ def get_connection(db_type='master', shard_index=0, user=None, password=None):
|
|||
timeout = 10.0
|
||||
conn = None
|
||||
shard = shard_names[shard_index]
|
||||
vtgate_addrs = {"vt": [utils.vtgate.addr(),]}
|
||||
vtgate_client = zkocc.ZkOccConnection(utils.vtgate.addr(), "test_nj", 30.0)
|
||||
vtgate_addrs = {'vt': [utils.vtgate.addr(),]}
|
||||
vtgate_client = zkocc.ZkOccConnection(utils.vtgate.addr(), 'test_nj', 30.0)
|
||||
conn = vtclient.VtOCCConnection(vtgate_client, 'test_keyspace', shard,
|
||||
db_type, timeout,
|
||||
user=user, password=password,
|
||||
|
@ -160,15 +165,18 @@ def get_connection(db_type='master', shard_index=0, user=None, password=None):
conn.connect()
return conn


def do_write(count):
master_conn = get_connection(db_type='master')
master_conn.begin()
master_conn._execute("delete from vt_insert_test", {})
master_conn._execute('delete from vt_insert_test', {})
kid_list = shard_kid_map[master_conn.shard]
for x in xrange(count):
keyspace_id = kid_list[count%len(kid_list)]
master_conn._execute("insert into vt_insert_test (msg, keyspace_id) values (%(msg)s, %(keyspace_id)s)",
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
master_conn._execute(
'insert into vt_insert_test (msg, keyspace_id) '
'values (%(msg)s, %(keyspace_id)s)',
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
master_conn.commit()
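A small sketch pairing do_write() with a read-back through the same connection helper; the row count is arbitrary, and the slicing of the _execute() result follows the pattern used in the tests below:

# Write ten rows, then read them back on the master of the same shard.
do_write(10)
master_conn = get_connection(db_type='master')
results, rowcount = master_conn._execute(
    'select * from vt_insert_test', {})[:2]
assert rowcount == 10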
|
||||
def direct_batch_write(count, tablet):
|
||||
|
@ -180,10 +188,10 @@ def direct_batch_write(count, tablet):
|
|||
"""
|
||||
master_conn = get_connection(db_type='master')
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
master_conn.commit()
|
||||
kid_list = shard_kid_map[master_conn.shard]
|
||||
values_str = ""
|
||||
values_str = ''
|
||||
for x in xrange(count):
|
||||
if x != 0:
|
||||
values_str += ','
|
||||
|
@ -195,7 +203,9 @@ def direct_batch_write(count, tablet):
|
|||
'commit'
|
||||
], write=True)
|
||||
|
||||
|
||||
class TestTabletFunctions(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.shard_index = 0
|
||||
self.master_tablet = shard_0_master
|
||||
|
@ -203,104 +213,121 @@ class TestTabletFunctions(unittest.TestCase):
|
|||
|
||||
def test_connect(self):
|
||||
try:
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard0 master failed with error %s" % str(e))
|
||||
self.fail('Connection to shard0 master failed with error %s' % str(e))
|
||||
self.assertNotEqual(master_conn, None)
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index=self.shard_index)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
logging.debug("Connection to %s replica failed with error %s" %
|
||||
(shard_names[self.shard_index], str(e)))
|
||||
logging.debug('Connection to %s replica failed with error %s',
|
||||
shard_names[self.shard_index], e)
|
||||
raise
|
||||
self.assertNotEqual(replica_conn, None)
|
||||
self.assertIsInstance(master_conn.conn, conn_class,
|
||||
"Invalid master connection")
|
||||
'Invalid master connection')
|
||||
self.assertIsInstance(replica_conn.conn, conn_class,
|
||||
"Invalid replica connection")
|
||||
'Invalid replica connection')
|
||||
|
||||
def test_writes(self):
|
||||
try:
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
count = 10
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
kid_list = shard_kid_map[master_conn.shard]
|
||||
for x in xrange(count):
|
||||
keyspace_id = kid_list[count%len(kid_list)]
|
||||
master_conn._execute("insert into vt_insert_test (msg, keyspace_id) values (%(msg)s, %(keyspace_id)s)",
|
||||
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
|
||||
master_conn._execute(
|
||||
'insert into vt_insert_test (msg, keyspace_id) '
|
||||
'values (%(msg)s, %(keyspace_id)s)',
|
||||
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
|
||||
master_conn.commit()
|
||||
results, rowcount = master_conn._execute("select * from vt_insert_test",
|
||||
results, rowcount = master_conn._execute('select * from vt_insert_test',
|
||||
{})[:2]
|
||||
self.assertEqual(rowcount, count, "master fetch works")
|
||||
self.assertEqual(rowcount, count, 'master fetch works')
|
||||
except Exception, e:
|
||||
logging.debug("Write failed with error %s" % str(e))
|
||||
logging.debug('Write failed with error %s', e)
|
||||
raise
|
||||
|
||||
def test_batch_read(self):
|
||||
try:
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
count = 10
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
kid_list = shard_kid_map[master_conn.shard]
|
||||
for x in xrange(count):
|
||||
keyspace_id = kid_list[count%len(kid_list)]
|
||||
master_conn._execute("insert into vt_insert_test (msg, keyspace_id) values (%(msg)s, %(keyspace_id)s)",
|
||||
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
|
||||
master_conn._execute(
|
||||
'insert into vt_insert_test (msg, keyspace_id) '
|
||||
'values (%(msg)s, %(keyspace_id)s)',
|
||||
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
|
||||
master_conn.commit()
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_a", {})
|
||||
master_conn._execute('delete from vt_a', {})
|
||||
for x in xrange(count):
|
||||
keyspace_id = kid_list[count%len(kid_list)]
|
||||
master_conn._execute("insert into vt_a (eid, id, keyspace_id) \
|
||||
values (%(eid)s, %(id)s, %(keyspace_id)s)",
|
||||
master_conn._execute('insert into vt_a (eid, id, keyspace_id) \
|
||||
values (%(eid)s, %(id)s, %(keyspace_id)s)',
|
||||
{'eid': x, 'id': x, 'keyspace_id': keyspace_id})
|
||||
master_conn.commit()
|
||||
rowsets = master_conn._execute_batch(["select * from vt_insert_test",
|
||||
"select * from vt_a"], [{}, {}], False)
|
||||
rowsets = master_conn._execute_batch(
|
||||
['select * from vt_insert_test',
|
||||
'select * from vt_a'], [{}, {}], False)
|
||||
self.assertEqual(rowsets[0][1], count)
|
||||
self.assertEqual(rowsets[1][1], count)
|
||||
except Exception, e:
|
||||
self.fail("Write failed with error %s %s" % (str(e),
|
||||
self.fail('Write failed with error %s %s' % (str(e),
|
||||
traceback.print_exc()))
|
||||
|
||||
def test_batch_write(self):
|
||||
try:
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
count = 10
|
||||
query_list = []
|
||||
bind_vars_list = []
|
||||
query_list.append("delete from vt_insert_test")
|
||||
query_list.append('delete from vt_insert_test')
|
||||
bind_vars_list.append({})
|
||||
kid_list = shard_kid_map[master_conn.shard]
|
||||
for x in xrange(count):
|
||||
keyspace_id = kid_list[count%len(kid_list)]
|
||||
query_list.append("insert into vt_insert_test (msg, keyspace_id) values (%(msg)s, %(keyspace_id)s)")
|
||||
bind_vars_list.append({'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
|
||||
query_list.append("delete from vt_a")
|
||||
query_list.append(
|
||||
'insert into vt_insert_test (msg, keyspace_id) '
|
||||
'values (%(msg)s, %(keyspace_id)s)')
|
||||
bind_vars_list.append(
|
||||
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
|
||||
query_list.append('delete from vt_a')
|
||||
bind_vars_list.append({})
|
||||
for x in xrange(count):
|
||||
keyspace_id = kid_list[count%len(kid_list)]
|
||||
query_list.append("insert into vt_a (eid, id, keyspace_id) values (%(eid)s, %(id)s, %(keyspace_id)s)")
|
||||
query_list.append(
|
||||
'insert into vt_a (eid, id, keyspace_id) '
|
||||
'values (%(eid)s, %(id)s, %(keyspace_id)s)')
|
||||
bind_vars_list.append({'eid': x, 'id': x, 'keyspace_id': keyspace_id})
|
||||
master_conn._execute_batch(query_list, bind_vars_list, True)
|
||||
results, rowcount, _, _ = master_conn._execute("select * from vt_insert_test", {})
|
||||
results, rowcount, _, _ = master_conn._execute(
|
||||
'select * from vt_insert_test', {})
|
||||
self.assertEqual(rowcount, count)
|
||||
results, rowcount, _, _ = master_conn._execute("select * from vt_a", {})
|
||||
results, rowcount, _, _ = master_conn._execute('select * from vt_a', {})
|
||||
self.assertEqual(rowcount, count)
|
||||
except Exception, e:
|
||||
self.fail("Write failed with error %s" % str(e))
|
||||
self.fail('Write failed with error %s' % str(e))
|
||||
|
||||
def test_streaming_fetchsubset(self):
|
||||
try:
|
||||
count = 100
|
||||
do_write(count)
|
||||
# Fetch a subset of the total size.
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
stream_cursor = cursor.StreamCursor(master_conn)
|
||||
stream_cursor.execute("select * from vt_insert_test", {})
|
||||
stream_cursor.execute('select * from vt_insert_test', {})
|
||||
fetch_size = 10
|
||||
rows = stream_cursor.fetchmany(size=fetch_size)
|
||||
rowcount = 0
|
||||
|
@ -309,16 +336,17 @@ class TestTabletFunctions(unittest.TestCase):
|
|||
self.assertEqual(rowcount, fetch_size)
|
||||
stream_cursor.close()
|
||||
except Exception, e:
|
||||
self.fail("Failed with error %s %s" % (str(e), traceback.print_exc()))
|
||||
self.fail('Failed with error %s %s' % (str(e), traceback.print_exc()))
|
||||
|
||||
def test_streaming_fetchall(self):
|
||||
try:
|
||||
count = 100
|
||||
do_write(count)
|
||||
# Fetch all.
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
stream_cursor = cursor.StreamCursor(master_conn)
|
||||
stream_cursor.execute("select * from vt_insert_test", {})
|
||||
stream_cursor.execute('select * from vt_insert_test', {})
|
||||
rows = stream_cursor.fetchall()
|
||||
rowcount = 0
|
||||
for r in rows:
|
||||
|
@ -326,38 +354,40 @@ class TestTabletFunctions(unittest.TestCase):
|
|||
self.assertEqual(rowcount, count)
|
||||
stream_cursor.close()
|
||||
except Exception, e:
|
||||
self.fail("Failed with error %s %s" % (str(e), traceback.print_exc()))
|
||||
self.fail('Failed with error %s %s' % (str(e), traceback.print_exc()))
|
||||
|
||||
def test_streaming_fetchone(self):
|
||||
try:
|
||||
count = 100
|
||||
do_write(count)
|
||||
# Fetch one.
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
stream_cursor = cursor.StreamCursor(master_conn)
|
||||
stream_cursor.execute("select * from vt_insert_test", {})
|
||||
stream_cursor.execute('select * from vt_insert_test', {})
|
||||
rows = stream_cursor.fetchone()
|
||||
self.assertTrue(type(rows) == tuple, "Received a valid row")
|
||||
self.assertTrue(type(rows) == tuple, 'Received a valid row')
|
||||
stream_cursor.close()
|
||||
except Exception, e:
|
||||
self.fail("Failed with error %s %s" % (str(e), traceback.print_exc()))
|
||||
self.fail('Failed with error %s %s' % (str(e), traceback.print_exc()))
|
||||
|
||||
def test_streaming_zero_results(self):
|
||||
try:
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
master_conn.commit()
|
||||
# After deletion, should result zero.
|
||||
stream_cursor = cursor.StreamCursor(master_conn)
|
||||
stream_cursor.execute("select * from vt_insert_test", {})
|
||||
stream_cursor.execute('select * from vt_insert_test', {})
|
||||
rows = stream_cursor.fetchall()
|
||||
rowcount = 0
|
||||
for r in rows:
|
||||
rowcount +=1
|
||||
self.assertEqual(rowcount, 0)
|
||||
except Exception, e:
|
||||
self.fail("Failed with error %s %s" % (str(e), traceback.print_exc()))
|
||||
self.fail('Failed with error %s %s' % (str(e), traceback.print_exc()))
|
||||
|
||||
class TestFailures(unittest.TestCase):
|
||||
def setUp(self):
|
||||
|
@ -367,67 +397,76 @@ class TestFailures(unittest.TestCase):
|
|||
|
||||
def test_tablet_restart_read(self):
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index=self.shard_index)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard %s replica failed with error %s" % (shard_names[self.shard_index], str(e)))
|
||||
self.fail('Connection to shard %s replica failed with error %s' %
|
||||
(shard_names[self.shard_index], str(e)))
|
||||
self.replica_tablet.kill_vttablet()
|
||||
with self.assertRaises(dbexceptions.OperationalError):
|
||||
replica_conn._execute("select 1 from vt_insert_test", {})
|
||||
replica_conn._execute('select 1 from vt_insert_test', {})
|
||||
proc = self.replica_tablet.start_vttablet()
|
||||
try:
|
||||
results = replica_conn._execute("select 1 from vt_insert_test", {})
|
||||
results = replica_conn._execute('select 1 from vt_insert_test', {})
|
||||
except Exception, e:
|
||||
self.fail("Communication with shard %s replica failed with error %s" % (shard_names[self.shard_index], str(e)))
|
||||
self.fail('Communication with shard %s replica failed with error %s' %
|
||||
(shard_names[self.shard_index], str(e)))
|
||||
|
||||
def test_tablet_restart_stream_execute(self):
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index=self.shard_index)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to %s replica failed with error %s" % (shard_names[self.shard_index], str(e)))
|
||||
self.fail('Connection to %s replica failed with error %s' %
|
||||
(shard_names[self.shard_index], str(e)))
|
||||
stream_cursor = cursor.StreamCursor(replica_conn)
|
||||
self.replica_tablet.kill_vttablet()
|
||||
with self.assertRaises(dbexceptions.OperationalError):
|
||||
stream_cursor.execute("select * from vt_insert_test", {})
|
||||
stream_cursor.execute('select * from vt_insert_test', {})
|
||||
proc = self.replica_tablet.start_vttablet()
|
||||
self.replica_tablet.wait_for_vttablet_state('SERVING')
|
||||
try:
|
||||
stream_cursor.execute("select * from vt_insert_test", {})
|
||||
stream_cursor.execute('select * from vt_insert_test', {})
|
||||
except Exception, e:
|
||||
self.fail("Communication with shard0 replica failed with error %s" %
|
||||
self.fail('Communication with shard0 replica failed with error %s' %
|
||||
str(e))
|
||||
|
||||
def test_fail_stream_execute_initial(self):
|
||||
"""Tests for app errors in the first stream execute response."""
|
||||
try:
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to %s master failed with error %s" % (shard_names[self.shard_index], str(e)))
|
||||
self.fail('Connection to %s master failed with error %s' %
|
||||
(shard_names[self.shard_index], str(e)))
|
||||
try:
|
||||
stream_cursor = cursor.StreamCursor(master_conn)
|
||||
with self.assertRaises(dbexceptions.DatabaseError):
|
||||
stream_cursor.execute("invalid sql syntax", {})
|
||||
stream_cursor.execute('invalid sql syntax', {})
|
||||
except Exception, e:
|
||||
self.fail("Failed with error %s %s" % (str(e), traceback.print_exc()))
|
||||
self.fail('Failed with error %s %s' % (str(e), traceback.print_exc()))
|
||||
|
||||
def test_conn_after_stream_execute_failure(self):
|
||||
"""After a stream execute failure, other operations should work on the same connection."""
|
||||
try:
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to %s master failed with error %s" % (shard_names[self.shard_index], str(e)))
|
||||
self.fail('Connection to %s master failed with error %s' %
|
||||
(shard_names[self.shard_index], str(e)))
|
||||
try:
|
||||
stream_cursor = cursor.StreamCursor(master_conn)
|
||||
with self.assertRaises(dbexceptions.DatabaseError):
|
||||
stream_cursor.execute("invalid sql syntax", {})
|
||||
master_conn._execute("select * from vt_insert_test", {})
|
||||
stream_cursor.execute('invalid sql syntax', {})
|
||||
master_conn._execute('select * from vt_insert_test', {})
|
||||
except Exception, e:
|
||||
self.fail("Failed with error %s %s" % (str(e), traceback.print_exc()))
|
||||
self.fail('Failed with error %s %s' % (str(e), traceback.print_exc()))
|
||||
|
||||
def test_tablet_restart_begin(self):
|
||||
try:
|
||||
master_conn = get_connection(db_type='master')
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard0 master failed with error %s" % str(e))
|
||||
self.fail('Connection to shard0 master failed with error %s' % str(e))
|
||||
self.master_tablet.kill_vttablet()
|
||||
with self.assertRaises(dbexceptions.OperationalError):
|
||||
master_conn.begin()
|
||||
|
@ -438,88 +477,95 @@ class TestFailures(unittest.TestCase):
|
|||
try:
|
||||
master_conn = get_connection(db_type='master')
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard0 master failed with error %s" % str(e))
|
||||
self.fail('Connection to shard0 master failed with error %s' % str(e))
|
||||
with self.assertRaises(dbexceptions.OperationalError):
|
||||
master_conn.begin()
|
||||
self.master_tablet.kill_vttablet()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
master_conn.commit()
|
||||
proc = self.master_tablet.start_vttablet()
|
||||
try:
|
||||
master_conn = get_connection(db_type='master')
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard0 master failed with error %s" % str(e))
|
||||
self.fail('Connection to shard0 master failed with error %s' % str(e))
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
master_conn.commit()
|
||||
|
||||
def test_query_timeout(self):
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index=self.shard_index)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard0 replica failed with error %s" % str(e))
|
||||
self.fail('Connection to shard0 replica failed with error %s' % str(e))
|
||||
with self.assertRaises(dbexceptions.TimeoutError):
|
||||
replica_conn._execute("select sleep(12) from dual", {})
|
||||
replica_conn._execute('select sleep(12) from dual', {})
|
||||
|
||||
try:
|
||||
master_conn = get_connection(db_type='master')
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard0 master failed with error %s" % str(e))
|
||||
self.fail('Connection to shard0 master failed with error %s' % str(e))
|
||||
with self.assertRaises(dbexceptions.TimeoutError):
|
||||
master_conn._execute("select sleep(12) from dual", {})
|
||||
master_conn._execute('select sleep(12) from dual', {})
|
||||
|
||||
def test_restart_mysql_failure(self):
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index=self.shard_index)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard0 replica failed with error %s" % str(e))
|
||||
self.fail('Connection to shard0 replica failed with error %s' % str(e))
|
||||
utils.wait_procs([self.replica_tablet.shutdown_mysql(),])
|
||||
with self.assertRaises(dbexceptions.DatabaseError):
|
||||
replica_conn._execute("select 1 from vt_insert_test", {})
|
||||
replica_conn._execute('select 1 from vt_insert_test', {})
|
||||
utils.wait_procs([self.replica_tablet.start_mysql(),])
|
||||
self.replica_tablet.kill_vttablet()
|
||||
self.replica_tablet.start_vttablet()
|
||||
replica_conn._execute("select 1 from vt_insert_test", {})
|
||||
replica_conn._execute('select 1 from vt_insert_test', {})
|
||||
|
||||
def test_noretry_txn_pool_full(self):
|
||||
master_conn = get_connection(db_type='master')
|
||||
master_conn._execute("set vt_transaction_cap=1", {})
|
||||
master_conn._execute('set vt_transaction_cap=1', {})
|
||||
master_conn.begin()
|
||||
with self.assertRaises(dbexceptions.TxPoolFull):
|
||||
master_conn2 = get_connection(db_type='master')
|
||||
master_conn2.begin()
|
||||
master_conn.commit()
|
||||
master_conn._execute("set vt_transaction_cap=20", {})
|
||||
master_conn._execute('set vt_transaction_cap=20', {})
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
master_conn.commit()
|
||||
|
||||
def test_read_only(self):
|
||||
master_conn = get_connection(db_type='master')
|
||||
count = 10
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
kid_list = shard_kid_map[master_conn.shard]
|
||||
for x in xrange(count):
|
||||
keyspace_id = kid_list[count%len(kid_list)]
|
||||
master_conn._execute("insert into vt_insert_test (msg, keyspace_id) values (%(msg)s, %(keyspace_id)s)",
|
||||
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
|
||||
master_conn._execute(
|
||||
'insert into vt_insert_test (msg, keyspace_id) '
|
||||
'values (%(msg)s, %(keyspace_id)s)',
|
||||
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
|
||||
master_conn.commit()
|
||||
|
||||
self.master_tablet.mquery(self.master_tablet.dbname, "set global read_only=on")
|
||||
self.master_tablet.mquery(
|
||||
self.master_tablet.dbname, 'set global read_only=on')
|
||||
master_conn.begin()
|
||||
with self.assertRaises(dbexceptions.FatalError):
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
master_conn.rollback()
|
||||
self.master_tablet.mquery(self.master_tablet.dbname, "set global read_only=off")
|
||||
self.master_tablet.mquery(
|
||||
self.master_tablet.dbname, 'set global read_only=off')
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_insert_test", {})
|
||||
master_conn._execute('delete from vt_insert_test', {})
|
||||
master_conn.commit()
|
||||
|
||||
DML_KEYWORDS = ['insert', 'update', 'delete']
|
||||
|
||||
|
||||
class TestExceptionLogging(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.shard_index = 0
|
||||
self.master_tablet = shard_0_master
|
||||
|
@ -529,12 +575,13 @@ class TestExceptionLogging(unittest.TestCase):
|
|||
|
||||
def test_integrity_error_logging(self):
|
||||
try:
|
||||
master_conn = get_connection(db_type='master', shard_index=self.shard_index)
|
||||
master_conn = get_connection(
|
||||
db_type='master', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard0 master failed with error %s" % str(e))
|
||||
self.fail('Connection to shard0 master failed with error %s' % str(e))
|
||||
|
||||
master_conn.begin()
|
||||
master_conn._execute("delete from vt_a", {})
|
||||
master_conn._execute('delete from vt_a', {})
|
||||
master_conn.commit()
|
||||
|
||||
keyspace_id = shard_kid_map[master_conn.shard][0]
|
||||
|
@ -543,13 +590,13 @@ class TestExceptionLogging(unittest.TestCase):
|
|||
try:
|
||||
master_conn.begin()
|
||||
master_conn._execute(
|
||||
"insert into vt_a (eid, id, keyspace_id) \
|
||||
values (%(eid)s, %(id)s, %(keyspace_id)s)",
|
||||
{'eid': 1, 'id': 1, 'keyspace_id':keyspace_id})
|
||||
'insert into vt_a (eid, id, keyspace_id) '
|
||||
'values (%(eid)s, %(id)s, %(keyspace_id)s)',
|
||||
{'eid': 1, 'id': 1, 'keyspace_id': keyspace_id})
|
||||
master_conn._execute(
|
||||
"insert into vt_a (eid, id, keyspace_id) \
|
||||
values (%(eid)s, %(id)s, %(keyspace_id)s)",
|
||||
{'eid': 1, 'id': 1, 'keyspace_id':keyspace_id})
|
||||
'insert into vt_a (eid, id, keyspace_id) '
|
||||
'values (%(eid)s, %(id)s, %(keyspace_id)s)',
|
||||
{'eid': 1, 'id': 1, 'keyspace_id': keyspace_id})
|
||||
master_conn.commit()
|
||||
except dbexceptions.IntegrityError as e:
|
||||
parts = str(e).split(',')
|
||||
|
@ -558,7 +605,8 @@ class TestExceptionLogging(unittest.TestCase):
|
|||
if kw in exc_msg:
|
||||
self.fail("IntegrityError shouldn't contain the query %s" % exc_msg)
|
||||
except Exception as e:
|
||||
self.fail("Expected IntegrityError to be raised, instead raised %s" % str(e))
|
||||
self.fail('Expected IntegrityError to be raised, instead raised %s' %
|
||||
str(e))
|
||||
finally:
|
||||
master_conn.rollback()
|
||||
# The underlying execute is expected to catch and log the integrity error.
|
||||
|
@ -574,8 +622,8 @@ class TestAuthentication(unittest.TestCase):
|
|||
self.replica_tablet.start_vttablet(auth=True)
|
||||
utils.vtgate.kill()
|
||||
utils.VtGate().start(auth=True)
|
||||
credentials_file_name = os.path.join(environment.vttop, 'test', 'test_data',
|
||||
'authcredentials_test.json')
|
||||
credentials_file_name = os.path.join(
|
||||
environment.vttop, 'test', 'test_data', 'authcredentials_test.json')
|
||||
credentials_file = open(credentials_file_name, 'r')
|
||||
credentials = json.load(credentials_file)
|
||||
self.user = str(credentials.keys()[0])
|
||||
|
@ -584,52 +632,60 @@ class TestAuthentication(unittest.TestCase):
|
|||
|
||||
def test_correct_credentials(self):
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index = self.shard_index, user=self.user,
|
||||
password=self.password)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index, user=self.user,
|
||||
password=self.password)
|
||||
replica_conn.connect()
|
||||
finally:
|
||||
replica_conn.close()
|
||||
|
||||
def test_secondary_credentials(self):
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index = self.shard_index, user=self.user,
|
||||
password=self.secondary_password)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index, user=self.user,
|
||||
password=self.secondary_password)
|
||||
replica_conn.connect()
|
||||
finally:
|
||||
replica_conn.close()
|
||||
|
||||
def test_incorrect_user(self):
|
||||
with self.assertRaises(dbexceptions.OperationalError):
|
||||
replica_conn = get_connection(db_type='replica', shard_index = self.shard_index, user="romek", password="ma raka")
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index, user='romek',
|
||||
password='ma raka')
|
||||
replica_conn.connect()
|
||||
|
||||
def test_incorrect_credentials(self):
|
||||
with self.assertRaises(dbexceptions.OperationalError):
|
||||
replica_conn = get_connection(db_type='replica', shard_index = self.shard_index, user=self.user, password="ma raka")
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index, user=self.user,
|
||||
password='ma raka')
|
||||
replica_conn.connect()
|
||||
|
||||
def test_challenge_is_used(self):
|
||||
replica_conn = get_connection(db_type='replica', shard_index = self.shard_index, user=self.user,
|
||||
password=self.password)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index, user=self.user,
|
||||
password=self.password)
|
||||
replica_conn.connect()
|
||||
challenge = ""
|
||||
proof = "%s %s" %(self.user, hmac.HMAC(self.password,
|
||||
challenge = ''
|
||||
proof = '%s %s' %(self.user, hmac.HMAC(self.password,
|
||||
challenge).hexdigest())
|
||||
self.assertRaises(gorpc.AppError, replica_conn.conn.client.call,
|
||||
'AuthenticatorCRAMMD5.Authenticate', {"Proof": proof})
|
||||
'AuthenticatorCRAMMD5.Authenticate', {'Proof': proof})
|
||||
|
||||
def test_only_few_requests_are_allowed(self):
|
||||
replica_conn = get_connection(db_type='replica', shard_index = self.shard_index, user=self.user,
|
||||
password=self.password)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index, user=self.user,
|
||||
password=self.password)
|
||||
replica_conn.connect()
|
||||
for i in range(4):
|
||||
try:
|
||||
replica_conn.conn.client.call('AuthenticatorCRAMMD5.GetNewChallenge',
|
||||
"")
|
||||
'')
|
||||
except gorpc.GoRpcError:
|
||||
break
|
||||
else:
|
||||
self.fail("Too many requests were allowed (%s)." % (i + 1))
|
||||
self.fail('Too many requests were allowed (%s).' % (i + 1))
|
||||
|
||||
|
||||
class LocalLogger(vtdb_logger.VtdbLogger):
|
||||
|
@ -652,6 +708,7 @@ class LocalLogger(vtdb_logger.VtdbLogger):
|
|||
|
||||
|
||||
class TestTopoReResolve(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.shard_index = 0
|
||||
self.replica_tablet = shard_0_replica
|
||||
|
@ -660,14 +717,17 @@ class TestTopoReResolve(unittest.TestCase):
|
|||
# Lowering the keyspace refresh throttle so things are testable.
|
||||
topology.set_keyspace_fetch_throttle(0.1)
|
||||
self.vtgate_client = zkocc.ZkOccConnection(utils.vtgate.addr(),
|
||||
"test_nj", 30.0)
|
||||
'test_nj', 30.0)
|
||||
|
||||
def test_topo_read_threshold(self):
|
||||
before_topo_rtt = vtdb_logger.get_logger().get_topo_rtt()
|
||||
# Check original state.
|
||||
keyspace_obj = topology.get_keyspace('test_keyspace')
|
||||
self.assertNotEqual(keyspace_obj, None, "test_keyspace should be not None")
|
||||
self.assertEqual(keyspace_obj.sharding_col_type, keyrange_constants.KIT_UINT64, "ShardingColumnType be %s" % keyrange_constants.KIT_UINT64)
|
||||
self.assertNotEqual(
|
||||
keyspace_obj, None, 'test_keyspace should be not None')
|
||||
self.assertEqual(
|
||||
keyspace_obj.sharding_col_type, keyrange_constants.KIT_UINT64,
|
||||
'ShardingColumnType be %s' % keyrange_constants.KIT_UINT64)
|
||||
|
||||
# Change the keyspace object.
|
||||
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', 'test_keyspace',
|
||||
|
@ -680,32 +740,44 @@ class TestTopoReResolve(unittest.TestCase):
|
|||
topology.refresh_keyspace(self.vtgate_client, 'test_keyspace')
|
||||
keyspace_obj = topology.get_keyspace('test_keyspace')
|
||||
after_1st_clear = vtdb_logger.get_logger().get_topo_rtt()
|
||||
self.assertEqual(after_1st_clear - before_topo_rtt, 1, "One additional round-trips to topo server")
|
||||
self.assertEqual(keyspace_obj.sharding_col_type, keyrange_constants.KIT_BYTES, "ShardingColumnType be %s" % keyrange_constants.KIT_BYTES)
|
||||
self.assertEqual(
|
||||
after_1st_clear - before_topo_rtt, 1,
|
||||
'One additional round-trips to topo server')
|
||||
self.assertEqual(
|
||||
keyspace_obj.sharding_col_type, keyrange_constants.KIT_BYTES,
|
||||
'ShardingColumnType be %s' % keyrange_constants.KIT_BYTES)
|
||||
|
||||
# Refresh without sleeping for throttle time shouldn't cause additional rtt.
|
||||
# Refresh without sleeping for throttle time shouldn't cause
|
||||
# additional rtt.
|
||||
topology.refresh_keyspace(self.vtgate_client, 'test_keyspace')
|
||||
keyspace_obj = topology.get_keyspace('test_keyspace')
|
||||
after_2nd_clear = vtdb_logger.get_logger().get_topo_rtt()
|
||||
self.assertEqual(after_2nd_clear - after_1st_clear, 0, "No additional round-trips to topo server")
|
||||
self.assertEqual(
|
||||
after_2nd_clear - after_1st_clear, 0,
|
||||
'No additional round-trips to topo server')
|
||||
|
||||
def test_keyspace_reresolve_on_execute(self):
|
||||
before_topo_rtt = vtdb_logger.get_logger().get_topo_rtt()
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index=self.shard_index)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
self.fail("Connection to shard %s replica failed with error %s" % (shard_names[self.shard_index], str(e)))
|
||||
self.fail(
|
||||
'Connection to shard %s replica failed with error %s' %
|
||||
(shard_names[self.shard_index], str(e)))
|
||||
self.replica_tablet.kill_vttablet()
|
||||
time.sleep(self.keyspace_fetch_throttle)
|
||||
|
||||
# this should cause a refresh of the topology.
|
||||
try:
|
||||
results = replica_conn._execute("select 1 from vt_insert_test", {})
|
||||
results = replica_conn._execute('select 1 from vt_insert_test', {})
|
||||
except Exception, e:
|
||||
pass
|
||||
|
||||
after_tablet_error = vtdb_logger.get_logger().get_topo_rtt()
|
||||
self.assertEqual(after_tablet_error - before_topo_rtt, 1, "One additional round-trips to topo server")
|
||||
self.assertEqual(
|
||||
after_tablet_error - before_topo_rtt, 1,
|
||||
'One additional round-trips to topo server')
|
||||
self.replica_tablet.start_vttablet()
|
||||
|
||||
def test_keyspace_reresolve_on_conn_failure(self):
|
||||
|
@ -713,12 +785,15 @@ class TestTopoReResolve(unittest.TestCase):
|
|||
self.replica_tablet.kill_vttablet()
|
||||
time.sleep(self.keyspace_fetch_throttle)
|
||||
try:
|
||||
replica_conn = get_connection(db_type='replica', shard_index=self.shard_index)
|
||||
replica_conn = get_connection(
|
||||
db_type='replica', shard_index=self.shard_index)
|
||||
except Exception, e:
|
||||
pass
|
||||
|
||||
after_tablet_conn_error = vtdb_logger.get_logger().get_topo_rtt()
|
||||
self.assertEqual(after_tablet_conn_error - before_topo_rtt, 1, "One additional round-trips to topo server")
|
||||
self.assertEqual(
|
||||
after_tablet_conn_error - before_topo_rtt, 1,
|
||||
'One additional round-trips to topo server')
|
||||
self.replica_tablet.start_vttablet()


|
|
|
@ -11,6 +11,7 @@ import exceptions
|
|||
from vtdb import vtgate_utils
from vtdb import vtgatev2


def setUpModule():
pass

|
@ -28,22 +29,27 @@ class AnotherException(exceptions.Exception):
|
|||
|
||||
|
||||
class FakeVtGateConnection(vtgatev2.VTGateConnection):
|
||||
|
||||
def __init__(self):
|
||||
self.invoked_intervals = []
|
||||
self.keyspace = "test_keyspace"
|
||||
|
||||
@vtgate_utils.exponential_backoff_retry(retry_exceptions=(SomeException, AnotherException))
|
||||
@vtgate_utils.exponential_backoff_retry(
|
||||
retry_exceptions=(SomeException, AnotherException))
|
||||
def method(self, exc_to_raise):
|
||||
self.invoked_intervals.append(int(time.time() * 1000))
|
||||
if exc_to_raise:
|
||||
|
||||
raise exc_to_raise
|
||||
|
||||
class TestVtgateUtils(unittest.TestCase):
|
||||
|
||||
def test_retry_exception(self):
|
||||
fake_conn = FakeVtGateConnection()
|
||||
with self.assertRaises(SomeException):
|
||||
fake_conn.method(SomeException("an exception"))
|
||||
self.assertEquals(len(fake_conn.invoked_intervals), vtgate_utils.NUM_RETRIES + 1)
|
||||
self.assertEquals(
|
||||
len(fake_conn.invoked_intervals), vtgate_utils.NUM_RETRIES + 1)
|
||||
previous = fake_conn.invoked_intervals[0]
|
||||
delay = vtgate_utils.INITIAL_DELAY_MS
|
||||
for interval in fake_conn.invoked_intervals[1:]:
|
||||
|
@ -55,7 +61,8 @@ class TestVtgateUtils(unittest.TestCase):
|
|||
fake_conn = FakeVtGateConnection()
|
||||
with self.assertRaises(AnotherException):
|
||||
fake_conn.method(AnotherException("an exception"))
|
||||
self.assertEquals(len(fake_conn.invoked_intervals), vtgate_utils.NUM_RETRIES + 1)
|
||||
self.assertEquals(
|
||||
len(fake_conn.invoked_intervals), vtgate_utils.NUM_RETRIES + 1)
|
||||
|
||||
def test_no_retries_inside_txn(self):
|
||||
fake_conn = FakeVtGateConnection()
|
||||
|
|
File diff not shown because it is too large
|
@ -190,15 +190,15 @@ def setUpModule():
|
|||
global shard_0_master
global shard_1_master
global lookup_master
logging.debug("in setUpModule")
logging.debug('in setUpModule')

try:
environment.topo_server().setup()
logging.debug("Setting up tablets")
logging.debug('Setting up tablets')
keyspace_env = keyspace_util.TestEnv()
keyspace_env.launch(
"user",
shards=["-80", "80-"],
'user',
shards=['-80', '80-'],
ddls=[
create_vt_user,
create_vt_user2,
|
@ -208,16 +208,16 @@ def setUpModule():
|
|||
],
|
||||
)
|
||||
keyspace_env.launch(
|
||||
"lookup",
|
||||
'lookup',
|
||||
ddls=[
|
||||
create_vt_user_idx,
|
||||
create_music_user_map,
|
||||
create_name_user2_map,
|
||||
],
|
||||
)
|
||||
shard_0_master = keyspace_env.tablet_map["user.-80.master"]
|
||||
shard_1_master = keyspace_env.tablet_map["user.80-.master"]
|
||||
lookup_master = keyspace_env.tablet_map["lookup.0.master"]
|
||||
shard_0_master = keyspace_env.tablet_map['user.-80.master']
|
||||
shard_1_master = keyspace_env.tablet_map['user.80-.master']
|
||||
lookup_master = keyspace_env.tablet_map['lookup.0.master']
|
||||
|
||||
utils.apply_vschema(schema)
|
||||
utils.VtGate().start()
|
||||
|
@ -226,10 +226,10 @@ def setUpModule():
|
|||
raise
|
||||
|
||||
def tearDownModule():
|
||||
logging.debug("in tearDownModule")
|
||||
logging.debug('in tearDownModule')
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
logging.debug("Tearing down the servers and setup")
|
||||
logging.debug('Tearing down the servers and setup')
|
||||
keyspace_env.teardown()
|
||||
|
||||
environment.topo_server().teardown()
|
||||
|
@ -241,7 +241,7 @@ def tearDownModule():
|
|||
def get_connection(user=None, password=None):
|
||||
timeout = 10.0
|
||||
return vtgatev3.connect(utils.vtgate.addr(), timeout,
|
||||
user=user, password=password)
|
||||
user=user, password=password)
|
||||
|
||||
|
||||
class TestVTGateFunctions(unittest.TestCase):
|
||||
|
@ -258,109 +258,135 @@ class TestVTGateFunctions(unittest.TestCase):
|
|||
i = x+1
|
||||
cursor.begin()
|
||||
cursor.execute(
|
||||
"insert into vt_user (name) values (:name)",
|
||||
'insert into vt_user (name) values (:name)',
|
||||
{'name': 'test %s' % i})
|
||||
self.assertEqual((cursor.fetchall(), cursor.rowcount, cursor.lastrowid, cursor.description), ([], 1L, i, []))
|
||||
self.assertEqual(
|
||||
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
|
||||
cursor.description),
|
||||
([], 1L, i, []))
|
||||
cursor.commit()
|
||||
|
||||
# Test select equal
|
||||
for x in xrange(count):
|
||||
i = x+1
|
||||
cursor.execute("select * from vt_user where id = :id", {'id': i})
|
||||
self.assertEqual((cursor.fetchall(), cursor.rowcount, cursor.lastrowid, cursor.description), ([(i, "test %s" % i)], 1L, 0, [('id', 8L), ('name', 253L)]))
|
||||
cursor.execute('select * from vt_user where id = :id', {'id': i})
|
||||
self.assertEqual(
|
||||
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
|
||||
cursor.description),
|
||||
([(i, 'test %s' % i)], 1L, 0, [('id', 8L), ('name', 253L)]))
|
||||
|
||||
# Test insert with no auto-inc, then auto-inc
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_user (id, name) values (:id, :name)",
|
||||
'insert into vt_user (id, name) values (:id, :name)',
|
||||
{'id': 6, 'name': 'test 6'},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_user (name) values (:name)",
|
||||
'insert into vt_user (name) values (:name)',
|
||||
{'name': 'test 7'},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 7L, []))
|
||||
vtgate_conn.commit()
|
||||
|
||||
# Verify values in db
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_user")
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_user')
|
||||
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_user")
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_user')
|
||||
self.assertEqual(result, ((4L, 'test 4'), (6L, 'test 6'), (7L, 'test 7')))
|
||||
result = lookup_master.mquery("vt_lookup", "select * from vt_user_idx")
|
||||
result = lookup_master.mquery('vt_lookup', 'select * from vt_user_idx')
|
||||
self.assertEqual(result, ((1L,), (2L,), (3L,), (4L,), (6L,), (7L,)))
|
||||
|
||||
# Test IN clause
|
||||
result = vtgate_conn._execute("select * from vt_user where id in (:a, :b)", {"a": 1, "b": 4}, 'master')
|
||||
result = vtgate_conn._execute(
|
||||
'select * from vt_user where id in (:a, :b)', {'a': 1, 'b': 4},
|
||||
'master')
|
||||
result[0].sort()
|
||||
self.assertEqual(result, ([(1L, 'test 1'), (4L, 'test 4')], 2L, 0, [('id', 8L), ('name', 253L)]))
|
||||
result = vtgate_conn._execute("select * from vt_user where id in (:a, :b)", {"a": 1, "b": 2}, 'master')
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(1L, 'test 1'), (4L, 'test 4')], 2L, 0, [('id', 8L), ('name', 253L)]))
|
||||
result = vtgate_conn._execute(
|
||||
'select * from vt_user where id in (:a, :b)', {'a': 1, 'b': 2},
|
||||
'master')
|
||||
result[0].sort()
|
||||
self.assertEqual(result, ([(1L, 'test 1'), (2L, 'test 2')], 2L, 0, [('id', 8L), ('name', 253L)]))
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(1L, 'test 1'), (2L, 'test 2')], 2L, 0, [('id', 8L), ('name', 253L)]))
|
||||
|
||||
# Test keyrange
|
||||
result = vtgate_conn._execute("select * from vt_user where keyrange('', '\x80')", {}, 'master')
|
||||
self.assertEqual(result, ([(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')], 3L, 0, [('id', 8L), ('name', 253L)]))
|
||||
result = vtgate_conn._execute("select * from vt_user where keyrange('\x80', '')", {}, 'master')
|
||||
self.assertEqual(result, ([(4L, 'test 4'), (6L, 'test 6'), (7L, 'test 7')], 3L, 0, [('id', 8L), ('name', 253L)]))
|
||||
result = vtgate_conn._execute(
|
||||
"select * from vt_user where keyrange('', '\x80')", {}, 'master')
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')], 3L, 0,
|
||||
[('id', 8L), ('name', 253L)]))
|
||||
result = vtgate_conn._execute(
|
||||
"select * from vt_user where keyrange('\x80', '')", {}, 'master')
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(4L, 'test 4'), (6L, 'test 6'), (7L, 'test 7')], 3L, 0,
|
||||
[('id', 8L), ('name', 253L)]))
|
||||
|
||||
# Test scatter
|
||||
result = vtgate_conn._execute("select * from vt_user", {}, 'master')
|
||||
result = vtgate_conn._execute('select * from vt_user', {}, 'master')
|
||||
result[0].sort()
|
||||
self.assertEqual(result, (
|
||||
[(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'), (6L, 'test 6'), (7L, 'test 7')],
|
||||
6L,
|
||||
0,
|
||||
[('id', 8L), ('name', 253L)],
|
||||
))
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
|
||||
(6L, 'test 6'), (7L, 'test 7')], 6L, 0, [('id', 8L), ('name', 253L)]))
|
||||
|
||||
# Test stream
|
||||
stream_cursor = vtgate_conn.cursor('master', cursorclass=cursorv3.StreamCursor)
|
||||
stream_cursor.execute("select * from vt_user", {})
|
||||
stream_cursor = vtgate_conn.cursor(
|
||||
'master', cursorclass=cursorv3.StreamCursor)
|
||||
stream_cursor.execute('select * from vt_user', {})
|
||||
self.assertEqual(cursor.description, [('id', 8L), ('name', 253L)])
|
||||
rows = []
|
||||
for row in stream_cursor:
|
||||
rows.append(row)
|
||||
rows.sort()
|
||||
self.assertEqual(rows, [(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'), (6L, 'test 6'), (7L, 'test 7')])
|
||||
self.assertEqual(
|
||||
rows,
|
||||
[(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
|
||||
(6L, 'test 6'), (7L, 'test 7')])
|
||||
|
||||
# Test updates
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"update vt_user set name = :name where id = :id",
|
||||
'update vt_user set name = :name where id = :id',
|
||||
{'id': 1, 'name': 'test one'},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"update vt_user set name = :name where id = :id",
|
||||
'update vt_user set name = :name where id = :id',
|
||||
{'id': 4, 'name': 'test four'},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
vtgate_conn.commit()
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_user")
|
||||
self.assertEqual(result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3')))
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_user")
|
||||
self.assertEqual(result, ((4L, 'test four'), (6L, 'test 6'), (7L, 'test 7')))
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_user')
|
||||
self.assertEqual(
|
||||
result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3')))
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_user')
|
||||
self.assertEqual(
|
||||
result, ((4L, 'test four'), (6L, 'test 6'), (7L, 'test 7')))
|
||||
|
||||
# Test deletes
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"delete from vt_user where id = :id",
|
||||
'delete from vt_user where id = :id',
|
||||
{'id': 1},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"delete from vt_user where id = :id",
|
||||
'delete from vt_user where id = :id',
|
||||
{'id': 4},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
vtgate_conn.commit()
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_user")
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_user')
|
||||
self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3')))
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_user")
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_user')
|
||||
self.assertEqual(result, ((6L, 'test 6'), (7L, 'test 7')))
|
||||
result = lookup_master.mquery("vt_lookup", "select * from vt_user_idx")
|
||||
result = lookup_master.mquery('vt_lookup', 'select * from vt_user_idx')
|
||||
self.assertEqual(result, ((2L,), (3L,), (6L,), (7L,)))
|
||||
|
||||
def test_user2(self):
|
||||
|
@ -368,63 +394,77 @@ class TestVTGateFunctions(unittest.TestCase):
|
|||
vtgate_conn = get_connection()
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_user2 (id, name) values (:id, :name)",
|
||||
'insert into vt_user2 (id, name) values (:id, :name)',
|
||||
{'id': 1, 'name': 'name1'},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_user2 (id, name) values (:id, :name)",
|
||||
'insert into vt_user2 (id, name) values (:id, :name)',
|
||||
{'id': 7, 'name': 'name1'},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_user2 (id, name) values (:id, :name)",
|
||||
'insert into vt_user2 (id, name) values (:id, :name)',
|
||||
{'id': 2, 'name': 'name2'},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
vtgate_conn.commit()
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_user2")
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_user2')
|
||||
self.assertEqual(result, ((1L, 'name1'), (2L, 'name2')))
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_user2")
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_user2')
|
||||
self.assertEqual(result, ((7L, 'name1'),))
|
||||
result = lookup_master.mquery("vt_lookup", "select * from name_user2_map")
|
||||
result = lookup_master.mquery('vt_lookup', 'select * from name_user2_map')
|
||||
self.assertEqual(result, (('name1', 1L), ('name1', 7L), ('name2', 2L)))
|
||||
|
||||
# Test select by id
|
||||
result = vtgate_conn._execute("select * from vt_user2 where id = :id", {'id': 1}, 'master')
|
||||
self.assertEqual(result, ([(1, "name1")], 1L, 0, [('id', 8L), ('name', 253L)]))
|
||||
result = vtgate_conn._execute(
|
||||
'select * from vt_user2 where id = :id', {'id': 1}, 'master')
|
||||
self.assertEqual(
|
||||
result, ([(1, 'name1')], 1L, 0, [('id', 8L), ('name', 253L)]))
|
||||
|
||||
# Test select by lookup
|
||||
result = vtgate_conn._execute("select * from vt_user2 where name = :name", {'name': 'name1'}, 'master')
|
||||
result = vtgate_conn._execute(
|
||||
'select * from vt_user2 where name = :name', {'name': 'name1'},
|
||||
'master')
|
||||
result[0].sort()
|
||||
self.assertEqual(result, ([(1, "name1"), (7, "name1")], 2L, 0, [('id', 8L), ('name', 253L)]))
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(1, 'name1'), (7, 'name1')], 2L, 0, [('id', 8L), ('name', 253L)]))
|
||||
|
||||
# Test IN clause using non-unique vindex
|
||||
result = vtgate_conn._execute("select * from vt_user2 where name in ('name1', 'name2')", {}, 'master')
|
||||
result = vtgate_conn._execute(
|
||||
"select * from vt_user2 where name in ('name1', 'name2')", {},
|
||||
'master')
|
||||
result[0].sort()
|
||||
self.assertEqual(result, ([(1, "name1"), (2, "name2"), (7, "name1")], 3L, 0, [('id', 8L), ('name', 253L)]))
|
||||
result = vtgate_conn._execute("select * from vt_user2 where name in ('name1')", {}, 'master')
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(1, 'name1'), (2, 'name2'), (7, 'name1')], 3L, 0,
|
||||
[('id', 8L), ('name', 253L)]))
|
||||
result = vtgate_conn._execute(
|
||||
"select * from vt_user2 where name in ('name1')", {}, 'master')
|
||||
result[0].sort()
|
||||
self.assertEqual(result, ([(1, "name1"), (7, "name1")], 2L, 0, [('id', 8L), ('name', 253L)]))
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(1, 'name1'), (7, 'name1')], 2L, 0, [('id', 8L), ('name', 253L)]))
|
||||
|
||||
# Test delete
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"delete from vt_user2 where id = :id",
|
||||
'delete from vt_user2 where id = :id',
|
||||
{'id': 1},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"delete from vt_user2 where id = :id",
|
||||
'delete from vt_user2 where id = :id',
|
||||
{'id': 2},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
vtgate_conn.commit()
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_user2")
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_user2')
|
||||
self.assertEqual(result, ())
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_user2")
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_user2')
|
||||
self.assertEqual(result, ((7L, 'name1'),))
|
||||
result = lookup_master.mquery("vt_lookup", "select * from name_user2_map")
|
||||
result = lookup_master.mquery('vt_lookup', 'select * from name_user2_map')
|
||||
self.assertEqual(result, (('name1', 7L),))
|
||||
|
||||
def test_user_extra(self):
|
||||
|
@ -435,52 +475,57 @@ class TestVTGateFunctions(unittest.TestCase):
|
|||
i = x+1
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_user_extra (user_id, email) values (:user_id, :email)",
|
||||
'insert into vt_user_extra (user_id, email) '
|
||||
'values (:user_id, :email)',
|
||||
{'user_id': i, 'email': 'test %s' % i},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
vtgate_conn.commit()
|
||||
for x in xrange(count):
|
||||
i = x+1
|
||||
result = vtgate_conn._execute("select * from vt_user_extra where user_id = :user_id", {'user_id': i}, 'master')
|
||||
self.assertEqual(result, ([(i, "test %s" % i)], 1L, 0, [('user_id', 8L), ('email', 253L)]))
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_user_extra")
|
||||
result = vtgate_conn._execute(
|
||||
'select * from vt_user_extra where user_id = :user_id',
|
||||
{'user_id': i}, 'master')
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(i, 'test %s' % i)], 1L, 0, [('user_id', 8L), ('email', 253L)]))
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_user_extra')
|
||||
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_user_extra")
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_user_extra')
|
||||
self.assertEqual(result, ((4L, 'test 4'),))
|
||||
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"update vt_user_extra set email = :email where user_id = :user_id",
|
||||
'update vt_user_extra set email = :email where user_id = :user_id',
|
||||
{'user_id': 1, 'email': 'test one'},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"update vt_user_extra set email = :email where user_id = :user_id",
|
||||
'update vt_user_extra set email = :email where user_id = :user_id',
|
||||
{'user_id': 4, 'email': 'test four'},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
vtgate_conn.commit()
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_user_extra")
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_user_extra')
|
||||
self.assertEqual(result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3')))
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_user_extra")
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_user_extra')
|
||||
self.assertEqual(result, ((4L, 'test four'),))
|
||||
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"delete from vt_user_extra where user_id = :user_id",
|
||||
'delete from vt_user_extra where user_id = :user_id',
|
||||
{'user_id': 1},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"delete from vt_user_extra where user_id = :user_id",
|
||||
'delete from vt_user_extra where user_id = :user_id',
|
||||
{'user_id': 4},
|
||||
"master")
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
vtgate_conn.commit()
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_user_extra")
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_user_extra')
|
||||
self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3')))
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_user_extra")
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_user_extra')
|
||||
self.assertEqual(result, ())
|
||||
|
||||
def test_music(self):
|
||||
|
@ -491,73 +536,89 @@ class TestVTGateFunctions(unittest.TestCase):
|
|||
i = x+1
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_music (user_id, song) values (:user_id, :song)",
|
||||
'insert into vt_music (user_id, song) values (:user_id, :song)',
|
||||
{'user_id': i, 'song': 'test %s' % i},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, i, []))
|
||||
vtgate_conn.commit()
|
||||
for x in xrange(count):
|
||||
i = x+1
|
||||
result = vtgate_conn._execute("select * from vt_music where id = :id", {'id': i}, 'master')
|
||||
self.assertEqual(result, ([(i, i, "test %s" % i)], 1, 0, [('user_id', 8L), ('id', 8L), ('song', 253L)]))
|
||||
result = vtgate_conn._execute(
|
||||
'select * from vt_music where id = :id', {'id': i}, 'master')
|
||||
self.assertEqual(
|
||||
result,
|
||||
([(i, i, 'test %s' % i)], 1, 0,
|
||||
[('user_id', 8L), ('id', 8L), ('song', 253L)]))
|
||||
vtgate_conn.begin()
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_music (user_id, id, song) values (:user_id, :id, :song)",
|
||||
'insert into vt_music (user_id, id, song) '
|
||||
'values (:user_id, :id, :song)',
|
||||
{'user_id': 5, 'id': 6, 'song': 'test 6'},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 0L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_music (user_id, song) values (:user_id, :song)",
|
||||
'insert into vt_music (user_id, song) values (:user_id, :song)',
|
||||
{'user_id': 6, 'song': 'test 7'},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 7L, []))
|
||||
result = vtgate_conn._execute(
|
||||
"insert into vt_music (user_id, song) values (:user_id, :song)",
|
||||
'insert into vt_music (user_id, song) values (:user_id, :song)',
|
||||
{'user_id': 6, 'song': 'test 8'},
|
||||
'master')
|
||||
self.assertEqual(result, ([], 1L, 8L, []))
|
||||
vtgate_conn.commit()
|
||||
result = shard_0_master.mquery("vt_user", "select * from vt_music")
|
||||
self.assertEqual(result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'), (5L, 6L, 'test 6')))
|
||||
result = shard_1_master.mquery("vt_user", "select * from vt_music")
|
||||
self.assertEqual(result, ((4L, 4L, 'test 4'), (6L, 7L, 'test 7'), (6L, 8L, 'test 8')))
|
||||
result = lookup_master.mquery("vt_lookup", "select * from music_user_map")
|
||||
self.assertEqual(result, ((1L, 1L), (2L, 2L), (3L, 3L), (4L, 4L), (6L, 5L), (7L, 6L), (8L, 6L)))
|
||||
result = shard_0_master.mquery('vt_user', 'select * from vt_music')
|
||||
self.assertEqual(
|
||||
result,
|
||||
((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
|
||||
(5L, 6L, 'test 6')))
|
||||
result = shard_1_master.mquery('vt_user', 'select * from vt_music')
|
||||
self.assertEqual(
|
||||
result, ((4L, 4L, 'test 4'), (6L, 7L, 'test 7'), (6L, 8L, 'test 8')))
|
||||
result = lookup_master.mquery('vt_lookup', 'select * from music_user_map')
|
||||
self.assertEqual(
|
||||
result,
|
||||
((1L, 1L), (2L, 2L), (3L, 3L), (4L, 4L), (6L, 5L), (7L, 6L), (8L, 6L)))
|
||||

vtgate_conn.begin()
result = vtgate_conn._execute(
"update vt_music set song = :song where id = :id",
'update vt_music set song = :song where id = :id',
{'id': 6, 'song': 'test six'},
"master")
'master')
self.assertEqual(result, ([], 1L, 0L, []))
result = vtgate_conn._execute(
"update vt_music set song = :song where id = :id",
'update vt_music set song = :song where id = :id',
{'id': 7, 'song': 'test seven'},
"master")
'master')
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery("vt_user", "select * from vt_music")
self.assertEqual(result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'), (5L, 6L, 'test six')))
result = shard_1_master.mquery("vt_user", "select * from vt_music")
self.assertEqual(result, ((4L, 4L, 'test 4'), (6L, 7L, 'test seven'), (6L, 8L, 'test 8')))
result = shard_0_master.mquery('vt_user', 'select * from vt_music')
self.assertEqual(
result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
(5L, 6L, 'test six')))
result = shard_1_master.mquery('vt_user', 'select * from vt_music')
self.assertEqual(
result, ((4L, 4L, 'test 4'), (6L, 7L, 'test seven'),
(6L, 8L, 'test 8')))

vtgate_conn.begin()
result = vtgate_conn._execute(
"delete from vt_music where id = :id",
'delete from vt_music where id = :id',
{'id': 3},
"master")
'master')
self.assertEqual(result, ([], 1L, 0L, []))
result = vtgate_conn._execute(
"delete from vt_music where user_id = :user_id",
'delete from vt_music where user_id = :user_id',
{'user_id': 6},
"master")
'master')
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery("vt_user", "select * from vt_music")
self.assertEqual(result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (5L, 6L, 'test six')))
result = shard_1_master.mquery("vt_user", "select * from vt_music")
result = shard_0_master.mquery('vt_user', 'select * from vt_music')
self.assertEqual(
result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (5L, 6L, 'test six')))
result = shard_1_master.mquery('vt_user', 'select * from vt_music')
self.assertEqual(result, ((4L, 4L, 'test 4'),))
result = lookup_master.mquery("vt_lookup", "select * from music_user_map")
result = lookup_master.mquery('vt_lookup', 'select * from music_user_map')
self.assertEqual(result, ((1L, 1L), (2L, 2L), (4L, 4L), (6L, 5L)))
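# Note: the two deletes above also clean up the lookup table -- music ids 3, 7
# and 8 are gone both from vt_music on the shards and from music_user_map, so
# the lookup rows stay consistent with the base table.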

def test_music_extra(self):

@ -565,60 +626,69 @@ class TestVTGateFunctions(unittest.TestCase):
vtgate_conn = get_connection()
vtgate_conn.begin()
result = vtgate_conn._execute(
"insert into vt_music_extra (music_id, user_id, artist) values (:music_id, :user_id, :artist)",
'insert into vt_music_extra (music_id, user_id, artist) '
'values (:music_id, :user_id, :artist)',
{'music_id': 1, 'user_id': 1, 'artist': 'test 1'},
'master')
self.assertEqual(result, ([], 1L, 0L, []))
result = vtgate_conn._execute(
"insert into vt_music_extra (music_id, artist) values (:music_id, :artist)",
'insert into vt_music_extra (music_id, artist) '
'values (:music_id, :artist)',
{'music_id': 6, 'artist': 'test 6'},
'master')
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = vtgate_conn._execute("select * from vt_music_extra where music_id = :music_id", {'music_id': 6}, 'master')
self.assertEqual(result, ([(6L, 5L, "test 6")], 1, 0, [('music_id', 8L), ('user_id', 8L), ('artist', 253L)]))
result = shard_0_master.mquery("vt_user", "select * from vt_music_extra")
result = vtgate_conn._execute(
'select * from vt_music_extra where music_id = :music_id',
{'music_id': 6}, 'master')
self.assertEqual(
result, ([(6L, 5L, 'test 6')], 1, 0,
[('music_id', 8L), ('user_id', 8L), ('artist', 253L)]))
result = shard_0_master.mquery('vt_user', 'select * from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test 6')))
result = shard_1_master.mquery("vt_user", "select * from vt_music_extra")
result = shard_1_master.mquery('vt_user', 'select * from vt_music_extra')
self.assertEqual(result, ())

vtgate_conn.begin()
result = vtgate_conn._execute(
"update vt_music_extra set artist = :artist where music_id = :music_id",
'update vt_music_extra set artist = :artist '
'where music_id = :music_id',
{'music_id': 6, 'artist': 'test six'},
"master")
'master')
self.assertEqual(result, ([], 1L, 0L, []))
result = vtgate_conn._execute(
"update vt_music_extra set artist = :artist where music_id = :music_id",
'update vt_music_extra set artist = :artist '
'where music_id = :music_id',
{'music_id': 7, 'artist': 'test seven'},
"master")
'master')
self.assertEqual(result, ([], 0L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery("vt_user", "select * from vt_music_extra")
result = shard_0_master.mquery('vt_user', 'select * from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test six')))

vtgate_conn.begin()
result = vtgate_conn._execute(
"delete from vt_music_extra where music_id = :music_id",
'delete from vt_music_extra where music_id = :music_id',
{'music_id': 6},
"master")
'master')
self.assertEqual(result, ([], 1L, 0L, []))
result = vtgate_conn._execute(
"delete from vt_music_extra where music_id = :music_id",
'delete from vt_music_extra where music_id = :music_id',
{'music_id': 7},
"master")
'master')
self.assertEqual(result, ([], 0L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery("vt_user", "select * from vt_music_extra")
result = shard_0_master.mquery('vt_user', 'select * from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'),))

def test_insert_value_required(self):
vtgate_conn = get_connection()
try:
vtgate_conn.begin()
with self.assertRaisesRegexp(dbexceptions.DatabaseError, '.*value must be supplied.*'):
with self.assertRaisesRegexp(
dbexceptions.DatabaseError, '.*value must be supplied.*'):
vtgate_conn._execute(
"insert into vt_user_extra (email) values (:email)",
'insert into vt_user_extra (email) values (:email)',
{'email': 'test 10'},
'master')
finally:

@ -627,43 +697,64 @@ class TestVTGateFunctions(unittest.TestCase):
def test_vtclient(self):
"""This test uses vtclient to send and receive various queries.
"""
utils.vtgate.vtclient('insert into vt_user_extra(user_id, email) values (:v1, :v2)', bindvars=[10, 'test 10'])
utils.vtgate.vtclient(
'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
bindvars=[10, 'test 10'])

out, err = utils.vtgate.vtclient('select * from vt_user_extra where user_id = :v1', bindvars=[10])
self.assertEqual(out, ['Index\tuser_id\temail','0\t10\ttest 10'])
out, err = utils.vtgate.vtclient(
'select * from vt_user_extra where user_id = :v1', bindvars=[10])
self.assertEqual(out, ['Index\tuser_id\temail', '0\t10\ttest 10'])

utils.vtgate.vtclient('update vt_user_extra set email=:v2 where user_id = :v1', bindvars=[10, 'test 1000'])
utils.vtgate.vtclient(
'update vt_user_extra set email=:v2 where user_id = :v1',
bindvars=[10, 'test 1000'])

out, err = utils.vtgate.vtclient('select * from vt_user_extra where user_id = :v1', bindvars=[10], streaming=True)
self.assertEqual(out, ['Index\tuser_id\temail','0\t10\ttest 1000'])
out, err = utils.vtgate.vtclient(
'select * from vt_user_extra where user_id = :v1', bindvars=[10],
streaming=True)
self.assertEqual(out, ['Index\tuser_id\temail', '0\t10\ttest 1000'])

utils.vtgate.vtclient('delete from vt_user_extra where user_id = :v1', bindvars=[10])
utils.vtgate.vtclient(
'delete from vt_user_extra where user_id = :v1', bindvars=[10])

out, err = utils.vtgate.vtclient('select * from vt_user_extra where user_id = :v1', bindvars=[10])
out, err = utils.vtgate.vtclient(
'select * from vt_user_extra where user_id = :v1', bindvars=[10])
self.assertEqual(out, ['Index\tuser_id\temail'])

def test_vtctl_vtgate_execute(self):
"""This test uses 'vtctl VtGateExecute' to send and receive various queries.
"""
utils.vtgate.execute('insert into vt_user_extra(user_id, email) values (:user_id, :email)', bindvars={'user_id': 11, 'email':'test 11'})
utils.vtgate.execute(
'insert into vt_user_extra(user_id, email) values (:user_id, :email)',
bindvars={'user_id': 11, 'email': 'test 11'})

qr = utils.vtgate.execute('select user_id, email from vt_user_extra where user_id = :user_id', bindvars={'user_id': 11})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
logging.debug('Original row: %s', str(qr))
self.assertEqual(len(qr['Rows']), 1)
v = qr['Rows'][0][1]
self.assertEqual(v, 'test 11')

utils.vtgate.execute('update vt_user_extra set email=:email where user_id = :user_id', bindvars={'user_id': 11, 'email':'test 1100'})
utils.vtgate.execute(
'update vt_user_extra set email=:email where user_id = :user_id',
bindvars={'user_id': 11, 'email': 'test 1100'})

qr = utils.vtgate.execute('select user_id, email from vt_user_extra where user_id = :user_id', bindvars={'user_id': 11})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
logging.debug('Modified row: %s', str(qr))
self.assertEqual(len(qr['Rows']), 1)
v = qr['Rows'][0][1]
self.assertEqual(v, 'test 1100')

utils.vtgate.execute('delete from vt_user_extra where user_id = :user_id', bindvars={'user_id': 11})
utils.vtgate.execute(
'delete from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})

qr = utils.vtgate.execute('select user_id, email from vt_user_extra where user_id = :user_id', bindvars={'user_id': 11})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
self.assertEqual(len(qr['Rows'] or []), 0)
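# The `or []` above guards against the Rows field being empty or absent when
# the result set has no rows, so len() can still be applied safely.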

if __name__ == '__main__':

test/worker.py

@ -58,24 +58,26 @@ shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()

# split shards
# range "" - 80
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ""
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()

shard_tablets = ShardTablets(shard_master, [shard_replica], [shard_rdonly1])
shard_0_tablets = ShardTablets(shard_0_master, [shard_0_replica], [shard_0_rdonly1])
shard_1_tablets = ShardTablets(shard_1_master, [shard_1_replica], [shard_1_rdonly1])
shard_0_tablets = ShardTablets(
shard_0_master, [shard_0_replica], [shard_0_rdonly1])
shard_1_tablets = ShardTablets(
shard_1_master, [shard_1_replica], [shard_1_rdonly1])
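# ShardTablets (defined earlier in this file) is assumed to be a simple
# container roughly equivalent to:
#   ShardTablets = namedtuple('ShardTablets', 'master replicas rdonlys')
# plus an `all_tablets` helper that chains master, replicas and rdonlys, which
# is how the instances above are used by the tests below.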


def init_keyspace():
"""Creates a `test_keyspace` keyspace with a sharding key."""
utils.run_vtctl(['CreateKeyspace', '-sharding_column_name', 'keyspace_id',
'-sharding_column_type', KEYSPACE_ID_TYPE,'test_keyspace'])
'-sharding_column_type', KEYSPACE_ID_TYPE, 'test_keyspace'])


def setUpModule():

@ -131,10 +133,13 @@ def tearDownModule():
shard_1_replica.remove_tree()
shard_1_rdonly1.remove_tree()


class TestBaseSplitClone(unittest.TestCase):
"""Abstract test base class for testing the SplitClone worker."""

def run_shard_tablets(self, shard_name, shard_tablets, create_db=True, create_table=True, wait_state='SERVING'):
def run_shard_tablets(
self, shard_name, shard_tablets, create_db=True, create_table=True,
wait_state='SERVING'):
"""Handles all the necessary work for initially running a shard's tablets.

This encompasses the following steps:

@ -146,11 +151,11 @@ class TestBaseSplitClone(unittest.TestCase):
7. (optional) Running initial schema setup

Args:
shard_name - the name of the shard to start tablets in
shard_tablets - an instance of ShardTablets for the given shard
wait_state - string, the vttablet state that we should wait for
create_db - boolean, True iff we should create a db on the tablets
create_table - boolean, True iff we should create a table on the tablets
shard_name: the name of the shard to start tablets in
shard_tablets: an instance of ShardTablets for the given shard
wait_state: string, the vttablet state that we should wait for
create_db: boolean, True iff we should create a db on the tablets
create_table: boolean, True iff we should create a table on the tablets
"""
# If requested, create databases.
for tablet in shard_tablets.all_tablets:

@ -158,17 +163,23 @@ class TestBaseSplitClone(unittest.TestCase):
tablet.create_db('vt_test_keyspace')

# Start tablets.
# Specifying "target_tablet_type" enables the health check i.e. tablets will
# be automatically returned to the serving graph after a SplitClone or SplitDiff.
# NOTE: The future master has to be started with type "replica".
shard_tablets.master.start_vttablet(wait_for_state=None, target_tablet_type='replica',
init_keyspace='test_keyspace', init_shard=shard_name)
#
# Specifying 'target_tablet_type' enables the health check
# i.e. tablets will be automatically returned to the serving graph
# after a SplitClone or SplitDiff.
#
# NOTE: The future master has to be started with type 'replica'.
shard_tablets.master.start_vttablet(
wait_for_state=None, target_tablet_type='replica',
init_keyspace='test_keyspace', init_shard=shard_name)
for tablet in shard_tablets.replicas:
tablet.start_vttablet(wait_for_state=None, target_tablet_type='replica',
init_keyspace='test_keyspace', init_shard=shard_name)
tablet.start_vttablet(
wait_for_state=None, target_tablet_type='replica',
init_keyspace='test_keyspace', init_shard=shard_name)
for tablet in shard_tablets.rdonlys:
tablet.start_vttablet(wait_for_state=None, target_tablet_type='rdonly',
init_keyspace='test_keyspace', init_shard=shard_name)
tablet.start_vttablet(
wait_for_state=None, target_tablet_type='rdonly',
init_keyspace='test_keyspace', init_shard=shard_name)
# Block until tablets are up and we can enable replication.
# We don't care about the tablets' state which may have been changed by the
# health check from SERVING to NOT_SERVING anyway.
@ -176,21 +187,22 @@ class TestBaseSplitClone(unittest.TestCase):
tablet.wait_for_vttablet_state('NOT_SERVING')

# Reparent to choose an initial master and enable replication.
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/%s' % shard_name,
shard_tablets.master.tablet_alias], auto_log=True)
utils.run_vtctl(
['InitShardMaster', '-force', 'test_keyspace/%s' % shard_name,
shard_tablets.master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# Enforce a health check instead of waiting for the next periodic one.
# (saves up to 1 second execution time on average)
if wait_state == 'SERVING':
for tablet in shard_tablets.replicas:
utils.run_vtctl(["RunHealthCheck", tablet.tablet_alias, "replica"])
utils.run_vtctl(['RunHealthCheck', tablet.tablet_alias, 'replica'])
for tablet in shard_tablets.rdonlys:
utils.run_vtctl(["RunHealthCheck", tablet.tablet_alias, "rdonly"])
utils.run_vtctl(['RunHealthCheck', tablet.tablet_alias, 'rdonly'])

# Wait for tablet state to change after starting all tablets. This allows
# us to start all tablets at once, instead of sequentially waiting.
# NOTE: Replication has to be enabled first or the health check will
# set a replica or rdonly tablet back to NOT_SERVING.
for tablet in shard_tablets.all_tablets:
tablet.wait_for_vttablet_state(wait_state)

@ -219,39 +231,43 @@ class TestBaseSplitClone(unittest.TestCase):
auto_log=True)

def _insert_values(self, tablet, id_offset, msg, keyspace_id, num_values):
"""Inserts values in the MySQL database along with the required routing comments.
"""Inserts values into MySQL along with the required routing comments.

Args:
tablet - the Tablet instance to insert into
id - the value of `id` column
msg - the value of `msg` column
keyspace_id - the value of `keyspace_id` column
tablet: the Tablet instance to modify.
id: the value of `id` column.
msg: the value of `msg` column.
keyspace_id: the value of `keyspace_id` column.
"""
k = "%d" % keyspace_id
k = '%d' % keyspace_id
values_str = ''
for i in xrange(num_values):
if i != 0:
values_str += ','
values_str += '(%d, "%s", 0x%x)' % (id_offset + i, msg, keyspace_id)
tablet.mquery('vt_test_keyspace', [
'begin',
'insert into worker_test(id, msg, keyspace_id) values%s /* EMD keyspace_id:%s*/' % (values_str, k),
'commit'
], write=True)
values_str += "(%d, '%s', 0x%x)" % (id_offset + i, msg, keyspace_id)
tablet.mquery(
'vt_test_keyspace', [
'begin',
'insert into worker_test(id, msg, keyspace_id) values%s '
'/* EMD keyspace_id:%s*/' % (values_str, k),
'commit'],
write=True)
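# As a rough illustration (values are made up), a call with id_offset=0,
# msg='msg-shard-0', keyspace_id=0x4000000000000000 and num_values=2 builds a
# statement along the lines of:
#   insert into worker_test(id, msg, keyspace_id) values
#   (0, 'msg-shard-0', 0x4000000000000000),(1, 'msg-shard-0', 0x4000000000000000)
#   /* EMD keyspace_id:4611686018427387904*/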

def insert_values(self, tablet, num_values, num_shards, offset=0, keyspace_id_range=2**64):
def insert_values(
self, tablet, num_values, num_shards, offset=0, keyspace_id_range=2**64):
"""Inserts simple values, one for each potential shard.

Each row is given a message that contains the shard number, so we can easily
verify that the source and destination shards have the same data.

Args:
tablet - the Tablet instance to insert into
num_values - the number of values to insert
num_shards - the number of shards that we expect to have
offset - amount that we should offset the `id`s by. This is useful for
tablet: the Tablet instance to modify.
num_values: The number of values to insert.
num_shards: the number of shards that we expect to have.
offset: amount that we should offset the `id`s by. This is useful for
inserting values multiple times.
keyspace_id_range - the number of distinct values that the keyspace id can have
keyspace_id_range: the number of distinct values that the keyspace id
can have.
"""
shard_width = keyspace_id_range / num_shards
shard_offsets = [i * shard_width for i in xrange(num_shards)]

@ -262,41 +278,47 @@ class TestBaseSplitClone(unittest.TestCase):
shard_offsets[shard_num],
num_values)

def assert_shard_data_equal(self, shard_num, source_tablet, destination_tablet):
"""Asserts that a shard's data is identical on source and destination tablets.
def assert_shard_data_equal(
self, shard_num, source_tablet, destination_tablet):
"""Asserts source and destination tablets have identical shard data.

Args:
shard_num - the shard number of the shard that we want to verify the data of
source_tablet - Tablet instance of the source shard
destination_tablet - Tablet instance of the destination shard
shard_num: The shard number of the shard that we want to verify.
source_tablet: Tablet instance of the source shard.
destination_tablet: Tablet instance of the destination shard.
"""
select_query = 'select * from worker_test where msg="msg-shard-%s" order by id asc' % shard_num
select_query = (
'select * from worker_test where msg="msg-shard-%s" order by id asc' %
shard_num)

# Make sure all the right rows made it from the source to the destination
source_rows = source_tablet.mquery('vt_test_keyspace', select_query)
destination_rows = destination_tablet.mquery('vt_test_keyspace', select_query)
destination_rows = destination_tablet.mquery(
'vt_test_keyspace', select_query)
self.assertEqual(source_rows, destination_rows)

# Make sure that there are no extra rows on the destination
count_query = 'select count(*) from worker_test'
destination_count = destination_tablet.mquery('vt_test_keyspace', count_query)[0][0]
destination_count = destination_tablet.mquery(
'vt_test_keyspace', count_query)[0][0]
self.assertEqual(destination_count, len(destination_rows))
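# The extra count check matters because select_query filters on this shard's
# msg value; comparing the total row count against the filtered rows catches
# stray rows that were copied to this destination but belong to another shard.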

def run_split_diff(self, keyspace_shard, source_tablets, destination_tablets):
"""Runs a vtworker SplitDiff on the given keyspace/shard, and then sets all
former rdonly slaves back to rdonly.
"""Runs a vtworker SplitDiff on the given keyspace/shard.

Sets all former rdonly slaves back to rdonly.

Args:
keyspace_shard - keyspace/shard to run SplitDiff on (string)
source_tablets - ShardTablets instance for the source shard
destination_tablets - ShardTablets instance for the destination shard
keyspace_shard: keyspace/shard to run SplitDiff on (string)
source_tablets: ShardTablets instance for the source shard
destination_tablets: ShardTablets instance for the destination shard
"""
logging.debug("Running vtworker SplitDiff for %s" % keyspace_shard)
logging.debug('Running vtworker SplitDiff for %s', keyspace_shard)
stdout, stderr = utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff',
keyspace_shard], auto_log=True)

def setUp(self):
"""Creates the necessary shards, starts the tablets, and inserts some data."""
"""Creates shards, starts the tablets, and inserts some data."""
self.run_shard_tablets('0', shard_tablets)
# create the split shards
self.run_shard_tablets('-80', shard_0_tablets, create_db=False,

@ -304,17 +326,19 @@ class TestBaseSplitClone(unittest.TestCase):
self.run_shard_tablets('80-', shard_1_tablets, create_db=False,
create_table=False, wait_state='NOT_SERVING')

logging.debug("Start inserting initial data: %s rows", utils.options.num_insert_rows)
logging.debug(
'Start inserting initial data: %s rows', utils.options.num_insert_rows)
self.insert_values(shard_master, utils.options.num_insert_rows, 2)
logging.debug("Done inserting initial data, waiting for replication to catch up")
logging.debug(
'Done inserting initial data, waiting for replication to catch up')
utils.wait_for_replication_pos(shard_master, shard_rdonly1)
logging.debug("Replication on source rdonly tablet is caught up")
logging.debug('Replication on source rdonly tablet is caught up')

def tearDown(self):
"""Tries to do the minimum to reset topology and tablets to their initial states.
"""Does the minimum to reset topology and tablets to their initial states.

When benchmarked, this seemed to take around 30% of the time of (setupModule +
tearDownModule).
When benchmarked, this seemed to take around 30% of the time of
(setupModule + tearDownModule).
"""
for shard_tablet in [shard_tablets, shard_0_tablets, shard_1_tablets]:
for tablet in shard_tablet.all_tablets:

@ -325,18 +349,17 @@ class TestBaseSplitClone(unittest.TestCase):
tablet.kill_vttablet()
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
for shard in ['0', '-80', '80-']:
utils.run_vtctl(['DeleteShard', 'test_keyspace/%s' % shard], auto_log=True)
utils.run_vtctl(
['DeleteShard', 'test_keyspace/%s' % shard], auto_log=True)


class TestBaseSplitCloneResiliency(TestBaseSplitClone):
"""Tests that the SplitClone worker is resilient to particular failures."""

def setUp(self):
super(TestBaseSplitCloneResiliency, self).setUp()
self.copy_schema_to_destination_shards()

def tearDown(self):
super(TestBaseSplitCloneResiliency, self).tearDown()

def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
"""Verifies that vtworker can successfully copy data for a SplitClone.

@ -350,74 +373,89 @@ class TestBaseSplitCloneResiliency(TestBaseSplitClone):
6. Verify that the data was copied successfully to both new shards

Args:
mysql_down - boolean, True iff we expect the MySQL instances on the
mysql_down: boolean, True iff we expect the MySQL instances on the
destination masters to be down.

Raises:
AssertionError if things didn't go as expected.
"""
worker_proc, worker_port, _ = utils.run_vtworker_bg(['--cell', 'test_nj',
'SplitClone',
'--source_reader_count', '1',
'--destination_pack_count', '1',
'--destination_writer_count', '1',
'--strategy=-populate_blp_checkpoint',
'test_keyspace/0'],
auto_log=True)
worker_proc, worker_port, _ = utils.run_vtworker_bg(
['--cell', 'test_nj',
'SplitClone',
'--source_reader_count', '1',
'--destination_pack_count', '1',
'--destination_writer_count', '1',
'--strategy=-populate_blp_checkpoint',
'test_keyspace/0'],
auto_log=True)
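# The reader/pack/writer counts are all set to 1 here, presumably to keep the
# copy single-threaded and slow enough that the reparent below can happen
# while the worker is still copying (see the num_insert_rows comment further
# down).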

if mysql_down:
# If MySQL is down, we wait until resolving at least twice (to verify that
# we do reresolve and retry due to MySQL being down).
worker_vars = utils.poll_for_vars('vtworker', worker_port,
'WorkerDestinationActualResolves >= 2',
condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 2)
self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
"expected vtworker to retry, but it didn't")
logging.debug("Worker has resolved at least twice, starting reparent now")
worker_vars = utils.poll_for_vars(
'vtworker', worker_port,
'WorkerDestinationActualResolves >= 2',
condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 2)
self.assertNotEqual(
worker_vars['WorkerRetryCount'], {},
"expected vtworker to retry, but it didn't")
logging.debug('Worker has resolved at least twice, starting reparent now')

# Original masters have no running MySQL, so need to force the reparent
utils.run_vtctl(['EmergencyReparentShard', 'test_keyspace/-80',
shard_0_replica.tablet_alias], auto_log=True)
utils.run_vtctl(['EmergencyReparentShard', 'test_keyspace/80-',
shard_1_replica.tablet_alias], auto_log=True)
utils.run_vtctl(
['EmergencyReparentShard', 'test_keyspace/-80',
shard_0_replica.tablet_alias], auto_log=True)
utils.run_vtctl(
['EmergencyReparentShard', 'test_keyspace/80-',
shard_1_replica.tablet_alias], auto_log=True)

else:
utils.poll_for_vars('vtworker', worker_port,
'WorkerDestinationActualResolves >= 1',
condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 1)
logging.debug("Worker has resolved at least once, starting reparent now")
utils.poll_for_vars(
'vtworker', worker_port,
'WorkerDestinationActualResolves >= 1',
condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 1)
logging.debug('Worker has resolved at least once, starting reparent now')

utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/-80',
shard_0_replica.tablet_alias], auto_log=True)
utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/80-',
shard_1_replica.tablet_alias], auto_log=True)
utils.run_vtctl(
['PlannedReparentShard', 'test_keyspace/-80',
shard_0_replica.tablet_alias], auto_log=True)
utils.run_vtctl(
['PlannedReparentShard', 'test_keyspace/80-',
shard_1_replica.tablet_alias], auto_log=True)

logging.debug("Polling for worker state")
# There are a couple of race conditions around this, that we need to be careful of:
# 1. It's possible for the reparent step to take so long that the worker will
# actually finish before we get to the polling step. To workaround this,
# the test takes a parameter to increase the number of rows that the worker
# has to copy (with the idea being to slow the worker down).
# 2. If the worker has a huge number of rows to copy, it's possible for the
# polling to timeout before the worker has finished copying the data.
logging.debug('Polling for worker state')
# There are a couple of race conditions around this, that we need
# to be careful of:
#
# 1. It's possible for the reparent step to take so long that the
# worker will actually finish before we get to the polling
# step. To workaround this, the test takes a parameter to
# increase the number of rows that the worker has to copy (with
# the idea being to slow the worker down).
#
# 2. If the worker has a huge number of rows to copy, it's
# possible for the polling to timeout before the worker has
# finished copying the data.
#
# You should choose a value for num_insert_rows, such that this test passes
# for your environment (trial-and-error...)
worker_vars = utils.poll_for_vars('vtworker', worker_port,
'WorkerState == cleaning up',
condition_fn=lambda v: v.get('WorkerState') == 'cleaning up',
# We know that vars should already be ready, since we read them earlier
require_vars=True,
# We're willing to let the test run for longer to make it less flaky.
# This should still fail fast if something goes wrong with vtworker,
# because of the require_vars flag above.
timeout=5*60)
worker_vars = utils.poll_for_vars(
'vtworker', worker_port,
'WorkerState == cleaning up',
condition_fn=lambda v: v.get('WorkerState') == 'cleaning up',
# We know that vars should already be ready, since we read them earlier
require_vars=True,
# We're willing to let the test run for longer to make it less flaky.
# This should still fail fast if something goes wrong with vtworker,
# because of the require_vars flag above.
timeout=5*60)

# Verify that we were forced to reresolve and retry.
self.assertGreater(worker_vars['WorkerDestinationActualResolves'], 1)
self.assertGreater(worker_vars['WorkerDestinationAttemptedResolves'], 1)
self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
"expected vtworker to retry, but it didn't")
self.assertNotEqual(
worker_vars['WorkerRetryCount'], {},
"expected vtworker to retry, but it didn't")

utils.wait_procs([worker_proc])

@ -432,7 +470,7 @@ class TestBaseSplitCloneResiliency(TestBaseSplitClone):
class TestReparentDuringWorkerCopy(TestBaseSplitCloneResiliency):

def test_reparent_during_worker_copy(self):
"""This test simulates a destination reparent during a worker SplitClone copy.
"""Simulates a destination reparent during a worker SplitClone copy.

The SplitClone command should be able to gracefully handle the reparent and
end up with the correct data on the destination.
@ -448,30 +486,37 @@ class TestReparentDuringWorkerCopy(TestBaseSplitCloneResiliency):
class TestMysqlDownDuringWorkerCopy(TestBaseSplitCloneResiliency):

def setUp(self):
"""Shuts down MySQL on the destination masters (in addition to the base setup)"""
logging.debug("Starting base setup for MysqlDownDuringWorkerCopy")
"""Shuts down MySQL on the destination masters.

Also runs base setup.
"""
logging.debug('Starting base setup for MysqlDownDuringWorkerCopy')
super(TestMysqlDownDuringWorkerCopy, self).setUp()

logging.debug("Starting MysqlDownDuringWorkerCopy-specific setup")
utils.wait_procs([shard_0_master.shutdown_mysql(),
shard_1_master.shutdown_mysql()])
logging.debug("Finished MysqlDownDuringWorkerCopy-specific setup")
logging.debug('Starting MysqlDownDuringWorkerCopy-specific setup')
utils.wait_procs(
[shard_0_master.shutdown_mysql(),
shard_1_master.shutdown_mysql()])
logging.debug('Finished MysqlDownDuringWorkerCopy-specific setup')

def tearDown(self):
"""Restarts the MySQL processes that were killed during the setup."""
logging.debug("Starting MysqlDownDuringWorkerCopy-specific tearDown")
utils.wait_procs([shard_0_master.start_mysql(),
shard_1_master.start_mysql()])
logging.debug("Finished MysqlDownDuringWorkerCopy-specific tearDown")
logging.debug('Starting MysqlDownDuringWorkerCopy-specific tearDown')
utils.wait_procs(
[shard_0_master.start_mysql(),
shard_1_master.start_mysql()])
logging.debug('Finished MysqlDownDuringWorkerCopy-specific tearDown')

super(TestMysqlDownDuringWorkerCopy, self).tearDown()
logging.debug("Finished base tearDown for MysqlDownDuringWorkerCopy")
logging.debug('Finished base tearDown for MysqlDownDuringWorkerCopy')

def test_mysql_down_during_worker_copy(self):
"""This test simulates MySQL being down on the destination masters."""
self.verify_successful_worker_copy_with_reparent(mysql_down=True)


class TestVtworkerWebinterface(unittest.TestCase):

def setUp(self):
# Run vtworker without any optional arguments to start in interactive mode.
self.worker_proc, self.worker_port, _ = utils.run_vtworker_bg([])

@ -492,28 +537,38 @@ class TestVtworkerWebinterface(unittest.TestCase):
pass
if done:
break
timeout = utils.wait_step('worker /status webpage must be available', timeout)
timeout = utils.wait_step(
'worker /status webpage must be available', timeout)

# Run the command twice to make sure it's idempotent.
for _ in range(2):
# Run Ping command.
try:
urllib2.urlopen(worker_base_url + '/Debugging/Ping', data=urllib.urlencode({'message':'pong'})).read()
raise Exception("Should have thrown an HTTPError for the redirect.")
urllib2.urlopen(
worker_base_url + '/Debugging/Ping',
data=urllib.urlencode({'message': 'pong'})).read()
raise Exception('Should have thrown an HTTPError for the redirect.')
except urllib2.HTTPError as e:
self.assertEqual(e.code, 307)
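# A 307 is expected here because urllib2 (Python 2) refuses to automatically
# re-submit POST data on a temporary redirect, so the redirect issued by
# vtworker surfaces as an HTTPError instead of being followed.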
# Verify that the command logged something and it's available at /status.
status = urllib2.urlopen(worker_base_url + '/status').read()
self.assertIn("Ping command was called with message: 'pong'", status, "Command did not log output to /status")
self.assertIn(
"Ping command was called with message: 'pong'", status,
'Command did not log output to /status')

# Reset the job.
urllib2.urlopen(worker_base_url + '/reset').read()
status_after_reset = urllib2.urlopen(worker_base_url + '/status').read()
self.assertIn("This worker is idle.", status_after_reset, "/status does not indicate that the reset was successful")
self.assertIn(
'This worker is idle.', status_after_reset,
'/status does not indicate that the reset was successful')


def add_test_options(parser):
parser.add_option('--num_insert_rows', type="int", default=3000,
help="The number of rows, per shard, that we should insert before resharding for this test.")
parser.add_option(
'--num_insert_rows', type='int', default=3000,
help='The number of rows, per shard, that we should insert before '
'resharding for this test.')

if __name__ == '__main__':
utils.main(test_options=add_test_options)