#!/usr/bin/env python
#
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
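
"""End-to-end test for a custom-sharded keyspace.

Brings up a keyspace with two explicitly named shards ('0' and '1'),
applies schema changes, reads and writes rows through vtgate's
shard-targeted execute API, and exercises the SplitQuery API.
"""
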
import unittest

import environment
import utils
import tablet

# shards
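# Each shard gets a master and a rdonly tablet. Their MySQL instances are
# started in setUpModule; the vttablets themselves are started by the test.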
shard_0_master = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()

shard_1_master = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()


def setUpModule():
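  """Start the topo server and the MySQL, vtctld and vtgate processes."""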
  try:
    environment.topo_server().setup()

    setup_procs = [
        shard_0_master.init_mysql(),
        shard_0_rdonly.init_mysql(),
        shard_1_master.init_mysql(),
        shard_1_rdonly.init_mysql(),
        ]
    utils.Vtctld().start()
    utils.VtGate().start()
    utils.wait_procs(setup_procs)
  except:
    tearDownModule()
    raise


def tearDownModule():
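  """Stop everything started by setUpModule and remove temporary state."""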
  if utils.options.skip_teardown:
    return

  teardown_procs = [
      shard_0_master.teardown_mysql(),
      shard_0_rdonly.teardown_mysql(),
      shard_1_master.teardown_mysql(),
      shard_1_rdonly.teardown_mysql(),
      ]
  utils.wait_procs(teardown_procs, raise_on_error=False)

  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()

  shard_0_master.remove_tree()
  shard_0_rdonly.remove_tree()
  shard_1_master.remove_tree()
  shard_1_rdonly.remove_tree()


class TestCustomSharding(unittest.TestCase):
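  """Covers a keyspace whose shards are named explicitly ('0' and '1')
  rather than being derived from a keyrange.
  """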

  def _insert_data(self, shard, start, count, table='data'):
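    """Insert count rows into table on the given shard.

    Ids run from start to start+count-1; each name is 'row <id>'.
    """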
    sql = 'insert into %s(id, name) values (:id, :name)' % table
    for x in xrange(count):
      bindvars = {
          'id': start+x,
          'name': 'row %d' % (start+x),
          }
      utils.vtgate.execute_shard(sql, 'test_keyspace', shard,
                                 bindvars=bindvars)

  def _check_data(self, shard, start, count, table='data'):
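    """Read back count rows from table on the given shard and check names."""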
    sql = 'select name from %s where id=:id' % table
    for x in xrange(count):
      bindvars = {
          'id': start+x,
          }
      qr = utils.vtgate.execute_shard(sql, 'test_keyspace', shard,
                                      bindvars=bindvars)
      self.assertEqual(len(qr['Rows']), 1)
      v = qr['Rows'][0][0]
      self.assertEqual(v, 'row %d' % (start+x))

  def test_custom_end_to_end(self):
    """This test case runs through the common operations of a custom
    sharded keyspace: creation with one shard, schema change, reading /
    writing data, adding one more shard, reading / writing data from both
    shards, applying schema changes again, and reading / writing data
    from both shards again.
    """

    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

    # start the first shard only for now
    shard_0_master.init_tablet('master', 'test_keyspace', '0')
    shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '0')
    for t in [shard_0_master, shard_0_rdonly]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None)
    for t in [shard_0_master, shard_0_rdonly]:
      t.wait_for_vttablet_state('SERVING')

    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
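
    # after the rebuild, the serving graph should list exactly one shard for
    # both master and rdonly, and the shard record should serve all three
    # tablet types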
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    self.assertEqual(len(ks['Partitions']['master']['ShardReferences']), 1)
    self.assertEqual(len(ks['Partitions']['rdonly']['ShardReferences']), 1)
    s = utils.run_vtctl_json(['GetShard', 'test_keyspace/0'])
    self.assertEqual(len(s['served_types']), 3)

    # create a table on shard 0
    sql = '''create table data(
id bigint auto_increment,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
                    auto_log=True)

    # insert data on shard 0
    self._insert_data('0', 100, 10)

    # re-read shard 0 data
    self._check_data('0', 100, 10)

    # create shard 1
    shard_1_master.init_tablet('master', 'test_keyspace', '1')
    shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '1')
    for t in [shard_1_master, shard_1_rdonly]:
      t.start_vttablet(wait_for_state=None)
    for t in [shard_1_master, shard_1_rdonly]:
      t.wait_for_vttablet_state('NOT_SERVING')
    s = utils.run_vtctl_json(['GetShard', 'test_keyspace/1'])
    self.assertEqual(len(s['served_types']), 3)
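
    # elect a master for the new shard and copy the schema over from shard 0
    # so shard 1 serves the same tables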
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/1',
                     shard_1_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
                     'test_keyspace/1'], auto_log=True)
    for t in [shard_1_master, shard_1_rdonly]:
      utils.run_vtctl(['RefreshState', t.tablet_alias], auto_log=True)
      t.wait_for_vttablet_state('SERVING')

    # rebuild the keyspace serving graph now that the new shard was added
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)

    # insert data on shard 1
    self._insert_data('1', 200, 10)

    # re-read shard 1 data
    self._check_data('1', 200, 10)

    # create a second table on all shards
    sql = '''create table data2(
id bigint auto_increment,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
                    auto_log=True)

    # insert and read data on all shards
    self._insert_data('0', 300, 10, table='data2')
    self._insert_data('1', 400, 10, table='data2')
    self._check_data('0', 300, 10, table='data2')
    self._check_data('1', 400, 10, table='data2')

    # reload schema everywhere so the QueryService knows about the tables
    for t in [shard_0_master, shard_0_rdonly, shard_1_master, shard_1_rdonly]:
      utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)

    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    self.assertEqual(len(ks['Partitions']['master']['ShardReferences']), 2)
    self.assertEqual(len(ks['Partitions']['rdonly']['ShardReferences']), 2)

    # Now test SplitQuery API works (used in MapReduce usually, but bringing
    # up a full MR-capable cluster is too much for this test environment)
    sql = 'select id, name from data'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 4)
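    # the 4 requested splits should be spread evenly across the 2 shards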
    shard0count = 0
    shard1count = 0
    for q in s:
      if q['QueryShard']['Shards'][0] == '0':
        shard0count += 1
      if q['QueryShard']['Shards'][0] == '1':
        shard1count += 1
    self.assertEqual(shard0count, 2)
    self.assertEqual(shard1count, 2)

    # run the queries, aggregate the results, make sure we have all rows
    rows = {}
    for q in s:
      qr = utils.vtgate.execute_shard(
          q['QueryShard']['Sql'], 'test_keyspace',
          ",".join(q['QueryShard']['Shards']),
          tablet_type='master', bindvars=q['QueryShard']['BindVariables'])
      for r in qr['Rows']:
        id = int(r[0])
        rows[id] = r[1]
    self.assertEqual(len(rows), 20)
    expected = {}
    for i in xrange(10):
      expected[100+i] = 'row %d' % (100+i)
      expected[200+i] = 'row %d' % (200+i)
    self.assertEqual(rows, expected)


if __name__ == '__main__':
  utils.main()