diff --git a/go/vt/topo/topoproto/srvkeyspace.go b/go/vt/topo/topoproto/srvkeyspace.go index 5f54101f02..16851dbf29 100644 --- a/go/vt/topo/topoproto/srvkeyspace.go +++ b/go/vt/topo/topoproto/srvkeyspace.go @@ -15,6 +15,12 @@ func (sra ShardReferenceArray) Len() int { return len(sra) } // Len implements sort.Interface func (sra ShardReferenceArray) Less(i, j int) bool { + if sra[i].KeyRange == nil || len(sra[i].KeyRange.Start) == 0 { + return true + } + if sra[j].KeyRange == nil || len(sra[j].KeyRange.Start) == 0 { + return false + } return bytes.Compare(sra[i].KeyRange.Start, sra[j].KeyRange.Start) < 0 } diff --git a/go/vt/wrangler/rebuild.go b/go/vt/wrangler/rebuild.go index 5ad2645372..abd97a6b42 100644 --- a/go/vt/wrangler/rebuild.go +++ b/go/vt/wrangler/rebuild.go @@ -191,6 +191,15 @@ func (wr *Wrangler) orderAndCheckPartitions(cell string, srvKeyspace *pb.SrvKeys return fmt.Errorf("keyspace partition for %v in cell %v does not end with max key", tabletType, cell) } for i := range partition.ShardReferences[0 : len(partition.ShardReferences)-1] { + fn := partition.ShardReferences[i].KeyRange == nil + sn := partition.ShardReferences[i+1].KeyRange == nil + if fn != sn { + return fmt.Errorf("shards with inconsistent KeyRanges for %v in cell %v at shard %v", tabletType, cell, i) + } + if fn { + // this is the custom sharding case, all KeyRanges must be nil + continue + } if bytes.Compare(partition.ShardReferences[i].KeyRange.End, partition.ShardReferences[i+1].KeyRange.Start) != 0 { return fmt.Errorf("non-contiguous KeyRange values for %v in cell %v at shard %v to %v: %v != %v", tabletType, cell, i, i+1, hex.EncodeToString(partition.ShardReferences[i].KeyRange.End), hex.EncodeToString(partition.ShardReferences[i+1].KeyRange.Start)) } diff --git a/py/vtdb/keyspace.py b/py/vtdb/keyspace.py index a851b2d98f..39adb4eae6 100644 --- a/py/vtdb/keyspace.py +++ b/py/vtdb/keyspace.py @@ -65,6 +65,9 @@ class Keyspace(object): pkid = pack_keyspace_id(keyspace_id) shards = 
self.get_shards(db_type) for shard in shards: + if 'KeyRange' not in shard or not shard['KeyRange']: + # this KeyRange covers the full space + return shard['Name'] if _shard_contain_kid(pkid, shard['KeyRange']['Start'], shard['KeyRange']['End']): diff --git a/test/custom_sharding.py b/test/custom_sharding.py index b2fe293035..a9a44e6aa4 100755 --- a/test/custom_sharding.py +++ b/test/custom_sharding.py @@ -107,8 +107,8 @@ class TestCustomSharding(unittest.TestCase): utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace']) - self.assertEqual(len(ks['Partitions']['master']['ShardReferences']), 1) - self.assertEqual(len(ks['Partitions']['rdonly']['ShardReferences']), 1) + self.assertEqual(len(ks['partitions'][0]['shard_references']), 1) + self.assertEqual(len(ks['partitions'][1]['shard_references']), 1) s = utils.run_vtctl_json(['GetShard', 'test_keyspace/0']) self.assertEqual(len(s['served_types']), 3) @@ -175,8 +175,8 @@ primary key (id) utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace']) - self.assertEqual(len(ks['Partitions']['master']['ShardReferences']), 2) - self.assertEqual(len(ks['Partitions']['rdonly']['ShardReferences']), 2) + self.assertEqual(len(ks['partitions'][0]['shard_references']), 2) + self.assertEqual(len(ks['partitions'][1]['shard_references']), 2) # Now test SplitQuery API works (used in MapReduce usually, but bringing # up a full MR-capable cluster is too much for this test environment) diff --git a/test/resharding.py b/test/resharding.py index 18aa19e111..efba32bc62 100755 --- a/test/resharding.py +++ b/test/resharding.py @@ -490,7 +490,7 @@ primary key (name) utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace']) - self.assertEqual(ks['SplitShardCount'], 4) + 
self.assertEqual(ks['split_shard_count'], 4) # we set full_mycnf_args to True as a test in the KIT_BYTES case full_mycnf_args = keyspace_id_type == keyrange_constants.KIT_BYTES diff --git a/test/vertical_split.py b/test/vertical_split.py index 9f77924e6e..8ef413065d 100755 --- a/test/vertical_split.py +++ b/test/vertical_split.py @@ -165,21 +165,24 @@ index by_msg (msg) keyspace = 'destination_keyspace' ks = utils.run_vtctl_json(['GetSrvKeyspace', cell, keyspace]) result = '' - if 'ServedFrom' in ks and ks['ServedFrom']: - for served_from in sorted(ks['ServedFrom'].keys()): - result += 'ServedFrom(%s): %s\n' % (served_from, - ks['ServedFrom'][served_from]) + if 'served_from' in ks and ks['served_from']: + a = [] + for served_from in sorted(ks['served_from']): + tt = keyrange_constants.PROTO3_TABLET_TYPE_TO_STRING[served_from['tablet_type']] + a.append('ServedFrom(%s): %s\n' % (tt, served_from['keyspace'])) + for line in sorted(a): + result += line logging.debug('Cell %s keyspace %s has data:\n%s', cell, keyspace, result) self.assertEqual( expected, result, 'Mismatch in srv keyspace for cell %s keyspace %s, expected:\n' '%s\ngot:\n%s' % ( cell, keyspace, expected, result)) - self.assertEqual('', ks.get('ShardingColumnName'), - 'Got wrong ShardingColumnName in SrvKeyspace: %s' % + self.assertNotIn('sharding_column_name', ks, + 'Got a sharding_column_name in SrvKeyspace: %s' % str(ks)) - self.assertEqual('', ks.get('ShardingColumnType'), - 'Got wrong ShardingColumnType in SrvKeyspace: %s' % + self.assertNotIn('sharding_column_type', ks, + 'Got a sharding_column_type in SrvKeyspace: %s' % str(ks)) def _check_blacklisted_tables(self, tablet, expected):