From d6e826f7786599f32ed7962f2641d87e4df7e698 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 13 May 2016 09:57:38 -0700 Subject: [PATCH 01/27] Replacing test usage of GetEndPoints / GetSrvShard We're going to remove these two from the serving graph soon. --- test/keyspace_test.py | 20 -------------- test/reparent.py | 64 +++++++++++++------------------------------ test/tabletmanager.py | 8 ------ 3 files changed, 19 insertions(+), 73 deletions(-) diff --git a/test/keyspace_test.py b/test/keyspace_test.py index 9f97056c67..8d34df3ea8 100755 --- a/test/keyspace_test.py +++ b/test/keyspace_test.py @@ -275,9 +275,6 @@ class TestKeyspace(unittest.TestCase): utils.run_vtctl( ['GetShardReplication', 'test_nj', 'test_delete_keyspace/0']) utils.run_vtctl(['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace']) - utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/0']) - utils.run_vtctl( - ['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'master']) # Recursive DeleteKeyspace utils.run_vtctl(['DeleteKeyspace', '-recursive', 'test_delete_keyspace']) @@ -292,11 +289,6 @@ class TestKeyspace(unittest.TestCase): utils.run_vtctl( ['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace'], expect_fail=True) - utils.run_vtctl( - ['GetSrvShard', 'test_nj', 'test_delete_keyspace/0'], expect_fail=True) - utils.run_vtctl( - ['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'master'], - expect_fail=True) def test_remove_keyspace_cell(self): utils.run_vtctl(['CreateKeyspace', 'test_delete_keyspace']) @@ -322,12 +314,6 @@ class TestKeyspace(unittest.TestCase): utils.run_vtctl( ['GetShardReplication', 'test_nj', 'test_delete_keyspace/1']) utils.run_vtctl(['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace']) - utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/0']) - utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/1']) - utils.run_vtctl( - ['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'replica']) - utils.run_vtctl( - 
['GetEndPoints', 'test_nj', 'test_delete_keyspace/1', 'replica']) # Just remove the shard from one cell (including tablets), # but leaving the global records and other cells/shards alone. @@ -349,12 +335,6 @@ class TestKeyspace(unittest.TestCase): utils.run_vtctl( ['GetShardReplication', 'test_nj', 'test_delete_keyspace/1']) utils.run_vtctl(['GetSrvKeyspace', 'test_nj', 'test_delete_keyspace']) - utils.run_vtctl(['GetSrvShard', 'test_nj', 'test_delete_keyspace/0']) - utils.run_vtctl( - ['GetEndPoints', 'test_nj', 'test_delete_keyspace/1', 'replica']) - utils.run_vtctl( - ['GetEndPoints', 'test_nj', 'test_delete_keyspace/0', 'replica'], - expect_fail=True) # Add it back to do another test. utils.run_vtctl( diff --git a/test/reparent.py b/test/reparent.py index fdded1dac9..9193ee4fdd 100755 --- a/test/reparent.py +++ b/test/reparent.py @@ -95,26 +95,18 @@ class TestReparent(unittest.TestCase): tablet_obj.tablet_alias, timeout, sleep_time=0.1) - def _check_db_addr(self, shard, db_type, expected_port, cell='test_nj'): - ep = utils.run_vtctl_json(['GetEndPoints', cell, 'test_keyspace/' + shard, - db_type]) - self.assertEqual( - len(ep['entries']), 1, 'Wrong number of entries: %s' % str(ep)) - port = ep['entries'][0]['port_map']['vt'] - self.assertEqual( - port, expected_port, - 'Unexpected port: %d != %d from %s' % (port, expected_port, str(ep))) - host = ep['entries'][0]['host'] - # Hostname was set explicitly to 'localhost' with -tablet_hostname flag. 
- if not host.startswith('localhost'): - self.fail( - 'Invalid hostname %s was expecting something starting with %s' % - (host, 'localhost')) + def _check_master_tablet(self, t, port=None): + """Makes sure the tablet type is master, and its health check agrees.""" + ti = utils.run_vtctl_json(['GetTablet', t.tablet_alias]) + self.assertEqual(ti['type'], topodata_pb2.MASTER) + if port: + self.assertEqual(ti['port_map']['vt'], port) - def _check_master_cell(self, cell, shard_id, master_cell): - srv_shard = utils.run_vtctl_json(['GetSrvShard', cell, - 'test_keyspace/%s' % (shard_id)]) - self.assertEqual(srv_shard['master_cell'], master_cell) + # make sure the health stream is updated + health = utils.run_vtctl_json(['VtTabletStreamHealth', '-count', '1', + t.tablet_alias]) + self.assertIn('serving', health) + self.assertEqual(health['target']['tablet_type'], topodata_pb2.MASTER) def test_master_to_spare_state_change_impossible(self): utils.run_vtctl(['CreateKeyspace', 'test_keyspace']) @@ -172,8 +164,6 @@ class TestReparent(unittest.TestCase): tablet_62344.kill_vttablet() tablet_62344.shutdown_mysql().wait() - self._check_db_addr('0', 'master', tablet_62344.port) - # Perform a planned reparent operation, will try to contact # the current master and fail somewhat quickly _, stderr = utils.run_vtctl(['-wait-time', '5s', @@ -187,7 +177,7 @@ class TestReparent(unittest.TestCase): tablet_62044.tablet_alias], auto_log=True) utils.validate_topology() - self._check_db_addr('0', 'master', tablet_62044.port) + self._check_master_tablet(tablet_62044) # insert data into the new master, check the connected slaves work self._populate_vt_insert_test(tablet_62044, 2) @@ -244,11 +234,7 @@ class TestReparent(unittest.TestCase): tablet_62344.tablet_alias], auto_log=True) utils.validate_topology(ping_tablets=True) - self._check_db_addr(shard_id, 'master', tablet_62344.port) - - # Verify MasterCell is properly set - self._check_master_cell('test_nj', shard_id, 'test_nj') - 
self._check_master_cell('test_ny', shard_id, 'test_nj') + self._check_master_tablet(tablet_62344) # Perform a graceful reparent operation to another cell. utils.pause('test_reparent_cross_cell PlannedReparentShard') @@ -256,11 +242,7 @@ class TestReparent(unittest.TestCase): tablet_31981.tablet_alias], auto_log=True) utils.validate_topology() - self._check_db_addr(shard_id, 'master', tablet_31981.port, cell='test_ny') - - # Verify MasterCell is set to new cell. - self._check_master_cell('test_nj', shard_id, 'test_ny') - self._check_master_cell('test_ny', shard_id, 'test_ny') + self._check_master_tablet(tablet_31981) tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983, tablet_31981]) @@ -318,11 +300,7 @@ class TestReparent(unittest.TestCase): utils.validate_topology(ping_tablets=True) tablet_62344.mquery('vt_test_keyspace', self._create_vt_insert_test) - self._check_db_addr(shard_id, 'master', tablet_62344.port) - - # Verify MasterCell is set to new cell. - self._check_master_cell('test_nj', shard_id, 'test_nj') - self._check_master_cell('test_ny', shard_id, 'test_nj') + self._check_master_tablet(tablet_62344) utils.validate_topology() @@ -336,17 +314,13 @@ class TestReparent(unittest.TestCase): tablet_62044.tablet_alias], auto_log=True) utils.validate_topology() - self._check_db_addr(shard_id, 'master', tablet_62044.port) + self._check_master_tablet(tablet_62044) # insert data into the new master, check the connected slaves work self._populate_vt_insert_test(tablet_62044, 1) self._check_vt_insert_test(tablet_41983, 1) self._check_vt_insert_test(tablet_62344, 1) - # Verify MasterCell is set to new cell. 
- self._check_master_cell('test_nj', shard_id, 'test_nj') - self._check_master_cell('test_ny', shard_id, 'test_nj') - tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983, tablet_31981]) @@ -358,7 +332,7 @@ class TestReparent(unittest.TestCase): timeout = 30.0 while True: try: - self._check_db_addr(shard_id, 'master', new_port) + self._check_master_tablet(tablet_62044, port=new_port) break except protocols_flavor().client_error_exception_type(): timeout = utils.wait_step('waiting for new port to register', @@ -406,7 +380,7 @@ class TestReparent(unittest.TestCase): tablet_62344.tablet_alias]) utils.validate_topology(ping_tablets=True) - self._check_db_addr(shard_id, 'master', tablet_62344.port) + self._check_master_tablet(tablet_62344) # Kill one tablet so we seem offline tablet_31981.kill_vttablet() @@ -414,7 +388,7 @@ class TestReparent(unittest.TestCase): # Perform a graceful reparent operation. utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/' + shard_id, tablet_62044.tablet_alias]) - self._check_db_addr(shard_id, 'master', tablet_62044.port) + self._check_master_tablet(tablet_62044) tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983]) diff --git a/test/tabletmanager.py b/test/tabletmanager.py index c07f480bf4..3a5693e8cc 100755 --- a/test/tabletmanager.py +++ b/test/tabletmanager.py @@ -69,11 +69,6 @@ class TestTabletManager(unittest.TestCase): t.set_semi_sync_enabled(master=False) t.clean_dbs() - def _check_srv_shard(self): - srv_shard = utils.run_vtctl_json(['GetSrvShard', 'test_nj', - 'test_keyspace/0']) - self.assertEqual(srv_shard['master_cell'], 'test_nj') - # run twice to check behavior with existing znode data def test_sanity(self): self._test_sanity() @@ -87,7 +82,6 @@ class TestTabletManager(unittest.TestCase): utils.run_vtctl( ['RebuildKeyspaceGraph', '-rebuild_srv_shards', 'test_keyspace']) utils.validate_topology() - self._check_srv_shard() # if these statements don't run before the tablet it will wedge # waiting for the 
db to become accessible. this is more a bug than @@ -133,7 +127,6 @@ class TestTabletManager(unittest.TestCase): # break because we only have a single master, no slaves utils.run_vtctl(['ValidateShard', '-ping-tablets=false', 'test_keyspace/0']) - self._check_srv_shard() tablet_62344.kill_vttablet() @@ -154,7 +147,6 @@ class TestTabletManager(unittest.TestCase): tablet_62344.init_tablet('master', 'test_keyspace', '0') utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/0']) utils.validate_topology() - self._check_srv_shard() tablet_62344.create_db('vt_test_keyspace') tablet_62344.start_vttablet() From 73dbae7e8b230428d6e796a3639ee9655e8f4c3b Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 13 May 2016 10:01:22 -0700 Subject: [PATCH 02/27] Removing vtctl getSrvShard / getEndPoints. --- doc/vtctlReference.md | 54 ------------------------------------------- go/vt/vtctl/vtctl.go | 48 -------------------------------------- 2 files changed, 102 deletions(-) diff --git a/doc/vtctlReference.md b/doc/vtctlReference.md index 18b5bb8b5e..ea8b8d338d 100644 --- a/doc/vtctlReference.md +++ b/doc/vtctlReference.md @@ -1101,44 +1101,8 @@ Validates that the master version matches all of the slaves. ## Serving Graph -* [GetEndPoints](#getendpoints) * [GetSrvKeyspace](#getsrvkeyspace) * [GetSrvKeyspaceNames](#getsrvkeyspacenames) -* [GetSrvShard](#getsrvshard) - -### GetEndPoints - -Outputs a JSON structure that contains information about the EndPoints. - -#### Example - -
GetEndPoints <cell> <keyspace/shard> <tablet type>
- -#### Arguments - -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. -* <tablet type> – Required. The vttablet's role. Valid values are: - - * backup – A slaved copy of data that is offline to queries other than for backup purposes - * batch – A slaved copy of data for OLAP load patterns (typically for MapReduce jobs) - * experimental – A slaved copy of data that is ready but not serving query traffic. The value indicates a special characteristic of the tablet that indicates the tablet should not be considered a potential master. Vitess also does not worry about lag for experimental tablets when reparenting. - * master – A primary copy of data - * rdonly – A slaved copy of data for OLAP load patterns - * replica – A slaved copy of data ready to be promoted to master - * restore – A tablet that is restoring from a snapshot. Typically, this happens at tablet startup, then it goes to its right state. - * schema_apply – A slaved copy of data that had been serving query traffic but that is now applying a schema change. Following the change, the tablet will revert to its serving type. - * snapshot_source – A slaved copy of data where mysqld is not running and where Vitess is serving data files to clone slaves. Use this command to enter this mode:
vtctl Snapshot -server-mode ...
Use this command to exit this mode:
vtctl SnapshotSourceEnd ...
- * spare – A slaved copy of data that is ready but not serving query traffic. The data could be a potential master tablet. - * worker – A tablet that is in use by a vtworker process. The tablet is likely lagging in replication. - - - - -#### Errors - -* The <cell>, <keyspace/shard>, and <tablet type> arguments are required for the <GetEndPoints> command. This error occurs if the command is not called with exactly 3 arguments. - ### GetSrvKeyspace @@ -1175,24 +1139,6 @@ Outputs a list of keyspace names. * The <cell> argument is required for the <GetSrvKeyspaceNames> command. This error occurs if the command is not called with exactly one argument. -### GetSrvShard - -Outputs a JSON structure that contains information about the SrvShard. - -#### Example - -
GetSrvShard <cell> <keyspace/shard>
- -#### Arguments - -* <cell> – Required. A cell is a location for a service. Generally, a cell resides in only one cluster. In Vitess, the terms "cell" and "data center" are interchangeable. The argument value is a string that does not contain whitespace. -* <keyspace/shard> – Required. The name of a sharded database that contains one or more tables as well as the shard associated with the command. The keyspace must be identified by a string that does not contain whitepace, while the shard is typically identified by a string in the format <range start>-<range end>. - -#### Errors - -* The <cell> and <keyspace/shard> arguments are required for the <GetSrvShard> command. This error occurs if the command is not called with exactly 2 arguments. - - ## Shards * [CreateShard](#createshard) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index eca427d3ea..fb1dd2ccab 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -363,12 +363,6 @@ var commands = []commandGroup{ {"GetSrvKeyspaceNames", commandGetSrvKeyspaceNames, "", "Outputs a list of keyspace names."}, - {"GetSrvShard", commandGetSrvShard, - " ", - "Outputs a JSON structure that contains information about the SrvShard."}, - {"GetEndPoints", commandGetEndPoints, - " ", - "Outputs a JSON structure that contains information about the EndPoints."}, }, }, { @@ -2124,48 +2118,6 @@ func commandGetSrvKeyspaceNames(ctx context.Context, wr *wrangler.Wrangler, subF return nil } -func commandGetSrvShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 2 { - return fmt.Errorf("The and arguments are required for the GetSrvShard command.") - } - - keyspace, shard, err := topoproto.ParseKeyspaceShard(subFlags.Arg(1)) - if err != nil { - return err - } - srvShard, err := wr.TopoServer().GetSrvShard(ctx, subFlags.Arg(0), keyspace, shard) - if err != nil { - return err - } - return 
printJSON(wr.Logger(), srvShard) -} - -func commandGetEndPoints(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 3 { - return fmt.Errorf("The , , and arguments are required for the GetEndPoints command.") - } - - keyspace, shard, err := topoproto.ParseKeyspaceShard(subFlags.Arg(1)) - if err != nil { - return err - } - tabletType, err := parseTabletType(subFlags.Arg(2), []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}) - if err != nil { - return err - } - endPoints, _, err := wr.TopoServer().GetEndPoints(ctx, subFlags.Arg(0), keyspace, shard, tabletType) - if err != nil { - return err - } - return printJSON(wr.Logger(), endPoints) -} - func commandGetShardReplication(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { if err := subFlags.Parse(args); err != nil { return err From 9c0d03ba9efff1075f64985c2e3d520f69bc8e46 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 13 May 2016 12:45:31 -0700 Subject: [PATCH 03/27] Don't check serving graph in this call. --- test/end2end/reparent_test.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/test/end2end/reparent_test.py b/test/end2end/reparent_test.py index d53b14b4e9..1bca853568 100755 --- a/test/end2end/reparent_test.py +++ b/test/end2end/reparent_test.py @@ -64,20 +64,6 @@ class ReparentTest(base_end2end_test.BaseEnd2EndTest): if self.env.get_tablet_task_number(new_master_name) != desired_master_task: return False - for cell in self.env.cells: - try: - # This GetEndPoints call should succeed when called on the cell with the - # master, otherwise it'll raise an exception. 
- self.env.vtctl_helper.execute_vtctl_command( - ['GetEndPoints', cell, '{0}/{1}'.format(keyspace, shard_name), - 'master'], expect_fail=(cell != desired_master_cell)) - if cell != desired_master_cell: - return False - except vtctl_helper.VtctlClientError: - # This should only happen when calling GetEndPoints on a cell other than - # the master cell. - if cell == desired_master_cell: - return False return True def explicit_reparent(self, keyspace, num_shards, external=False, From a9226ad6dd3e84311dc66182343c86a6c9db7c89 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 13 May 2016 12:51:09 -0700 Subject: [PATCH 04/27] Removing another use of GetEndPoints. --- test/end2end/reparent_test.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/end2end/reparent_test.py b/test/end2end/reparent_test.py index 1bca853568..42662fee63 100755 --- a/test/end2end/reparent_test.py +++ b/test/end2end/reparent_test.py @@ -45,8 +45,8 @@ class ReparentTest(base_end2end_test.BaseEnd2EndTest): desired_master_task): """Verify that the new master is the correct task in the correct cell. - This function uses vtctl to call GetShards and GetEndPoints on all cells - in order to determine that everyone agrees where the master is. + This function uses vtctl to call GetShard in order to determine + that everyone agrees where the master is. Args: keyspace: Name of the keyspace to reparent (string) @@ -56,6 +56,7 @@ class ReparentTest(base_end2end_test.BaseEnd2EndTest): Returns: True if desired_master is the consensus new master + """ # First verify that GetShard shows the correct master new_master_name = self.env.get_current_master_name(keyspace, shard_name) @@ -133,7 +134,7 @@ class ReparentTest(base_end2end_test.BaseEnd2EndTest): thread.start() if not cross_cell: - # Wait for the serving graph to be updated. + # Wait for the shard to be updated. 
# This doesn't work for cross-cell, because mapping a task # number to a tablet UID is more trouble than it's worth. uid = (self.env.get_tablet_uid(original_master_name) @@ -143,13 +144,12 @@ class ReparentTest(base_end2end_test.BaseEnd2EndTest): self.fail('Timed out waiting for serving graph update on %s/%s' % ( keyspace, shard_name)) try: - endpoints = json.loads(self.env.vtctl_helper.execute_vtctl_command( - ['GetEndPoints', next_master['cell'], - '%s/%s' % (keyspace, shard_name), 'master'])) - if int(endpoints['entries'][0]['uid']) == uid: + shard_info = json.loads(self.env.vtctl_helper.execute_vtctl_command( + ['GetShard', '%s/%s' % (keyspace, shard_name)])) + if int(shard_info['master_alias']['uid']) == uid: duration = time.time() - start_time durations.append(duration) - logging.info('Serving graph updated for %s/%s after %f seconds', + logging.info('Shard record updated for %s/%s after %f seconds', keyspace, shard_name, duration) break except (IndexError, KeyError, vtctl_helper.VtctlClientError): From 955230cf985f04501923f6261a716b659ca371d1 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 13 May 2016 12:53:34 -0700 Subject: [PATCH 05/27] Removing another test use of getEndPoints. --- .../testlib/reparent_external_test.go | 20 ------------------- 1 file changed, 20 deletions(-) diff --git a/go/vt/wrangler/testlib/reparent_external_test.go b/go/vt/wrangler/testlib/reparent_external_test.go index 958a58fe8c..735dc317a0 100644 --- a/go/vt/wrangler/testlib/reparent_external_test.go +++ b/go/vt/wrangler/testlib/reparent_external_test.go @@ -150,16 +150,6 @@ func TestTabletExternallyReparented(t *testing.T) { t.Fatalf("TabletExternallyReparented(replica) failed: %v", err) } waitForExternalReparent(t, waitID) - - // Now double-check the serving graph is good. - // Should have all replicas left. 
- addrs, _, err := ts.GetEndPoints(ctx, "cell1", "test_keyspace", "0", topodatapb.TabletType_REPLICA) - if err != nil { - t.Fatalf("GetEndPoints failed at the end: %v", err) - } - if len(addrs.Entries) != 3 { - t.Fatalf("GetEndPoints has too many entries %v: %v", len(addrs.Entries), addrs) - } } // TestTabletExternallyReparentedWithDifferentMysqlPort makes sure @@ -299,16 +289,6 @@ func TestTabletExternallyReparentedFailedOldMaster(t *testing.T) { } waitForExternalReparent(t, waitID) - // Now double-check the serving graph is good. - // Should only both replicas left. - addrs, _, err := ts.GetEndPoints(ctx, "cell1", "test_keyspace", "0", topodatapb.TabletType_REPLICA) - if err != nil { - t.Fatalf("GetEndPoints failed at the end: %v", err) - } - if len(addrs.Entries) != 2 { - t.Fatalf("GetEndPoints has too many entries: %v", addrs) - } - // check the old master was converted to replica tablet, err := ts.GetTablet(ctx, oldMaster.Tablet.Alias) if err != nil { From f1355976ef249b50214a719552d7845d81da8a01 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 16 May 2016 07:10:05 -0700 Subject: [PATCH 06/27] Removing EndPoints from topo server API. 
--- go/vt/etcdtopo/serving_graph.go | 142 +------- go/vt/tabletmanager/action_agent.go | 15 - go/vt/tabletmanager/binlog_players_test.go | 4 - go/vt/tabletmanager/init_tablet.go | 6 - go/vt/tabletmanager/rpc_external_reparent.go | 59 +--- go/vt/topo/helpers/tee.go | 50 --- go/vt/topo/server.go | 31 -- go/vt/topo/serving_graph.go | 29 -- go/vt/topo/test/faketopo/faketopo.go | 25 -- go/vt/topo/test/serving.go | 112 ------- go/vt/topotools/rebuild_shard.go | 323 +------------------ go/vt/topotools/rebuild_shard_test.go | 131 +------- go/vt/wrangler/shard.go | 10 - go/vt/zktopo/serving_graph.go | 153 --------- 14 files changed, 15 insertions(+), 1075 deletions(-) delete mode 100644 go/vt/topo/serving_graph.go diff --git a/go/vt/etcdtopo/serving_graph.go b/go/vt/etcdtopo/serving_graph.go index f5deb4ca49..26672f32ad 100644 --- a/go/vt/etcdtopo/serving_graph.go +++ b/go/vt/etcdtopo/serving_graph.go @@ -7,16 +7,12 @@ package etcdtopo import ( "encoding/json" "fmt" - "path" "time" "github.com/coreos/go-etcd/etcd" log "github.com/golang/glog" "golang.org/x/net/context" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/topoproto" - topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) @@ -25,142 +21,6 @@ import ( // test and main programs can change it. var WatchSleepDuration = 30 * time.Second -// GetSrvTabletTypesPerShard implements topo.Server. 
-func (s *Server) GetSrvTabletTypesPerShard(ctx context.Context, cellName, keyspace, shard string) ([]topodatapb.TabletType, error) { - cell, err := s.getCell(cellName) - if err != nil { - return nil, err - } - - resp, err := cell.Get(srvShardDirPath(keyspace, shard), false /* sort */, false /* recursive */) - if err != nil { - return nil, convertError(err) - } - if resp.Node == nil { - return nil, ErrBadResponse - } - - tabletTypes := make([]topodatapb.TabletType, 0, len(resp.Node.Nodes)) - for _, n := range resp.Node.Nodes { - strType := path.Base(n.Key) - if tt, err := topoproto.ParseTabletType(strType); err == nil { - tabletTypes = append(tabletTypes, tt) - } - } - return tabletTypes, nil -} - -// CreateEndPoints implements topo.Server. -func (s *Server) CreateEndPoints(ctx context.Context, cellName, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints) error { - cell, err := s.getCell(cellName) - if err != nil { - return err - } - - data, err := json.MarshalIndent(addrs, "", " ") - if err != nil { - return err - } - - // Set only if it doesn't exist. - _, err = cell.Create(endPointsFilePath(keyspace, shard, tabletType), string(data), 0 /* ttl */) - return convertError(err) -} - -// UpdateEndPoints implements topo.Server. -func (s *Server) UpdateEndPoints(ctx context.Context, cellName, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints, existingVersion int64) error { - cell, err := s.getCell(cellName) - if err != nil { - return err - } - - data, err := json.MarshalIndent(addrs, "", " ") - if err != nil { - return err - } - - if existingVersion == -1 { - // Set unconditionally. - _, err := cell.Set(endPointsFilePath(keyspace, shard, tabletType), string(data), 0 /* ttl */) - return convertError(err) - } - - // Update only if version matches. 
- return s.updateEndPoints(cellName, keyspace, shard, tabletType, addrs, existingVersion) -} - -// updateEndPoints updates the EndPoints file only if the version matches. -func (s *Server) updateEndPoints(cellName, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints, version int64) error { - cell, err := s.getCell(cellName) - if err != nil { - return err - } - - data, err := json.MarshalIndent(addrs, "", " ") - if err != nil { - return err - } - - _, err = cell.CompareAndSwap(endPointsFilePath(keyspace, shard, tabletType), string(data), 0, /* ttl */ - "" /* prevValue */, uint64(version)) - return convertError(err) -} - -// GetEndPoints implements topo.Server. -func (s *Server) GetEndPoints(ctx context.Context, cellName, keyspace, shard string, tabletType topodatapb.TabletType) (*topodatapb.EndPoints, int64, error) { - cell, err := s.getCell(cellName) - if err != nil { - return nil, -1, err - } - - resp, err := cell.Get(endPointsFilePath(keyspace, shard, tabletType), false /* sort */, false /* recursive */) - if err != nil { - return nil, -1, convertError(err) - } - if resp.Node == nil { - return nil, -1, ErrBadResponse - } - - value := &topodatapb.EndPoints{} - if resp.Node.Value != "" { - if err := json.Unmarshal([]byte(resp.Node.Value), value); err != nil { - return nil, -1, fmt.Errorf("bad end points data (%v): %q", err, resp.Node.Value) - } - } - return value, int64(resp.Node.ModifiedIndex), nil -} - -// DeleteEndPoints implements topo.Server. -func (s *Server) DeleteEndPoints(ctx context.Context, cellName, keyspace, shard string, tabletType topodatapb.TabletType, existingVersion int64) error { - cell, err := s.getCell(cellName) - if err != nil { - return err - } - dirPath := endPointsDirPath(keyspace, shard, tabletType) - - if existingVersion == -1 { - // Delete unconditionally. - _, err := cell.Delete(dirPath, true /* recursive */) - return convertError(err) - } - - // Delete EndPoints file only if version matches. 
- if _, err := cell.CompareAndDelete(endPointsFilePath(keyspace, shard, tabletType), "" /* prevValue */, uint64(existingVersion)); err != nil { - return convertError(err) - } - // Delete the parent dir only if it's empty. - _, err = cell.DeleteDir(dirPath) - err = convertError(err) - if err == topo.ErrNotEmpty { - // Someone else recreated the EndPoints file after we deleted it, - // but before we got around to removing the parent dir. - // This is fine, because whoever recreated it has already seen our delete, - // and we're not at risk of overwriting their change. - err = nil - } - return err -} - // UpdateSrvShard implements topo.Server. func (s *Server) UpdateSrvShard(ctx context.Context, cellName, keyspace, shard string, srvShard *topodatapb.SrvShard) error { cell, err := s.getCell(cellName) @@ -339,7 +199,7 @@ func (s *Server) WatchSrvKeyspace(ctx context.Context, cellName, keyspace string if resp.Node != nil && resp.Node.Value != "" { srvKeyspace = &topodatapb.SrvKeyspace{} if err := json.Unmarshal([]byte(resp.Node.Value), srvKeyspace); err != nil { - log.Errorf("failed to Unmarshal EndPoints for %v: %v", filePath, err) + log.Errorf("failed to Unmarshal SrvKeyspace for %v: %v", filePath, err) continue } } diff --git a/go/vt/tabletmanager/action_agent.go b/go/vt/tabletmanager/action_agent.go index 22172d4fee..0e920f87f7 100644 --- a/go/vt/tabletmanager/action_agent.go +++ b/go/vt/tabletmanager/action_agent.go @@ -51,7 +51,6 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/tabletservermock" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" - "github.com/youtube/vitess/go/vt/topotools" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -436,16 +435,6 @@ func (agent *ActionAgent) verifyTopology(ctx context.Context) error { return nil } -func (agent *ActionAgent) verifyServingAddrs(ctx context.Context) error { - tablet := agent.Tablet() - if 
!topo.IsRunningQueryService(tablet.Type) { - return nil - } - - // Check to see our address is registered in the right place. - return topotools.UpdateTabletEndpoints(ctx, agent.TopoServer, tablet) -} - // Start validates and updates the topology records for the tablet, and performs // the initial state change callback to start tablet services. // If initUpdateStream is set, update stream service will also be registered. @@ -511,10 +500,6 @@ func (agent *ActionAgent) Start(ctx context.Context, mysqlPort, vtPort, gRPCPort return err } - if err = agent.verifyServingAddrs(ctx); err != nil { - return err - } - // get and fix the dbname if necessary if !agent.DBConfigs.IsZero() { // Only for real instances diff --git a/go/vt/tabletmanager/binlog_players_test.go b/go/vt/tabletmanager/binlog_players_test.go index b2b2f2e545..8b3a688e5f 100644 --- a/go/vt/tabletmanager/binlog_players_test.go +++ b/go/vt/tabletmanager/binlog_players_test.go @@ -23,7 +23,6 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topotools" "github.com/youtube/vitess/go/vt/zktopo/zktestserver" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" @@ -267,9 +266,6 @@ func createSourceTablet(t *testing.T, name string, ts topo.Server, keyspace, sha if err := ts.CreateTablet(ctx, tablet); err != nil { t.Fatalf("CreateTablet failed: %v", err) } - if err := topotools.UpdateTabletEndpoints(ctx, ts, tablet); err != nil { - t.Fatalf("topotools.UpdateTabletEndpoints failed: %v", err) - } // register a tablet conn dialer that will return the instance // we want diff --git a/go/vt/tabletmanager/init_tablet.go b/go/vt/tabletmanager/init_tablet.go index 2fd97090f8..a206b755bb 100644 --- a/go/vt/tabletmanager/init_tablet.go +++ b/go/vt/tabletmanager/init_tablet.go @@ -209,11 +209,5 @@ func (agent *ActionAgent) InitTablet(port, gRPCPort int32) error { 
return fmt.Errorf("CreateTablet failed: %v", err) } - // and now update the serving graph. Note we do that in any case, - // to clean any inaccurate record from any part of the serving graph. - if err := topotools.UpdateTabletEndpoints(ctx, agent.TopoServer, tablet); err != nil { - return fmt.Errorf("UpdateTabletEndpoints failed: %v", err) - } - return nil } diff --git a/go/vt/tabletmanager/rpc_external_reparent.go b/go/vt/tabletmanager/rpc_external_reparent.go index da57aa4a34..1efdf0ad35 100644 --- a/go/vt/tabletmanager/rpc_external_reparent.go +++ b/go/vt/tabletmanager/rpc_external_reparent.go @@ -94,9 +94,6 @@ func (agent *ActionAgent) TabletExternallyReparented(ctx context.Context, extern }() event.DispatchUpdate(ev, "starting external from tablet (fast)") - var wg sync.WaitGroup - var errs concurrency.AllErrorRecorder - // Execute state change to master by force-updating only the local copy of the // tablet record. The actual record in topo will be updated later. log.Infof("fastTabletExternallyReparented: executing change callback for state change to MASTER") @@ -104,45 +101,10 @@ func (agent *ActionAgent) TabletExternallyReparented(ctx context.Context, extern tablet.Type = topodatapb.TabletType_MASTER agent.setTablet(tablet) - wg.Add(1) - go func() { - defer wg.Done() - - // This is where updateState will block for gracePeriod, while it gives - // vtgate a chance to stop sending replica queries. - if err := agent.updateState(ctx, oldTablet, "fastTabletExternallyReparented"); err != nil { - errs.RecordError(fmt.Errorf("fastTabletExternallyReparented: failed to change tablet state to MASTER: %v", err)) - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - - // Directly write the new master endpoint in the serving graph. - // We will do a true rebuild in the background soon, but in the meantime, - // this will be enough for clients to re-resolve the new master. 
- event.DispatchUpdate(ev, "writing new master endpoint") - log.Infof("fastTabletExternallyReparented: writing new master endpoint to serving graph") - ep, err := topo.TabletEndPoint(tablet) - if err != nil { - errs.RecordError(fmt.Errorf("fastTabletExternallyReparented: failed to generate EndPoint for tablet %v: %v", tablet.Alias, err)) - return - } - err = topo.UpdateEndPoints(ctx, agent.TopoServer, tablet.Alias.Cell, - si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER, - &topodatapb.EndPoints{Entries: []*topodatapb.EndPoint{ep}}, -1) - if err != nil { - errs.RecordError(fmt.Errorf("fastTabletExternallyReparented: failed to update master endpoint: %v", err)) - return - } - externalReparentStats.Record("NewMasterVisible", startTime) - }() - - // Wait for serving state grace period and serving graph update. - wg.Wait() - if errs.HasErrors() { - return errs.Error() + // This is where updateState will block for gracePeriod, while it gives + // vtgate a chance to stop sending replica queries. + if err := agent.updateState(ctx, oldTablet, "fastTabletExternallyReparented"); err != nil { + return fmt.Errorf("fastTabletExternallyReparented: failed to change tablet state to MASTER: %v", err) } // Start the finalize stage with a background context, but connect the trace. @@ -179,20 +141,13 @@ func (agent *ActionAgent) finalizeTabletExternallyReparented(ctx context.Context go func() { defer wg.Done() // Update our own record to master. - updatedTablet, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, + _, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error { tablet.Type = topodatapb.TabletType_MASTER return nil }) if err != nil { errs.RecordError(err) - return - } - - // Update the serving graph for the tablet. 
- if updatedTablet != nil { - errs.RecordError( - topotools.UpdateTabletEndpoints(ctx, agent.TopoServer, updatedTablet)) } }() @@ -214,10 +169,6 @@ func (agent *ActionAgent) finalizeTabletExternallyReparented(ctx context.Context // We now know more about the old master, so add it to event data. ev.OldMaster = *oldMasterTablet - - // Update the serving graph. - errs.RecordError( - topotools.UpdateTabletEndpoints(ctx, agent.TopoServer, oldMasterTablet)) wg.Done() // Tell the old master to re-read its tablet record and change its state. diff --git a/go/vt/topo/helpers/tee.go b/go/vt/topo/helpers/tee.go index d08f09fb00..8e1458da8d 100644 --- a/go/vt/topo/helpers/tee.go +++ b/go/vt/topo/helpers/tee.go @@ -559,56 +559,6 @@ func (tee *Tee) UnlockSrvShardForAction(ctx context.Context, cell, keyspace, sha return perr } -// GetSrvTabletTypesPerShard is part of the topo.Server interface -func (tee *Tee) GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]topodatapb.TabletType, error) { - return tee.readFrom.GetSrvTabletTypesPerShard(ctx, cell, keyspace, shard) -} - -// CreateEndPoints is part of the topo.Server interface -func (tee *Tee) CreateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints) error { - if err := tee.primary.CreateEndPoints(ctx, cell, keyspace, shard, tabletType, addrs); err != nil { - return err - } - - if err := tee.secondary.CreateEndPoints(ctx, cell, keyspace, shard, tabletType, addrs); err != nil { - // not critical enough to fail - log.Warningf("secondary.CreateEndPoints(%v, %v, %v, %v) failed: %v", cell, keyspace, shard, tabletType, err) - } - return nil -} - -// UpdateEndPoints is part of the topo.Server interface -func (tee *Tee) UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints, existingVersion int64) error { - if err := tee.primary.UpdateEndPoints(ctx, cell, keyspace, 
shard, tabletType, addrs, existingVersion); err != nil { - return err - } - - if err := tee.secondary.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, addrs, -1); err != nil { - // not critical enough to fail - log.Warningf("secondary.UpdateEndPoints(%v, %v, %v, %v) failed: %v", cell, keyspace, shard, tabletType, err) - } - return nil -} - -// GetEndPoints is part of the topo.Server interface -func (tee *Tee) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType) (*topodatapb.EndPoints, int64, error) { - return tee.readFrom.GetEndPoints(ctx, cell, keyspace, shard, tabletType) -} - -// DeleteEndPoints is part of the topo.Server interface -func (tee *Tee) DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, existingVersion int64) error { - err := tee.primary.DeleteEndPoints(ctx, cell, keyspace, shard, tabletType, existingVersion) - if err != nil && err != topo.ErrNoNode { - return err - } - - if err := tee.secondary.DeleteEndPoints(ctx, cell, keyspace, shard, tabletType, -1); err != nil { - // not critical enough to fail - log.Warningf("secondary.DeleteEndPoints(%v, %v, %v, %v) failed: %v", cell, keyspace, shard, tabletType, err) - } - return err -} - // UpdateSrvShard is part of the topo.Server interface func (tee *Tee) UpdateSrvShard(ctx context.Context, cell, keyspace, shard string, srvShard *topodatapb.SrvShard) error { if err := tee.primary.UpdateSrvShard(ctx, cell, keyspace, shard, srvShard); err != nil { diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go index bff0082555..5c6d6ab928 100644 --- a/go/vt/topo/server.go +++ b/go/vt/topo/server.go @@ -206,37 +206,6 @@ type Impl interface { // UnlockSrvShardForAction unlocks a serving shard. UnlockSrvShardForAction(ctx context.Context, cell, keyspace, shard, lockPath, results string) error - // GetSrvTabletTypesPerShard returns the existing serving types - // for a shard. - // Can return ErrNoNode. 
- GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]topodatapb.TabletType, error) - - // CreateEndPoints creates and sets the serving records for a cell, - // keyspace, shard, tabletType. - // It returns ErrNodeExists if the record already exists. - CreateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints) error - - // UpdateEndPoints updates the serving records for a cell, - // keyspace, shard, tabletType. - // If existingVersion is -1, it will set the value unconditionally, - // creating it if necessary. - // Otherwise, it will Compare-And-Set only if the version matches. - // Can return ErrBadVersion. - // Can return ErrNoNode only if existingVersion is not -1. - UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints, existingVersion int64) error - - // GetEndPoints returns the EndPoints list of serving addresses - // for a TabletType inside a shard, as well as the node version. - // Can return ErrNoNode. - GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType) (ep *topodatapb.EndPoints, version int64, err error) - - // DeleteEndPoints deletes the serving records for a cell, - // keyspace, shard, tabletType. - // If existingVersion is -1, it will delete the records unconditionally. - // Otherwise, it will Compare-And-Delete only if the version matches. - // Can return ErrNoNode or ErrBadVersion. - DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, existingVersion int64) error - // WatchSrvKeyspace returns a channel that receives notifications // every time the SrvKeyspace for the given keyspace / cell changes. 
// It should receive a notification with the initial value fairly diff --git a/go/vt/topo/serving_graph.go b/go/vt/topo/serving_graph.go deleted file mode 100644 index 2504e4981e..0000000000 --- a/go/vt/topo/serving_graph.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2014, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "strings" - - "golang.org/x/net/context" - - "github.com/youtube/vitess/go/trace" - - topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" -) - -// UpdateEndPoints is a high level wrapper for TopoServer.UpdateEndPoints. -// It generates trace spans. -func UpdateEndPoints(ctx context.Context, ts Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints, existingVersion int64) error { - span := trace.NewSpanFromContext(ctx) - span.StartClient("TopoServer.UpdateEndPoints") - span.Annotate("cell", cell) - span.Annotate("keyspace", keyspace) - span.Annotate("shard", shard) - span.Annotate("tablet_type", strings.ToLower(tabletType.String())) - defer span.Finish() - - return ts.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, addrs, existingVersion) -} diff --git a/go/vt/topo/test/faketopo/faketopo.go b/go/vt/topo/test/faketopo/faketopo.go index 0b760f9b75..ad72c164e5 100644 --- a/go/vt/topo/test/faketopo/faketopo.go +++ b/go/vt/topo/test/faketopo/faketopo.go @@ -26,11 +26,6 @@ func (ft FakeTopo) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (* return nil, errNotImplemented } -// GetEndPoints implements topo.Server. -func (ft FakeTopo) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType) (*topodatapb.EndPoints, int64, error) { - return nil, -1, errNotImplemented -} - // Close implements topo.Server. 
func (ft FakeTopo) Close() {} @@ -154,26 +149,6 @@ func (ft FakeTopo) UnlockSrvShardForAction(ctx context.Context, cell, keyspace, return errNotImplemented } -// GetSrvTabletTypesPerShard implements topo.Server. -func (ft FakeTopo) GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]topodatapb.TabletType, error) { - return nil, errNotImplemented -} - -// CreateEndPoints implements topo.Server. -func (ft FakeTopo) CreateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints) error { - return errNotImplemented -} - -// UpdateEndPoints implements topo.Server. -func (ft FakeTopo) UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints, existingVersion int64) error { - return errNotImplemented -} - -// DeleteEndPoints implements topo.Server. -func (ft FakeTopo) DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, existingVersion int64) error { - return errNotImplemented -} - // WatchSrvKeyspace implements topo.Server.WatchSrvKeyspace func (ft FakeTopo) WatchSrvKeyspace(ctx context.Context, cell, keyspace string) (<-chan *topodatapb.SrvKeyspace, error) { return nil, errNotImplemented diff --git a/go/vt/topo/test/serving.go b/go/vt/topo/test/serving.go index 97d06b10e4..5fe6818d99 100644 --- a/go/vt/topo/test/serving.go +++ b/go/vt/topo/test/serving.go @@ -19,118 +19,6 @@ import ( func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Impl) { cell := getLocalCell(ctx, t, ts) - // test individual cell/keyspace/shard/type entries - if _, err := ts.GetSrvTabletTypesPerShard(ctx, cell, "test_keyspace", "-10"); err != topo.ErrNoNode { - t.Errorf("GetSrvTabletTypesPerShard(invalid): %v", err) - } - if _, _, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER); err != topo.ErrNoNode { - t.Errorf("GetEndPoints(invalid): 
%v", err) - } - - endPoints := &topodatapb.EndPoints{ - Entries: []*topodatapb.EndPoint{ - { - Uid: 1, - Host: "host1", - PortMap: map[string]int32{ - "vt": 1234, - "mysql": 1235, - "grpc": 1236, - }, - }, - }, - } - - if err := ts.CreateEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, endPoints); err != nil { - t.Fatalf("CreateEndPoints(master): %v", err) - } - // Try to create again. - if err := ts.CreateEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, endPoints); err != topo.ErrNodeExists { - t.Fatalf("CreateEndPoints(master): err = %v, want topo.ErrNodeExists", err) - } - - // Get version. - _, version, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER) - if err != nil { - t.Fatalf("GetEndPoints(master): %v", err) - } - // Make a change. - tmp := endPoints.Entries[0].Uid - endPoints.Entries[0].Uid = tmp + 1 - if err := ts.UpdateEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, endPoints, -1); err != nil { - t.Fatalf("UpdateEndPoints(master): %v", err) - } - endPoints.Entries[0].Uid = tmp - // Try to delete with the wrong version. - if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, version); err != topo.ErrBadVersion { - t.Fatalf("DeleteEndPoints: err = %v, want topo.ErrBadVersion", err) - } - // Delete with the correct version. - _, version, err = ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER) - if err != nil { - t.Fatalf("GetEndPoints(master): %v", err) - } - if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, version); err != nil { - t.Fatalf("DeleteEndPoints: %v", err) - } - // Recreate it with an unconditional update. 
- if err := ts.UpdateEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, endPoints, -1); err != nil { - t.Fatalf("UpdateEndPoints(master): %v", err) - } - - if types, err := ts.GetSrvTabletTypesPerShard(ctx, cell, "test_keyspace", "-10"); err != nil || len(types) != 1 || types[0] != topodatapb.TabletType_MASTER { - t.Errorf("GetSrvTabletTypesPerShard(1): %v %v", err, types) - } - - // Delete it unconditionally. - if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, -1); err != nil { - t.Fatalf("DeleteEndPoints: %v", err) - } - - // Delete the SrvShard. - if err := ts.DeleteSrvShard(ctx, cell, "test_keyspace", "-10"); err != nil { - t.Fatalf("DeleteSrvShard: %v", err) - } - if _, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "-10"); err != topo.ErrNoNode { - t.Errorf("GetSrvShard(deleted) got %v, want ErrNoNode", err) - } - - // Re-add endpoints. - if err := ts.UpdateEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, endPoints, -1); err != nil { - t.Fatalf("UpdateEndPoints(master): %v", err) - } - - addrs, version, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER) - if err != nil { - t.Errorf("GetEndPoints: %v", err) - } - if len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 { - t.Errorf("GetEndPoints(1): %v", addrs) - } - if pm := addrs.Entries[0].PortMap; pm["vt"] != 1234 || pm["mysql"] != 1235 || pm["grpc"] != 1236 { - t.Errorf("GetSrcTabletType(1).PortMap: want %v, got %v", endPoints.Entries[0].PortMap, pm) - } - - // Update with the wrong version. - if err := ts.UpdateEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, endPoints, version+1); err != topo.ErrBadVersion { - t.Fatalf("UpdateEndPoints(master): err = %v, want topo.ErrBadVersion", err) - } - // Update with the right version. 
- if err := ts.UpdateEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, endPoints, version); err != nil { - t.Fatalf("UpdateEndPoints(master): %v", err) - } - // Update existing EndPoints unconditionally. - if err := ts.UpdateEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, endPoints, -1); err != nil { - t.Fatalf("UpdateEndPoints(master): %v", err) - } - - if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_REPLICA, -1); err != topo.ErrNoNode { - t.Errorf("DeleteEndPoints(unknown): %v", err) - } - if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topodatapb.TabletType_MASTER, -1); err != nil { - t.Errorf("DeleteEndPoints(master): %v", err) - } - // test cell/keyspace/shard entries (SrvShard) srvShard := &topodatapb.SrvShard{ Name: "-10", diff --git a/go/vt/topotools/rebuild_shard.go b/go/vt/topotools/rebuild_shard.go index 9a459a621e..141f6cabde 100644 --- a/go/vt/topotools/rebuild_shard.go +++ b/go/vt/topotools/rebuild_shard.go @@ -5,7 +5,6 @@ package topotools import ( - "fmt" "sync" "github.com/youtube/vitess/go/trace" @@ -62,327 +61,7 @@ func RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keysp // single cell func rebuildCellSrvShard(ctx context.Context, log logutil.Logger, ts topo.Server, si *topo.ShardInfo, cell string) (err error) { log.Infof("rebuildCellSrvShard %v/%v in cell %v", si.Keyspace(), si.ShardName(), cell) - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Read existing EndPoints node versions, so we know if any - // changes sneak in after we read the tablets. - versions, err := getEndPointsVersions(ctx, ts, cell, si.Keyspace(), si.ShardName()) - - // Get all tablets in this cell/shard. 
- tablets, err := ts.GetTabletMapForShardByCell(ctx, si.Keyspace(), si.ShardName(), []string{cell}) - if err != nil { - if err != topo.ErrPartialResult { - return err - } - log.Warningf("Got ErrPartialResult from topo.GetTabletMapForShardByCell(%v), some tablets may not be added properly to serving graph", cell) - } - - // Build up the serving graph from scratch. - serving := make(map[topodatapb.TabletType]*topodatapb.EndPoints) - for _, tablet := range tablets { - // Only add serving types. - if !tablet.IsInServingGraph() { - continue - } - - // Check the Keyspace and Shard for the tablet are right. - if tablet.Keyspace != si.Keyspace() || tablet.Shard != si.ShardName() { - return fmt.Errorf("CRITICAL: tablet %v is in replication graph for shard %v/%v but belongs to shard %v:%v", tablet.Alias, si.Keyspace(), si.ShardName(), tablet.Keyspace, tablet.Shard) - } - - // Add the tablet to the list. - endpoints, ok := serving[tablet.Type] - if !ok { - endpoints = topo.NewEndPoints() - serving[tablet.Type] = endpoints - } - entry, err := topo.TabletEndPoint(tablet.Tablet) - if err != nil { - log.Warningf("TabletEndPoint failed for tablet %v: %v", tablet.Alias, err) - continue - } - endpoints.Entries = append(endpoints.Entries, entry) - } - - wg := sync.WaitGroup{} - fatalErrs := concurrency.AllErrorRecorder{} - retryErrs := concurrency.AllErrorRecorder{} - - // Write nodes that should exist. - for tabletType, endpoints := range serving { - wg.Add(1) - go func(tabletType topodatapb.TabletType, endpoints *topodatapb.EndPoints) { - defer wg.Done() - - log.Infof("saving serving graph for cell %v shard %v/%v tabletType %v", cell, si.Keyspace(), si.ShardName(), tabletType) - - version, ok := versions[tabletType] - if !ok { - // This type didn't exist when we first checked. - // Try to create, but only if it still doesn't exist. 
- if err := ts.CreateEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, endpoints); err != nil { - switch err { - case topo.ErrNodeExists: - retryErrs.RecordError(err) - default: - fatalErrs.RecordError(err) - } - } - return - } - - // Update only if the version matches. - if err := ts.UpdateEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, endpoints, version); err != nil { - switch err { - case topo.ErrBadVersion, topo.ErrNoNode: - retryErrs.RecordError(err) - default: - fatalErrs.RecordError(err) - } - } - }(tabletType, endpoints) - } - - // Delete nodes that shouldn't exist. - for tabletType, version := range versions { - if _, ok := serving[tabletType]; !ok { - wg.Add(1) - go func(tabletType topodatapb.TabletType, version int64) { - defer wg.Done() - if err := ts.DeleteEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, version); err != nil && err != topo.ErrNoNode { - switch err { - case topo.ErrNoNode: - // Someone else deleted it, which is fine. - case topo.ErrBadVersion: - retryErrs.RecordError(err) - default: - fatalErrs.RecordError(err) - } - } - }(tabletType, version) - } - } - - // Update srvShard object - wg.Add(1) - go func() { - defer wg.Done() - log.Infof("updating shard serving graph in cell %v for %v/%v", cell, si.Keyspace(), si.ShardName()) - if err := UpdateSrvShard(ctx, ts, cell, si); err != nil { - fatalErrs.RecordError(err) - log.Warningf("writing serving data in cell %v for %v/%v failed: %v", cell, si.Keyspace(), si.ShardName(), err) - } - }() - - wg.Wait() - - // If there are any fatal errors, give up. - if fatalErrs.HasErrors() { - return fatalErrs.Error() - } - // If there are any retry errors, try again. - if retryErrs.HasErrors() { - continue - } - // Otherwise, success! - return nil - } -} - -func getEndPointsVersions(ctx context.Context, ts topo.Server, cell, keyspace, shard string) (map[topodatapb.TabletType]int64, error) { - // Get all existing tablet types. 
- tabletTypes, err := ts.GetSrvTabletTypesPerShard(ctx, cell, keyspace, shard) - if err != nil { - if err == topo.ErrNoNode { - // This just means there aren't any EndPoints lists yet. - return nil, nil - } - return nil, err - } - - // Get node versions. - wg := sync.WaitGroup{} - errs := concurrency.AllErrorRecorder{} - versions := make(map[topodatapb.TabletType]int64) - mu := sync.Mutex{} - - for _, tabletType := range tabletTypes { - wg.Add(1) - go func(tabletType topodatapb.TabletType) { - defer wg.Done() - - _, version, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType) - if err != nil && err != topo.ErrNoNode { - errs.RecordError(err) - return - } - - mu.Lock() - versions[tabletType] = version - mu.Unlock() - }(tabletType) - } - - wg.Wait() - return versions, errs.Error() -} - -func updateEndpoint(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, endpoint *topodatapb.EndPoint) error { - return retryUpdateEndpoints(ctx, ts, cell, keyspace, shard, tabletType, true, /* create */ - func(endpoints *topodatapb.EndPoints) bool { - // Look for an existing entry to update. - for i := range endpoints.Entries { - if endpoints.Entries[i].Uid == endpoint.Uid { - if topo.EndPointEquality(endpoints.Entries[i], endpoint) { - // The entry already exists and is the same. - return false - } - // Update an existing entry. - endpoints.Entries[i] = endpoint - return true - } - } - // The entry doesn't exist, so add it. - endpoints.Entries = append(endpoints.Entries, endpoint) - return true - }) -} - -func removeEndpoint(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, tabletUID uint32) error { - err := retryUpdateEndpoints(ctx, ts, cell, keyspace, shard, tabletType, false, /* create */ - func(endpoints *topodatapb.EndPoints) bool { - // Make a new list, excluding the given UID. 
- entries := make([]*topodatapb.EndPoint, 0, len(endpoints.Entries)) - for _, ep := range endpoints.Entries { - if ep.Uid != tabletUID { - entries = append(entries, ep) - } - } - if len(entries) == len(endpoints.Entries) { - // Nothing was removed. Don't bother updating. - return false - } - // Do the update. - endpoints.Entries = entries - return true - }) - - if err == topo.ErrNoNode { - // Our goal is to remove one endpoint. If the list is empty, we're fine. - err = nil - } - return err -} - -func retryUpdateEndpoints(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, create bool, updateFunc func(*topodatapb.EndPoints) bool) error { - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Get or create EndPoints list. - endpoints, version, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType) - if err == topo.ErrNoNode && create { - // Create instead of updating. - endpoints = &topodatapb.EndPoints{} - if !updateFunc(endpoints) { - // Nothing changed. - return nil - } - err = ts.CreateEndPoints(ctx, cell, keyspace, shard, tabletType, endpoints) - if err == topo.ErrNodeExists { - // Someone else beat us to it. Try again. - continue - } - return err - } - if err != nil { - return err - } - - // We got an existing EndPoints list. Try to update. - if !updateFunc(endpoints) { - // Nothing changed. - return nil - } - - // If there's nothing left, we should delete the list entirely. - if len(endpoints.Entries) == 0 { - err = ts.DeleteEndPoints(ctx, cell, keyspace, shard, tabletType, version) - switch err { - case topo.ErrNoNode: - // Someone beat us to it, which is fine. - return nil - case topo.ErrBadVersion: - // Someone else updated the list. Try again. 
- continue - } - return err - } - - err = ts.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, endpoints, version) - if err == topo.ErrBadVersion || (err == topo.ErrNoNode && create) { - // Someone else updated or deleted the list in the meantime. Try again. - continue - } - return err - } -} - -// UpdateTabletEndpoints fixes up any entries in the serving graph that relate -// to a given tablet. -func UpdateTabletEndpoints(ctx context.Context, ts topo.Server, tablet *topodatapb.Tablet) (err error) { - srvTypes, err := ts.GetSrvTabletTypesPerShard(ctx, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard) - if err != nil { - if err != topo.ErrNoNode { - return err - } - // It's fine if there are no existing types. - srvTypes = nil - } - - wg := sync.WaitGroup{} - errs := concurrency.AllErrorRecorder{} - - // Update the list that the tablet is supposed to be in (if any). - if topo.IsInServingGraph(tablet.Type) { - endpoint, err := topo.TabletEndPoint(tablet) - if err != nil { - return err - } - - wg.Add(1) - go func() { - defer wg.Done() - errs.RecordError( - updateEndpoint(ctx, ts, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard, - tablet.Type, endpoint)) - }() - } - - // Remove it from any other lists it isn't supposed to be in. 
- for _, srvType := range srvTypes { - if srvType != tablet.Type { - wg.Add(1) - go func(tabletType topodatapb.TabletType) { - defer wg.Done() - errs.RecordError( - removeEndpoint(ctx, ts, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard, - tabletType, tablet.Alias.Uid)) - }(srvType) - } - } - - wg.Wait() - return errs.Error() + return UpdateSrvShard(ctx, ts, cell, si) } // UpdateSrvShard creates the SrvShard object based on the global ShardInfo, diff --git a/go/vt/topotools/rebuild_shard_test.go b/go/vt/topotools/rebuild_shard_test.go index 38d2b85aea..1faad167e3 100644 --- a/go/vt/topotools/rebuild_shard_test.go +++ b/go/vt/topotools/rebuild_shard_test.go @@ -6,7 +6,6 @@ package topotools_test import ( "fmt" - "strings" "testing" "golang.org/x/net/context" @@ -61,138 +60,24 @@ func TestRebuildShard(t *testing.T) { t.Fatalf("GetOrCreateShard: %v", err) } si.Cells = append(si.Cells, cells[0]) + si.MasterAlias = &topodatapb.TabletAlias{Cell: cells[0], Uid: 1} if err := ts.UpdateShard(ctx, si); err != nil { t.Fatalf("UpdateShard: %v", err) } - masterInfo := addTablet(ctx, t, ts, 1, cells[0], topodatapb.TabletType_MASTER) - replicaInfo := addTablet(ctx, t, ts, 2, cells[0], topodatapb.TabletType_REPLICA) + addTablet(ctx, t, ts, 1, cells[0], topodatapb.TabletType_MASTER) + addTablet(ctx, t, ts, 2, cells[0], topodatapb.TabletType_REPLICA) - // Do an initial rebuild. + // Do a rebuild. if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells); err != nil { t.Fatalf("RebuildShard: %v", err) } - // Check initial state. 
- ep, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topodatapb.TabletType_MASTER) + srvShard, err := ts.GetSrvShard(ctx, cells[0], testKeyspace, testShard) if err != nil { - t.Fatalf("GetEndPoints: %v", err) + t.Fatalf("GetSrvShard: %v", err) } - if got, want := len(ep.Entries), 1; got != want { - t.Fatalf("len(Entries) = %v, want %v", got, want) - } - ep, _, err = ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topodatapb.TabletType_REPLICA) - if err != nil { - t.Fatalf("GetEndPoints: %v", err) - } - if got, want := len(ep.Entries), 1; got != want { - t.Fatalf("len(Entries) = %v, want %v", got, want) - } - - // Make a change. - masterInfo.Type = topodatapb.TabletType_SPARE - if err := ts.UpdateTablet(ctx, masterInfo); err != nil { - t.Fatalf("UpdateTablet: %v", err) - } - if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells); err != nil { - t.Fatalf("RebuildShard: %v", err) - } - - // Make another change. - replicaInfo.Type = topodatapb.TabletType_SPARE - if err := ts.UpdateTablet(ctx, replicaInfo); err != nil { - t.Fatalf("UpdateTablet: %v", err) - } - if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells); err != nil { - t.Fatalf("RebuildShard: %v", err) - } - - // Check that the rebuild picked up both changes. 
- if _, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topodatapb.TabletType_MASTER); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { - t.Errorf("first change wasn't picked up by second rebuild") - } - if _, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topodatapb.TabletType_REPLICA); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { - t.Errorf("second change was overwritten by first rebuild finishing late") + if srvShard.MasterCell != cells[0] { + t.Errorf("Invalid cell name, got %v expected %v", srvShard.MasterCell, cells[0]) } } - -func TestUpdateTabletEndpoints(t *testing.T) { - ctx := context.Background() - cell := "test_cell" - - // Set up topology. - ts := zktestserver.New(t, []string{cell}) - si, err := GetOrCreateShard(ctx, ts, testKeyspace, testShard) - if err != nil { - t.Fatalf("GetOrCreateShard: %v", err) - } - si.Cells = append(si.Cells, cell) - if err := ts.UpdateShard(ctx, si); err != nil { - t.Fatalf("UpdateShard: %v", err) - } - - tablet1 := addTablet(ctx, t, ts, 1, cell, topodatapb.TabletType_MASTER).Tablet - tablet2 := addTablet(ctx, t, ts, 2, cell, topodatapb.TabletType_REPLICA).Tablet - - update := func(tablet *topodatapb.Tablet) { - if err := UpdateTabletEndpoints(ctx, ts, tablet); err != nil { - t.Fatalf("UpdateTabletEndpoints(%v): %v", tablet, err) - } - } - expect := func(tabletType topodatapb.TabletType, want int) { - eps, _, err := ts.GetEndPoints(ctx, cell, testKeyspace, testShard, tabletType) - if err != nil && err != topo.ErrNoNode { - t.Errorf("GetEndPoints(%v): %v", tabletType, err) - return - } - var got int - if err == nil { - got = len(eps.Entries) - if got == 0 { - t.Errorf("len(EndPoints) = 0, expected ErrNoNode instead") - } - } - if got != want { - t.Errorf("len(GetEndPoints(%v)) = %v, want %v. EndPoints = %v", tabletType, len(eps.Entries), want, eps) - } - } - - // Update tablets. This should create the serving graph dirs too. 
- update(tablet1) - expect(topodatapb.TabletType_MASTER, 1) - update(tablet2) - expect(topodatapb.TabletType_REPLICA, 1) - - // Re-update an identical tablet. - update(tablet1) - expect(topodatapb.TabletType_MASTER, 1) - - // Change a tablet, but keep it the same type. - tablet2.Hostname += "extra" - update(tablet2) - expect(topodatapb.TabletType_REPLICA, 1) - - // Move the master to replica. - tablet1.Type = topodatapb.TabletType_REPLICA - update(tablet1) - expect(topodatapb.TabletType_MASTER, 0) - expect(topodatapb.TabletType_REPLICA, 2) - - // Take a replica out of serving. - tablet1.Type = topodatapb.TabletType_SPARE - update(tablet1) - expect(topodatapb.TabletType_MASTER, 0) - expect(topodatapb.TabletType_REPLICA, 1) - - // Put it back to serving. - tablet1.Type = topodatapb.TabletType_REPLICA - update(tablet1) - expect(topodatapb.TabletType_MASTER, 0) - expect(topodatapb.TabletType_REPLICA, 2) - - // Move a replica to master. - tablet2.Type = topodatapb.TabletType_MASTER - update(tablet2) - expect(topodatapb.TabletType_MASTER, 1) - expect(topodatapb.TabletType_REPLICA, 1) -} diff --git a/go/vt/wrangler/shard.go b/go/vt/wrangler/shard.go index 6341566473..c8658e7ec9 100644 --- a/go/vt/wrangler/shard.go +++ b/go/vt/wrangler/shard.go @@ -183,16 +183,6 @@ func (wr *Wrangler) DeleteShard(ctx context.Context, keyspace, shard string, rec wr.Logger().Warningf("Cannot delete ShardReplication in cell %v for %v/%v: %v", cell, keyspace, shard, err) } - for _, t := range topoproto.AllTabletTypes { - if !topo.IsInServingGraph(t) { - continue - } - - if err := wr.ts.DeleteEndPoints(ctx, cell, keyspace, shard, t, -1); err != nil && err != topo.ErrNoNode { - wr.Logger().Warningf("Cannot delete EndPoints in cell %v for %v/%v/%v: %v", cell, keyspace, shard, t, err) - } - } - if err := wr.ts.DeleteSrvShard(ctx, cell, keyspace, shard); err != nil && err != topo.ErrNoNode { wr.Logger().Warningf("Cannot delete SrvShard in cell %v for %v/%v: %v", cell, keyspace, shard, err) } diff 
--git a/go/vt/zktopo/serving_graph.go b/go/vt/zktopo/serving_graph.go index 2387aac85a..55ccfd897e 100644 --- a/go/vt/zktopo/serving_graph.go +++ b/go/vt/zktopo/serving_graph.go @@ -17,7 +17,6 @@ import ( "launchpad.net/gozk/zookeeper" "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/zk" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -48,114 +47,6 @@ func zkPathForVtName(cell, keyspace, shard string, tabletType topodatapb.TabletT return path.Join(zkPathForVtShard(cell, keyspace, shard), strings.ToLower(tabletType.String())) } -// GetSrvTabletTypesPerShard is part of the topo.Server interface -func (zkts *Server) GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]topodatapb.TabletType, error) { - zkSgShardPath := zkPathForVtShard(cell, keyspace, shard) - children, _, err := zkts.zconn.Children(zkSgShardPath) - if err != nil { - if zookeeper.IsError(err, zookeeper.ZNONODE) { - err = topo.ErrNoNode - } - return nil, err - } - result := make([]topodatapb.TabletType, 0, len(children)) - for _, tt := range children { - // these two are used for locking - if tt == "action" || tt == "actionlog" { - continue - } - if ptt, err := topoproto.ParseTabletType(tt); err == nil { - result = append(result, ptt) - } - } - return result, nil -} - -// CreateEndPoints is part of the topo.Server interface -func (zkts *Server) CreateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints) error { - path := zkPathForVtName(cell, keyspace, shard, tabletType) - data, err := json.MarshalIndent(addrs, "", " ") - if err != nil { - return err - } - - // Create only if it doesn't exist. 
- _, err = zk.CreateRecursive(zkts.zconn, path, string(data), 0, zookeeper.WorldACL(zookeeper.PERM_ALL)) - if zookeeper.IsError(err, zookeeper.ZNODEEXISTS) { - err = topo.ErrNodeExists - } - return err -} - -// UpdateEndPoints is part of the topo.Server interface -func (zkts *Server) UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, addrs *topodatapb.EndPoints, existingVersion int64) error { - path := zkPathForVtName(cell, keyspace, shard, tabletType) - data, err := json.MarshalIndent(addrs, "", " ") - if err != nil { - return err - } - - if existingVersion == -1 { - // Update or create unconditionally. - _, err := zk.CreateRecursive(zkts.zconn, path, string(data), 0, zookeeper.WorldACL(zookeeper.PERM_ALL)) - if err != nil { - if zookeeper.IsError(err, zookeeper.ZNODEEXISTS) { - // Node already exists - just stomp away. Multiple writers shouldn't be here. - // We use RetryChange here because it won't update the node unnecessarily. - f := func(oldValue string, oldStat zk.Stat) (string, error) { - return string(data), nil - } - err = zkts.zconn.RetryChange(path, 0, zookeeper.WorldACL(zookeeper.PERM_ALL), f) - } - } - return err - } - - // Compare And Set - if _, err = zkts.zconn.Set(path, string(data), int(existingVersion)); err != nil { - if zookeeper.IsError(err, zookeeper.ZBADVERSION) { - err = topo.ErrBadVersion - } else if zookeeper.IsError(err, zookeeper.ZNONODE) { - err = topo.ErrNoNode - } - } - return err -} - -// GetEndPoints is part of the topo.Server interface -func (zkts *Server) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType) (*topodatapb.EndPoints, int64, error) { - path := zkPathForVtName(cell, keyspace, shard, tabletType) - data, stat, err := zkts.zconn.Get(path) - if err != nil { - if zookeeper.IsError(err, zookeeper.ZNONODE) { - err = topo.ErrNoNode - } - return nil, 0, err - } - result := &topodatapb.EndPoints{} - if len(data) > 0 { - if err := 
json.Unmarshal([]byte(data), result); err != nil { - return nil, 0, fmt.Errorf("EndPoints unmarshal failed: %v %v", data, err) - } - } - return result, int64(stat.Version()), nil -} - -// DeleteEndPoints is part of the topo.Server interface -func (zkts *Server) DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topodatapb.TabletType, existingVersion int64) error { - path := zkPathForVtName(cell, keyspace, shard, tabletType) - if err := zkts.zconn.Delete(path, int(existingVersion)); err != nil { - switch { - case zookeeper.IsError(err, zookeeper.ZNONODE): - err = topo.ErrNoNode - case zookeeper.IsError(err, zookeeper.ZBADVERSION): - err = topo.ErrBadVersion - } - return err - } - return nil -} - // UpdateSrvShard is part of the topo.Server interface func (zkts *Server) UpdateSrvShard(ctx context.Context, cell, keyspace, shard string, srvShard *topodatapb.SrvShard) error { path := zkPathForVtShard(cell, keyspace, shard) @@ -271,50 +162,6 @@ func (zkts *Server) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]str return children, nil } -var errSkipUpdate = fmt.Errorf("skip update") - -func (zkts *Server) updateTabletEndpoint(oldValue string, oldStat zk.Stat, addr *topodatapb.EndPoint) (newValue string, err error) { - if oldStat == nil { - // The incoming object doesn't exist - we haven't been placed in the serving - // graph yet, so don't update. Assume the next process that rebuilds the graph - // will get the updated tablet location. 
- return "", errSkipUpdate - } - - var addrs *topodatapb.EndPoints - if oldValue != "" { - addrs = &topodatapb.EndPoints{} - if len(oldValue) > 0 { - if err := json.Unmarshal([]byte(oldValue), addrs); err != nil { - return "", fmt.Errorf("EndPoints unmarshal failed: %v %v", oldValue, err) - } - } - - foundTablet := false - for i, entry := range addrs.Entries { - if entry.Uid == addr.Uid { - foundTablet = true - if !topo.EndPointEquality(entry, addr) { - addrs.Entries[i] = addr - } - break - } - } - - if !foundTablet { - addrs.Entries = append(addrs.Entries, addr) - } - } else { - addrs = topo.NewEndPoints() - addrs.Entries = append(addrs.Entries, addr) - } - data, err := json.MarshalIndent(addrs, "", " ") - if err != nil { - return "", err - } - return string(data), nil -} - // WatchSrvKeyspace is part of the topo.Server interface func (zkts *Server) WatchSrvKeyspace(ctx context.Context, cell, keyspace string) (<-chan *topodatapb.SrvKeyspace, error) { filePath := zkPathForVtKeyspace(cell, keyspace) From c7f808ed98617953c47109aca773f1174ee897a2 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 16 May 2016 07:36:19 -0700 Subject: [PATCH 07/27] Removing EndPoints message. 
--- go/vt/proto/topodata/topodata.pb.go | 176 ++++++++---------- go/vt/topo/naming.go | 5 - .../Topodata/EndPoint/HealthMapEntry.php | 121 ------------ php/src/Vitess/Proto/Topodata/EndPoints.php | 94 ---------- .../Proto/Topodata/Tablet/HealthMapEntry.php | 121 ------------ proto/topodata.proto | 5 - py/vtproto/topodata_pb2.py | 70 ++----- 7 files changed, 93 insertions(+), 499 deletions(-) delete mode 100644 php/src/Vitess/Proto/Topodata/EndPoint/HealthMapEntry.php delete mode 100644 php/src/Vitess/Proto/Topodata/EndPoints.php delete mode 100644 php/src/Vitess/Proto/Topodata/Tablet/HealthMapEntry.php diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index 470b9aa482..ac752efac9 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -16,7 +16,6 @@ It has these top-level messages: Keyspace ShardReplication EndPoint - EndPoints SrvShard ShardReference SrvKeyspace @@ -437,23 +436,6 @@ func (m *EndPoint) GetPortMap() map[string]int32 { return nil } -// EndPoints corresponds to a list of tablets. -type EndPoints struct { - Entries []*EndPoint `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` -} - -func (m *EndPoints) Reset() { *m = EndPoints{} } -func (m *EndPoints) String() string { return proto.CompactTextString(m) } -func (*EndPoints) ProtoMessage() {} -func (*EndPoints) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *EndPoints) GetEntries() []*EndPoint { - if m != nil { - return m.Entries - } - return nil -} - // SrvShard is a rollup node for the shard itself. type SrvShard struct { // Copied from Shard. 
@@ -466,7 +448,7 @@ type SrvShard struct { func (m *SrvShard) Reset() { *m = SrvShard{} } func (m *SrvShard) String() string { return proto.CompactTextString(m) } func (*SrvShard) ProtoMessage() {} -func (*SrvShard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*SrvShard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *SrvShard) GetKeyRange() *KeyRange { if m != nil { @@ -485,7 +467,7 @@ type ShardReference struct { func (m *ShardReference) Reset() { *m = ShardReference{} } func (m *ShardReference) String() string { return proto.CompactTextString(m) } func (*ShardReference) ProtoMessage() {} -func (*ShardReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*ShardReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *ShardReference) GetKeyRange() *KeyRange { if m != nil { @@ -508,7 +490,7 @@ type SrvKeyspace struct { func (m *SrvKeyspace) Reset() { *m = SrvKeyspace{} } func (m *SrvKeyspace) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace) ProtoMessage() {} -func (*SrvKeyspace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*SrvKeyspace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *SrvKeyspace) GetPartitions() []*SrvKeyspace_KeyspacePartition { if m != nil { @@ -535,7 +517,7 @@ func (m *SrvKeyspace_KeyspacePartition) Reset() { *m = SrvKeyspace_Keysp func (m *SrvKeyspace_KeyspacePartition) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {} func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{10, 0} + return fileDescriptor0, []int{9, 0} } func (m *SrvKeyspace_KeyspacePartition) GetShardReferences() []*ShardReference { @@ -557,7 +539,7 @@ type SrvKeyspace_ServedFrom struct { func (m *SrvKeyspace_ServedFrom) Reset() { *m = SrvKeyspace_ServedFrom{} } func (m 
*SrvKeyspace_ServedFrom) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace_ServedFrom) ProtoMessage() {} -func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 1} } +func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 1} } func init() { proto.RegisterType((*KeyRange)(nil), "topodata.KeyRange") @@ -572,7 +554,6 @@ func init() { proto.RegisterType((*ShardReplication)(nil), "topodata.ShardReplication") proto.RegisterType((*ShardReplication_Node)(nil), "topodata.ShardReplication.Node") proto.RegisterType((*EndPoint)(nil), "topodata.EndPoint") - proto.RegisterType((*EndPoints)(nil), "topodata.EndPoints") proto.RegisterType((*SrvShard)(nil), "topodata.SrvShard") proto.RegisterType((*ShardReference)(nil), "topodata.ShardReference") proto.RegisterType((*SrvKeyspace)(nil), "topodata.SrvKeyspace") @@ -583,78 +564,77 @@ func init() { } var fileDescriptor0 = []byte{ - // 1162 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x6e, 0xe3, 0x44, - 0x14, 0xc6, 0x4e, 0x9c, 0x26, 0x27, 0xd9, 0xac, 0x3b, 0x74, 0x91, 0x65, 0x84, 0xb6, 0xf2, 0x0d, - 0xab, 0x02, 0x01, 0x75, 0xf9, 0x29, 0x95, 0x90, 0x36, 0x0d, 0x59, 0xe8, 0xb6, 0x4d, 0xc3, 0x24, - 0xd5, 0xd2, 0x2b, 0xcb, 0x49, 0x66, 0xbb, 0xd6, 0x26, 0xb6, 0xf1, 0x38, 0x91, 0xf2, 0x0c, 0x7b, - 0xc1, 0x3d, 0x0f, 0xc1, 0x25, 0xb7, 0x3c, 0x11, 0x8f, 0x80, 0xc4, 0xcc, 0x19, 0x3b, 0x71, 0x92, - 0xb6, 0x74, 0xa1, 0x57, 0x9d, 0xe3, 0xf3, 0xff, 0xcd, 0xf9, 0xce, 0xa4, 0x50, 0x4f, 0xc2, 0x28, - 0x1c, 0x79, 0x89, 0xd7, 0x88, 0xe2, 0x30, 0x09, 0x49, 0x39, 0x93, 0x9d, 0x7d, 0x28, 0x9f, 0xb0, - 0x39, 0xf5, 0x82, 0x2b, 0x46, 0x76, 0xc0, 0xe0, 0x89, 0x17, 0x27, 0x96, 0xb6, 0xab, 0x3d, 0xa9, - 0x51, 0x25, 0x10, 0x13, 0x0a, 0x2c, 0x18, 0x59, 0x3a, 0x7e, 0x93, 0x47, 0xe7, 0x29, 0x54, 0xfb, - 0xde, 0x60, 0xcc, 0x92, 0xe6, 0xd8, 0xf7, 0x38, 0x21, 0x50, 0x1c, 0xb2, 0xf1, 0x18, 0xbd, 
0x2a, - 0x14, 0xcf, 0xd2, 0x69, 0xea, 0x2b, 0xa7, 0x07, 0x54, 0x1e, 0x9d, 0xbf, 0x0b, 0x50, 0x52, 0x5e, - 0xe4, 0x13, 0x30, 0x3c, 0xe9, 0x89, 0x1e, 0xd5, 0xfd, 0x47, 0x8d, 0x45, 0x75, 0xb9, 0xb0, 0x54, - 0xd9, 0x10, 0x1b, 0xca, 0xaf, 0x43, 0x9e, 0x04, 0xde, 0x84, 0x61, 0xb8, 0x0a, 0x5d, 0xc8, 0xa4, - 0x0e, 0xba, 0x1f, 0x59, 0x05, 0xfc, 0x2a, 0x4e, 0xe4, 0x00, 0xca, 0x51, 0x18, 0x27, 0xee, 0xc4, - 0x8b, 0xac, 0xe2, 0x6e, 0x41, 0xc4, 0xfe, 0x68, 0x3d, 0x76, 0xa3, 0x2b, 0x0c, 0xce, 0xbc, 0xa8, - 0x1d, 0x24, 0xf1, 0x9c, 0x6e, 0x45, 0x4a, 0x92, 0x59, 0xde, 0xb0, 0x39, 0x8f, 0xbc, 0x21, 0xb3, - 0x0c, 0x95, 0x25, 0x93, 0x11, 0x96, 0xd7, 0x5e, 0x3c, 0xb2, 0x4a, 0xa8, 0x50, 0x02, 0xf9, 0x1c, - 0x2a, 0xc2, 0xc2, 0x8d, 0x25, 0x72, 0xd6, 0x16, 0x36, 0x42, 0x96, 0xc9, 0x32, 0x4c, 0x31, 0x8c, - 0x42, 0xf7, 0x09, 0x14, 0x93, 0x79, 0xc4, 0xac, 0xb2, 0xb0, 0xad, 0xef, 0xef, 0xac, 0x17, 0xd6, - 0x17, 0x3a, 0x8a, 0x16, 0xc2, 0xd2, 0x1c, 0x0d, 0x5c, 0xd9, 0xa1, 0x1b, 0xce, 0x58, 0x1c, 0xfb, - 0x23, 0x66, 0x55, 0x30, 0x77, 0x7d, 0x34, 0xe8, 0x88, 0xcf, 0xe7, 0xe9, 0x57, 0xd2, 0x10, 0x31, - 0xbd, 0x2b, 0x6e, 0x01, 0x36, 0x6b, 0x6f, 0x34, 0xdb, 0x17, 0x4a, 0xd5, 0x29, 0xda, 0xd9, 0x87, - 0x50, 0xcb, 0xf7, 0x2f, 0xaf, 0x49, 0xd4, 0x97, 0xde, 0x9c, 0x3c, 0xca, 0x66, 0x67, 0xde, 0x78, - 0xaa, 0xb0, 0x36, 0xa8, 0x12, 0x0e, 0xf5, 0x03, 0xcd, 0xfe, 0x06, 0x2a, 0x8b, 0x70, 0xff, 0xe6, - 0x58, 0xc9, 0x39, 0xbe, 0x28, 0x96, 0xab, 0x66, 0xcd, 0x79, 0x5b, 0x02, 0xa3, 0x87, 0xc8, 0x1d, - 0x40, 0x6d, 0xe2, 0xf1, 0x84, 0xc5, 0xee, 0x1d, 0xa6, 0xa0, 0xaa, 0x4c, 0xd5, 0xa4, 0xad, 0x60, - 0xae, 0xdf, 0x01, 0xf3, 0xef, 0xa0, 0xc6, 0x59, 0x3c, 0x63, 0x23, 0x57, 0x02, 0xcb, 0xc5, 0xa8, - 0xac, 0xe1, 0x84, 0x15, 0x35, 0x7a, 0x68, 0x83, 0x37, 0x50, 0xe5, 0x8b, 0x33, 0x27, 0xcf, 0xe0, - 0x01, 0x0f, 0xa7, 0xf1, 0x90, 0xb9, 0x78, 0xe7, 0x3c, 0x1d, 0xaa, 0x0f, 0x37, 0xfc, 0xd1, 0x08, - 0xcf, 0xb4, 0xc6, 0x97, 0x02, 0x97, 0xa8, 0x48, 0x3e, 0x70, 0x31, 0x54, 0x05, 0x89, 0x0a, 0x0a, - 0xe4, 0x39, 0x3c, 0x4c, 0xb0, 
0x47, 0x77, 0x18, 0x0a, 0x38, 0x43, 0xa1, 0x2f, 0xad, 0x8f, 0xab, - 0x8a, 0xac, 0xa0, 0x68, 0x29, 0x2b, 0x5a, 0x4f, 0xf2, 0x22, 0xb7, 0x2f, 0x01, 0x96, 0xa5, 0x93, - 0xaf, 0xa0, 0x9a, 0x46, 0xc5, 0x39, 0xd3, 0x6e, 0x99, 0x33, 0x48, 0x16, 0xe7, 0x65, 0x89, 0x7a, - 0xae, 0x44, 0xfb, 0x37, 0x0d, 0xaa, 0xb9, 0xb6, 0x32, 0x42, 0x6b, 0x0b, 0x42, 0xaf, 0x50, 0x46, - 0xbf, 0x89, 0x32, 0x85, 0x1b, 0x29, 0x53, 0xbc, 0xc3, 0xf5, 0x7d, 0x00, 0x25, 0x2c, 0x34, 0x83, - 0x2f, 0x95, 0xec, 0x3f, 0x35, 0x78, 0xb0, 0x82, 0xcc, 0xbd, 0xf6, 0x4e, 0xf6, 0xe1, 0xd1, 0xc8, - 0xe7, 0xd2, 0xca, 0xfd, 0x65, 0xca, 0xe2, 0xb9, 0x2b, 0x67, 0xc2, 0x17, 0x6d, 0xca, 0x6e, 0xca, - 0xf4, 0xfd, 0x54, 0xf9, 0x93, 0xd4, 0xf5, 0x94, 0x8a, 0x7c, 0x06, 0x64, 0x30, 0xf6, 0x86, 0x6f, - 0xc6, 0xbe, 0x18, 0x57, 0x31, 0x6e, 0xaa, 0xec, 0x22, 0x86, 0xdd, 0xce, 0x69, 0xb0, 0x10, 0xee, - 0xfc, 0xa5, 0xe3, 0xde, 0x55, 0x68, 0x7d, 0x01, 0x3b, 0x08, 0x90, 0x1f, 0x5c, 0x89, 0x81, 0x18, - 0x4f, 0x27, 0x01, 0x92, 0x3f, 0x65, 0x17, 0xc9, 0x74, 0x2d, 0x54, 0x49, 0xfe, 0x93, 0x17, 0x9b, - 0x1e, 0xd8, 0xb7, 0x8e, 0x7d, 0x5b, 0x2b, 0xa0, 0x62, 0x8e, 0x63, 0x35, 0xdd, 0x6b, 0xb1, 0x10, - 0x83, 0x3d, 0xd8, 0xe6, 0xd1, 0xd8, 0x4f, 0xd4, 0x8c, 0x8b, 0x70, 0xd3, 0x20, 0xc1, 0x4e, 0x0d, - 0xfa, 0x10, 0x15, 0x38, 0x00, 0x2d, 0xf9, 0x59, 0x10, 0x22, 0xe3, 0xd3, 0xab, 0x38, 0x9c, 0xf0, - 0xcd, 0x25, 0x9b, 0xe5, 0x4b, 0x29, 0xf5, 0x5c, 0x58, 0x65, 0x94, 0x92, 0x67, 0x6e, 0x4f, 0xb3, - 0x91, 0x95, 0xe2, 0xfd, 0x5e, 0x5b, 0x7e, 0x20, 0x0b, 0xab, 0x03, 0xe9, 0xbc, 0xd5, 0xc0, 0x54, - 0xfc, 0x64, 0xa2, 0xa5, 0xa1, 0x97, 0xf8, 0x61, 0x20, 0xb2, 0x1b, 0x41, 0x38, 0x62, 0x72, 0x03, - 0xc9, 0x36, 0x1e, 0xaf, 0x91, 0x2f, 0x67, 0xda, 0xe8, 0x08, 0x3b, 0xaa, 0xac, 0xed, 0x67, 0x50, - 0x94, 0xa2, 0xdc, 0x63, 0x69, 0xf1, 0x77, 0xd9, 0x63, 0xc9, 0x52, 0x70, 0x7e, 0xd7, 0xa0, 0xdc, - 0x0e, 0x46, 0xdd, 0xd0, 0x0f, 0x92, 0x6b, 0x98, 0x25, 0x1e, 0x54, 0xf9, 0xc4, 0xa5, 0xac, 0xc2, - 0x33, 0x39, 0xcc, 0x3d, 0x6d, 0x85, 0xf5, 0x72, 0xb3, 0x58, 0xd7, 
0x3f, 0x6e, 0xff, 0x67, 0xeb, - 0x8b, 0xe5, 0x5d, 0x34, 0x0d, 0xe7, 0x5b, 0xa8, 0x64, 0x39, 0x38, 0xf9, 0x14, 0xb6, 0x98, 0x88, - 0xe3, 0x2f, 0x80, 0x23, 0x9b, 0x95, 0xd0, 0xcc, 0xc4, 0x89, 0xa0, 0xdc, 0x8b, 0x67, 0x6a, 0x89, - 0x88, 0xc6, 0x72, 0x83, 0x8d, 0xe7, 0x77, 0xdf, 0xe9, 0x8f, 0x21, 0x7d, 0x13, 0x5c, 0xfc, 0xd5, - 0xa1, 0x6e, 0x1a, 0xd4, 0xa7, 0x96, 0xf8, 0xe2, 0x5c, 0x40, 0x3d, 0xbd, 0xbf, 0x57, 0x2c, 0x66, - 0x81, 0x20, 0xd8, 0x7d, 0xe4, 0x75, 0xfe, 0x28, 0x8a, 0x8d, 0x18, 0xcf, 0x16, 0xac, 0xfd, 0x01, - 0x20, 0x12, 0xbf, 0x8f, 0x7c, 0x39, 0x1f, 0x19, 0x12, 0x1f, 0xe7, 0x46, 0x68, 0x69, 0xba, 0x60, - 0x45, 0x37, 0xb3, 0xa7, 0x39, 0xd7, 0x1b, 0xe9, 0xaf, 0xbf, 0x33, 0xfd, 0x0b, 0xff, 0x81, 0xfe, - 0x4d, 0xa8, 0xe6, 0x28, 0x9d, 0x32, 0x7a, 0xf7, 0xfa, 0x3e, 0x72, 0xa4, 0x86, 0x25, 0xa9, 0xaf, - 0xdf, 0x20, 0xc6, 0xb5, 0x1b, 0xc4, 0xfe, 0x55, 0x83, 0xed, 0x0d, 0x38, 0xe4, 0x1e, 0xc8, 0xbd, - 0xd3, 0xb7, 0xef, 0x81, 0xe5, 0x03, 0x4d, 0x5a, 0x60, 0xaa, 0x94, 0x71, 0x76, 0xd5, 0x6a, 0x25, - 0x54, 0xf3, 0x18, 0xac, 0xce, 0x82, 0xa8, 0x68, 0x45, 0xe6, 0xb6, 0x7b, 0x1f, 0x1b, 0xe9, 0x96, - 0xc7, 0x70, 0x6f, 0x1f, 0xea, 0xab, 0xf7, 0x40, 0x2a, 0x60, 0x5c, 0x74, 0x7a, 0xed, 0xbe, 0xf9, - 0x1e, 0x01, 0x28, 0x5d, 0x1c, 0x77, 0xfa, 0x5f, 0x7f, 0x69, 0x6a, 0xf2, 0xf3, 0xd1, 0x65, 0xbf, - 0xdd, 0x33, 0xf5, 0x3d, 0x01, 0x13, 0x2c, 0x53, 0x91, 0x2a, 0x6c, 0x5d, 0x74, 0x4e, 0x3a, 0xe7, - 0x2f, 0x3b, 0xca, 0xe5, 0xac, 0xd9, 0xeb, 0xb7, 0xa9, 0x70, 0x11, 0x0a, 0xda, 0xee, 0x9e, 0x1e, - 0xb7, 0x9a, 0xa6, 0x2e, 0x15, 0xf4, 0xfb, 0xf3, 0xce, 0xe9, 0xa5, 0x59, 0xc0, 0x58, 0xcd, 0x7e, - 0xeb, 0x47, 0x75, 0xec, 0x75, 0x9b, 0xb4, 0x6d, 0x16, 0x05, 0xf3, 0x6b, 0xed, 0x9f, 0xbb, 0x6d, - 0x7a, 0x7c, 0xd6, 0xee, 0xf4, 0x9b, 0xa7, 0xa6, 0x21, 0x7d, 0x8e, 0x9a, 0xad, 0x93, 0x8b, 0xae, - 0x59, 0x52, 0xc1, 0x7a, 0xfd, 0x73, 0x61, 0xba, 0x25, 0x15, 0x2f, 0xcf, 0xe9, 0x89, 0xc8, 0x52, - 0xb6, 0x75, 0x53, 0x3b, 0xb2, 0xc1, 0x1a, 0x86, 0x93, 0xc6, 0x3c, 0x9c, 0x26, 0xd3, 0x01, 0x6b, - 0xcc, 
0xfc, 0x84, 0x71, 0xae, 0xfe, 0x9d, 0x18, 0x94, 0xf0, 0xcf, 0xd3, 0x7f, 0x02, 0x00, 0x00, - 0xff, 0xff, 0x24, 0x31, 0x3d, 0x8a, 0x67, 0x0c, 0x00, 0x00, + // 1143 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x2e, 0x29, 0x51, 0x96, 0x46, 0x8a, 0xc2, 0x6c, 0x9d, 0x82, 0x60, 0x51, 0xc4, 0xd0, 0xa5, + 0x81, 0x8b, 0xaa, 0x85, 0xd3, 0x1f, 0xc3, 0x40, 0x81, 0xc8, 0xaa, 0xd2, 0x3a, 0xb6, 0x65, 0x75, + 0x25, 0x23, 0xf5, 0x89, 0xa0, 0xa4, 0x8d, 0x43, 0x44, 0x22, 0x59, 0xee, 0x4a, 0x80, 0x9e, 0x21, + 0x87, 0xde, 0xfb, 0x10, 0x3d, 0xf6, 0xda, 0x27, 0xea, 0x23, 0x14, 0xe8, 0xee, 0x2c, 0x29, 0x51, + 0xf2, 0x4f, 0x9d, 0xd6, 0x27, 0xcf, 0xec, 0xfc, 0xec, 0xcc, 0xec, 0xf7, 0x0d, 0x65, 0xa8, 0x8b, + 0x28, 0x8e, 0xc6, 0xbe, 0xf0, 0x9b, 0x71, 0x12, 0x89, 0x88, 0x94, 0x33, 0xbd, 0xb1, 0x07, 0xe5, + 0x63, 0xb6, 0xa0, 0x7e, 0x78, 0xc9, 0xc8, 0x36, 0x58, 0x5c, 0xf8, 0x89, 0x70, 0x8c, 0x1d, 0xe3, + 0x69, 0x8d, 0x6a, 0x85, 0xd8, 0x50, 0x60, 0xe1, 0xd8, 0x31, 0xf1, 0x4c, 0x89, 0x8d, 0x67, 0x50, + 0x1d, 0xf8, 0xc3, 0x09, 0x13, 0xad, 0x49, 0xe0, 0x73, 0x42, 0xa0, 0x38, 0x62, 0x93, 0x09, 0x46, + 0x55, 0x28, 0xca, 0x2a, 0x68, 0x16, 0xe8, 0xa0, 0x07, 0x54, 0x89, 0x8d, 0xbf, 0x0b, 0x50, 0xd2, + 0x51, 0xe4, 0x33, 0xb0, 0x7c, 0x15, 0x89, 0x11, 0xd5, 0xbd, 0xc7, 0xcd, 0x65, 0x75, 0xb9, 0xb4, + 0x54, 0xfb, 0x10, 0x17, 0xca, 0x6f, 0x22, 0x2e, 0x42, 0x7f, 0xca, 0x30, 0x5d, 0x85, 0x2e, 0x75, + 0x52, 0x07, 0x33, 0x88, 0x9d, 0x02, 0x9e, 0x4a, 0x89, 0xec, 0x43, 0x39, 0x8e, 0x12, 0xe1, 0x4d, + 0xfd, 0xd8, 0x29, 0xee, 0x14, 0x64, 0xee, 0x4f, 0x36, 0x73, 0x37, 0x7b, 0xd2, 0xe1, 0xd4, 0x8f, + 0x3b, 0xa1, 0x48, 0x16, 0x74, 0x2b, 0xd6, 0x9a, 0xba, 0xe5, 0x2d, 0x5b, 0xf0, 0xd8, 0x1f, 0x31, + 0xc7, 0xd2, 0xb7, 0x64, 0x3a, 0x8e, 0xe5, 0x8d, 0x9f, 0x8c, 0x9d, 0x12, 0x1a, 0xb4, 0x42, 0xbe, + 0x80, 0x8a, 0xf4, 0xf0, 0x12, 0x35, 0x39, 0x67, 0x0b, 0x1b, 0x21, 0xab, 0xcb, 0xb2, 0x99, 0x62, + 0x1a, 0x3d, 0xdd, 0xa7, 0x50, 
0x14, 0x8b, 0x98, 0x39, 0x65, 0xe9, 0x5b, 0xdf, 0xdb, 0xde, 0x2c, + 0x6c, 0x20, 0x6d, 0x14, 0x3d, 0xa4, 0xa7, 0x3d, 0x1e, 0x7a, 0xaa, 0x43, 0x2f, 0x9a, 0xb3, 0x24, + 0x09, 0xc6, 0xcc, 0xa9, 0xe0, 0xdd, 0xf5, 0xf1, 0xb0, 0x2b, 0x8f, 0xcf, 0xd2, 0x53, 0xd2, 0x94, + 0x39, 0xfd, 0x4b, 0xee, 0x00, 0x36, 0xeb, 0x5e, 0x69, 0x76, 0x20, 0x8d, 0xba, 0x53, 0xf4, 0x73, + 0x0f, 0xa0, 0x96, 0xef, 0x5f, 0x3d, 0x93, 0xac, 0x2f, 0x7d, 0x39, 0x25, 0xaa, 0x66, 0xe7, 0xfe, + 0x64, 0xa6, 0x67, 0x6d, 0x51, 0xad, 0x1c, 0x98, 0xfb, 0x86, 0xfb, 0x2d, 0x54, 0x96, 0xe9, 0xfe, + 0x2d, 0xb0, 0x92, 0x0b, 0x7c, 0x59, 0x2c, 0x57, 0xed, 0x5a, 0xe3, 0x5d, 0x09, 0xac, 0x3e, 0x4e, + 0x6e, 0x1f, 0x6a, 0x53, 0x9f, 0x0b, 0x96, 0x78, 0x77, 0x40, 0x41, 0x55, 0xbb, 0x6a, 0xa4, 0xad, + 0xcd, 0xdc, 0xbc, 0xc3, 0xcc, 0xbf, 0x83, 0x1a, 0x67, 0xc9, 0x9c, 0x8d, 0x3d, 0x35, 0x58, 0x2e, + 0xa1, 0xb2, 0x31, 0x27, 0xac, 0xa8, 0xd9, 0x47, 0x1f, 0x7c, 0x81, 0x2a, 0x5f, 0xca, 0x9c, 0x3c, + 0x87, 0x07, 0x3c, 0x9a, 0x25, 0x23, 0xe6, 0xe1, 0x9b, 0xf3, 0x14, 0x54, 0x1f, 0x5f, 0x89, 0x47, + 0x27, 0x94, 0x69, 0x8d, 0xaf, 0x14, 0xae, 0xa6, 0xa2, 0xf8, 0xc0, 0x25, 0xa8, 0x0a, 0x6a, 0x2a, + 0xa8, 0x90, 0x17, 0xf0, 0x50, 0x60, 0x8f, 0xde, 0x28, 0x92, 0xe3, 0x8c, 0xa4, 0xbd, 0xb4, 0x09, + 0x57, 0x9d, 0x59, 0x8f, 0xa2, 0xad, 0xbd, 0x68, 0x5d, 0xe4, 0x55, 0xee, 0x5e, 0x00, 0xac, 0x4a, + 0x27, 0x5f, 0x43, 0x35, 0xcd, 0x8a, 0x38, 0x33, 0x6e, 0xc1, 0x19, 0x88, 0xa5, 0xbc, 0x2a, 0xd1, + 0xcc, 0x95, 0xe8, 0xfe, 0x66, 0x40, 0x35, 0xd7, 0x56, 0x46, 0x68, 0x63, 0x49, 0xe8, 0x35, 0xca, + 0x98, 0x37, 0x51, 0xa6, 0x70, 0x23, 0x65, 0x8a, 0x77, 0x78, 0xbe, 0x8f, 0xa0, 0x84, 0x85, 0x66, + 0xe3, 0x4b, 0x35, 0xf7, 0x4f, 0x03, 0x1e, 0xac, 0x4d, 0xe6, 0x5e, 0x7b, 0x27, 0x7b, 0xf0, 0x78, + 0x1c, 0x70, 0xe5, 0xe5, 0xfd, 0x32, 0x63, 0xc9, 0xc2, 0x53, 0x98, 0x08, 0x64, 0x9b, 0xaa, 0x9b, + 0x32, 0xfd, 0x30, 0x35, 0xfe, 0xa4, 0x6c, 0x7d, 0x6d, 0x22, 0x9f, 0x03, 0x19, 0x4e, 0xfc, 0xd1, + 0xdb, 0x49, 0x20, 0xe1, 0x2a, 0xe1, 0xa6, 0xcb, 0x2e, 0x62, 0xda, 
0x47, 0x39, 0x0b, 0x16, 0xc2, + 0x1b, 0x7f, 0x99, 0xb8, 0x77, 0xf5, 0xb4, 0xbe, 0x84, 0x6d, 0x1c, 0x50, 0x10, 0x5e, 0x4a, 0x40, + 0x4c, 0x66, 0xd3, 0x10, 0xc9, 0x9f, 0xb2, 0x8b, 0x64, 0xb6, 0x36, 0x9a, 0x14, 0xff, 0xc9, 0xcb, + 0xab, 0x11, 0xd8, 0xb7, 0x89, 0x7d, 0x3b, 0x6b, 0x43, 0xc5, 0x3b, 0x8e, 0x34, 0xba, 0x37, 0x72, + 0xe1, 0x0c, 0x76, 0xe1, 0x11, 0x8f, 0x27, 0x81, 0xd0, 0x18, 0x97, 0xe9, 0x66, 0xa1, 0xc0, 0x4e, + 0x2d, 0xfa, 0x10, 0x0d, 0x08, 0x80, 0xb6, 0x3a, 0x96, 0x84, 0xc8, 0xf8, 0xf4, 0x3a, 0x89, 0xa6, + 0xfc, 0xea, 0x92, 0xcd, 0xee, 0x4b, 0x29, 0xf5, 0x42, 0x7a, 0x65, 0x94, 0x52, 0x32, 0x77, 0x67, + 0x19, 0x64, 0x95, 0x7a, 0xbf, 0xcf, 0x96, 0x07, 0x64, 0x61, 0x1d, 0x90, 0x8d, 0x77, 0x06, 0xd8, + 0x9a, 0x9f, 0x4c, 0xb6, 0x34, 0xf2, 0x45, 0x10, 0x85, 0xf2, 0x76, 0x2b, 0x8c, 0xc6, 0x4c, 0x6d, + 0x20, 0xd5, 0xc6, 0x93, 0x0d, 0xf2, 0xe5, 0x5c, 0x9b, 0x5d, 0xe9, 0x47, 0xb5, 0xb7, 0xfb, 0x1c, + 0x8a, 0x4a, 0x55, 0x7b, 0x2c, 0x2d, 0xfe, 0x2e, 0x7b, 0x4c, 0xac, 0x94, 0xc6, 0xef, 0x06, 0x94, + 0x3b, 0xe1, 0xb8, 0x17, 0x05, 0xa1, 0xb8, 0x86, 0x59, 0xf2, 0x83, 0xaa, 0x3e, 0x71, 0x29, 0xab, + 0x50, 0x26, 0x07, 0xb9, 0x4f, 0x5b, 0x61, 0xb3, 0xdc, 0x2c, 0xd7, 0xf5, 0x1f, 0xb7, 0xff, 0xb3, + 0xf5, 0xe5, 0xf2, 0x2e, 0xda, 0x56, 0x23, 0x86, 0x72, 0x3f, 0x99, 0xeb, 0x4d, 0x20, 0xab, 0xcb, + 0xa1, 0x13, 0xe5, 0xf7, 0x5f, 0xcc, 0x4f, 0x20, 0x5d, 0xec, 0x1e, 0xfe, 0x74, 0xd0, 0xcf, 0x05, + 0xfa, 0xa8, 0x2d, 0x4f, 0x1a, 0xe7, 0x50, 0x4f, 0x1f, 0xe1, 0x35, 0x4b, 0x58, 0x28, 0x59, 0x72, + 0x1f, 0xf7, 0x36, 0xfe, 0x28, 0xca, 0xb5, 0x96, 0xcc, 0x97, 0xd4, 0xfb, 0x01, 0x20, 0x96, 0x3f, + 0x72, 0x02, 0xf5, 0xc8, 0x19, 0x0e, 0x3e, 0xcd, 0xe1, 0x60, 0xe5, 0xba, 0x84, 0x76, 0x2f, 0xf3, + 0xa7, 0xb9, 0xd0, 0x1b, 0x39, 0x6c, 0xbe, 0x37, 0x87, 0x0b, 0xff, 0x81, 0xc3, 0x2d, 0xa8, 0xe6, + 0x78, 0x99, 0xd2, 0x72, 0xe7, 0xfa, 0x3e, 0x72, 0xcc, 0x84, 0x15, 0x33, 0xaf, 0x5f, 0x03, 0xd6, + 0xb5, 0x6b, 0xc0, 0xfd, 0xd5, 0x80, 0x47, 0x57, 0xc6, 0xa1, 0xc8, 0x9c, 0xfb, 0xd8, 0xde, 0x4e, + 0xe6, 
0xd5, 0x57, 0x96, 0xb4, 0xc1, 0xd6, 0x57, 0x26, 0xd9, 0x53, 0x6b, 0x5e, 0x57, 0xf3, 0x33, + 0x58, 0xc7, 0x82, 0xac, 0x68, 0x4d, 0xe7, 0xae, 0x77, 0x1f, 0x6b, 0xe5, 0x96, 0x2f, 0xda, 0xee, + 0x1e, 0xd4, 0xd7, 0xdf, 0x81, 0x54, 0xc0, 0x3a, 0xef, 0xf6, 0x3b, 0x03, 0xfb, 0x03, 0x02, 0x50, + 0x3a, 0x3f, 0xea, 0x0e, 0xbe, 0xf9, 0xca, 0x36, 0xd4, 0xf1, 0xe1, 0xc5, 0xa0, 0xd3, 0xb7, 0xcd, + 0x5d, 0x39, 0x26, 0x58, 0x5d, 0x45, 0xaa, 0xb0, 0x75, 0xde, 0x3d, 0xee, 0x9e, 0xbd, 0xea, 0xea, + 0x90, 0xd3, 0x56, 0x7f, 0xd0, 0xa1, 0x32, 0x44, 0x1a, 0x68, 0xa7, 0x77, 0x72, 0xd4, 0x6e, 0xd9, + 0xa6, 0x32, 0xd0, 0xef, 0xcf, 0xba, 0x27, 0x17, 0x76, 0x01, 0x73, 0xb5, 0x06, 0xed, 0x1f, 0xb5, + 0xd8, 0xef, 0xb5, 0x68, 0xc7, 0x2e, 0x4a, 0xfa, 0xd6, 0x3a, 0x3f, 0xf7, 0x3a, 0xf4, 0xe8, 0xb4, + 0xd3, 0x1d, 0xb4, 0x4e, 0x6c, 0x4b, 0xc5, 0x1c, 0xb6, 0xda, 0xc7, 0xe7, 0x3d, 0xbb, 0xa4, 0x93, + 0xf5, 0x07, 0x67, 0xd2, 0x75, 0x4b, 0x19, 0x5e, 0x9d, 0xd1, 0x63, 0x79, 0x4b, 0xd9, 0x35, 0x6d, + 0xe3, 0xd0, 0x05, 0x67, 0x14, 0x4d, 0x9b, 0x8b, 0x68, 0x26, 0x66, 0x43, 0xd6, 0x9c, 0x07, 0x82, + 0x71, 0xae, 0xff, 0x27, 0x18, 0x96, 0xf0, 0xcf, 0xb3, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe4, + 0x42, 0x70, 0x73, 0x2c, 0x0c, 0x00, 0x00, } diff --git a/go/vt/topo/naming.go b/go/vt/topo/naming.go index 997e19af93..38567bda4b 100644 --- a/go/vt/topo/naming.go +++ b/go/vt/topo/naming.go @@ -57,8 +57,3 @@ func EndPointEquality(left, right *topodatapb.EndPoint) bool { } return true } - -// NewEndPoints creates a EndPoints with a pre-allocated slice for Entries. 
-func NewEndPoints() *topodatapb.EndPoints { - return &topodatapb.EndPoints{Entries: make([]*topodatapb.EndPoint, 0, 8)} -} diff --git a/php/src/Vitess/Proto/Topodata/EndPoint/HealthMapEntry.php b/php/src/Vitess/Proto/Topodata/EndPoint/HealthMapEntry.php deleted file mode 100644 index fb4bed1a6a..0000000000 --- a/php/src/Vitess/Proto/Topodata/EndPoint/HealthMapEntry.php +++ /dev/null @@ -1,121 +0,0 @@ -number = 1; - $f->name = "key"; - $f->type = \DrSlump\Protobuf::TYPE_STRING; - $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $descriptor->addField($f); - - // OPTIONAL STRING value = 2 - $f = new \DrSlump\Protobuf\Field(); - $f->number = 2; - $f->name = "value"; - $f->type = \DrSlump\Protobuf::TYPE_STRING; - $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $descriptor->addField($f); - - foreach (self::$__extensions as $cb) { - $descriptor->addField($cb(), true); - } - - return $descriptor; - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasKey(){ - return $this->_has(1); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\EndPoint\HealthMapEntry - */ - public function clearKey(){ - return $this->_clear(1); - } - - /** - * Get value - * - * @return string - */ - public function getKey(){ - return $this->_get(1); - } - - /** - * Set value - * - * @param string $value - * @return \Vitess\Proto\Topodata\EndPoint\HealthMapEntry - */ - public function setKey( $value){ - return $this->_set(1, $value); - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasValue(){ - return $this->_has(2); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\EndPoint\HealthMapEntry - */ - public function clearValue(){ - return $this->_clear(2); - } - - /** - * Get value - * - * @return string - */ - public function getValue(){ - return $this->_get(2); - } - - /** - * Set value - * - * @param string $value - * @return \Vitess\Proto\Topodata\EndPoint\HealthMapEntry - */ - public function setValue( 
$value){ - return $this->_set(2, $value); - } - } -} - diff --git a/php/src/Vitess/Proto/Topodata/EndPoints.php b/php/src/Vitess/Proto/Topodata/EndPoints.php deleted file mode 100644 index a88abbfa4f..0000000000 --- a/php/src/Vitess/Proto/Topodata/EndPoints.php +++ /dev/null @@ -1,94 +0,0 @@ -number = 1; - $f->name = "entries"; - $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; - $f->rule = \DrSlump\Protobuf::RULE_REPEATED; - $f->reference = '\Vitess\Proto\Topodata\EndPoint'; - $descriptor->addField($f); - - foreach (self::$__extensions as $cb) { - $descriptor->addField($cb(), true); - } - - return $descriptor; - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasEntries(){ - return $this->_has(1); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\EndPoints - */ - public function clearEntries(){ - return $this->_clear(1); - } - - /** - * Get value - * - * @param int $idx - * @return \Vitess\Proto\Topodata\EndPoint - */ - public function getEntries($idx = NULL){ - return $this->_get(1, $idx); - } - - /** - * Set value - * - * @param \Vitess\Proto\Topodata\EndPoint $value - * @return \Vitess\Proto\Topodata\EndPoints - */ - public function setEntries(\Vitess\Proto\Topodata\EndPoint $value, $idx = NULL){ - return $this->_set(1, $value, $idx); - } - - /** - * Get all elements of - * - * @return \Vitess\Proto\Topodata\EndPoint[] - */ - public function getEntriesList(){ - return $this->_get(1); - } - - /** - * Add a new element to - * - * @param \Vitess\Proto\Topodata\EndPoint $value - * @return \Vitess\Proto\Topodata\EndPoints - */ - public function addEntries(\Vitess\Proto\Topodata\EndPoint $value){ - return $this->_add(1, $value); - } - } -} - diff --git a/php/src/Vitess/Proto/Topodata/Tablet/HealthMapEntry.php b/php/src/Vitess/Proto/Topodata/Tablet/HealthMapEntry.php deleted file mode 100644 index 025516bbcd..0000000000 --- a/php/src/Vitess/Proto/Topodata/Tablet/HealthMapEntry.php +++ /dev/null @@ -1,121 +0,0 @@ -number = 
1; - $f->name = "key"; - $f->type = \DrSlump\Protobuf::TYPE_STRING; - $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $descriptor->addField($f); - - // OPTIONAL STRING value = 2 - $f = new \DrSlump\Protobuf\Field(); - $f->number = 2; - $f->name = "value"; - $f->type = \DrSlump\Protobuf::TYPE_STRING; - $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $descriptor->addField($f); - - foreach (self::$__extensions as $cb) { - $descriptor->addField($cb(), true); - } - - return $descriptor; - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasKey(){ - return $this->_has(1); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\Tablet\HealthMapEntry - */ - public function clearKey(){ - return $this->_clear(1); - } - - /** - * Get value - * - * @return string - */ - public function getKey(){ - return $this->_get(1); - } - - /** - * Set value - * - * @param string $value - * @return \Vitess\Proto\Topodata\Tablet\HealthMapEntry - */ - public function setKey( $value){ - return $this->_set(1, $value); - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasValue(){ - return $this->_has(2); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\Tablet\HealthMapEntry - */ - public function clearValue(){ - return $this->_clear(2); - } - - /** - * Get value - * - * @return string - */ - public function getValue(){ - return $this->_get(2); - } - - /** - * Set value - * - * @param string $value - * @return \Vitess\Proto\Topodata\Tablet\HealthMapEntry - */ - public function setValue( $value){ - return $this->_set(2, $value); - } - } -} - diff --git a/proto/topodata.proto b/proto/topodata.proto index 10ecd3cb16..38d5b3c09f 100644 --- a/proto/topodata.proto +++ b/proto/topodata.proto @@ -254,11 +254,6 @@ message EndPoint { reserved 4; } -// EndPoints corresponds to a list of tablets. -message EndPoints { - repeated EndPoint entries = 1; -} - // SrvShard is a rollup node for the shard itself. 
message SrvShard { // Copied from Shard. diff --git a/py/vtproto/topodata_pb2.py b/py/vtproto/topodata_pb2.py index fef599f196..746ad92aeb 100644 --- a/py/vtproto/topodata_pb2.py +++ b/py/vtproto/topodata_pb2.py @@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='topodata.proto', package='topodata', syntax='proto3', - serialized_pb=_b('\n\x0etopodata.proto\x12\x08topodata\"&\n\x08KeyRange\x12\r\n\x05start\x18\x01 \x01(\x0c\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x0c\"(\n\x0bTabletAlias\x12\x0c\n\x04\x63\x65ll\x18\x01 \x01(\t\x12\x0b\n\x03uid\x18\x02 \x01(\r\"\x90\x03\n\x06Tablet\x12$\n\x05\x61lias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12\n\n\x02ip\x18\x03 \x01(\t\x12/\n\x08port_map\x18\x04 \x03(\x0b\x32\x1d.topodata.Tablet.PortMapEntry\x12\x10\n\x08keyspace\x18\x05 \x01(\t\x12\r\n\x05shard\x18\x06 \x01(\t\x12%\n\tkey_range\x18\x07 \x01(\x0b\x32\x12.topodata.KeyRange\x12\"\n\x04type\x18\x08 \x01(\x0e\x32\x14.topodata.TabletType\x12\x18\n\x10\x64\x62_name_override\x18\t \x01(\t\x12(\n\x04tags\x18\n \x03(\x0b\x32\x1a.topodata.Tablet.TagsEntry\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x0b\x10\x0c\"\xcb\x04\n\x05Shard\x12+\n\x0cmaster_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x30\n\x0cserved_types\x18\x03 \x03(\x0b\x32\x1a.topodata.Shard.ServedType\x12\x32\n\rsource_shards\x18\x04 \x03(\x0b\x32\x1b.topodata.Shard.SourceShard\x12\r\n\x05\x63\x65lls\x18\x05 \x03(\t\x12\x36\n\x0ftablet_controls\x18\x06 \x03(\x0b\x32\x1d.topodata.Shard.TabletControl\x1a\x46\n\nServedType\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x1ar\n\x0bSourceShard\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x10\n\x08keyspace\x18\x02 
\x01(\t\x12\r\n\x05shard\x18\x03 \x01(\t\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x1a\x84\x01\n\rTabletControl\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x1d\n\x15\x64isable_query_service\x18\x03 \x01(\x08\x12\x1a\n\x12\x62lacklisted_tables\x18\x04 \x03(\t\"\x8a\x02\n\x08Keyspace\x12\x1c\n\x14sharding_column_name\x18\x01 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x02 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x19\n\x11split_shard_count\x18\x03 \x01(\x05\x12\x33\n\x0cserved_froms\x18\x04 \x03(\x0b\x32\x1d.topodata.Keyspace.ServedFrom\x1aX\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x10\n\x08keyspace\x18\x03 \x01(\t\"w\n\x10ShardReplication\x12.\n\x05nodes\x18\x01 \x03(\x0b\x32\x1f.topodata.ShardReplication.Node\x1a\x33\n\x04Node\x12+\n\x0ctablet_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"\x8e\x01\n\x08\x45ndPoint\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\x31\n\x08port_map\x18\x03 \x03(\x0b\x32\x1f.topodata.EndPoint.PortMapEntry\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05\"0\n\tEndPoints\x12#\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x12.topodata.EndPoint\"T\n\x08SrvShard\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x13\n\x0bmaster_cell\x18\x03 \x01(\t\"E\n\x0eShardReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\"\xb1\x03\n\x0bSrvKeyspace\x12;\n\npartitions\x18\x01 \x03(\x0b\x32\'.topodata.SrvKeyspace.KeyspacePartition\x12\x1c\n\x14sharding_column_name\x18\x02 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x03 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x35\n\x0bserved_from\x18\x04 \x03(\x0b\x32 
.topodata.SrvKeyspace.ServedFrom\x12\x19\n\x11split_shard_count\x18\x05 \x01(\x05\x1ar\n\x11KeyspacePartition\x12)\n\x0bserved_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x32\n\x10shard_references\x18\x02 \x03(\x0b\x32\x18.topodata.ShardReference\x1aI\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x10\n\x08keyspace\x18\x02 \x01(\t*2\n\x0eKeyspaceIdType\x12\t\n\x05UNSET\x10\x00\x12\n\n\x06UINT64\x10\x01\x12\t\n\x05\x42YTES\x10\x02*\x8f\x01\n\nTabletType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07REPLICA\x10\x02\x12\n\n\x06RDONLY\x10\x03\x12\t\n\x05\x42\x41TCH\x10\x03\x12\t\n\x05SPARE\x10\x04\x12\x10\n\x0c\x45XPERIMENTAL\x10\x05\x12\n\n\x06\x42\x41\x43KUP\x10\x06\x12\x0b\n\x07RESTORE\x10\x07\x12\n\n\x06WORKER\x10\x08\x1a\x02\x10\x01\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') + serialized_pb=_b('\n\x0etopodata.proto\x12\x08topodata\"&\n\x08KeyRange\x12\r\n\x05start\x18\x01 \x01(\x0c\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x0c\"(\n\x0bTabletAlias\x12\x0c\n\x04\x63\x65ll\x18\x01 \x01(\t\x12\x0b\n\x03uid\x18\x02 \x01(\r\"\x90\x03\n\x06Tablet\x12$\n\x05\x61lias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12\n\n\x02ip\x18\x03 \x01(\t\x12/\n\x08port_map\x18\x04 \x03(\x0b\x32\x1d.topodata.Tablet.PortMapEntry\x12\x10\n\x08keyspace\x18\x05 \x01(\t\x12\r\n\x05shard\x18\x06 \x01(\t\x12%\n\tkey_range\x18\x07 \x01(\x0b\x32\x12.topodata.KeyRange\x12\"\n\x04type\x18\x08 \x01(\x0e\x32\x14.topodata.TabletType\x12\x18\n\x10\x64\x62_name_override\x18\t \x01(\t\x12(\n\x04tags\x18\n \x03(\x0b\x32\x1a.topodata.Tablet.TagsEntry\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x0b\x10\x0c\"\xcb\x04\n\x05Shard\x12+\n\x0cmaster_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12%\n\tkey_range\x18\x02 
\x01(\x0b\x32\x12.topodata.KeyRange\x12\x30\n\x0cserved_types\x18\x03 \x03(\x0b\x32\x1a.topodata.Shard.ServedType\x12\x32\n\rsource_shards\x18\x04 \x03(\x0b\x32\x1b.topodata.Shard.SourceShard\x12\r\n\x05\x63\x65lls\x18\x05 \x03(\t\x12\x36\n\x0ftablet_controls\x18\x06 \x03(\x0b\x32\x1d.topodata.Shard.TabletControl\x1a\x46\n\nServedType\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x1ar\n\x0bSourceShard\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x10\n\x08keyspace\x18\x02 \x01(\t\x12\r\n\x05shard\x18\x03 \x01(\t\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x1a\x84\x01\n\rTabletControl\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x1d\n\x15\x64isable_query_service\x18\x03 \x01(\x08\x12\x1a\n\x12\x62lacklisted_tables\x18\x04 \x03(\t\"\x8a\x02\n\x08Keyspace\x12\x1c\n\x14sharding_column_name\x18\x01 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x02 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x19\n\x11split_shard_count\x18\x03 \x01(\x05\x12\x33\n\x0cserved_froms\x18\x04 \x03(\x0b\x32\x1d.topodata.Keyspace.ServedFrom\x1aX\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x10\n\x08keyspace\x18\x03 \x01(\t\"w\n\x10ShardReplication\x12.\n\x05nodes\x18\x01 \x03(\x0b\x32\x1f.topodata.ShardReplication.Node\x1a\x33\n\x04Node\x12+\n\x0ctablet_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"\x8e\x01\n\x08\x45ndPoint\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\x31\n\x08port_map\x18\x03 \x03(\x0b\x32\x1f.topodata.EndPoint.PortMapEntry\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05\"T\n\x08SrvShard\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x13\n\x0bmaster_cell\x18\x03 
\x01(\t\"E\n\x0eShardReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\"\xb1\x03\n\x0bSrvKeyspace\x12;\n\npartitions\x18\x01 \x03(\x0b\x32\'.topodata.SrvKeyspace.KeyspacePartition\x12\x1c\n\x14sharding_column_name\x18\x02 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x03 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x35\n\x0bserved_from\x18\x04 \x03(\x0b\x32 .topodata.SrvKeyspace.ServedFrom\x12\x19\n\x11split_shard_count\x18\x05 \x01(\x05\x1ar\n\x11KeyspacePartition\x12)\n\x0bserved_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x32\n\x10shard_references\x18\x02 \x03(\x0b\x32\x18.topodata.ShardReference\x1aI\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x10\n\x08keyspace\x18\x02 \x01(\t*2\n\x0eKeyspaceIdType\x12\t\n\x05UNSET\x10\x00\x12\n\n\x06UINT64\x10\x01\x12\t\n\x05\x42YTES\x10\x02*\x8f\x01\n\nTabletType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07REPLICA\x10\x02\x12\n\n\x06RDONLY\x10\x03\x12\t\n\x05\x42\x41TCH\x10\x03\x12\t\n\x05SPARE\x10\x04\x12\x10\n\x0c\x45XPERIMENTAL\x10\x05\x12\n\n\x06\x42\x41\x43KUP\x10\x06\x12\x0b\n\x07RESTORE\x10\x07\x12\n\n\x06WORKER\x10\x08\x1a\x02\x10\x01\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -45,8 +45,8 @@ _KEYSPACEIDTYPE = _descriptor.EnumDescriptor( ], containing_type=None, options=None, - serialized_start=2281, - serialized_end=2331, + serialized_start=2231, + serialized_end=2281, ) _sym_db.RegisterEnumDescriptor(_KEYSPACEIDTYPE) @@ -100,8 +100,8 @@ _TABLETTYPE = _descriptor.EnumDescriptor( ], containing_type=None, options=_descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001')), - serialized_start=2334, - serialized_end=2477, + serialized_start=2284, + serialized_end=2427, ) _sym_db.RegisterEnumDescriptor(_TABLETTYPE) @@ -817,37 +817,6 @@ _ENDPOINT = _descriptor.Descriptor( ) -_ENDPOINTS = _descriptor.Descriptor( - 
name='EndPoints', - full_name='topodata.EndPoints', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='entries', full_name='topodata.EndPoints.entries', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1638, - serialized_end=1686, -) - - _SRVSHARD = _descriptor.Descriptor( name='SrvShard', full_name='topodata.SrvShard', @@ -888,8 +857,8 @@ _SRVSHARD = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1688, - serialized_end=1772, + serialized_start=1638, + serialized_end=1722, ) @@ -926,8 +895,8 @@ _SHARDREFERENCE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1774, - serialized_end=1843, + serialized_start=1724, + serialized_end=1793, ) @@ -964,8 +933,8 @@ _SRVKEYSPACE_KEYSPACEPARTITION = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2090, - serialized_end=2204, + serialized_start=2040, + serialized_end=2154, ) _SRVKEYSPACE_SERVEDFROM = _descriptor.Descriptor( @@ -1001,8 +970,8 @@ _SRVKEYSPACE_SERVEDFROM = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2206, - serialized_end=2279, + serialized_start=2156, + serialized_end=2229, ) _SRVKEYSPACE = _descriptor.Descriptor( @@ -1059,8 +1028,8 @@ _SRVKEYSPACE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1846, - serialized_end=2279, + serialized_start=1796, + serialized_end=2229, ) _TABLET_PORTMAPENTRY.containing_type = _TABLET @@ -1090,7 +1059,6 @@ _SHARDREPLICATION_NODE.containing_type = _SHARDREPLICATION _SHARDREPLICATION.fields_by_name['nodes'].message_type = 
_SHARDREPLICATION_NODE _ENDPOINT_PORTMAPENTRY.containing_type = _ENDPOINT _ENDPOINT.fields_by_name['port_map'].message_type = _ENDPOINT_PORTMAPENTRY -_ENDPOINTS.fields_by_name['entries'].message_type = _ENDPOINT _SRVSHARD.fields_by_name['key_range'].message_type = _KEYRANGE _SHARDREFERENCE.fields_by_name['key_range'].message_type = _KEYRANGE _SRVKEYSPACE_KEYSPACEPARTITION.fields_by_name['served_type'].enum_type = _TABLETTYPE @@ -1108,7 +1076,6 @@ DESCRIPTOR.message_types_by_name['Shard'] = _SHARD DESCRIPTOR.message_types_by_name['Keyspace'] = _KEYSPACE DESCRIPTOR.message_types_by_name['ShardReplication'] = _SHARDREPLICATION DESCRIPTOR.message_types_by_name['EndPoint'] = _ENDPOINT -DESCRIPTOR.message_types_by_name['EndPoints'] = _ENDPOINTS DESCRIPTOR.message_types_by_name['SrvShard'] = _SRVSHARD DESCRIPTOR.message_types_by_name['ShardReference'] = _SHARDREFERENCE DESCRIPTOR.message_types_by_name['SrvKeyspace'] = _SRVKEYSPACE @@ -1228,13 +1195,6 @@ EndPoint = _reflection.GeneratedProtocolMessageType('EndPoint', (_message.Messag _sym_db.RegisterMessage(EndPoint) _sym_db.RegisterMessage(EndPoint.PortMapEntry) -EndPoints = _reflection.GeneratedProtocolMessageType('EndPoints', (_message.Message,), dict( - DESCRIPTOR = _ENDPOINTS, - __module__ = 'topodata_pb2' - # @@protoc_insertion_point(class_scope:topodata.EndPoints) - )) -_sym_db.RegisterMessage(EndPoints) - SrvShard = _reflection.GeneratedProtocolMessageType('SrvShard', (_message.Message,), dict( DESCRIPTOR = _SRVSHARD, __module__ = 'topodata_pb2' From 83856a2e5423633f0f5d574787de712041978bf4 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 16 May 2016 09:03:32 -0700 Subject: [PATCH 08/27] Use Tablet instead of EndPoint. In discovery, tabletconn, binlog client, ... 
--- go/cmd/vtcombo/tablet_map.go | 18 +- go/vt/binlog/binlogplayer/binlog_player.go | 16 +- go/vt/binlog/binlogplayer/client.go | 2 +- go/vt/binlog/binlogplayertest/player.go | 4 +- go/vt/binlog/grpcbinlogplayer/player.go | 4 +- go/vt/binlog/grpcbinlogplayer/player_test.go | 4 +- go/vt/discovery/fake_health_check_test.go | 44 +- go/vt/discovery/healthcheck.go | 380 +++++++++--------- go/vt/discovery/healthcheck_test.go | 192 ++++----- go/vt/discovery/healthcheck_wait.go | 32 +- go/vt/discovery/healthcheck_wait_test.go | 18 +- go/vt/discovery/replicationlag.go | 56 +-- go/vt/discovery/replicationlag_test.go | 180 ++++----- go/vt/discovery/topology_watcher.go | 51 ++- go/vt/discovery/topology_watcher_test.go | 36 +- go/vt/discovery/utils.go | 28 +- go/vt/discovery/utils_test.go | 99 ++--- go/vt/tabletmanager/binlog_players.go | 21 +- go/vt/tabletmanager/binlog_players_test.go | 20 +- go/vt/tabletserver/grpctabletconn/conn.go | 20 +- .../tabletserver/grpctabletconn/conn_test.go | 8 +- go/vt/tabletserver/tabletconn/tablet_conn.go | 6 +- .../tabletconntest/tabletconntest.go | 4 +- go/vt/topo/tablet.go | 35 ++ go/vt/vtctl/query.go | 32 +- go/vt/vtctl/vtctl.go | 6 +- go/vt/vtctld/tablet_data.go | 6 +- go/vt/vtgate/discoverygateway.go | 100 ++--- go/vt/vtgate/discoverygateway_test.go | 54 +-- go/vt/vtgate/fakehealthcheck_test.go | 78 ++-- go/vt/vtgate/gateway.go | 56 +-- go/vt/vtgate/gateway_test.go | 10 +- go/vt/vtgate/resolver.go | 4 +- go/vt/vtgate/resolver_test.go | 68 ++-- go/vt/vtgate/router_framework_test.go | 6 +- go/vt/vtgate/router_select_test.go | 6 +- go/vt/vtgate/sandbox_test.go | 10 +- go/vt/vtgate/scatter_conn.go | 4 +- go/vt/vtgate/scatter_conn_test.go | 52 +-- go/vt/vtgate/vtgate.go | 2 +- go/vt/vtgate/vtgate_test.go | 48 +-- go/vt/worker/clone_utils.go | 13 +- go/vt/worker/diff_utils.go | 7 +- go/vt/worker/executor.go | 8 +- go/vt/worker/split_clone.go | 6 +- go/vt/worker/topo_utils.go | 37 +- go/vt/worker/vertical_split_clone.go | 6 +- 
go/vt/wrangler/keyspace.go | 20 +- 48 files changed, 951 insertions(+), 966 deletions(-) diff --git a/go/cmd/vtcombo/tablet_map.go b/go/cmd/vtcombo/tablet_map.go index b569a5053e..8d32a5b7d4 100644 --- a/go/cmd/vtcombo/tablet_map.go +++ b/go/cmd/vtcombo/tablet_map.go @@ -204,23 +204,23 @@ func initTabletMap(ts topo.Server, topology string, mysqld mysqlctl.MysqlDaemon, // // dialer is our tabletconn.Dialer -func dialer(ctx context.Context, endPoint *topodatapb.EndPoint, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { - tablet, ok := tabletMap[endPoint.Uid] +func dialer(ctx context.Context, tablet *topodatapb.Tablet, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { return nil, tabletconn.OperationalError("connection refused") } return &internalTabletConn{ - tablet: tablet, - endPoint: endPoint, + tablet: t, + topoTablet: tablet, }, nil } // internalTabletConn implements tabletconn.TabletConn by forwarding everything // to the tablet type internalTabletConn struct { - tablet *tablet - endPoint *topodatapb.EndPoint + tablet *tablet + topoTablet *topodatapb.Tablet } // Execute is part of tabletconn.TabletConn @@ -384,9 +384,9 @@ func (itc *internalTabletConn) SetTarget(keyspace, shard string, tabletType topo return nil } -// EndPoint is part of tabletconn.TabletConn -func (itc *internalTabletConn) EndPoint() *topodatapb.EndPoint { - return itc.endPoint +// Tablet is part of tabletconn.TabletConn +func (itc *internalTabletConn) Tablet() *topodatapb.Tablet { + return itc.topoTablet } // SplitQuery is part of tabletconn.TabletConn diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index 1156048f7e..d16a6a941a 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -87,7 +87,7 @@ func NewStats() 
*Stats { // BinlogPlayer is handling reading a stream of updates from BinlogServer type BinlogPlayer struct { - endPoint *topodatapb.EndPoint + tablet *topodatapb.Tablet dbClient VtClient // for key range base requests @@ -109,9 +109,9 @@ type BinlogPlayer struct { // replicating the provided keyrange, starting at the startPosition, // and updating _vt.blp_checkpoint with uid=startPosition.Uid. // If !stopPosition.IsZero(), it will stop when reaching that position. -func NewBinlogPlayerKeyRange(dbClient VtClient, endPoint *topodatapb.EndPoint, keyRange *topodatapb.KeyRange, uid uint32, startPosition string, stopPosition string, blplStats *Stats) (*BinlogPlayer, error) { +func NewBinlogPlayerKeyRange(dbClient VtClient, tablet *topodatapb.Tablet, keyRange *topodatapb.KeyRange, uid uint32, startPosition string, stopPosition string, blplStats *Stats) (*BinlogPlayer, error) { result := &BinlogPlayer{ - endPoint: endPoint, + tablet: tablet, dbClient: dbClient, keyRange: keyRange, uid: uid, @@ -135,9 +135,9 @@ func NewBinlogPlayerKeyRange(dbClient VtClient, endPoint *topodatapb.EndPoint, k // replicating the provided tables, starting at the startPosition, // and updating _vt.blp_checkpoint with uid=startPosition.Uid. // If !stopPosition.IsZero(), it will stop when reaching that position. 
-func NewBinlogPlayerTables(dbClient VtClient, endPoint *topodatapb.EndPoint, tables []string, uid uint32, startPosition string, stopPosition string, blplStats *Stats) (*BinlogPlayer, error) { +func NewBinlogPlayerTables(dbClient VtClient, tablet *topodatapb.Tablet, tables []string, uid uint32, startPosition string, stopPosition string, blplStats *Stats) (*BinlogPlayer, error) { result := &BinlogPlayer{ - endPoint: endPoint, + tablet: tablet, dbClient: dbClient, tables: tables, uid: uid, @@ -279,7 +279,7 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { blp.uid, blp.tables, blp.position, - blp.endPoint, + blp.tablet, ) } else { log.Infof("BinlogPlayer client %v for keyrange '%v-%v' starting @ '%v', server: %v", @@ -287,7 +287,7 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { hex.EncodeToString(blp.keyRange.Start), hex.EncodeToString(blp.keyRange.End), blp.position, - blp.endPoint, + blp.tablet, ) } if !blp.stopPosition.IsZero() { @@ -308,7 +308,7 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { return fmt.Errorf("no binlog player client factory named %v", *binlogPlayerProtocol) } blplClient := clientFactory() - err := blplClient.Dial(blp.endPoint, *BinlogPlayerConnTimeout) + err := blplClient.Dial(blp.tablet, *BinlogPlayerConnTimeout) if err != nil { log.Errorf("Error dialing binlog server: %v", err) return fmt.Errorf("error dialing binlog server: %v", err) diff --git a/go/vt/binlog/binlogplayer/client.go b/go/vt/binlog/binlogplayer/client.go index 542b965b01..5dbb76f0f7 100644 --- a/go/vt/binlog/binlogplayer/client.go +++ b/go/vt/binlog/binlogplayer/client.go @@ -41,7 +41,7 @@ type BinlogTransactionStream interface { // Client is the interface all clients must satisfy type Client interface { // Dial a server - Dial(endPoint *topodatapb.EndPoint, connTimeout time.Duration) error + Dial(tablet *topodatapb.Tablet, connTimeout time.Duration) error // Close the connection Close() diff --git 
a/go/vt/binlog/binlogplayertest/player.go b/go/vt/binlog/binlogplayertest/player.go index 0fe8a994c4..c5c0b78abe 100644 --- a/go/vt/binlog/binlogplayertest/player.go +++ b/go/vt/binlog/binlogplayertest/player.go @@ -276,8 +276,8 @@ func (fake *FakeBinlogStreamer) HandlePanic(err *error) { } // Run runs the test suite -func Run(t *testing.T, bpc binlogplayer.Client, endPoint *topodatapb.EndPoint, fake *FakeBinlogStreamer) { - if err := bpc.Dial(endPoint, 30*time.Second); err != nil { +func Run(t *testing.T, bpc binlogplayer.Client, tablet *topodatapb.Tablet, fake *FakeBinlogStreamer) { + if err := bpc.Dial(tablet, 30*time.Second); err != nil { t.Fatalf("Dial failed: %v", err) } diff --git a/go/vt/binlog/grpcbinlogplayer/player.go b/go/vt/binlog/grpcbinlogplayer/player.go index cef48d4972..24f2c5d2e0 100644 --- a/go/vt/binlog/grpcbinlogplayer/player.go +++ b/go/vt/binlog/grpcbinlogplayer/player.go @@ -25,8 +25,8 @@ type client struct { c binlogservicepb.UpdateStreamClient } -func (client *client) Dial(endPoint *topodatapb.EndPoint, connTimeout time.Duration) error { - addr := netutil.JoinHostPort(endPoint.Host, endPoint.PortMap["grpc"]) +func (client *client) Dial(tablet *topodatapb.Tablet, connTimeout time.Duration) error { + addr := netutil.JoinHostPort(tablet.Hostname, tablet.PortMap["grpc"]) var err error client.cc, err = grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(connTimeout)) if err != nil { diff --git a/go/vt/binlog/grpcbinlogplayer/player_test.go b/go/vt/binlog/grpcbinlogplayer/player_test.go index 5cd11e8f19..a94b563dea 100644 --- a/go/vt/binlog/grpcbinlogplayer/player_test.go +++ b/go/vt/binlog/grpcbinlogplayer/player_test.go @@ -38,8 +38,8 @@ func TestGRPCBinlogStreamer(t *testing.T) { c := &client{} // and send it to the test suite - binlogplayertest.Run(t, c, &topodatapb.EndPoint{ - Host: host, + binlogplayertest.Run(t, c, &topodatapb.Tablet{ + Hostname: host, PortMap: map[string]int32{ "grpc": int32(port), }, diff --git 
a/go/vt/discovery/fake_health_check_test.go b/go/vt/discovery/fake_health_check_test.go index dcd609ed6a..770137a83a 100644 --- a/go/vt/discovery/fake_health_check_test.go +++ b/go/vt/discovery/fake_health_check_test.go @@ -8,51 +8,51 @@ import ( ) func newFakeHealthCheck() *fakeHealthCheck { - return &fakeHealthCheck{endPoints: make(map[string]*topodatapb.EndPoint)} + return &fakeHealthCheck{tablets: make(map[string]*topodatapb.Tablet)} } type fakeHealthCheck struct { - mu sync.RWMutex - endPoints map[string]*topodatapb.EndPoint + mu sync.RWMutex + tablets map[string]*topodatapb.Tablet } // SetListener sets the listener for healthcheck updates. func (*fakeHealthCheck) SetListener(listener HealthCheckStatsListener) { } -// AddEndPoint adds the endpoint, and starts health check. -func (fhc *fakeHealthCheck) AddEndPoint(cell, name string, endPoint *topodatapb.EndPoint) { +// AddTablet adds the tablet, and starts health check. +func (fhc *fakeHealthCheck) AddTablet(cell, name string, tablet *topodatapb.Tablet) { fhc.mu.Lock() defer fhc.mu.Unlock() - key := EndPointToMapKey(endPoint) - fhc.endPoints[key] = endPoint + key := TabletToMapKey(tablet) + fhc.tablets[key] = tablet } -// RemoveEndPoint removes the endpoint, and stops the health check. -func (fhc *fakeHealthCheck) RemoveEndPoint(endPoint *topodatapb.EndPoint) { +// RemoveTablet removes the tablet, and stops the health check. +func (fhc *fakeHealthCheck) RemoveTablet(tablet *topodatapb.Tablet) { fhc.mu.Lock() defer fhc.mu.Unlock() - key := EndPointToMapKey(endPoint) - delete(fhc.endPoints, key) + key := TabletToMapKey(tablet) + delete(fhc.tablets, key) } -// GetEndPointStatsFromKeyspaceShard returns all EndPointStats for the given keyspace/shard. -func (*fakeHealthCheck) GetEndPointStatsFromKeyspaceShard(keyspace, shard string) []*EndPointStats { +// GetTabletStatsFromKeyspaceShard returns all TabletStats for the given keyspace/shard. 
+func (*fakeHealthCheck) GetTabletStatsFromKeyspaceShard(keyspace, shard string) []*TabletStats { return nil } -// GetEndPointStatsFromTarget returns all EndPointStats for the given target. -func (*fakeHealthCheck) GetEndPointStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*EndPointStats { +// GetTabletStatsFromTarget returns all TabletStats for the given target. +func (*fakeHealthCheck) GetTabletStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*TabletStats { return nil } -// GetConnection returns the TabletConn of the given endpoint. -func (*fakeHealthCheck) GetConnection(endPoint *topodatapb.EndPoint) tabletconn.TabletConn { +// GetConnection returns the TabletConn of the given tablet. +func (*fakeHealthCheck) GetConnection(tablet *topodatapb.Tablet) tabletconn.TabletConn { return nil } // CacheStatus returns a displayable version of the cache. -func (*fakeHealthCheck) CacheStatus() EndPointsCacheStatusList { +func (*fakeHealthCheck) CacheStatus() TabletsCacheStatusList { return nil } @@ -61,12 +61,12 @@ func (*fakeHealthCheck) Close() error { return nil } -func (fhc *fakeHealthCheck) GetAllEndPoints() map[string]*topodatapb.EndPoint { - res := make(map[string]*topodatapb.EndPoint) +func (fhc *fakeHealthCheck) GetAllTablets() map[string]*topodatapb.Tablet { + res := make(map[string]*topodatapb.Tablet) fhc.mu.RLock() defer fhc.mu.RUnlock() - for key, ep := range fhc.endPoints { - res[key] = ep + for key, t := range fhc.tablets { + res[key] = t } return res } diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index f72f61c595..a86f1ef946 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -1,21 +1,20 @@ // Package discovery provides a way to discover all tablets e.g. within a // specific shard and monitor their current health. // -// Use the HealthCheck object to query for tablets (in this package also -// referred to as endpoints) and their health. 
+// Use the HealthCheck object to query for tablets and their health. // // For an example how to use the HealthCheck object, see worker/topo_utils.go. // -// Tablets have to be manually added to the HealthCheck using AddEndPoint(). +// Tablets have to be manually added to the HealthCheck using AddTablet(). // Alternatively, use a Watcher implementation which will constantly watch // a source (e.g. the topology) and add and remove tablets as they are // added or removed from the source. // For a Watcher example have a look at NewShardReplicationWatcher(). // -// Note that the getter functions GetEndPointStatsFrom* will always return +// Note that the getter functions GetTabletStatsFrom* will always return // an unfiltered list of all known tablets. // Use the helper functions in utils.go to filter them e.g. -// RemoveUnhealthyEndpoints() or GetCurrentMaster(). +// RemoveUnhealthyTablets() or GetCurrentMaster(). // replicationlag.go contains a more advanced health filter which is used by // vtgate. // @@ -61,16 +60,16 @@ func init() { // HealthCheckStatsListener is the listener to receive health check stats update. type HealthCheckStatsListener interface { - StatsUpdate(*EndPointStats) + StatsUpdate(*TabletStats) } -// EndPointStats is returned when getting the set of endpoints. -type EndPointStats struct { - EndPoint *topodatapb.EndPoint +// TabletStats is returned when getting the set of tablets. +type TabletStats struct { + Tablet *topodatapb.Tablet Name string // name is an optional tag (e.g. alternative address) Cell string Target *querypb.Target - Up bool // whether the endpoint is added + Up bool // whether the tablet is added Serving bool // whether the server is serving TabletExternallyReparentedTimestamp int64 Stats *querypb.RealtimeStats @@ -79,15 +78,12 @@ type EndPointStats struct { // Alias returns the alias of the tablet. // The return value can be used e.g. to generate the input for the topo API. 
-func (e *EndPointStats) Alias() *topodatapb.TabletAlias { - return &topodatapb.TabletAlias{ - Cell: e.Cell, - Uid: e.EndPoint.Uid, - } +func (e *TabletStats) Alias() *topodatapb.TabletAlias { + return e.Tablet.Alias } -// String is defined because we want to print a []*EndPointStats array nicely. -func (e *EndPointStats) String() string { +// String is defined because we want to print a []*TabletStats array nicely. +func (e *TabletStats) String() string { return fmt.Sprint(*e) } @@ -95,19 +91,19 @@ func (e *EndPointStats) String() string { type HealthCheck interface { // SetListener sets the listener for healthcheck updates. It should not block. SetListener(listener HealthCheckStatsListener) - // AddEndPoint adds the endpoint, and starts health check. - AddEndPoint(cell, name string, endPoint *topodatapb.EndPoint) - // RemoveEndPoint removes the endpoint, and stops the health check. - RemoveEndPoint(endPoint *topodatapb.EndPoint) - // GetEndPointStatsFromKeyspaceShard returns all EndPointStats for the given keyspace/shard. - GetEndPointStatsFromKeyspaceShard(keyspace, shard string) []*EndPointStats - // GetEndPointStatsFromTarget returns all EndPointStats for the given target. + // AddTablet adds the tablet, and starts health check. + AddTablet(cell, name string, tablet *topodatapb.Tablet) + // RemoveTablet removes the tablet, and stops the health check. + RemoveTablet(tablet *topodatapb.Tablet) + // GetTabletStatsFromKeyspaceShard returns all TabletStats for the given keyspace/shard. + GetTabletStatsFromKeyspaceShard(keyspace, shard string) []*TabletStats + // GetTabletStatsFromTarget returns all TabletStats for the given target. // You can exclude unhealthy entries using the helper in utils.go. - GetEndPointStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*EndPointStats - // GetConnection returns the TabletConn of the given endpoint. 
- GetConnection(endPoint *topodatapb.EndPoint) tabletconn.TabletConn + GetTabletStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*TabletStats + // GetConnection returns the TabletConn of the given tablet. + GetConnection(tablet *topodatapb.Tablet) tabletconn.TabletConn // CacheStatus returns a displayable version of the cache. - CacheStatus() EndPointsCacheStatusList + CacheStatus() TabletsCacheStatusList // Close stops the healthcheck. Close() error } @@ -116,7 +112,7 @@ type HealthCheck interface { func NewHealthCheck(connTimeout time.Duration, retryDelay time.Duration, healthCheckTimeout time.Duration, statsSuffix string) HealthCheck { hc := &HealthCheckImpl{ addrToConns: make(map[string]*healthCheckConn), - targetToEPs: make(map[string]map[string]map[topodatapb.TabletType][]*topodatapb.EndPoint), + targetToTablets: make(map[string]map[string]map[topodatapb.TabletType][]*topodatapb.Tablet), connTimeout: connTimeout, retryDelay: retryDelay, healthCheckTimeout: healthCheckTimeout, @@ -161,19 +157,19 @@ type HealthCheckImpl struct { // mu protects all the following fields // when locking both mutex from HealthCheck and healthCheckConn, HealthCheck.mu goes first. - mu sync.RWMutex - addrToConns map[string]*healthCheckConn // addrToConns maps from address to the healthCheckConn object. - targetToEPs map[string]map[string]map[topodatapb.TabletType][]*topodatapb.EndPoint // targetToEPs maps from keyspace/shard/tablettype to a list of endpoints. + mu sync.RWMutex + addrToConns map[string]*healthCheckConn // addrToConns maps from address to the healthCheckConn object. + targetToTablets map[string]map[string]map[topodatapb.TabletType][]*topodatapb.Tablet // targetToTablets maps from keyspace/shard/tablettype to a list of tablets. } -// healthCheckConn contains details about an endpoint. +// healthCheckConn contains details about a tablet. 
type healthCheckConn struct { // set at construction time cell string name string ctx context.Context cancelFunc context.CancelFunc - endPoint *topodatapb.EndPoint + tablet *topodatapb.Tablet // mu protects all the following fields // when locking both mutex from HealthCheck and healthCheckConn, HealthCheck.mu goes first. @@ -188,7 +184,7 @@ type healthCheckConn struct { lastResponseTimestamp time.Time // timestamp of the last healthcheck response } -// servingConnStats returns the number of serving endpoints per keyspace/shard/tablet type. +// servingConnStats returns the number of serving tablets per keyspace/shard/tablet type. func (hc *HealthCheckImpl) servingConnStats() map[string]int64 { res := make(map[string]int64) hc.mu.RLock() @@ -206,8 +202,8 @@ func (hc *HealthCheckImpl) servingConnStats() map[string]int64 { return res } -// checkConn performs health checking on the given endpoint. -func (hc *HealthCheckImpl) checkConn(hcc *healthCheckConn, cell, name string, endPoint *topodatapb.EndPoint) { +// checkConn performs health checking on the given tablet. 
+func (hc *HealthCheckImpl) checkConn(hcc *healthCheckConn, cell, name string, tablet *topodatapb.Tablet) { defer func() { hcc.mu.Lock() if hcc.conn != nil { @@ -218,7 +214,7 @@ func (hc *HealthCheckImpl) checkConn(hcc *healthCheckConn, cell, name string, en }() // retry health check if it fails for { - stream, err := hcc.connect(hc, endPoint) + stream, err := hcc.connect(hc, tablet) if err != nil { select { case <-hcc.ctx.Done(): @@ -235,19 +231,19 @@ func (hc *HealthCheckImpl) checkConn(hcc *healthCheckConn, cell, name string, en continue } for { - reconnect, err := hcc.processResponse(hc, endPoint, stream) + reconnect, err := hcc.processResponse(hc, tablet, stream) if err != nil { hcc.mu.Lock() hcc.serving = false hcc.lastError = err - eps := &EndPointStats{ - EndPoint: endPoint, - Cell: hcc.cell, - Name: hcc.name, - Target: hcc.target, - Up: hcc.up, - Serving: hcc.serving, - Stats: hcc.stats, + ts := &TabletStats{ + Tablet: tablet, + Cell: hcc.cell, + Name: hcc.name, + Target: hcc.target, + Up: hcc.up, + Serving: hcc.serving, + Stats: hcc.stats, TabletExternallyReparentedTimestamp: hcc.tabletExternallyReparentedTimestamp, LastError: hcc.lastError, } @@ -255,7 +251,7 @@ func (hc *HealthCheckImpl) checkConn(hcc *healthCheckConn, cell, name string, en hcc.mu.Unlock() // notify downstream for serving status change if hc.listener != nil { - hc.listener.StatsUpdate(eps) + hc.listener.StatsUpdate(ts) } select { case <-hcc.ctx.Done(): @@ -277,11 +273,11 @@ func (hc *HealthCheckImpl) checkConn(hcc *healthCheckConn, cell, name string, en } } -// connect creates connection to the endpoint and starts streaming. -func (hcc *healthCheckConn) connect(hc *HealthCheckImpl, endPoint *topodatapb.EndPoint) (tabletconn.StreamHealthReader, error) { +// connect creates connection to the tablet and starts streaming. 
+func (hcc *healthCheckConn) connect(hc *HealthCheckImpl, tablet *topodatapb.Tablet) (tabletconn.StreamHealthReader, error) { // Keyspace, shard and tabletType are not known yet, but they're unused // by StreamHealth. We'll use SetTarget later to set them. - conn, err := tabletconn.GetDialer()(hcc.ctx, endPoint, "" /*keyspace*/, "" /*shard*/, topodatapb.TabletType_UNKNOWN, hc.connTimeout) + conn, err := tabletconn.GetDialer()(hcc.ctx, tablet, "" /*keyspace*/, "" /*shard*/, topodatapb.TabletType_UNKNOWN, hc.connTimeout) if err != nil { return nil, err } @@ -299,7 +295,7 @@ func (hcc *healthCheckConn) connect(hc *HealthCheckImpl, endPoint *topodatapb.En // processResponse reads one health check response, and notifies HealthCheckStatsListener. // It returns bool to indicate if the caller should reconnect. We do not need to reconnect when the streaming is working. -func (hcc *healthCheckConn) processResponse(hc *HealthCheckImpl, endPoint *topodatapb.EndPoint, stream tabletconn.StreamHealthReader) (bool, error) { +func (hcc *healthCheckConn) processResponse(hc *HealthCheckImpl, tablet *topodatapb.Tablet, stream tabletconn.StreamHealthReader) (bool, error) { select { case <-hcc.ctx.Done(): return false, hcc.ctx.Err() @@ -324,18 +320,18 @@ func (hcc *healthCheckConn) processResponse(hc *HealthCheckImpl, endPoint *topod } if hcc.target.TabletType == topodatapb.TabletType_UNKNOWN { - // The first time we see response for the endpoint. + // The first time we see response for the tablet. 
hcc.update(shr, serving, healthErr, true) hc.mu.Lock() - hc.addEndPointToTargetProtected(hcc.target, endPoint) + hc.addTabletToTargetProtected(hcc.target, tablet) hc.mu.Unlock() } else if hcc.target.TabletType != shr.Target.TabletType { // tablet type changed for the tablet - log.Infof("HealthCheckUpdate(Type Change): %v, EP: %v/%+v, target %+v => %+v, reparent time: %v", hcc.name, hcc.cell, endPoint, hcc.target, shr.Target, shr.TabletExternallyReparentedTimestamp) + log.Infof("HealthCheckUpdate(Type Change): %v, EP: %v/%+v, target %+v => %+v, reparent time: %v", hcc.name, hcc.cell, tablet, hcc.target, shr.Target, shr.TabletExternallyReparentedTimestamp) hc.mu.Lock() - hc.deleteEndPointFromTargetProtected(hcc.target, endPoint) + hc.deleteTabletFromTargetProtected(hcc.target, tablet) hcc.update(shr, serving, healthErr, true) - hc.addEndPointToTargetProtected(shr.Target, endPoint) + hc.addTabletToTargetProtected(shr.Target, tablet) hc.mu.Unlock() } else { hcc.update(shr, serving, healthErr, false) @@ -343,19 +339,19 @@ func (hcc *healthCheckConn) processResponse(hc *HealthCheckImpl, endPoint *topod // notify downstream for tablettype and realtimestats change if hc.listener != nil { hcc.mu.RLock() - eps := &EndPointStats{ - EndPoint: endPoint, - Cell: hcc.cell, - Name: hcc.name, - Target: hcc.target, - Up: hcc.up, - Serving: hcc.serving, - Stats: hcc.stats, + ts := &TabletStats{ + Tablet: tablet, + Cell: hcc.cell, + Name: hcc.name, + Target: hcc.target, + Up: hcc.up, + Serving: hcc.serving, + Stats: hcc.stats, TabletExternallyReparentedTimestamp: hcc.tabletExternallyReparentedTimestamp, LastError: hcc.lastError, } hcc.mu.RUnlock() - hc.listener.StatsUpdate(eps) + hc.listener.StatsUpdate(ts) } return false, nil } @@ -384,7 +380,7 @@ func (hc *HealthCheckImpl) checkHealthCheckTimeout() { for _, hcc := range list { hcc.mu.RLock() if !hcc.serving { - // ignore non-serving endpoint + // ignore non-serving tablet hcc.mu.RUnlock() continue } @@ -394,11 +390,11 @@ func (hc 
*HealthCheckImpl) checkHealthCheckTimeout() { continue } hcc.mu.RUnlock() - // mark the endpoint non-serving as we have not seen a health check response for a long time + // mark the tablet non-serving as we have not seen a health check response for a long time hcc.mu.Lock() // check again to avoid race condition if !hcc.serving { - // ignore non-serving endpoint + // ignore non-serving tablet hcc.mu.Unlock() continue } @@ -409,14 +405,14 @@ func (hc *HealthCheckImpl) checkHealthCheckTimeout() { } hcc.serving = false hcc.lastError = fmt.Errorf("healthcheck timed out (latest %v)", hcc.lastResponseTimestamp) - eps := &EndPointStats{ - EndPoint: hcc.endPoint, - Cell: hcc.cell, - Name: hcc.name, - Target: hcc.target, - Up: hcc.up, - Serving: hcc.serving, - Stats: hcc.stats, + ts := &TabletStats{ + Tablet: hcc.tablet, + Cell: hcc.cell, + Name: hcc.name, + Target: hcc.target, + Up: hcc.up, + Serving: hcc.serving, + Stats: hcc.stats, TabletExternallyReparentedTimestamp: hcc.tabletExternallyReparentedTimestamp, LastError: hcc.lastError, } @@ -424,20 +420,20 @@ func (hc *HealthCheckImpl) checkHealthCheckTimeout() { hcc.mu.Unlock() // notify downstream for serving status change if hc.listener != nil { - hc.listener.StatsUpdate(eps) + hc.listener.StatsUpdate(ts) } hcErrorCounters.Add([]string{target.Keyspace, target.Shard, strings.ToLower(target.TabletType.String())}, 1) } } -func (hc *HealthCheckImpl) deleteConn(endPoint *topodatapb.EndPoint) { +func (hc *HealthCheckImpl) deleteConn(tablet *topodatapb.Tablet) { hc.mu.Lock() defer hc.mu.Unlock() - key := EndPointToMapKey(endPoint) + key := TabletToMapKey(tablet) hcc, ok := hc.addrToConns[key] if !ok { - log.Warningf("deleting unknown endpoint: %+v", endPoint) + log.Warningf("deleting unknown tablet: %+v", tablet) return } hcc.mu.Lock() @@ -445,7 +441,7 @@ func (hc *HealthCheckImpl) deleteConn(endPoint *topodatapb.EndPoint) { hcc.mu.Unlock() hcc.cancelFunc() delete(hc.addrToConns, key) - 
hc.deleteEndPointFromTargetProtected(hcc.target, endPoint) + hc.deleteTabletFromTargetProtected(hcc.target, tablet) } // SetListener sets the listener for healthcheck updates. It should not block. @@ -453,45 +449,45 @@ func (hc *HealthCheckImpl) SetListener(listener HealthCheckStatsListener) { hc.listener = listener } -// AddEndPoint adds the endpoint, and starts health check. +// AddTablet adds the tablet, and starts health check. // It does not block on making connection. -// name is an optional tag for the endpoint, e.g. an alternative address. -func (hc *HealthCheckImpl) AddEndPoint(cell, name string, endPoint *topodatapb.EndPoint) { +// name is an optional tag for the tablet, e.g. an alternative address. +func (hc *HealthCheckImpl) AddTablet(cell, name string, tablet *topodatapb.Tablet) { ctx, cancelFunc := context.WithCancel(context.Background()) hcc := &healthCheckConn{ cell: cell, name: name, ctx: ctx, cancelFunc: cancelFunc, - endPoint: endPoint, + tablet: tablet, target: &querypb.Target{}, up: true, } - key := EndPointToMapKey(endPoint) + key := TabletToMapKey(tablet) hc.mu.Lock() if _, ok := hc.addrToConns[key]; ok { hc.mu.Unlock() - log.Warningf("adding duplicate endpoint %v for %v: %+v", name, cell, endPoint) + log.Warningf("adding duplicate tablet %v for %v: %+v", name, cell, tablet) return } hc.addrToConns[key] = hcc hc.mu.Unlock() - go hc.checkConn(hcc, cell, name, endPoint) + go hc.checkConn(hcc, cell, name, tablet) } -// RemoveEndPoint removes the endpoint, and stops the health check. +// RemoveTablet removes the tablet, and stops the health check. // It does not block. -func (hc *HealthCheckImpl) RemoveEndPoint(endPoint *topodatapb.EndPoint) { - go hc.deleteConn(endPoint) +func (hc *HealthCheckImpl) RemoveTablet(tablet *topodatapb.Tablet) { + go hc.deleteConn(tablet) } -// GetEndPointStatsFromKeyspaceShard returns all EndPointStats for the given keyspace/shard. 
-func (hc *HealthCheckImpl) GetEndPointStatsFromKeyspaceShard(keyspace, shard string) []*EndPointStats { +// GetTabletStatsFromKeyspaceShard returns all TabletStats for the given keyspace/shard. +func (hc *HealthCheckImpl) GetTabletStatsFromKeyspaceShard(keyspace, shard string) []*TabletStats { hc.mu.RLock() defer hc.mu.RUnlock() - shardMap, ok := hc.targetToEPs[keyspace] + shardMap, ok := hc.targetToTablets[keyspace] if !ok { return nil } @@ -499,39 +495,39 @@ func (hc *HealthCheckImpl) GetEndPointStatsFromKeyspaceShard(keyspace, shard str if !ok { return nil } - res := make([]*EndPointStats, 0, 1) + res := make([]*TabletStats, 0, 1) for _, epList := range ttMap { for _, ep := range epList { - key := EndPointToMapKey(ep) + key := TabletToMapKey(ep) hcc, ok := hc.addrToConns[key] if !ok { continue } hcc.mu.RLock() - eps := &EndPointStats{ - EndPoint: ep, - Cell: hcc.cell, - Name: hcc.name, - Target: hcc.target, - Up: hcc.up, - Serving: hcc.serving, - Stats: hcc.stats, + ts := &TabletStats{ + Tablet: ep, + Cell: hcc.cell, + Name: hcc.name, + Target: hcc.target, + Up: hcc.up, + Serving: hcc.serving, + Stats: hcc.stats, TabletExternallyReparentedTimestamp: hcc.tabletExternallyReparentedTimestamp, LastError: hcc.lastError, } hcc.mu.RUnlock() - res = append(res, eps) + res = append(res, ts) } } return res } -// GetEndPointStatsFromTarget returns all EndPointStats for the given target. -func (hc *HealthCheckImpl) GetEndPointStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*EndPointStats { +// GetTabletStatsFromTarget returns all TabletStats for the given target. 
+func (hc *HealthCheckImpl) GetTabletStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*TabletStats { hc.mu.RLock() defer hc.mu.RUnlock() - shardMap, ok := hc.targetToEPs[keyspace] + shardMap, ok := hc.targetToTablets[keyspace] if !ok { return nil } @@ -543,36 +539,36 @@ func (hc *HealthCheckImpl) GetEndPointStatsFromTarget(keyspace, shard string, ta if !ok { return nil } - res := make([]*EndPointStats, 0, 1) + res := make([]*TabletStats, 0, 1) for _, ep := range epList { - key := EndPointToMapKey(ep) + key := TabletToMapKey(ep) hcc, ok := hc.addrToConns[key] if !ok { continue } hcc.mu.RLock() - eps := &EndPointStats{ - EndPoint: ep, - Cell: hcc.cell, - Name: hcc.name, - Target: hcc.target, - Up: hcc.up, - Serving: hcc.serving, - Stats: hcc.stats, + ts := &TabletStats{ + Tablet: ep, + Cell: hcc.cell, + Name: hcc.name, + Target: hcc.target, + Up: hcc.up, + Serving: hcc.serving, + Stats: hcc.stats, TabletExternallyReparentedTimestamp: hcc.tabletExternallyReparentedTimestamp, LastError: hcc.lastError, } hcc.mu.RUnlock() - res = append(res, eps) + res = append(res, ts) } return res } -// GetConnection returns the TabletConn of the given endpoint. -func (hc *HealthCheckImpl) GetConnection(endPoint *topodatapb.EndPoint) tabletconn.TabletConn { +// GetConnection returns the TabletConn of the given tablet. +func (hc *HealthCheckImpl) GetConnection(tablet *topodatapb.Tablet) tabletconn.TabletConn { hc.mu.RLock() defer hc.mu.RUnlock() - hcc := hc.addrToConns[EndPointToMapKey(endPoint)] + hcc := hc.addrToConns[TabletToMapKey(tablet)] if hcc == nil { return nil } @@ -581,36 +577,36 @@ func (hc *HealthCheckImpl) GetConnection(endPoint *topodatapb.EndPoint) tabletco return hcc.conn } -// addEndPointToTargetProtected adds the endpoint to the given target. +// addTabletToTargetProtected adds the tablet to the given target. 
// LOCK_REQUIRED hc.mu -func (hc *HealthCheckImpl) addEndPointToTargetProtected(target *querypb.Target, endPoint *topodatapb.EndPoint) { - shardMap, ok := hc.targetToEPs[target.Keyspace] +func (hc *HealthCheckImpl) addTabletToTargetProtected(target *querypb.Target, tablet *topodatapb.Tablet) { + shardMap, ok := hc.targetToTablets[target.Keyspace] if !ok { - shardMap = make(map[string]map[topodatapb.TabletType][]*topodatapb.EndPoint) - hc.targetToEPs[target.Keyspace] = shardMap + shardMap = make(map[string]map[topodatapb.TabletType][]*topodatapb.Tablet) + hc.targetToTablets[target.Keyspace] = shardMap } ttMap, ok := shardMap[target.Shard] if !ok { - ttMap = make(map[topodatapb.TabletType][]*topodatapb.EndPoint) + ttMap = make(map[topodatapb.TabletType][]*topodatapb.Tablet) shardMap[target.Shard] = ttMap } epList, ok := ttMap[target.TabletType] if !ok { - epList = make([]*topodatapb.EndPoint, 0, 1) + epList = make([]*topodatapb.Tablet, 0, 1) } for _, ep := range epList { - if topo.EndPointEquality(ep, endPoint) { - log.Warningf("endpoint is already added: %+v", endPoint) + if topo.TabletEquality(ep, tablet) { + log.Warningf("tablet is already added: %+v", tablet) return } } - ttMap[target.TabletType] = append(epList, endPoint) + ttMap[target.TabletType] = append(epList, tablet) } -// deleteEndPointFromTargetProtected deletes the endpoint for the given target. +// deleteTabletFromTargetProtected deletes the tablet for the given target. 
// LOCK_REQUIRED hc.mu -func (hc *HealthCheckImpl) deleteEndPointFromTargetProtected(target *querypb.Target, endPoint *topodatapb.EndPoint) { - shardMap, ok := hc.targetToEPs[target.Keyspace] +func (hc *HealthCheckImpl) deleteTabletFromTargetProtected(target *querypb.Target, tablet *topodatapb.Tablet) { + shardMap, ok := hc.targetToTablets[target.Keyspace] if !ok { return } @@ -623,7 +619,7 @@ func (hc *HealthCheckImpl) deleteEndPointFromTargetProtected(target *querypb.Tar return } for i, ep := range epList { - if topo.EndPointEquality(ep, endPoint) { + if topo.TabletEquality(ep, tablet) { epList = append(epList[:i], epList[i+1:]...) ttMap[target.TabletType] = epList return @@ -631,66 +627,66 @@ func (hc *HealthCheckImpl) deleteEndPointFromTargetProtected(target *querypb.Tar } } -// EndPointsCacheStatus is the current endpoints for a cell/target. -// TODO: change this to reflect the e2e information about the endpoints. -type EndPointsCacheStatus struct { - Cell string - Target *querypb.Target - EndPointsStats EndPointStatsList +// TabletsCacheStatus is the current tablets for a cell/target. +// TODO: change this to reflect the e2e information about the tablets. +type TabletsCacheStatus struct { + Cell string + Target *querypb.Target + TabletsStats TabletStatsList } -// EndPointStatsList is used for sorting. -type EndPointStatsList []*EndPointStats +// TabletStatsList is used for sorting. +type TabletStatsList []*TabletStats // Len is part of sort.Interface. 
-func (epsl EndPointStatsList) Len() int { - return len(epsl) +func (tsl TabletStatsList) Len() int { + return len(tsl) } // Less is part of sort.Interface -func (epsl EndPointStatsList) Less(i, j int) bool { - name1 := epsl[i].Name +func (tsl TabletStatsList) Less(i, j int) bool { + name1 := tsl[i].Name if name1 == "" { - name1 = EndPointToMapKey(epsl[i].EndPoint) + name1 = TabletToMapKey(tsl[i].Tablet) } - name2 := epsl[j].Name + name2 := tsl[j].Name if name2 == "" { - name2 = EndPointToMapKey(epsl[j].EndPoint) + name2 = TabletToMapKey(tsl[j].Tablet) } return name1 < name2 } // Swap is part of sort.Interface -func (epsl EndPointStatsList) Swap(i, j int) { - epsl[i], epsl[j] = epsl[j], epsl[i] +func (tsl TabletStatsList) Swap(i, j int) { + tsl[i], tsl[j] = tsl[j], tsl[i] } // StatusAsHTML returns an HTML version of the status. -func (epcs *EndPointsCacheStatus) StatusAsHTML() template.HTML { +func (epcs *TabletsCacheStatus) StatusAsHTML() template.HTML { epLinks := make([]string, 0, 1) - if epcs.EndPointsStats != nil { - sort.Sort(epcs.EndPointsStats) + if epcs.TabletsStats != nil { + sort.Sort(epcs.TabletsStats) } - for _, eps := range epcs.EndPointsStats { - vtPort := eps.EndPoint.PortMap["vt"] + for _, ts := range epcs.TabletsStats { + vtPort := ts.Tablet.PortMap["vt"] color := "green" extra := "" - if eps.LastError != nil { + if ts.LastError != nil { color = "red" - extra = fmt.Sprintf(" (%v)", eps.LastError) - } else if !eps.Serving { + extra = fmt.Sprintf(" (%v)", ts.LastError) + } else if !ts.Serving { color = "red" extra = " (Not Serving)" - } else if !eps.Up { + } else if !ts.Up { color = "red" extra = " (Down)" - } else if eps.Target.TabletType == topodatapb.TabletType_MASTER { - extra = fmt.Sprintf(" (MasterTS: %v)", eps.TabletExternallyReparentedTimestamp) + } else if ts.Target.TabletType == topodatapb.TabletType_MASTER { + extra = fmt.Sprintf(" (MasterTS: %v)", ts.TabletExternallyReparentedTimestamp) } else { - extra = fmt.Sprintf(" (RepLag: %v)", 
eps.Stats.SecondsBehindMaster) + extra = fmt.Sprintf(" (RepLag: %v)", ts.Stats.SecondsBehindMaster) } - name := eps.Name - addr := netutil.JoinHostPort(eps.EndPoint.Host, vtPort) + name := ts.Name + addr := netutil.JoinHostPort(ts.Tablet.Hostname, vtPort) if name == "" { name = addr } @@ -699,57 +695,57 @@ func (epcs *EndPointsCacheStatus) StatusAsHTML() template.HTML { return template.HTML(strings.Join(epLinks, "
")) } -// EndPointsCacheStatusList is used for sorting. -type EndPointsCacheStatusList []*EndPointsCacheStatus +// TabletsCacheStatusList is used for sorting. +type TabletsCacheStatusList []*TabletsCacheStatus // Len is part of sort.Interface. -func (epcsl EndPointsCacheStatusList) Len() int { +func (epcsl TabletsCacheStatusList) Len() int { return len(epcsl) } // Less is part of sort.Interface -func (epcsl EndPointsCacheStatusList) Less(i, j int) bool { +func (epcsl TabletsCacheStatusList) Less(i, j int) bool { return epcsl[i].Cell+"."+epcsl[i].Target.Keyspace+"."+epcsl[i].Target.Shard+"."+string(epcsl[i].Target.TabletType) < epcsl[j].Cell+"."+epcsl[j].Target.Keyspace+"."+epcsl[j].Target.Shard+"."+string(epcsl[j].Target.TabletType) } // Swap is part of sort.Interface -func (epcsl EndPointsCacheStatusList) Swap(i, j int) { +func (epcsl TabletsCacheStatusList) Swap(i, j int) { epcsl[i], epcsl[j] = epcsl[j], epcsl[i] } // CacheStatus returns a displayable version of the cache. -func (hc *HealthCheckImpl) CacheStatus() EndPointsCacheStatusList { - epcsMap := make(map[string]*EndPointsCacheStatus) +func (hc *HealthCheckImpl) CacheStatus() TabletsCacheStatusList { + epcsMap := make(map[string]*TabletsCacheStatus) hc.mu.RLock() for _, hcc := range hc.addrToConns { hcc.mu.RLock() key := fmt.Sprintf("%v.%v.%v.%v", hcc.cell, hcc.target.Keyspace, hcc.target.Shard, string(hcc.target.TabletType)) - var epcs *EndPointsCacheStatus + var epcs *TabletsCacheStatus var ok bool if epcs, ok = epcsMap[key]; !ok { - epcs = &EndPointsCacheStatus{ + epcs = &TabletsCacheStatus{ Cell: hcc.cell, Target: hcc.target, } epcsMap[key] = epcs } - stats := &EndPointStats{ - Cell: hcc.cell, - Name: hcc.name, - Target: hcc.target, - Up: hcc.up, - Serving: hcc.serving, - EndPoint: hcc.endPoint, - Stats: hcc.stats, + stats := &TabletStats{ + Cell: hcc.cell, + Name: hcc.name, + Target: hcc.target, + Up: hcc.up, + Serving: hcc.serving, + Tablet: hcc.tablet, + Stats: hcc.stats, 
TabletExternallyReparentedTimestamp: hcc.tabletExternallyReparentedTimestamp, LastError: hcc.lastError, } hcc.mu.RUnlock() - epcs.EndPointsStats = append(epcs.EndPointsStats, stats) + epcs.TabletsStats = append(epcs.TabletsStats, stats) } hc.mu.RUnlock() - epcsl := make(EndPointsCacheStatusList, 0, len(epcsMap)) + epcsl := make(TabletsCacheStatusList, 0, len(epcsMap)) for _, epcs := range epcsMap { epcsl = append(epcsl, epcs) } @@ -767,18 +763,18 @@ func (hc *HealthCheckImpl) Close() error { hcc.cancelFunc() } hc.addrToConns = make(map[string]*healthCheckConn) - hc.targetToEPs = make(map[string]map[string]map[topodatapb.TabletType][]*topodatapb.EndPoint) + hc.targetToTablets = make(map[string]map[string]map[topodatapb.TabletType][]*topodatapb.Tablet) return nil } -// EndPointToMapKey creates a key to the map from endpoint's host and ports. +// TabletToMapKey creates a key to the map from tablet's host and ports. // It should only be used in discovery and related module. -func EndPointToMapKey(endPoint *topodatapb.EndPoint) string { +func TabletToMapKey(tablet *topodatapb.Tablet) string { parts := make([]string, 0, 1) - for name, port := range endPoint.PortMap { + for name, port := range tablet.PortMap { parts = append(parts, netutil.JoinHostPort(name, port)) } sort.Strings(parts) - parts = append([]string{endPoint.Host}, parts...) + parts = append([]string{tablet.Hostname}, parts...) 
return strings.Join(parts, ",") } diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index f75df0bf54..de5da2326f 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -26,7 +26,7 @@ func init() { } func TestHealthCheck(t *testing.T) { - ep := topo.NewEndPoint(0, "a") + ep := topo.NewTablet(0, "a") ep.PortMap["vt"] = 1 input := make(chan *querypb.StreamHealthResponse) fakeConn := createFakeConn(ep, input) @@ -34,29 +34,29 @@ func TestHealthCheck(t *testing.T) { l := newListener() hc := NewHealthCheck(1*time.Millisecond, 1*time.Millisecond, time.Hour, "" /* statsSuffix */).(*HealthCheckImpl) hc.SetListener(l) - hc.AddEndPoint("cell", "", ep) - t.Logf(`hc = HealthCheck(); hc.AddEndPoint("cell", "", {Host: "a", PortMap: {"vt": 1}})`) + hc.AddTablet("cell", "", ep) + t.Logf(`hc = HealthCheck(); hc.AddTablet("cell", "", {Host: "a", PortMap: {"vt": 1}})`) - // no endpoint before getting first StreamHealthResponse - epsList := hc.GetEndPointStatsFromKeyspaceShard("k", "s") - if len(epsList) != 0 { - t.Errorf(`hc.GetEndPointStatsFromKeyspaceShard("k", "s") = %+v; want empty`, epsList) + // no tablet before getting first StreamHealthResponse + tsList := hc.GetTabletStatsFromKeyspaceShard("k", "s") + if len(tsList) != 0 { + t.Errorf(`hc.GetTabletStatsFromKeyspaceShard("k", "s") = %+v; want empty`, tsList) } - // one endpoint after receiving a StreamHealthResponse + // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, Serving: true, TabletExternallyReparentedTimestamp: 10, RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, } - want := &EndPointStats{ - EndPoint: ep, - Cell: "cell", - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, - Up: true, - Serving: true, - Stats: 
&querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, + want := &TabletStats{ + Tablet: ep, + Cell: "cell", + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, + Up: true, + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, TabletExternallyReparentedTimestamp: 10, } input <- shr @@ -65,21 +65,21 @@ func TestHealthCheck(t *testing.T) { if !reflect.DeepEqual(res, want) { t.Errorf(`<-l.output: %+v; want %+v`, res, want) } - epsList = hc.GetEndPointStatsFromKeyspaceShard("k", "s") - if len(epsList) != 1 || !reflect.DeepEqual(epsList[0], want) { - t.Errorf(`hc.GetEndPointStatsFromKeyspaceShard("k", "s") = %+v; want %+v`, epsList, want) + tsList = hc.GetTabletStatsFromKeyspaceShard("k", "s") + if len(tsList) != 1 || !reflect.DeepEqual(tsList[0], want) { + t.Errorf(`hc.GetTabletStatsFromKeyspaceShard("k", "s") = %+v; want %+v`, tsList, want) } epcsl := hc.CacheStatus() - epcslWant := EndPointsCacheStatusList{{ + epcslWant := TabletsCacheStatusList{{ Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, - EndPointsStats: EndPointStatsList{{ - EndPoint: ep, - Cell: "cell", - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, - Up: true, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, + TabletsStats: TabletStatsList{{ + Tablet: ep, + Cell: "cell", + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, + Up: true, + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, TabletExternallyReparentedTimestamp: 10, }}, }} @@ -94,13 +94,13 @@ func TestHealthCheck(t *testing.T) { TabletExternallyReparentedTimestamp: 0, RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.5}, } - want = &EndPointStats{ - EndPoint: ep, - Cell: "cell", - Target: 
&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Up: true, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.5}, + want = &TabletStats{ + Tablet: ep, + Cell: "cell", + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Up: true, + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.5}, TabletExternallyReparentedTimestamp: 0, } input <- shr @@ -109,9 +109,9 @@ func TestHealthCheck(t *testing.T) { if !reflect.DeepEqual(res, want) { t.Errorf(`<-l.output: %+v; want %+v`, res, want) } - epsList = hc.GetEndPointStatsFromTarget("k", "s", topodatapb.TabletType_REPLICA) - if len(epsList) != 1 || !reflect.DeepEqual(epsList[0], want) { - t.Errorf(`hc.GetEndPointStatsFromTarget("k", "s", REPLICA) = %+v; want %+v`, epsList, want) + tsList = hc.GetTabletStatsFromTarget("k", "s", topodatapb.TabletType_REPLICA) + if len(tsList) != 1 || !reflect.DeepEqual(tsList[0], want) { + t.Errorf(`hc.GetTabletStatsFromTarget("k", "s", REPLICA) = %+v; want %+v`, tsList, want) } // Serving & RealtimeStats changed @@ -121,13 +121,13 @@ func TestHealthCheck(t *testing.T) { TabletExternallyReparentedTimestamp: 0, RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.3}, } - want = &EndPointStats{ - EndPoint: ep, - Cell: "cell", - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Up: true, - Serving: false, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.3}, + want = &TabletStats{ + Tablet: ep, + Cell: "cell", + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Up: true, + Serving: false, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.3}, TabletExternallyReparentedTimestamp: 0, } input <- shr @@ -144,13 +144,13 @@ func TestHealthCheck(t *testing.T) { TabletExternallyReparentedTimestamp: 0, 
RealtimeStats: &querypb.RealtimeStats{HealthError: "some error", SecondsBehindMaster: 1, CpuUsage: 0.3}, } - want = &EndPointStats{ - EndPoint: ep, - Cell: "cell", - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Up: true, - Serving: false, - Stats: &querypb.RealtimeStats{HealthError: "some error", SecondsBehindMaster: 1, CpuUsage: 0.3}, + want = &TabletStats{ + Tablet: ep, + Cell: "cell", + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Up: true, + Serving: false, + Stats: &querypb.RealtimeStats{HealthError: "some error", SecondsBehindMaster: 1, CpuUsage: 0.3}, TabletExternallyReparentedTimestamp: 0, LastError: fmt.Errorf("vttablet error: some error"), } @@ -161,17 +161,17 @@ func TestHealthCheck(t *testing.T) { t.Errorf(`<-l.output: %+v; want %+v`, res, want) } - // remove endpoint + // remove tablet hc.deleteConn(ep) close(fakeConn.hcChan) - t.Logf(`hc.RemoveEndPoint({Host: "a", PortMap: {"vt": 1}})`) - want = &EndPointStats{ - EndPoint: ep, - Cell: "cell", - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Up: false, - Serving: false, - Stats: &querypb.RealtimeStats{HealthError: "some error", SecondsBehindMaster: 1, CpuUsage: 0.3}, + t.Logf(`hc.RemoveTablet({Host: "a", PortMap: {"vt": 1}})`) + want = &TabletStats{ + Tablet: ep, + Cell: "cell", + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Up: false, + Serving: false, + Stats: &querypb.RealtimeStats{HealthError: "some error", SecondsBehindMaster: 1, CpuUsage: 0.3}, TabletExternallyReparentedTimestamp: 0, LastError: fmt.Errorf("recv error"), } @@ -179,9 +179,9 @@ func TestHealthCheck(t *testing.T) { if !reflect.DeepEqual(res, want) { t.Errorf(`<-l.output: %+v; want %+v`, res, want) } - epsList = hc.GetEndPointStatsFromKeyspaceShard("k", "s") - if len(epsList) != 0 { - 
t.Errorf(`hc.GetEndPointStatsFromKeyspaceShard("k", "s") = %+v; want empty`, epsList) + tsList = hc.GetTabletStatsFromKeyspaceShard("k", "s") + if len(tsList) != 0 { + t.Errorf(`hc.GetTabletStatsFromKeyspaceShard("k", "s") = %+v; want empty`, tsList) } // close healthcheck hc.Close() @@ -189,7 +189,7 @@ func TestHealthCheck(t *testing.T) { func TestHealthCheckTimeout(t *testing.T) { timeout := 500 * time.Millisecond - ep := topo.NewEndPoint(0, "a") + ep := topo.NewTablet(0, "a") ep.PortMap["vt"] = 1 input := make(chan *querypb.StreamHealthResponse) createFakeConn(ep, input) @@ -197,23 +197,23 @@ func TestHealthCheckTimeout(t *testing.T) { l := newListener() hc := NewHealthCheck(1*time.Millisecond, 1*time.Millisecond, timeout, "" /* statsSuffix */).(*HealthCheckImpl) hc.SetListener(l) - hc.AddEndPoint("cell", "", ep) - t.Logf(`hc = HealthCheck(); hc.AddEndPoint("cell", "", {Host: "a", PortMap: {"vt": 1}})`) + hc.AddTablet("cell", "", ep) + t.Logf(`hc = HealthCheck(); hc.AddTablet("cell", "", {Host: "a", PortMap: {"vt": 1}})`) - // one endpoint after receiving a StreamHealthResponse + // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, Serving: true, TabletExternallyReparentedTimestamp: 10, RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, } - want := &EndPointStats{ - EndPoint: ep, - Cell: "cell", - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, - Up: true, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, + want := &TabletStats{ + Tablet: ep, + Cell: "cell", + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, + Up: true, + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, TabletExternallyReparentedTimestamp: 10, } input <- shr @@ -222,9 +222,9 
@@ func TestHealthCheckTimeout(t *testing.T) { if !reflect.DeepEqual(res, want) { t.Errorf(`<-l.output: %+v; want %+v`, res, want) } - epsList := hc.GetEndPointStatsFromKeyspaceShard("k", "s") - if len(epsList) != 1 || !reflect.DeepEqual(epsList[0], want) { - t.Errorf(`hc.GetEndPointStatsFromKeyspaceShard("k", "s") = %+v; want %+v`, epsList, want) + tsList := hc.GetTabletStatsFromKeyspaceShard("k", "s") + if len(tsList) != 1 || !reflect.DeepEqual(tsList[0], want) { + t.Errorf(`hc.GetTabletStatsFromKeyspaceShard("k", "s") = %+v; want %+v`, tsList, want) } // wait for timeout period time.Sleep(2 * timeout) @@ -233,9 +233,9 @@ func TestHealthCheckTimeout(t *testing.T) { if res.Serving { t.Errorf(`<-l.output: %+v; want not serving`, res) } - epsList = hc.GetEndPointStatsFromKeyspaceShard("k", "s") - if len(epsList) != 1 || epsList[0].Serving { - t.Errorf(`hc.GetEndPointStatsFromKeyspaceShard("k", "s") = %+v; want not serving`, epsList) + tsList = hc.GetTabletStatsFromKeyspaceShard("k", "s") + if len(tsList) != 1 || tsList[0].Serving { + t.Errorf(`hc.GetTabletStatsFromKeyspaceShard("k", "s") = %+v; want not serving`, tsList) } // send a healthcheck response, it should be serving again input <- shr @@ -244,41 +244,41 @@ func TestHealthCheckTimeout(t *testing.T) { if !reflect.DeepEqual(res, want) { t.Errorf(`<-l.output: %+v; want %+v`, res, want) } - epsList = hc.GetEndPointStatsFromKeyspaceShard("k", "s") - if len(epsList) != 1 || !reflect.DeepEqual(epsList[0], want) { - t.Errorf(`hc.GetEndPointStatsFromKeyspaceShard("k", "s") = %+v; want %+v`, epsList, want) + tsList = hc.GetTabletStatsFromKeyspaceShard("k", "s") + if len(tsList) != 1 || !reflect.DeepEqual(tsList[0], want) { + t.Errorf(`hc.GetTabletStatsFromKeyspaceShard("k", "s") = %+v; want %+v`, tsList, want) } // close healthcheck hc.Close() } type listener struct { - output chan *EndPointStats + output chan *TabletStats } func newListener() *listener { - return &listener{output: make(chan *EndPointStats, 1)} + 
return &listener{output: make(chan *TabletStats, 1)} } -func (l *listener) StatsUpdate(eps *EndPointStats) { - l.output <- eps +func (l *listener) StatsUpdate(ts *TabletStats) { + l.output <- ts } -func createFakeConn(endPoint *topodatapb.EndPoint, c chan *querypb.StreamHealthResponse) *fakeConn { - key := EndPointToMapKey(endPoint) - conn := &fakeConn{endPoint: endPoint, hcChan: c} +func createFakeConn(tablet *topodatapb.Tablet, c chan *querypb.StreamHealthResponse) *fakeConn { + key := TabletToMapKey(tablet) + conn := &fakeConn{tablet: tablet, hcChan: c} connMap[key] = conn return conn } -func discoveryDialer(ctx context.Context, endPoint *topodatapb.EndPoint, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { - key := EndPointToMapKey(endPoint) +func discoveryDialer(ctx context.Context, tablet *topodatapb.Tablet, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { + key := TabletToMapKey(tablet) return connMap[key], nil } type fakeConn struct { - endPoint *topodatapb.EndPoint - hcChan chan *querypb.StreamHealthResponse + tablet *topodatapb.Tablet + hcChan chan *querypb.StreamHealthResponse } type streamHealthReader struct { @@ -364,9 +364,9 @@ func (fc *fakeConn) SetTarget(keyspace, shard string, tabletType topodatapb.Tabl return fmt.Errorf("not implemented") } -// EndPoint returns the endpoint associated with the connection. -func (fc *fakeConn) EndPoint() *topodatapb.EndPoint { - return fc.endPoint +// Tablet returns the tablet associated with the connection. +func (fc *fakeConn) Tablet() *topodatapb.Tablet { + return fc.tablet } // Close closes the connection. 
diff --git a/go/vt/discovery/healthcheck_wait.go b/go/vt/discovery/healthcheck_wait.go index efedfbe8d4..9e4aae29fb 100644 --- a/go/vt/discovery/healthcheck_wait.go +++ b/go/vt/discovery/healthcheck_wait.go @@ -17,11 +17,11 @@ import ( ) var ( - // ErrWaitForEndPointsTimeout is returned if we cannot get the endpoints in time - ErrWaitForEndPointsTimeout = errors.New("timeout waiting for endpoints") + // ErrWaitForTabletsTimeout is returned if we cannot get the tablets in time + ErrWaitForTabletsTimeout = errors.New("timeout waiting for tablets") // how much to sleep between each check - waitAvailableEndPointInterval = 100 * time.Millisecond + waitAvailableTabletInterval = 100 * time.Millisecond ) // keyspaceShard is a helper structure used internally @@ -30,27 +30,27 @@ type keyspaceShard struct { shard string } -// WaitForEndPoints waits for at least one endpoint in the given cell / +// WaitForTablets waits for at least one tablet in the given cell / // keyspace / shard before returning. -func WaitForEndPoints(ctx context.Context, hc HealthCheck, cell, keyspace, shard string, types []topodatapb.TabletType) error { +func WaitForTablets(ctx context.Context, hc HealthCheck, cell, keyspace, shard string, types []topodatapb.TabletType) error { keyspaceShards := map[keyspaceShard]bool{ keyspaceShard{ keyspace: keyspace, shard: shard, }: true, } - return waitForEndPoints(ctx, hc, keyspaceShards, types, false) + return waitForTablets(ctx, hc, keyspaceShards, types, false) } -// WaitForAllServingEndPoints waits for at least one serving endpoint in the given cell +// WaitForAllServingTablets waits for at least one serving tablet in the given cell // for all keyspaces / shards before returning. 
-func WaitForAllServingEndPoints(ctx context.Context, hc HealthCheck, ts topo.SrvTopoServer, cell string, types []topodatapb.TabletType) error { +func WaitForAllServingTablets(ctx context.Context, hc HealthCheck, ts topo.SrvTopoServer, cell string, types []topodatapb.TabletType) error { keyspaceShards, err := findAllKeyspaceShards(ctx, ts, cell) if err != nil { return err } - return waitForEndPoints(ctx, hc, keyspaceShards, types, true) + return waitForTablets(ctx, hc, keyspaceShards, types, true) } // findAllKeyspaceShards goes through all serving shards in the topology @@ -97,8 +97,8 @@ func findAllKeyspaceShards(ctx context.Context, ts topo.SrvTopoServer, cell stri return keyspaceShards, nil } -// waitForEndPoints is the internal method that polls for endpoints -func waitForEndPoints(ctx context.Context, hc HealthCheck, keyspaceShards map[keyspaceShard]bool, types []topodatapb.TabletType, requireServing bool) error { +// waitForTablets is the internal method that polls for tablets +func waitForTablets(ctx context.Context, hc HealthCheck, keyspaceShards map[keyspaceShard]bool, types []topodatapb.TabletType, requireServing bool) error { RetryLoop: for { select { @@ -111,7 +111,7 @@ RetryLoop: for ks := range keyspaceShards { allPresent := true for _, tt := range types { - epl := hc.GetEndPointStatsFromTarget(ks.keyspace, ks.shard, tt) + epl := hc.GetTabletStatsFromTarget(ks.keyspace, ks.shard, tt) if requireServing { hasServingEP := false for _, eps := range epl { @@ -143,7 +143,7 @@ RetryLoop: } // Unblock after the sleep or when the context has expired. 
- timer := time.NewTimer(waitAvailableEndPointInterval) + timer := time.NewTimer(waitAvailableTabletInterval) select { case <-ctx.Done(): timer.Stop() @@ -152,10 +152,10 @@ RetryLoop: } if ctx.Err() == context.DeadlineExceeded { - log.Warningf("waitForEndPoints timeout for %v (context error: %v)", keyspaceShards, ctx.Err()) - return ErrWaitForEndPointsTimeout + log.Warningf("waitForTablets timeout for %v (context error: %v)", keyspaceShards, ctx.Err()) + return ErrWaitForTabletsTimeout } - err := fmt.Errorf("waitForEndPoints failed for %v (context error: %v)", keyspaceShards, ctx.Err()) + err := fmt.Errorf("waitForTablets failed for %v (context error: %v)", keyspaceShards, ctx.Err()) log.Error(err) return err } diff --git a/go/vt/discovery/healthcheck_wait_test.go b/go/vt/discovery/healthcheck_wait_test.go index 7c09dd776e..68ae90efe1 100644 --- a/go/vt/discovery/healthcheck_wait_test.go +++ b/go/vt/discovery/healthcheck_wait_test.go @@ -104,32 +104,32 @@ func TestFindAllKeyspaceShards(t *testing.T) { } } -func TestWaitForEndPoints(t *testing.T) { +func TestWaitForTablets(t *testing.T) { shortCtx, shortCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer shortCancel() - waitAvailableEndPointInterval = 20 * time.Millisecond + waitAvailableTabletInterval = 20 * time.Millisecond - ep := topo.NewEndPoint(0, "a") + ep := topo.NewTablet(0, "a") ep.PortMap["vt"] = 1 input := make(chan *querypb.StreamHealthResponse) createFakeConn(ep, input) hc := NewHealthCheck(1*time.Millisecond, 1*time.Millisecond, 1*time.Hour, "" /* statsSuffix */) - hc.AddEndPoint("cell", "", ep) + hc.AddTablet("cell", "", ep) // this should time out - if err := WaitForEndPoints(shortCtx, hc, "cell", "keyspace", "shard", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err != ErrWaitForEndPointsTimeout { + if err := WaitForTablets(shortCtx, hc, "cell", "keyspace", "shard", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err != ErrWaitForTabletsTimeout { 
t.Errorf("got wrong error: %v", err) } // this should fail, but return a non-timeout error cancelledCtx, cancel := context.WithCancel(context.Background()) cancel() - if err := WaitForEndPoints(cancelledCtx, hc, "cell", "keyspace", "shard", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err == nil || err == ErrWaitForEndPointsTimeout { + if err := WaitForTablets(cancelledCtx, hc, "cell", "keyspace", "shard", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err == nil || err == ErrWaitForTabletsTimeout { t.Errorf("want: non-timeout error, got: %v", err) } - // send the endpoint in + // send the tablet in shr := &querypb.StreamHealthResponse{ Target: &querypb.Target{ Keyspace: "keyspace", @@ -144,8 +144,8 @@ func TestWaitForEndPoints(t *testing.T) { // and ask again, with longer time outs so it's not flaky longCtx, longCancel := context.WithTimeout(context.Background(), 10*time.Second) defer longCancel() - waitAvailableEndPointInterval = 10 * time.Millisecond - if err := WaitForEndPoints(longCtx, hc, "cell", "keyspace", "shard", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err != nil { + waitAvailableTabletInterval = 10 * time.Millisecond + if err := WaitForTablets(longCtx, hc, "cell", "keyspace", "shard", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err != nil { t.Errorf("got error: %v", err) } } diff --git a/go/vt/discovery/replicationlag.go b/go/vt/discovery/replicationlag.go index 20744aa6a7..1feacfacbf 100644 --- a/go/vt/discovery/replicationlag.go +++ b/go/vt/discovery/replicationlag.go @@ -13,29 +13,29 @@ var ( highReplicationLagMinServing = flag.Duration("discovery_high_replication_lag_minimum_serving", 2*time.Hour, "the replication lag that is considered too high when selecting miminum 2 vttablets for serving") ) -// FilterByReplicationLag filters the list of EndPointStats by EndPointStats.Stats.SecondsBehindMaster. 
-// The algorithm (EndPointStats that is non-serving or has error is ignored): -// - Return the list if there is 0 or 1 endpoint. -// - Return the list if all endpoints have <=30s lag. -// - Filter by replication lag: for each endpoint, if the mean value without it is more than 0.7 of the mean value across all endpoints, it is valid. +// FilterByReplicationLag filters the list of TabletStats by TabletStats.Stats.SecondsBehindMaster. +// The algorithm (TabletStats that is non-serving or has error is ignored): +// - Return the list if there is 0 or 1 tablet. +// - Return the list if all tablets have <=30s lag. +// - Filter by replication lag: for each tablet, if the mean value without it is more than 0.7 of the mean value across all tablets, it is valid. // For example, lags of (5s, 10s, 15s, 120s) return the first three; // lags of (30m, 35m, 40m, 45m) return all. -func FilterByReplicationLag(epsList []*EndPointStats) []*EndPointStats { - list := make([]*EndPointStats, 0, len(epsList)) - // filter non-serving endpoints - for _, eps := range epsList { - if !eps.Serving || eps.LastError != nil || eps.Stats == nil { +func FilterByReplicationLag(tabletStatsList []*TabletStats) []*TabletStats { + list := make([]*TabletStats, 0, len(tabletStatsList)) + // filter non-serving tablets + for _, ts := range tabletStatsList { + if !ts.Serving || ts.LastError != nil || ts.Stats == nil { continue } - list = append(list, eps) + list = append(list, ts) } if len(list) <= 1 { return list } - // if all have low replication lag (<=30s), return all endpoints. + // if all have low replication lag (<=30s), return all tablets. 
allLowLag := true - for _, eps := range list { - if float64(eps.Stats.SecondsBehindMaster) > LowReplicationLag.Seconds() { + for _, ts := range list { + if float64(ts.Stats.SecondsBehindMaster) > LowReplicationLag.Seconds() { allLowLag = false break } @@ -44,31 +44,31 @@ func FilterByReplicationLag(epsList []*EndPointStats) []*EndPointStats { return list } // filter those affecting "mean" lag significantly - // calculate mean for all endpoints - res := make([]*EndPointStats, 0, len(list)) + // calculate mean for all tablets + res := make([]*TabletStats, 0, len(list)) m, _ := mean(list, -1) - for i, eps := range list { - // calculate mean by excluding ith endpoint + for i, ts := range list { + // calculate mean by excluding ith tablet mi, _ := mean(list, i) if float64(mi) > float64(m)*0.7 { - res = append(res, eps) + res = append(res, ts) } } - // return at least 2 endpoints to avoid over loading, - // if there is another endpoint with replication lag < highReplicationLagMinServing. + // return at least 2 tablets to avoid over loading, + // if there is another tablet with replication lag < highReplicationLagMinServing. if len(res) == 0 { return list } if len(res) == 1 && len(list) > 1 { minLag := uint32(math.MaxUint32) idx := -1 - for i, eps := range list { - if eps == res[0] { + for i, ts := range list { + if ts == res[0] { continue } - if eps.Stats.SecondsBehindMaster < minLag { + if ts.Stats.SecondsBehindMaster < minLag { idx = i - minLag = eps.Stats.SecondsBehindMaster + minLag = ts.Stats.SecondsBehindMaster } } if idx >= 0 && minLag <= uint32(highReplicationLagMinServing.Seconds()) { @@ -80,14 +80,14 @@ func FilterByReplicationLag(epsList []*EndPointStats) []*EndPointStats { // mean calculates the mean value over the given list, // while excluding the item with the specified index. 
-func mean(epsList []*EndPointStats, idxExclude int) (uint64, error) { +func mean(tabletStatsList []*TabletStats, idxExclude int) (uint64, error) { var sum uint64 var count uint64 - for i, eps := range epsList { + for i, ts := range tabletStatsList { if i == idxExclude { continue } - sum = sum + uint64(eps.Stats.SecondsBehindMaster) + sum = sum + uint64(ts.Stats.SecondsBehindMaster) count++ } if count == 0 { diff --git a/go/vt/discovery/replicationlag_test.go b/go/vt/discovery/replicationlag_test.go index 7b03c10fb1..c157d7ad59 100644 --- a/go/vt/discovery/replicationlag_test.go +++ b/go/vt/discovery/replicationlag_test.go @@ -5,136 +5,136 @@ import ( "testing" querypb "github.com/youtube/vitess/go/vt/proto/query" - topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + "github.com/youtube/vitess/go/vt/topo" ) func TestFilterByReplicationLag(t *testing.T) { - // 0 endpoint - got := FilterByReplicationLag([]*EndPointStats{}) + // 0 tablet + got := FilterByReplicationLag([]*TabletStats{}) if len(got) != 0 { t.Errorf("FilterByReplicationLag([]) = %+v, want []", got) } - // 1 serving endpoint - eps1 := &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 1}, - Serving: true, - Stats: &querypb.RealtimeStats{}, + // 1 serving tablet + ts1 := &TabletStats{ + Tablet: topo.NewTablet(1, "host1"), + Serving: true, + Stats: &querypb.RealtimeStats{}, } - eps2 := &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 2}, - Serving: false, - Stats: &querypb.RealtimeStats{}, + ts2 := &TabletStats{ + Tablet: topo.NewTablet(2, "host2"), + Serving: false, + Stats: &querypb.RealtimeStats{}, } - got = FilterByReplicationLag([]*EndPointStats{eps1, eps2}) + got = FilterByReplicationLag([]*TabletStats{ts1, ts2}) if len(got) != 1 { - t.Errorf("len(FilterByReplicationLag([{EndPoint: {Uid: 1}, Serving: true}, {EndPoint: {Uid: 2}, Serving: false}])) = %v, want 1", len(got)) + t.Errorf("len(FilterByReplicationLag([{Tablet: {Uid: 1}, Serving: true}, {Tablet: {Uid: 2}, Serving: false}])) 
= %v, want 1", len(got)) } - if len(got) > 0 && !reflect.DeepEqual(got[0], eps1) { - t.Errorf("FilterByReplicationLag([{EndPoint: {Uid: 1}, Serving: true}, {EndPoint: {Uid: 2}, Serving: false}]) = %+v, want %+v", got[0], eps1) + if len(got) > 0 && !reflect.DeepEqual(got[0], ts1) { + t.Errorf("FilterByReplicationLag([{Tablet: {Uid: 1}, Serving: true}, {Tablet: {Uid: 2}, Serving: false}]) = %+v, want %+v", got[0], ts1) } // lags of (1s, 1s, 1s, 30s) - eps1 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 1}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, + ts1 = &TabletStats{ + Tablet: topo.NewTablet(1, "host1"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, } - eps2 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 2}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, + ts2 = &TabletStats{ + Tablet: topo.NewTablet(2, "host2"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, } - eps3 := &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 3}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, + ts3 := &TabletStats{ + Tablet: topo.NewTablet(3, "host3"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, } - eps4 := &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 4}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 30}, + ts4 := &TabletStats{ + Tablet: topo.NewTablet(4, "host4"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 30}, } - got = FilterByReplicationLag([]*EndPointStats{eps1, eps2, eps3, eps4}) - if len(got) != 4 || !reflect.DeepEqual(got[0], eps1) || !reflect.DeepEqual(got[1], eps2) || !reflect.DeepEqual(got[2], eps3) || !reflect.DeepEqual(got[3], eps4) { + got = FilterByReplicationLag([]*TabletStats{ts1, ts2, ts3, ts4}) + if len(got) != 4 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) || !reflect.DeepEqual(got[2], ts3) || 
!reflect.DeepEqual(got[3], ts4) { t.Errorf("FilterByReplicationLag([1s, 1s, 1s, 30s]) = %+v, want all", got) } // lags of (5s, 10s, 15s, 120s) - eps1 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 1}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 5}, + ts1 = &TabletStats{ + Tablet: topo.NewTablet(1, "host1"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 5}, } - eps2 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 2}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 10}, + ts2 = &TabletStats{ + Tablet: topo.NewTablet(2, "host2"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 10}, } - eps3 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 3}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 15}, + ts3 = &TabletStats{ + Tablet: topo.NewTablet(3, "host3"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 15}, } - eps4 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 4}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 120}, + ts4 = &TabletStats{ + Tablet: topo.NewTablet(4, "host4"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 120}, } - got = FilterByReplicationLag([]*EndPointStats{eps1, eps2, eps3, eps4}) - if len(got) != 3 || !reflect.DeepEqual(got[0], eps1) || !reflect.DeepEqual(got[1], eps2) || !reflect.DeepEqual(got[2], eps3) { + got = FilterByReplicationLag([]*TabletStats{ts1, ts2, ts3, ts4}) + if len(got) != 3 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) || !reflect.DeepEqual(got[2], ts3) { t.Errorf("FilterByReplicationLag([5s, 10s, 15s, 120s]) = %+v, want [5s, 10s, 15s]", got) } // lags of (30m, 35m, 40m, 45m) - eps1 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 1}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 30 * 60}, + ts1 = &TabletStats{ + Tablet: topo.NewTablet(1, "host1"), + Serving: 
true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 30 * 60}, } - eps2 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 2}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 35 * 60}, + ts2 = &TabletStats{ + Tablet: topo.NewTablet(2, "host2"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 35 * 60}, } - eps3 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 3}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 40 * 60}, + ts3 = &TabletStats{ + Tablet: topo.NewTablet(3, "host3"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 40 * 60}, } - eps4 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 4}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 45 * 60}, + ts4 = &TabletStats{ + Tablet: topo.NewTablet(4, "host4"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 45 * 60}, } - got = FilterByReplicationLag([]*EndPointStats{eps1, eps2, eps3, eps4}) - if len(got) != 4 || !reflect.DeepEqual(got[0], eps1) || !reflect.DeepEqual(got[1], eps2) || !reflect.DeepEqual(got[2], eps3) || !reflect.DeepEqual(got[3], eps4) { + got = FilterByReplicationLag([]*TabletStats{ts1, ts2, ts3, ts4}) + if len(got) != 4 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) || !reflect.DeepEqual(got[2], ts3) || !reflect.DeepEqual(got[3], ts4) { t.Errorf("FilterByReplicationLag([30m, 35m, 40m, 45m]) = %+v, want all", got) } // lags of (1m, 100m) - return at least 2 items to avoid overloading if the 2nd one is not delayed too much - eps1 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 1}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60}, + ts1 = &TabletStats{ + Tablet: topo.NewTablet(1, "host1"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60}, } - eps2 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 2}, - Serving: true, - Stats: 
&querypb.RealtimeStats{SecondsBehindMaster: 100 * 60}, + ts2 = &TabletStats{ + Tablet: topo.NewTablet(2, "host2"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 100 * 60}, } - got = FilterByReplicationLag([]*EndPointStats{eps1, eps2}) - if len(got) != 2 || !reflect.DeepEqual(got[0], eps1) || !reflect.DeepEqual(got[1], eps2) { + got = FilterByReplicationLag([]*TabletStats{ts1, ts2}) + if len(got) != 2 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) { t.Errorf("FilterByReplicationLag([1m, 100m]) = %+v, want all", got) } // lags of (1m, 3h) - return 1 if the 2nd one is delayed too much - eps1 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 1}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60}, + ts1 = &TabletStats{ + Tablet: topo.NewTablet(1, "host1"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60}, } - eps2 = &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: 2}, - Serving: true, - Stats: &querypb.RealtimeStats{SecondsBehindMaster: 3 * 60 * 60}, + ts2 = &TabletStats{ + Tablet: topo.NewTablet(2, "host2"), + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 3 * 60 * 60}, } - got = FilterByReplicationLag([]*EndPointStats{eps1, eps2}) - if len(got) != 1 || !reflect.DeepEqual(got[0], eps1) { + got = FilterByReplicationLag([]*TabletStats{ts1, ts2}) + if len(got) != 1 || !reflect.DeepEqual(got[0], ts1) { t.Errorf("FilterByReplicationLag([1m, 3h]) = %+v, want [1m]", got) } } diff --git a/go/vt/discovery/topology_watcher.go b/go/vt/discovery/topology_watcher.go index 378ea67be6..d9e6eeaba5 100644 --- a/go/vt/discovery/topology_watcher.go +++ b/go/vt/discovery/topology_watcher.go @@ -44,13 +44,13 @@ func NewShardReplicationWatcher(topoServer topo.Server, hc HealthCheck, cell, ke }) } -// tabletEndPoint is used internally by the TopologyWatcher class -type tabletEndPoint struct { - alias string - endPoint *topodatapb.EndPoint +// tabletInfo is 
used internally by the TopologyWatcher class +type tabletInfo struct { + alias string + tablet *topodatapb.Tablet } -// TopologyWatcher pulls endpoints from a configurable set of tablets +// TopologyWatcher polls tablet from a configurable set of tablets // periodically. type TopologyWatcher struct { // set at construction time @@ -64,8 +64,8 @@ type TopologyWatcher struct { cancelFunc context.CancelFunc // mu protects all variables below - mu sync.Mutex - endPoints map[string]*tabletEndPoint + mu sync.Mutex + tablets map[string]*tabletInfo } // NewTopologyWatcher returns a TopologyWatcher that monitors all @@ -78,14 +78,14 @@ func NewTopologyWatcher(topoServer topo.Server, hc HealthCheck, cell string, ref refreshInterval: refreshInterval, getTablets: getTablets, sem: make(chan int, topoReadConcurrency), - endPoints: make(map[string]*tabletEndPoint), + tablets: make(map[string]*tabletInfo), } tw.ctx, tw.cancelFunc = context.WithCancel(context.Background()) go tw.watch() return tw } -// watch pulls all endpoints and notifies HealthCheck by adding/removing endpoints. +// watch polls all tablets and notifies HealthCheck by adding/removing tablets. func (tw *TopologyWatcher) watch() { ticker := time.NewTicker(tw.refreshInterval) defer ticker.Stop() @@ -99,10 +99,10 @@ func (tw *TopologyWatcher) watch() { } } -// loadTablets reads all tablets from topology, converts to endpoints, and updates HealthCheck. +// loadTablets reads all tablets from topology, and updates HealthCheck. 
func (tw *TopologyWatcher) loadTablets() { var wg sync.WaitGroup - newEndPoints := make(map[string]*tabletEndPoint) + newTablets := make(map[string]*tabletInfo) tabletAlias, err := tw.getTablets(tw) if err != nil { select { @@ -129,16 +129,11 @@ func (tw *TopologyWatcher) loadTablets() { log.Errorf("cannot get tablet for alias %v: %v", alias, err) return } - endPoint, err := topo.TabletEndPoint(tablet.Tablet) - if err != nil { - log.Errorf("cannot get endpoint from tablet %v: %v", tablet, err) - return - } - key := EndPointToMapKey(endPoint) + key := TabletToMapKey(tablet.Tablet) tw.mu.Lock() - newEndPoints[key] = &tabletEndPoint{ - alias: topoproto.TabletAliasString(alias), - endPoint: endPoint, + newTablets[key] = &tabletInfo{ + alias: topoproto.TabletAliasString(alias), + tablet: tablet.Tablet, } tw.mu.Unlock() }(tAlias) @@ -146,21 +141,21 @@ func (tw *TopologyWatcher) loadTablets() { wg.Wait() tw.mu.Lock() - for key, tep := range newEndPoints { - if _, ok := tw.endPoints[key]; !ok { - tw.hc.AddEndPoint(tw.cell, tep.alias, tep.endPoint) + for key, tep := range newTablets { + if _, ok := tw.tablets[key]; !ok { + tw.hc.AddTablet(tw.cell, tep.alias, tep.tablet) } } - for key, tep := range tw.endPoints { - if _, ok := newEndPoints[key]; !ok { - tw.hc.RemoveEndPoint(tep.endPoint) + for key, tep := range tw.tablets { + if _, ok := newTablets[key]; !ok { + tw.hc.RemoveTablet(tep.tablet) } } - tw.endPoints = newEndPoints + tw.tablets = newTablets tw.mu.Unlock() } -// Stop stops the watcher. It does not clean up the endpoints added to HealthCheck. +// Stop stops the watcher. It does not clean up the tablets added to HealthCheck. 
func (tw *TopologyWatcher) Stop() { tw.cancelFunc() } diff --git a/go/vt/discovery/topology_watcher_test.go b/go/vt/discovery/topology_watcher_test.go index 0df3673ff5..d14cba0665 100644 --- a/go/vt/discovery/topology_watcher_test.go +++ b/go/vt/discovery/topology_watcher_test.go @@ -37,15 +37,17 @@ func checkWatcher(t *testing.T, cellTablets bool) { ft.AddTablet("aa", 0, "host1", map[string]int32{"vt": 123}) tw.loadTablets() t.Logf(`ft.AddTablet("aa", 0, "host1", {"vt": 123}); tw.loadTablets()`) - want := &topodatapb.EndPoint{ - Uid: 0, - Host: "host1", - PortMap: map[string]int32{"vt": 123}, + want := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 0, + }, + Hostname: "host1", + PortMap: map[string]int32{"vt": 123}, } - allEPs := fhc.GetAllEndPoints() - key := EndPointToMapKey(want) - if _, ok := allEPs[key]; !ok || len(allEPs) != 1 { - t.Errorf("fhc.GetAllEndPoints() = %+v; want %+v", allEPs, want) + allTablets := fhc.GetAllTablets() + key := TabletToMapKey(want) + if _, ok := allTablets[key]; !ok || len(allTablets) != 1 { + t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, want) } // same tablet, different port, should update (previous @@ -53,15 +55,17 @@ func checkWatcher(t *testing.T, cellTablets bool) { ft.AddTablet("aa", 0, "host1", map[string]int32{"vt": 456}) tw.loadTablets() t.Logf(`ft.AddTablet("aa", 0, "host1", {"vt": 456}); tw.loadTablets()`) - want = &topodatapb.EndPoint{ - Uid: 0, - Host: "host1", - PortMap: map[string]int32{"vt": 456}, + want = &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 0, + }, + Hostname: "host1", + PortMap: map[string]int32{"vt": 456}, } - allEPs = fhc.GetAllEndPoints() - key = EndPointToMapKey(want) - if _, ok := allEPs[key]; !ok || len(allEPs) != 1 { - t.Errorf("fhc.GetAllEndPoints() = %+v; want %+v", allEPs, want) + allTablets = fhc.GetAllTablets() + key = TabletToMapKey(want) + if _, ok := allTablets[key]; !ok || len(allTablets) != 1 { + t.Errorf("fhc.GetAllTablets() = %+v; want %+v", 
allTablets, want) } tw.Stop() diff --git a/go/vt/discovery/utils.go b/go/vt/discovery/utils.go index 9241045c2d..fdc4d83ccc 100644 --- a/go/vt/discovery/utils.go +++ b/go/vt/discovery/utils.go @@ -5,45 +5,45 @@ import ( ) // This file contains helper filter methods to process the unfiltered list of -// tablets returned by HealthCheck.GetEndPointStatsFrom*. +// tablets returned by HealthCheck.GetTabletStatsFrom*. // See also replicationlag.go for a more sophisicated filter used by vtgate. -// RemoveUnhealthyEndpoints filters all unhealthy tablets out. +// RemoveUnhealthyTablets filters all unhealthy tablets out. // NOTE: Non-serving tablets are considered healthy. -func RemoveUnhealthyEndpoints(epsList []*EndPointStats) []*EndPointStats { - result := make([]*EndPointStats, 0, len(epsList)) - for _, eps := range epsList { +func RemoveUnhealthyTablets(tabletStatsList []*TabletStats) []*TabletStats { + result := make([]*TabletStats, 0, len(tabletStatsList)) + for _, ts := range tabletStatsList { // Note we do not check the 'Serving' flag here. // This is mainly to avoid the case where we run a vtworker Diff between a // source and destination, and the source is not serving (disabled by // TabletControl). When we switch the tablet to 'worker', it will // go back to serving state. - if eps.Stats == nil || eps.Stats.HealthError != "" || float64(eps.Stats.SecondsBehindMaster) > LowReplicationLag.Seconds() { + if ts.Stats == nil || ts.Stats.HealthError != "" || float64(ts.Stats.SecondsBehindMaster) > LowReplicationLag.Seconds() { continue } - result = append(result, eps) + result = append(result, ts) } return result } // GetCurrentMaster returns the MASTER tablet with the highest // TabletExternallyReparentedTimestamp value. -func GetCurrentMaster(epsList []*EndPointStats) []*EndPointStats { - var master *EndPointStats +func GetCurrentMaster(tabletStatsList []*TabletStats) []*TabletStats { + var master *TabletStats // If there are multiple masters (e.g. 
during a reparent), pick the most // recent one (i.e. with the highest TabletExternallyReparentedTimestamp value). - for _, eps := range epsList { - if eps.Target.TabletType != topodatapb.TabletType_MASTER { + for _, ts := range tabletStatsList { + if ts.Target.TabletType != topodatapb.TabletType_MASTER { continue } - if master == nil || master.TabletExternallyReparentedTimestamp < eps.TabletExternallyReparentedTimestamp { - master = eps + if master == nil || master.TabletExternallyReparentedTimestamp < ts.TabletExternallyReparentedTimestamp { + master = ts } } if master == nil { return nil } - return []*EndPointStats{master} + return []*TabletStats{master} } diff --git a/go/vt/discovery/utils_test.go b/go/vt/discovery/utils_test.go index f6325b364f..16a5ee4410 100644 --- a/go/vt/discovery/utils_test.go +++ b/go/vt/discovery/utils_test.go @@ -8,47 +8,47 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) -func TestRemoveUnhealthyEndpoints(t *testing.T) { +func TestRemoveUnhealthyTablets(t *testing.T) { var testcases = []struct { desc string - input []*EndPointStats - want []*EndPointStats + input []*TabletStats + want []*TabletStats }{ { desc: "tablets missing Stats", - input: []*EndPointStats{replica(1), replica(2)}, - want: []*EndPointStats{}, + input: []*TabletStats{replica(1), replica(2)}, + want: []*TabletStats{}, }, { desc: "all tablets healthy", - input: []*EndPointStats{healthy(replica(1)), healthy(replica(2))}, - want: []*EndPointStats{healthy(replica(1)), healthy(replica(2))}, + input: []*TabletStats{healthy(replica(1)), healthy(replica(2))}, + want: []*TabletStats{healthy(replica(1)), healthy(replica(2))}, }, { desc: "one unhealthy tablet (error)", - input: []*EndPointStats{healthy(replica(1)), unhealthyError(replica(2))}, - want: []*EndPointStats{healthy(replica(1))}, + input: []*TabletStats{healthy(replica(1)), unhealthyError(replica(2))}, + want: []*TabletStats{healthy(replica(1))}, }, { desc: "one unhealthy tablet (lag)", - 
input: []*EndPointStats{healthy(replica(1)), unhealthyLag(replica(2))}, - want: []*EndPointStats{healthy(replica(1))}, + input: []*TabletStats{healthy(replica(1)), unhealthyLag(replica(2))}, + want: []*TabletStats{healthy(replica(1))}, }, { desc: "no filtering by tablet type", - input: []*EndPointStats{healthy(master(1)), healthy(replica(2)), healthy(rdonly(3))}, - want: []*EndPointStats{healthy(master(1)), healthy(replica(2)), healthy(rdonly(3))}, + input: []*TabletStats{healthy(master(1)), healthy(replica(2)), healthy(rdonly(3))}, + want: []*TabletStats{healthy(master(1)), healthy(replica(2)), healthy(rdonly(3))}, }, { desc: "non-serving tablets won't be removed", - input: []*EndPointStats{notServing(healthy(replica(1)))}, - want: []*EndPointStats{notServing(healthy(replica(1)))}, + input: []*TabletStats{notServing(healthy(replica(1)))}, + want: []*TabletStats{notServing(healthy(replica(1)))}, }, } for _, tc := range testcases { - if got := RemoveUnhealthyEndpoints(tc.input); !reflect.DeepEqual(got, tc.want) { - t.Errorf("test case '%v' failed: RemoveUnhealthyEndpoints(%v) = %#v, want: %#v", tc.desc, tc.input, got, tc.want) + if got := RemoveUnhealthyTablets(tc.input); !reflect.DeepEqual(got, tc.want) { + t.Errorf("test case '%v' failed: RemoveUnhealthyTablets(%v) = %#v, want: %#v", tc.desc, tc.input, got, tc.want) } } } @@ -56,23 +56,23 @@ func TestRemoveUnhealthyEndpoints(t *testing.T) { func TestGetCurrentMaster(t *testing.T) { var testcases = []struct { desc string - input []*EndPointStats - want []*EndPointStats + input []*TabletStats + want []*TabletStats }{ { desc: "zero masters remains zero", - input: []*EndPointStats{replica(1), rdonly(2)}, + input: []*TabletStats{replica(1), rdonly(2)}, want: nil, }, { desc: "single master", - input: []*EndPointStats{master(1)}, - want: []*EndPointStats{master(1)}, + input: []*TabletStats{master(1)}, + want: []*TabletStats{master(1)}, }, { desc: "multiple masters with different reparent times", - input: 
[]*EndPointStats{reparentAt(10, master(1)), reparentAt(11, master(2))}, - want: []*EndPointStats{reparentAt(11, master(2))}, + input: []*TabletStats{reparentAt(10, master(1)), reparentAt(11, master(2))}, + want: []*TabletStats{reparentAt(11, master(2))}, }, } @@ -83,21 +83,24 @@ func TestGetCurrentMaster(t *testing.T) { } } -func master(uid uint32) *EndPointStats { - return minimalEndPointStats(uid, topodatapb.TabletType_MASTER) +func master(uid uint32) *TabletStats { + return minimalTabletStats(uid, topodatapb.TabletType_MASTER) } -func replica(uid uint32) *EndPointStats { - return minimalEndPointStats(uid, topodatapb.TabletType_REPLICA) +func replica(uid uint32) *TabletStats { + return minimalTabletStats(uid, topodatapb.TabletType_REPLICA) } -func rdonly(uid uint32) *EndPointStats { - return minimalEndPointStats(uid, topodatapb.TabletType_RDONLY) +func rdonly(uid uint32) *TabletStats { + return minimalTabletStats(uid, topodatapb.TabletType_RDONLY) } -func minimalEndPointStats(uid uint32, tabletType topodatapb.TabletType) *EndPointStats { - return &EndPointStats{ - EndPoint: &topodatapb.EndPoint{Uid: uid}, +func minimalTabletStats(uid uint32, tabletType topodatapb.TabletType) *TabletStats { + return &TabletStats{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: uid}, + }, Target: &querypb.Target{ Keyspace: "test_keyspace", Shard: "-80", @@ -107,33 +110,33 @@ func minimalEndPointStats(uid uint32, tabletType topodatapb.TabletType) *EndPoin } } -func reparentAt(timestamp int64, eps *EndPointStats) *EndPointStats { - eps.TabletExternallyReparentedTimestamp = timestamp - return eps +func reparentAt(timestamp int64, ts *TabletStats) *TabletStats { + ts.TabletExternallyReparentedTimestamp = timestamp + return ts } -func healthy(eps *EndPointStats) *EndPointStats { - eps.Stats = &querypb.RealtimeStats{ +func healthy(ts *TabletStats) *TabletStats { + ts.Stats = &querypb.RealtimeStats{ SecondsBehindMaster: uint32(1), } - return eps + return ts } -func 
unhealthyLag(eps *EndPointStats) *EndPointStats { - eps.Stats = &querypb.RealtimeStats{ +func unhealthyLag(ts *TabletStats) *TabletStats { + ts.Stats = &querypb.RealtimeStats{ SecondsBehindMaster: uint32(3600), } - return eps + return ts } -func unhealthyError(eps *EndPointStats) *EndPointStats { - eps.Stats = &querypb.RealtimeStats{ +func unhealthyError(ts *TabletStats) *TabletStats { + ts.Stats = &querypb.RealtimeStats{ HealthError: "unhealthy", } - return eps + return ts } -func notServing(eps *EndPointStats) *EndPointStats { - eps.Serving = false - return eps +func notServing(ts *TabletStats) *TabletStats { + ts.Serving = false + return ts } diff --git a/go/vt/tabletmanager/binlog_players.go b/go/vt/tabletmanager/binlog_players.go index 8d2e2e8263..fcf91cb89b 100644 --- a/go/vt/tabletmanager/binlog_players.go +++ b/go/vt/tabletmanager/binlog_players.go @@ -270,26 +270,23 @@ func (bpc *BinlogPlayerController) Iteration() (err error) { return fmt.Errorf("not starting because flag '%v' is set", binlogplayer.BlpFlagDontStart) } - // wait for the endpoint set (usefull for the first run at least, fast for next runs) - if err := discovery.WaitForEndPoints(bpc.ctx, bpc.healthCheck, bpc.cell, bpc.sourceShard.Keyspace, bpc.sourceShard.Shard, []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err != nil { - return fmt.Errorf("error waiting for endpoints for %v %v %v: %v", bpc.cell, bpc.sourceShard.String(), topodatapb.TabletType_REPLICA, err) + // wait for the tablet set (usefull for the first run at least, fast for next runs) + if err := discovery.WaitForTablets(bpc.ctx, bpc.healthCheck, bpc.cell, bpc.sourceShard.Keyspace, bpc.sourceShard.Shard, []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err != nil { + return fmt.Errorf("error waiting for tablets for %v %v %v: %v", bpc.cell, bpc.sourceShard.String(), topodatapb.TabletType_REPLICA, err) } // Find the server list from the health check - addrs := discovery.RemoveUnhealthyEndpoints( - 
bpc.healthCheck.GetEndPointStatsFromTarget(bpc.sourceShard.Keyspace, bpc.sourceShard.Shard, topodatapb.TabletType_REPLICA)) + addrs := discovery.RemoveUnhealthyTablets( + bpc.healthCheck.GetTabletStatsFromTarget(bpc.sourceShard.Keyspace, bpc.sourceShard.Shard, topodatapb.TabletType_REPLICA)) if len(addrs) == 0 { return fmt.Errorf("can't find any healthy source tablet for %v %v %v", bpc.cell, bpc.sourceShard.String(), topodatapb.TabletType_REPLICA) } newServerIndex := rand.Intn(len(addrs)) - endPoint := addrs[newServerIndex].EndPoint + tablet := addrs[newServerIndex].Tablet // save our current server bpc.playerMutex.Lock() - bpc.sourceTablet = &topodatapb.TabletAlias{ - Cell: bpc.cell, - Uid: endPoint.Uid, - } + bpc.sourceTablet = tablet.Alias bpc.lastError = nil bpc.playerMutex.Unlock() @@ -302,7 +299,7 @@ func (bpc *BinlogPlayerController) Iteration() (err error) { } // tables, just get them - player, err := binlogplayer.NewBinlogPlayerTables(vtClient, endPoint, tables, bpc.sourceShard.Uid, startPosition, bpc.stopPosition, bpc.binlogPlayerStats) + player, err := binlogplayer.NewBinlogPlayerTables(vtClient, tablet, tables, bpc.sourceShard.Uid, startPosition, bpc.stopPosition, bpc.binlogPlayerStats) if err != nil { return fmt.Errorf("NewBinlogPlayerTables failed: %v", err) } @@ -315,7 +312,7 @@ func (bpc *BinlogPlayerController) Iteration() (err error) { return fmt.Errorf("Source shard %v doesn't overlap destination shard %v", bpc.sourceShard.KeyRange, bpc.keyRange) } - player, err := binlogplayer.NewBinlogPlayerKeyRange(vtClient, endPoint, overlap, bpc.sourceShard.Uid, startPosition, bpc.stopPosition, bpc.binlogPlayerStats) + player, err := binlogplayer.NewBinlogPlayerKeyRange(vtClient, tablet, overlap, bpc.sourceShard.Uid, startPosition, bpc.stopPosition, bpc.binlogPlayerStats) if err != nil { return fmt.Errorf("NewBinlogPlayerKeyRange failed: %v", err) } diff --git a/go/vt/tabletmanager/binlog_players_test.go b/go/vt/tabletmanager/binlog_players_test.go index 
8b3a688e5f..44dd98906a 100644 --- a/go/vt/tabletmanager/binlog_players_test.go +++ b/go/vt/tabletmanager/binlog_players_test.go @@ -38,7 +38,7 @@ import ( // // BinlogPlayerMap will create BinlogPlayerController objects // to talk to the source remote tablets. They will use the topology to -// find valid endpoints, so we have to update the EndPoints. +// find valid tablets, so we have to update the Tablets. // // We fake the communication between the BinlogPlayerController objects and // the remote tablets by registering our own binlogplayer.Client. @@ -70,9 +70,9 @@ func newFakeBinlogClient(t *testing.T, expectedDialUID uint32) *fakeBinlogClient } // Dial is part of the binlogplayer.Client interface -func (fbc *fakeBinlogClient) Dial(endPoint *topodatapb.EndPoint, connTimeout time.Duration) error { - if fbc.expectedDialUID != endPoint.Uid { - fbc.t.Errorf("fakeBinlogClient.Dial expected uid %v got %v", fbc.expectedDialUID, endPoint.Uid) +func (fbc *fakeBinlogClient) Dial(tablet *topodatapb.Tablet, connTimeout time.Duration) error { + if fbc.expectedDialUID != tablet.Alias.Uid { + fbc.t.Errorf("fakeBinlogClient.Dial expected uid %v got %v", fbc.expectedDialUID, tablet.Alias.Uid) } return nil } @@ -121,7 +121,7 @@ func (fbc *fakeBinlogClient) StreamKeyRange(ctx context.Context, position string // fakeTabletConn implement TabletConn interface. We only care about the // health check part. 
type fakeTabletConn struct { - endPoint *topodatapb.EndPoint + tablet *topodatapb.Tablet keyspace string shard string tabletType topodatapb.TabletType @@ -176,9 +176,9 @@ func (ftc *fakeTabletConn) SetTarget(keyspace, shard string, tabletType topodata return fmt.Errorf("not implemented in this test") } -// EndPoint is part of the TabletConn interface -func (ftc *fakeTabletConn) EndPoint() *topodatapb.EndPoint { - return ftc.endPoint +// Tablet is part of the TabletConn interface +func (ftc *fakeTabletConn) Tablet() *topodatapb.Tablet { + return ftc.tablet } // SplitQuery is part of the TabletConn interface @@ -269,9 +269,9 @@ func createSourceTablet(t *testing.T, name string, ts topo.Server, keyspace, sha // register a tablet conn dialer that will return the instance // we want - tabletconn.RegisterDialer(name, func(ctx context.Context, endPoint *topodatapb.EndPoint, k, s string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { + tabletconn.RegisterDialer(name, func(ctx context.Context, tablet *topodatapb.Tablet, k, s string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { return &fakeTabletConn{ - endPoint: endPoint, + tablet: tablet, keyspace: keyspace, shard: vshard, tabletType: topodatapb.TabletType_REPLICA, diff --git a/go/vt/tabletserver/grpctabletconn/conn.go b/go/vt/tabletserver/grpctabletconn/conn.go index 1f9ef0959c..57312db3db 100644 --- a/go/vt/tabletserver/grpctabletconn/conn.go +++ b/go/vt/tabletserver/grpctabletconn/conn.go @@ -41,8 +41,8 @@ func init() { // gRPCQueryClient implements a gRPC implementation for TabletConn type gRPCQueryClient struct { - // endPoint is set at construction time, and never changed - endPoint *topodatapb.EndPoint + // tablet is set at construction time, and never changed + tablet *topodatapb.Tablet // mu protects the next fields mu sync.RWMutex @@ -52,9 +52,9 @@ type gRPCQueryClient struct { } // DialTablet creates and initializes 
gRPCQueryClient. -func DialTablet(ctx context.Context, endPoint *topodatapb.EndPoint, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { +func DialTablet(ctx context.Context, tablet *topodatapb.Tablet, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { // create the RPC client - addr := netutil.JoinHostPort(endPoint.Host, endPoint.PortMap["grpc"]) + addr := netutil.JoinHostPort(tablet.Hostname, tablet.PortMap["grpc"]) opt, err := grpcutils.ClientSecureDialOption(*cert, *key, *ca, *name) if err != nil { return nil, err @@ -66,9 +66,9 @@ func DialTablet(ctx context.Context, endPoint *topodatapb.EndPoint, keyspace, sh c := queryservicepb.NewQueryClient(cc) result := &gRPCQueryClient{ - endPoint: endPoint, - cc: cc, - c: c, + tablet: tablet, + cc: cc, + c: c, target: &querypb.Target{ Keyspace: keyspace, Shard: shard, @@ -482,7 +482,7 @@ func (conn *gRPCQueryClient) SetTarget(keyspace, shard string, tabletType topoda return nil } -// EndPoint returns the rpc end point. -func (conn *gRPCQueryClient) EndPoint() *topodatapb.EndPoint { - return conn.endPoint +// Tablet returns the rpc end point. 
+func (conn *gRPCQueryClient) Tablet() *topodatapb.Tablet { + return conn.tablet } diff --git a/go/vt/tabletserver/grpctabletconn/conn_test.go b/go/vt/tabletserver/grpctabletconn/conn_test.go index 260dcab2f1..95e0b0855f 100644 --- a/go/vt/tabletserver/grpctabletconn/conn_test.go +++ b/go/vt/tabletserver/grpctabletconn/conn_test.go @@ -35,8 +35,8 @@ func TestGRPCTabletConn(t *testing.T) { go server.Serve(listener) // run the test suite - tabletconntest.TestSuite(t, protocolName, &topodatapb.EndPoint{ - Host: host, + tabletconntest.TestSuite(t, protocolName, &topodatapb.Tablet{ + Hostname: host, PortMap: map[string]int32{ "grpc": int32(port), }, @@ -45,8 +45,8 @@ func TestGRPCTabletConn(t *testing.T) { // run it again with combo enabled t.Log("Enabling combo Begin / Execute{,Batch}") *combo = true - tabletconntest.TestSuite(t, protocolName, &topodatapb.EndPoint{ - Host: host, + tabletconntest.TestSuite(t, protocolName, &topodatapb.Tablet{ + Hostname: host, PortMap: map[string]int32{ "grpc": int32(port), }, diff --git a/go/vt/tabletserver/tabletconn/tablet_conn.go b/go/vt/tabletserver/tabletconn/tablet_conn.go index 3ca45710cb..b9b5a6d635 100644 --- a/go/vt/tabletserver/tabletconn/tablet_conn.go +++ b/go/vt/tabletserver/tabletconn/tablet_conn.go @@ -67,7 +67,7 @@ type StreamHealthReader interface { // Use SetTarget to update them later. // If the TabletDialer is used for StreamHealth only, then keyspace, shard // and tabletType won't be used. -type TabletDialer func(ctx context.Context, endPoint *topodatapb.EndPoint, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (TabletConn, error) +type TabletDialer func(ctx context.Context, tablet *topodatapb.Tablet, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (TabletConn, error) // TabletConn defines the interface for a vttablet client. It should // be thread-safe, so it can be used concurrently used across goroutines. 
@@ -107,8 +107,8 @@ type TabletConn interface { // subsequent calls. SetTarget(keyspace, shard string, tabletType topodatapb.TabletType) error - // GetEndPoint returns the end point info. - EndPoint() *topodatapb.EndPoint + // Tablet returns the tablet info. + Tablet() *topodatapb.Tablet // SplitQuery splits a query into equally sized smaller queries by // appending primary key range clauses to the original query diff --git a/go/vt/tabletserver/tabletconntest/tabletconntest.go b/go/vt/tabletserver/tabletconntest/tabletconntest.go index a8b7ddd6eb..7fd0b0aa7e 100644 --- a/go/vt/tabletserver/tabletconntest/tabletconntest.go +++ b/go/vt/tabletserver/tabletconntest/tabletconntest.go @@ -988,7 +988,7 @@ func CreateFakeServer(t *testing.T) *FakeQueryService { } // TestSuite runs all the tests -func TestSuite(t *testing.T, protocol string, endPoint *topodatapb.EndPoint, fake *FakeQueryService) { +func TestSuite(t *testing.T, protocol string, tablet *topodatapb.Tablet, fake *FakeQueryService) { tests := []func(*testing.T, tabletconn.TabletConn, *FakeQueryService){ // positive test cases testBegin, @@ -1034,7 +1034,7 @@ func TestSuite(t *testing.T, protocol string, endPoint *topodatapb.EndPoint, fak // create a connection ctx := context.Background() - conn, err := tabletconn.GetDialer()(ctx, endPoint, testTarget.Keyspace, testTarget.Shard, topodatapb.TabletType_REPLICA, 30*time.Second) + conn, err := tabletconn.GetDialer()(ctx, tablet, testTarget.Keyspace, testTarget.Shard, topodatapb.TabletType_REPLICA, 30*time.Second) if err != nil { t.Fatalf("dial failed: %v", err) } diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index afce069439..1ff32fbdd1 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -138,6 +138,41 @@ func TabletComplete(tablet *topodatapb.Tablet) error { return nil } +// NewTablet create a new Tablet record with the given Hostname and id. 
+func NewTablet(uid uint32, host string) *topodatapb.Tablet { + return &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: uid, + }, + Hostname: host, + PortMap: make(map[string]int32), + } +} + +// TabletEquality returns true iff two Tablet are representing the same tablet +// process: same uid/cell, running on the same host / ports. +func TabletEquality(left, right *topodatapb.Tablet) bool { + if !topoproto.TabletAliasEqual(left.Alias, right.Alias) { + return false + } + if left.Hostname != right.Hostname { + return false + } + if len(left.PortMap) != len(right.PortMap) { + return false + } + for key, lvalue := range left.PortMap { + rvalue, ok := right.PortMap[key] + if !ok { + return false + } + if lvalue != rvalue { + return false + } + } + return true +} + // TabletInfo is the container for a Tablet, read from the topology server. type TabletInfo struct { version int64 // node version - used to prevent stomping concurrent writes diff --git a/go/vt/vtctl/query.go b/go/vt/vtctl/query.go index 83c4100181..d861afed5a 100644 --- a/go/vt/vtctl/query.go +++ b/go/vt/vtctl/query.go @@ -18,7 +18,6 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" - "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" "github.com/youtube/vitess/go/vt/wrangler" @@ -298,12 +297,8 @@ func commandVtTabletExecute(ctx context.Context, wr *wrangler.Wrangler, subFlags if err != nil { return err } - ep, err := topo.TabletEndPoint(tabletInfo.Tablet) - if err != nil { - return fmt.Errorf("cannot get EndPoint from tablet record: %v", err) - } - conn, err := tabletconn.GetDialer()(ctx, ep, *keyspace, *shard, tt, *connectTimeout) + conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, *keyspace, *shard, tt, *connectTimeout) if err != nil { return fmt.Errorf("cannot connect to tablet %v: %v", 
tabletAlias, err) } @@ -343,12 +338,8 @@ func commandVtTabletBegin(ctx context.Context, wr *wrangler.Wrangler, subFlags * if err != nil { return err } - ep, err := topo.TabletEndPoint(tabletInfo.Tablet) - if err != nil { - return fmt.Errorf("cannot get EndPoint from tablet record: %v", err) - } - conn, err := tabletconn.GetDialer()(ctx, ep, *keyspace, *shard, tt, *connectTimeout) + conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, *keyspace, *shard, tt, *connectTimeout) if err != nil { return fmt.Errorf("cannot connect to tablet %v: %v", tabletAlias, err) } @@ -391,12 +382,8 @@ func commandVtTabletCommit(ctx context.Context, wr *wrangler.Wrangler, subFlags if err != nil { return err } - ep, err := topo.TabletEndPoint(tabletInfo.Tablet) - if err != nil { - return fmt.Errorf("cannot get EndPoint from tablet record: %v", err) - } - conn, err := tabletconn.GetDialer()(ctx, ep, *keyspace, *shard, tt, *connectTimeout) + conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, *keyspace, *shard, tt, *connectTimeout) if err != nil { return fmt.Errorf("cannot connect to tablet %v: %v", tabletAlias, err) } @@ -432,12 +419,8 @@ func commandVtTabletRollback(ctx context.Context, wr *wrangler.Wrangler, subFlag if err != nil { return err } - ep, err := topo.TabletEndPoint(tabletInfo.Tablet) - if err != nil { - return fmt.Errorf("cannot get EndPoint from tablet record: %v", err) - } - conn, err := tabletconn.GetDialer()(ctx, ep, *keyspace, *shard, tt, *connectTimeout) + conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, *keyspace, *shard, tt, *connectTimeout) if err != nil { return fmt.Errorf("cannot connect to tablet %v: %v", tabletAlias, err) } @@ -464,13 +447,8 @@ func commandVtTabletStreamHealth(ctx context.Context, wr *wrangler.Wrangler, sub return err } - ep, err := topo.TabletEndPoint(tabletInfo.Tablet) - if err != nil { - return fmt.Errorf("cannot get EndPoint from tablet record: %v", err) - } - // tablet type is unused for StreamHealth, use UNKNOWN - 
conn, err := tabletconn.GetDialer()(ctx, ep, "", "", topodatapb.TabletType_UNKNOWN, *connectTimeout) + conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, "", "", topodatapb.TabletType_UNKNOWN, *connectTimeout) if err != nil { return fmt.Errorf("cannot connect to tablet %v: %v", tabletAlias, err) } diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index fb1dd2ccab..2c5d10fe2f 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -1394,10 +1394,6 @@ func commandWaitForFilteredReplication(ctx context.Context, wr *wrangler.Wrangle if err != nil { return err } - ep, err := topo.TabletEndPoint(tabletInfo.Tablet) - if err != nil { - return fmt.Errorf("cannot get EndPoint for master tablet record: %v record: %v", err, tabletInfo) - } // Always run an explicit healthcheck first to make sure we don't see any outdated values. // This is especially true for tests and automation where there is no pause of multiple seconds @@ -1407,7 +1403,7 @@ func commandWaitForFilteredReplication(ctx context.Context, wr *wrangler.Wrangle } // TabletType is unused for StreamHealth, use UNKNOWN - conn, err := tabletconn.GetDialer()(ctx, ep, "", "", topodatapb.TabletType_UNKNOWN, 30*time.Second) + conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, "", "", topodatapb.TabletType_UNKNOWN, 30*time.Second) if err != nil { return fmt.Errorf("cannot connect to tablet %v: %v", alias, err) } diff --git a/go/vt/vtctld/tablet_data.go b/go/vt/vtctld/tablet_data.go index 4f58c4b9fc..e97930976a 100644 --- a/go/vt/vtctld/tablet_data.go +++ b/go/vt/vtctld/tablet_data.go @@ -82,13 +82,9 @@ func (th *tabletHealth) stream(ctx context.Context, ts topo.Server, tabletAlias if err != nil { return err } - ep, err := topo.TabletEndPoint(ti.Tablet) - if err != nil { - return err - } // TabletType is unused for StreamHealth, use UNKNOWN - conn, err := tabletconn.GetDialer()(ctx, ep, "", "", topodatapb.TabletType_UNKNOWN, 30*time.Second) + conn, err := tabletconn.GetDialer()(ctx, 
ti.Tablet, "", "", topodatapb.TabletType_UNKNOWN, 30*time.Second) if err != nil { return err } diff --git a/go/vt/vtgate/discoverygateway.go b/go/vt/vtgate/discoverygateway.go index 49caacf322..a7af84a060 100644 --- a/go/vt/vtgate/discoverygateway.go +++ b/go/vt/vtgate/discoverygateway.go @@ -28,8 +28,8 @@ import ( ) var ( - cellsToWatch = flag.String("cells_to_watch", "", "comma-separated list of cells for watching endpoints") - refreshInterval = flag.Duration("endpoint_refresh_interval", 1*time.Minute, "endpoint refresh interval") + cellsToWatch = flag.String("cells_to_watch", "", "comma-separated list of cells for watching tablets") + refreshInterval = flag.Duration("tablet_refresh_interval", 1*time.Minute, "tablet refresh interval") topoReadConcurrency = flag.Int("topo_read_concurrency", 32, "concurrent topo reads") ) @@ -60,7 +60,7 @@ func createDiscoveryGateway(hc discovery.HealthCheck, topoServer topo.Server, se ctw := discovery.NewCellTabletsWatcher(dg.topoServer, dg.hc, c, *refreshInterval, *topoReadConcurrency) dg.tabletsWatchers = append(dg.tabletsWatchers, ctw) } - err := dg.waitForEndPoints() + err := dg.waitForTablets() if err != nil { log.Errorf("createDiscoveryGateway: %v", err) } @@ -78,27 +78,27 @@ type discoveryGateway struct { tabletsWatchers []*discovery.TopologyWatcher } -func (dg *discoveryGateway) waitForEndPoints() error { - // Skip waiting for endpoints if we are not told to do so. +func (dg *discoveryGateway) waitForTablets() error { + // Skip waiting for tablets if we are not told to do so. 
if len(dg.tabletTypesToWait) == 0 { return nil } - log.Infof("Waiting for endpoints") + log.Infof("Waiting for tablets") ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - err := discovery.WaitForAllServingEndPoints(ctx, dg.hc, dg.srvTopoServer, dg.localCell, dg.tabletTypesToWait) - if err == discovery.ErrWaitForEndPointsTimeout { + err := discovery.WaitForAllServingTablets(ctx, dg.hc, dg.srvTopoServer, dg.localCell, dg.tabletTypesToWait) + if err == discovery.ErrWaitForTabletsTimeout { // ignore this error, we will still start up, and may not serve - // all endpoints. - log.Warningf("Timeout when waiting for endpoints") + // all tablets. + log.Warningf("Timeout when waiting for tablets") err = nil } if err != nil { - log.Errorf("Error when waiting for endpoints: %v", err) + log.Errorf("Error when waiting for tablets: %v", err) return err } - log.Infof("Waiting for endpoints completed") + log.Infof("Waiting for tablets completed") return nil } @@ -231,13 +231,13 @@ func (dg *discoveryGateway) Close(ctx context.Context) error { return nil } -// CacheStatus returns a list of GatewayEndPointCacheStatus per endpoint. -func (dg *discoveryGateway) CacheStatus() GatewayEndPointCacheStatusList { +// CacheStatus returns a list of GatewayTabletCacheStatus per tablet. +func (dg *discoveryGateway) CacheStatus() GatewayTabletCacheStatusList { return nil } // StatsUpdate receives updates about target and realtime stats changes. -func (dg *discoveryGateway) StatsUpdate(*discovery.EndPointStats) { +func (dg *discoveryGateway) StatsUpdate(*discovery.TabletStats) { } // withRetry gets available connections and executes the action. If there are retryable errors, @@ -246,29 +246,29 @@ func (dg *discoveryGateway) StatsUpdate(*discovery.EndPointStats) { // a resharding event, and set the re-resolve bit and let the upper layers // re-resolve and retry. 
func (dg *discoveryGateway) withRetry(ctx context.Context, keyspace, shard string, tabletType topodatapb.TabletType, action func(conn tabletconn.TabletConn) error, transactionID int64, isStreaming bool) error { - var endPointLastUsed *topodatapb.EndPoint + var tabletLastUsed *topodatapb.Tablet var err error inTransaction := (transactionID != 0) - invalidEndPoints := make(map[string]bool) + invalidTablets := make(map[string]bool) for i := 0; i < dg.retryCount+1; i++ { - var endPoint *topodatapb.EndPoint - endPoints := dg.getEndPoints(keyspace, shard, tabletType) - if len(endPoints) == 0 { - // fail fast if there is no endpoint - err = vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("no valid endpoint")) + var tablet *topodatapb.Tablet + tablets := dg.getTablets(keyspace, shard, tabletType) + if len(tablets) == 0 { + // fail fast if there is no tablet + err = vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("no valid tablet")) break } - shuffleEndPoints(endPoints) + shuffleTablets(tablets) - // skip endpoints we tried before - for _, ep := range endPoints { - if _, ok := invalidEndPoints[discovery.EndPointToMapKey(ep)]; !ok { - endPoint = ep + // skip tablets we tried before + for _, ep := range tablets { + if _, ok := invalidTablets[discovery.TabletToMapKey(ep)]; !ok { + tablet = ep break } } - if endPoint == nil { + if tablet == nil { if err == nil { // do not override error from last attempt. 
err = vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("no available connection")) @@ -277,11 +277,11 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, keyspace, shard strin } // execute - endPointLastUsed = endPoint - conn := dg.hc.GetConnection(endPoint) + tabletLastUsed = tablet + conn := dg.hc.GetConnection(tablet) if conn == nil { - err = vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("no connection for %+v", endPoint)) - invalidEndPoints[discovery.EndPointToMapKey(endPoint)] = true + err = vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("no connection for %+v", tablet)) + invalidTablets[discovery.TabletToMapKey(tablet)] = true continue } @@ -292,12 +292,12 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, keyspace, shard strin err = action(conn) if dg.canRetry(ctx, err, transactionID, isStreaming) { - invalidEndPoints[discovery.EndPointToMapKey(endPoint)] = true + invalidTablets[discovery.TabletToMapKey(tablet)] = true continue } break } - return WrapError(err, keyspace, shard, tabletType, endPointLastUsed, inTransaction) + return WrapError(err, keyspace, shard, tabletType, tabletLastUsed, inTransaction) } // canRetry determines whether a query can be retried or not. 
@@ -340,42 +340,42 @@ func (dg *discoveryGateway) canRetry(ctx context.Context, err error, transaction return false } -func shuffleEndPoints(endPoints []*topodatapb.EndPoint) { +func shuffleTablets(tablets []*topodatapb.Tablet) { index := 0 - length := len(endPoints) + length := len(tablets) for i := length - 1; i > 0; i-- { index = rand.Intn(i + 1) - endPoints[i], endPoints[index] = endPoints[index], endPoints[i] + tablets[i], tablets[index] = tablets[index], tablets[i] } } -// getEndPoints gets all available endpoints from HealthCheck, +// getTablets gets all available tablets from HealthCheck, // and selects the usable ones based several rules: // master - return one from any cells with latest reparent timestamp; // replica - return all from local cell. // TODO(liang): select replica by replication lag. -func (dg *discoveryGateway) getEndPoints(keyspace, shard string, tabletType topodatapb.TabletType) []*topodatapb.EndPoint { - epsList := dg.hc.GetEndPointStatsFromTarget(keyspace, shard, tabletType) +func (dg *discoveryGateway) getTablets(keyspace, shard string, tabletType topodatapb.TabletType) []*topodatapb.Tablet { + epsList := dg.hc.GetTabletStatsFromTarget(keyspace, shard, tabletType) // for master, use any cells and return the one with max reparent timestamp. if tabletType == topodatapb.TabletType_MASTER { var maxTimestamp int64 - var ep *topodatapb.EndPoint + var ep *topodatapb.Tablet for _, eps := range epsList { if eps.LastError != nil || !eps.Serving { continue } if eps.TabletExternallyReparentedTimestamp >= maxTimestamp { maxTimestamp = eps.TabletExternallyReparentedTimestamp - ep = eps.EndPoint + ep = eps.Tablet } } if ep == nil { return nil } - return []*topodatapb.EndPoint{ep} + return []*topodatapb.Tablet{ep} } - // for non-master, use only endpoints from local cell and filter by replication lag. - list := make([]*discovery.EndPointStats, 0, len(epsList)) + // for non-master, use only tablets from local cell and filter by replication lag. 
+ list := make([]*discovery.TabletStats, 0, len(epsList)) for _, eps := range epsList { if eps.LastError != nil || !eps.Serving { continue @@ -386,9 +386,9 @@ func (dg *discoveryGateway) getEndPoints(keyspace, shard string, tabletType topo list = append(list, eps) } list = discovery.FilterByReplicationLag(list) - epList := make([]*topodatapb.EndPoint, 0, len(list)) + epList := make([]*topodatapb.Tablet, 0, len(list)) for _, eps := range list { - epList = append(epList, eps.EndPoint) + epList = append(epList, eps.Tablet) } return epList } @@ -397,16 +397,16 @@ func (dg *discoveryGateway) getEndPoints(keyspace, shard string, tabletType topo // adds the connection context // and adds a bit to determine whether the keyspace/shard needs to be // re-resolved for a potential sharding event. -func WrapError(in error, keyspace, shard string, tabletType topodatapb.TabletType, endPoint *topodatapb.EndPoint, inTransaction bool) (wrapped error) { +func WrapError(in error, keyspace, shard string, tabletType topodatapb.TabletType, tablet *topodatapb.Tablet, inTransaction bool) (wrapped error) { if in == nil { return nil } - shardIdentifier := fmt.Sprintf("%s.%s.%s, %+v", keyspace, shard, strings.ToLower(tabletType.String()), endPoint) + shardIdentifier := fmt.Sprintf("%s.%s.%s, %+v", keyspace, shard, strings.ToLower(tabletType.String()), tablet) return &ShardError{ ShardIdentifier: shardIdentifier, InTransaction: inTransaction, Err: in, - EndPointCode: vterrors.RecoverVtErrorCode(in), + ErrorCode: vterrors.RecoverVtErrorCode(in), } } diff --git a/go/vt/vtgate/discoverygateway_test.go b/go/vt/vtgate/discoverygateway_test.go index b924569de6..bd7df3d25b 100644 --- a/go/vt/vtgate/discoverygateway_test.go +++ b/go/vt/vtgate/discoverygateway_test.go @@ -80,7 +80,7 @@ func TestDiscoveryGatewayBeginExecuteBatch(t *testing.T) { }) } -func TestDiscoveryGatewayGetEndPoints(t *testing.T) { +func TestDiscoveryGatewayGetTablets(t *testing.T) { keyspace := "ks" shard := "0" hc := 
newFakeHealthCheck() @@ -88,19 +88,19 @@ func TestDiscoveryGatewayGetEndPoints(t *testing.T) { // replica should only use local ones hc.Reset() - hc.addTestEndPoint("remote", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil, nil) - ep1 := hc.addTestEndPoint("local", "2.2.2.2", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil, nil) - eps := dg.getEndPoints(keyspace, shard, topodatapb.TabletType_REPLICA) - if len(eps) != 1 || !topo.EndPointEquality(eps[0], ep1) { + hc.addTestTablet("remote", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil, nil) + ep1 := hc.addTestTablet("local", "2.2.2.2", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil, nil) + eps := dg.getTablets(keyspace, shard, topodatapb.TabletType_REPLICA) + if len(eps) != 1 || !topo.TabletEquality(eps[0], ep1) { t.Errorf("want %+v, got %+v", ep1, eps) } // master should use the one with newer timestamp regardless of cell hc.Reset() - hc.addTestEndPoint("remote", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_MASTER, true, 5, nil, nil) - ep1 = hc.addTestEndPoint("remote", "2.2.2.2", 1001, keyspace, shard, topodatapb.TabletType_MASTER, true, 10, nil, nil) - eps = dg.getEndPoints(keyspace, shard, topodatapb.TabletType_MASTER) - if len(eps) != 1 || !topo.EndPointEquality(eps[0], ep1) { + hc.addTestTablet("remote", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_MASTER, true, 5, nil, nil) + ep1 = hc.addTestTablet("remote", "2.2.2.2", 1001, keyspace, shard, topodatapb.TabletType_MASTER, true, 10, nil, nil) + eps = dg.getTablets(keyspace, shard, topodatapb.TabletType_MASTER) + if len(eps) != 1 || !topo.TabletEquality(eps[0], ep1) { t.Errorf("want %+v, got %+v", ep1, eps) } } @@ -112,29 +112,29 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway hc := newFakeHealthCheck() dg := createDiscoveryGateway(hc, topo.Server{}, nil, "cell", 2, nil) - // no endpoint + // no tablet 
hc.Reset() - want := "shard, host: ks.0.replica, , no valid endpoint" + want := "shard, host: ks.0.replica, , no valid tablet" err := f(dg, keyspace, shard, tabletType) verifyShardError(t, err, want, vtrpcpb.ErrorCode_INTERNAL_ERROR) if hc.GetStatsFromTargetCounter != 1 { t.Errorf("hc.GetStatsFromTargetCounter = %v; want 1", hc.GetStatsFromTargetCounter) } - // endpoint with error + // tablet with error hc.Reset() - hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, false, 10, fmt.Errorf("no connection"), nil) - want = "shard, host: ks.0.replica, , no valid endpoint" + hc.addTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, false, 10, fmt.Errorf("no connection"), nil) + want = "shard, host: ks.0.replica, , no valid tablet" err = f(dg, keyspace, shard, tabletType) verifyShardError(t, err, want, vtrpcpb.ErrorCode_INTERNAL_ERROR) if hc.GetStatsFromTargetCounter != 1 { t.Errorf("hc.GetStatsFromTargetCounter = %v; want 1", hc.GetStatsFromTargetCounter) } - // endpoint without connection + // tablet without connection hc.Reset() - ep1 := hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, false, 10, nil, nil) - want = fmt.Sprintf(`shard, host: ks.0.replica, , no valid endpoint`) + ep1 := hc.addTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, false, 10, nil, nil) + want = fmt.Sprintf(`shard, host: ks.0.replica, , no valid tablet`) err = f(dg, keyspace, shard, tabletType) verifyShardError(t, err, want, vtrpcpb.ErrorCode_INTERNAL_ERROR) if hc.GetStatsFromTargetCounter != 1 { @@ -143,8 +143,8 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway // retry error hc.Reset() - ep1 = hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailRetry: 1}) - ep2 := hc.addTestEndPoint("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailRetry: 1}) + ep1 = hc.addTestTablet("cell", "1.1.1.1", 1001, 
keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailRetry: 1}) + ep2 := hc.addTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailRetry: 1}) wants := map[string]int{ fmt.Sprintf(`shard, host: ks.0.replica, %+v, retry: err`, ep1): 0, fmt.Sprintf(`shard, host: ks.0.replica, %+v, retry: err`, ep2): 0, @@ -159,8 +159,8 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway // fatal error hc.Reset() - ep1 = hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailFatal: 1}) - ep2 = hc.addTestEndPoint("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailFatal: 1}) + ep1 = hc.addTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailFatal: 1}) + ep2 = hc.addTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailFatal: 1}) wants = map[string]int{ fmt.Sprintf(`shard, host: ks.0.replica, %+v, fatal: err`, ep1): 0, fmt.Sprintf(`shard, host: ks.0.replica, %+v, fatal: err`, ep2): 0, @@ -180,7 +180,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway // server error - no retry hc.Reset() - ep1 = hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailServer: 1}) + ep1 = hc.addTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailServer: 1}) want = fmt.Sprintf(`shard, host: ks.0.replica, %+v, error: err`, ep1) err = f(dg, keyspace, shard, tabletType) verifyShardError(t, err, want, vtrpcpb.ErrorCode_BAD_INPUT) @@ -190,7 +190,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway // conn error - no retry hc.Reset() - ep1 = hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailConn: 1}) + ep1 = 
hc.addTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailConn: 1}) want = fmt.Sprintf(`shard, host: ks.0.replica, %+v, error: conn`, ep1) err = f(dg, keyspace, shard, tabletType) verifyShardError(t, err, want, vtrpcpb.ErrorCode_UNKNOWN_ERROR) @@ -200,7 +200,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway // no failure hc.Reset() - hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{}) + hc.addTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{}) err = f(dg, keyspace, shard, tabletType) if err != nil { t.Errorf("want nil, got %v", err) @@ -219,8 +219,8 @@ func testDiscoveryGatewayTransact(t *testing.T, streaming bool, f func(dg Gatewa // retry error - no retry hc.Reset() - ep1 := hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailRetry: 1}) - ep2 := hc.addTestEndPoint("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailRetry: 1}) + ep1 := hc.addTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailRetry: 1}) + ep2 := hc.addTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailRetry: 1}) wants := map[string]int{ fmt.Sprintf(`shard, host: ks.0.replica, %+v, retry: err`, ep1): 0, fmt.Sprintf(`shard, host: ks.0.replica, %+v, retry: err`, ep2): 0, @@ -235,7 +235,7 @@ func testDiscoveryGatewayTransact(t *testing.T, streaming bool, f func(dg Gatewa // conn error - no retry hc.Reset() - ep1 = hc.addTestEndPoint("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailConn: 1}) + ep1 = hc.addTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil, &sandboxConn{mustFailConn: 1}) want := fmt.Sprintf(`shard, host: ks.0.replica, %+v, error: conn`, ep1) err = 
f(dg, keyspace, shard, tabletType) verifyShardError(t, err, want, vtrpcpb.ErrorCode_UNKNOWN_ERROR) diff --git a/go/vt/vtgate/fakehealthcheck_test.go b/go/vt/vtgate/fakehealthcheck_test.go index fffc23f583..4846d50437 100644 --- a/go/vt/vtgate/fakehealthcheck_test.go +++ b/go/vt/vtgate/fakehealthcheck_test.go @@ -14,7 +14,7 @@ func newFakeHealthCheck() *fakeHealthCheck { } type fhcItem struct { - eps *discovery.EndPointStats + ts *discovery.TabletStats conn tabletconn.TabletConn } @@ -22,9 +22,9 @@ type fhcItem struct { type fakeHealthCheck struct { items map[string]*fhcItem - // GetStatsFromTargetCounter counts GetEndpointStatsFromTarget() being called. + // GetStatsFromTargetCounter counts GetTabletStatsFromTarget() being called. GetStatsFromTargetCounter int - // GetStatsFromKeyspaceShardCounter counts GetEndPointStatsFromKeyspaceShard() being called. + // GetStatsFromKeyspaceShardCounter counts GetTabletStatsFromKeyspaceShard() being called. GetStatsFromKeyspaceShardCounter int } @@ -39,58 +39,58 @@ func (fhc *fakeHealthCheck) Reset() { func (fhc *fakeHealthCheck) SetListener(listener discovery.HealthCheckStatsListener) { } -// AddEndPoint adds the endpoint. -func (fhc *fakeHealthCheck) AddEndPoint(cell, name string, endPoint *topodatapb.EndPoint) { - key := discovery.EndPointToMapKey(endPoint) +// AddTablet adds the tablet. +func (fhc *fakeHealthCheck) AddTablet(cell, name string, tablet *topodatapb.Tablet) { + key := discovery.TabletToMapKey(tablet) item := &fhcItem{ - eps: &discovery.EndPointStats{ - EndPoint: endPoint, - Cell: cell, - Name: name, + ts: &discovery.TabletStats{ + Tablet: tablet, + Cell: cell, + Name: name, }, } fhc.items[key] = item } -// RemoveEndPoint removes the endpoint. -func (fhc *fakeHealthCheck) RemoveEndPoint(endPoint *topodatapb.EndPoint) { - key := discovery.EndPointToMapKey(endPoint) +// RemoveTablet removes the tablet. 
+func (fhc *fakeHealthCheck) RemoveTablet(tablet *topodatapb.Tablet) { + key := discovery.TabletToMapKey(tablet) delete(fhc.items, key) } -// GetEndPointStatsFromKeyspaceShard returns all EndPointStats for the given keyspace/shard. -func (fhc *fakeHealthCheck) GetEndPointStatsFromKeyspaceShard(keyspace, shard string) []*discovery.EndPointStats { +// GetTabletStatsFromKeyspaceShard returns all TabletStats for the given keyspace/shard. +func (fhc *fakeHealthCheck) GetTabletStatsFromKeyspaceShard(keyspace, shard string) []*discovery.TabletStats { fhc.GetStatsFromKeyspaceShardCounter++ - var res []*discovery.EndPointStats + var res []*discovery.TabletStats for _, item := range fhc.items { - if item.eps.Target == nil { + if item.ts.Target == nil { continue } - if item.eps.Target.Keyspace == keyspace && item.eps.Target.Shard == shard { - res = append(res, item.eps) + if item.ts.Target.Keyspace == keyspace && item.ts.Target.Shard == shard { + res = append(res, item.ts) } } return res } -// GetEndPointStatsFromTarget returns all EndPointStats for the given target. -func (fhc *fakeHealthCheck) GetEndPointStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*discovery.EndPointStats { +// GetTabletStatsFromTarget returns all TabletStats for the given target. 
+func (fhc *fakeHealthCheck) GetTabletStatsFromTarget(keyspace, shard string, tabletType topodatapb.TabletType) []*discovery.TabletStats { fhc.GetStatsFromTargetCounter++ - var res []*discovery.EndPointStats + var res []*discovery.TabletStats for _, item := range fhc.items { - if item.eps.Target == nil { + if item.ts.Target == nil { continue } - if item.eps.Target.Keyspace == keyspace && item.eps.Target.Shard == shard && item.eps.Target.TabletType == tabletType { - res = append(res, item.eps) + if item.ts.Target.Keyspace == keyspace && item.ts.Target.Shard == shard && item.ts.Target.TabletType == tabletType { + res = append(res, item.ts) } } return res } -// GetConnection returns the TabletConn of the given endpoint. -func (fhc *fakeHealthCheck) GetConnection(endPoint *topodatapb.EndPoint) tabletconn.TabletConn { - key := discovery.EndPointToMapKey(endPoint) +// GetConnection returns the TabletConn of the given tablet. +func (fhc *fakeHealthCheck) GetConnection(tablet *topodatapb.Tablet) tabletconn.TabletConn { + key := discovery.TabletToMapKey(tablet) if item := fhc.items[key]; item != nil { return item.conn } @@ -98,7 +98,7 @@ func (fhc *fakeHealthCheck) GetConnection(endPoint *topodatapb.EndPoint) tabletc } // CacheStatus returns a displayable version of the cache. -func (fhc *fakeHealthCheck) CacheStatus() discovery.EndPointsCacheStatusList { +func (fhc *fakeHealthCheck) CacheStatus() discovery.TabletsCacheStatusList { return nil } @@ -107,24 +107,24 @@ func (fhc *fakeHealthCheck) Close() error { return nil } -// addTestEndPoint inserts a fake entry into fakeHealthCheck. -func (fhc *fakeHealthCheck) addTestEndPoint(cell, host string, port int32, keyspace, shard string, tabletType topodatapb.TabletType, serving bool, reparentTS int64, err error, conn tabletconn.TabletConn) *topodatapb.EndPoint { +// addTestTablet inserts a fake entry into fakeHealthCheck. 
+func (fhc *fakeHealthCheck) addTestTablet(cell, host string, port int32, keyspace, shard string, tabletType topodatapb.TabletType, serving bool, reparentTS int64, err error, conn tabletconn.TabletConn) *topodatapb.Tablet { if conn != nil { conn.SetTarget(keyspace, shard, tabletType) } - ep := topo.NewEndPoint(0, host) + ep := topo.NewTablet(0, host) ep.PortMap["vt"] = port - key := discovery.EndPointToMapKey(ep) + key := discovery.TabletToMapKey(ep) item := fhc.items[key] if item == nil { - fhc.AddEndPoint(cell, "", ep) + fhc.AddTablet(cell, "", ep) item = fhc.items[key] } - item.eps.Target = &querypb.Target{Keyspace: keyspace, Shard: shard, TabletType: tabletType} - item.eps.Serving = serving - item.eps.TabletExternallyReparentedTimestamp = reparentTS - item.eps.Stats = &querypb.RealtimeStats{} - item.eps.LastError = err + item.ts.Target = &querypb.Target{Keyspace: keyspace, Shard: shard, TabletType: tabletType} + item.ts.Serving = serving + item.ts.TabletExternallyReparentedTimestamp = reparentTS + item.ts.Stats = &querypb.RealtimeStats{} + item.ts.LastError = err item.conn = conn return ep } diff --git a/go/vt/vtgate/gateway.go b/go/vt/vtgate/gateway.go index 91fb155175..bff0a91df5 100644 --- a/go/vt/vtgate/gateway.go +++ b/go/vt/vtgate/gateway.go @@ -80,8 +80,8 @@ type Gateway interface { // Close shuts down underlying connections. Close(ctx context.Context) error - // CacheStatus returns a list of GatewayEndPointCacheStatus per endpoint. - CacheStatus() GatewayEndPointCacheStatusList + // CacheStatus returns a list of GatewayTabletCacheStatus per tablet. + CacheStatus() GatewayTabletCacheStatusList } // GatewayCreator is the func which can create the actual gateway object. @@ -125,8 +125,8 @@ type ShardError struct { InTransaction bool // Err preserves the original error, so that we don't need to parse the error string. 
Err error - // EndPointCode is the error code to use for all the endpoint errors in aggregate - EndPointCode vtrpcpb.ErrorCode + // ErrorCode is the error code to use for all the tablet errors in aggregate + ErrorCode vtrpcpb.ErrorCode } // Error returns the error string. @@ -140,31 +140,31 @@ func (e *ShardError) Error() string { // VtErrorCode returns the underlying Vitess error code. // This is part of vterrors.VtError interface. func (e *ShardError) VtErrorCode() vtrpcpb.ErrorCode { - return e.EndPointCode + return e.ErrorCode } -// GatewayEndPointCacheStatusList is a slice of GatewayEndPointCacheStatus. -type GatewayEndPointCacheStatusList []*GatewayEndPointCacheStatus +// GatewayTabletCacheStatusList is a slice of GatewayTabletCacheStatus. +type GatewayTabletCacheStatusList []*GatewayTabletCacheStatus // Len is part of sort.Interface. -func (gepcsl GatewayEndPointCacheStatusList) Len() int { +func (gepcsl GatewayTabletCacheStatusList) Len() int { return len(gepcsl) } // Less is part of sort.Interface. -func (gepcsl GatewayEndPointCacheStatusList) Less(i, j int) bool { +func (gepcsl GatewayTabletCacheStatusList) Less(i, j int) bool { iKey := strings.Join([]string{gepcsl[i].Keyspace, gepcsl[i].Shard, string(gepcsl[i].TabletType), gepcsl[i].Name}, ".") jKey := strings.Join([]string{gepcsl[j].Keyspace, gepcsl[j].Shard, string(gepcsl[j].TabletType), gepcsl[j].Name}, ".") return iKey < jKey } // Swap is part of sort.Interface. -func (gepcsl GatewayEndPointCacheStatusList) Swap(i, j int) { +func (gepcsl GatewayTabletCacheStatusList) Swap(i, j int) { gepcsl[i], gepcsl[j] = gepcsl[j], gepcsl[i] } -// GatewayEndPointCacheStatus contains the status per endpoint for a gateway. -type GatewayEndPointCacheStatus struct { +// GatewayTabletCacheStatus contains the status per tablet for a gateway. +type GatewayTabletCacheStatus struct { Keyspace string Shard string TabletType topodatapb.TabletType @@ -187,7 +187,7 @@ var ( // muAggr protects below vars. 
muAggr sync.Mutex // aggregators holds all Aggregators created. - aggregators []*GatewayEndPointStatusAggregator + aggregators []*GatewayTabletStatusAggregator // gatewayStatsChanFull tracks the number of times // aggrChan becomes full. gatewayStatsChanFull *stats.Int @@ -202,7 +202,7 @@ func init() { } // registerAggregator registers an aggregator to the global list. -func registerAggregator(a *GatewayEndPointStatusAggregator) { +func registerAggregator(a *GatewayTabletStatusAggregator) { muAggr.Lock() defer muAggr.Unlock() aggregators = append(aggregators, a) @@ -227,20 +227,20 @@ func processQueryInfo() { } } -// NewGatewayEndPointStatusAggregator creates a GatewayEndPointStatusAggregator. -func NewGatewayEndPointStatusAggregator() *GatewayEndPointStatusAggregator { - gepsa := &GatewayEndPointStatusAggregator{} +// NewGatewayTabletStatusAggregator creates a GatewayTabletStatusAggregator. +func NewGatewayTabletStatusAggregator() *GatewayTabletStatusAggregator { + gepsa := &GatewayTabletStatusAggregator{} registerAggregator(gepsa) return gepsa } -// GatewayEndPointStatusAggregator tracks endpoint status for a gateway. -type GatewayEndPointStatusAggregator struct { +// GatewayTabletStatusAggregator tracks tablet status for a gateway. +type GatewayTabletStatusAggregator struct { Keyspace string Shard string TabletType topodatapb.TabletType - Name string // the alternative name of an endpoint - Addr string // the host:port of an endpoint + Name string // the alternative name of a tablet + Addr string // the host:port of a tablet // mu protects below fields. mu sync.RWMutex @@ -253,7 +253,7 @@ type GatewayEndPointStatusAggregator struct { } type queryInfo struct { - aggr *GatewayEndPointStatusAggregator + aggr *GatewayTabletStatusAggregator addr string tabletType topodatapb.TabletType elapsed time.Duration @@ -261,7 +261,7 @@ type queryInfo struct { } // UpdateQueryInfo updates the aggregator with the given information about a query. 
-func (gepsa *GatewayEndPointStatusAggregator) UpdateQueryInfo(addr string, tabletType topodatapb.TabletType, elapsed time.Duration, hasError bool) { +func (gepsa *GatewayTabletStatusAggregator) UpdateQueryInfo(addr string, tabletType topodatapb.TabletType, elapsed time.Duration, hasError bool) { qi := &queryInfo{ aggr: gepsa, addr: addr, @@ -276,7 +276,7 @@ func (gepsa *GatewayEndPointStatusAggregator) UpdateQueryInfo(addr string, table } } -func (gepsa *GatewayEndPointStatusAggregator) processQueryInfo(qi *queryInfo) { +func (gepsa *GatewayTabletStatusAggregator) processQueryInfo(qi *queryInfo) { gepsa.mu.Lock() defer gepsa.mu.Unlock() if gepsa.TabletType != qi.tabletType { @@ -302,9 +302,9 @@ func (gepsa *GatewayEndPointStatusAggregator) processQueryInfo(qi *queryInfo) { } } -// GetCacheStatus returns a GatewayEndPointCacheStatus representing the current gateway status. -func (gepsa *GatewayEndPointStatusAggregator) GetCacheStatus() *GatewayEndPointCacheStatus { - status := &GatewayEndPointCacheStatus{ +// GetCacheStatus returns a GatewayTabletCacheStatus representing the current gateway status. +func (gepsa *GatewayTabletStatusAggregator) GetCacheStatus() *GatewayTabletCacheStatus { + status := &GatewayTabletCacheStatus{ Keyspace: gepsa.Keyspace, Shard: gepsa.Shard, Name: gepsa.Name, @@ -331,7 +331,7 @@ func (gepsa *GatewayEndPointStatusAggregator) GetCacheStatus() *GatewayEndPointC } // resetNextSlot resets the next tracking slot. 
-func (gepsa *GatewayEndPointStatusAggregator) resetNextSlot() { +func (gepsa *GatewayTabletStatusAggregator) resetNextSlot() { gepsa.mu.Lock() defer gepsa.mu.Unlock() gepsa.tick = (gepsa.tick + 1) % 60 diff --git a/go/vt/vtgate/gateway_test.go b/go/vt/vtgate/gateway_test.go index 17c6aaa6c2..bc44e53de7 100644 --- a/go/vt/vtgate/gateway_test.go +++ b/go/vt/vtgate/gateway_test.go @@ -12,15 +12,15 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) -func TestGatwayEndPointStatusAggregator(t *testing.T) { - aggr := &GatewayEndPointStatusAggregator{ +func TestGatwayTabletStatusAggregator(t *testing.T) { + aggr := &GatewayTabletStatusAggregator{ Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA, Name: "n", Addr: "a", } - t.Logf("aggr = GatwayEndPointStatusAggregator{k, s, replica, n, a}") + t.Logf("aggr = GatwayTabletStatusAggregator{k, s, replica, n, a}") qi := &queryInfo{ aggr: aggr, addr: "", @@ -50,7 +50,7 @@ func TestGatwayEndPointStatusAggregator(t *testing.T) { } aggr.processQueryInfo(qi) t.Logf("aggr.processQueryInfo(, replica, 3ms, true)") - want := &GatewayEndPointCacheStatus{ + want := &GatewayTabletCacheStatus{ Keyspace: "k", Shard: "s", Name: "n", @@ -88,7 +88,7 @@ func TestGatwayEndPointStatusAggregator(t *testing.T) { } aggr.processQueryInfo(qi) t.Logf("aggr.processQueryInfo(, master, 6ms, true)") - want = &GatewayEndPointCacheStatus{ + want = &GatewayTabletCacheStatus{ Keyspace: "k", Shard: "s", Name: "n", diff --git a/go/vt/vtgate/resolver.go b/go/vt/vtgate/resolver.go index 76303557e5..e5742e9e54 100644 --- a/go/vt/vtgate/resolver.go +++ b/go/vt/vtgate/resolver.go @@ -62,7 +62,7 @@ func isRetryableError(err error) bool { case *ScatterConnError: return e.Retryable case *ShardError: - return e.EndPointCode == vtrpcpb.ErrorCode_QUERY_NOT_SERVED + return e.ErrorCode == vtrpcpb.ErrorCode_QUERY_NOT_SERVED default: return false } @@ -364,7 +364,7 @@ func (res *Resolver) Rollback(ctx context.Context, inSession 
*vtgatepb.Session) } // GetGatewayCacheStatus returns a displayable version of the Gateway cache. -func (res *Resolver) GetGatewayCacheStatus() GatewayEndPointCacheStatusList { +func (res *Resolver) GetGatewayCacheStatus() GatewayTabletCacheStatusList { return res.scatterConn.GetGatewayCacheStatus() } diff --git a/go/vt/vtgate/resolver_test.go b/go/vt/vtgate/resolver_test.go index 741aab68bd..fc838f65c7 100644 --- a/go/vt/vtgate/resolver_test.go +++ b/go/vt/vtgate/resolver_test.go @@ -182,8 +182,8 @@ func testResolverGeneric(t *testing.T, name string, action func(hc discovery.Hea sbc0 := &sandboxConn{} sbc1 := &sandboxConn{} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) _, err := action(hc) if err != nil { @@ -201,11 +201,11 @@ func testResolverGeneric(t *testing.T, name string, action func(hc discovery.Hea sbc0 = &sandboxConn{mustFailServer: 1} sbc1 = &sandboxConn{mustFailRetry: 1} hc.Reset() - hc.addTestEndPoint("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) _, err = action(hc) - want1 := fmt.Sprintf("shard, host: %s.-20.master, host:\"-20\" port_map: , error: err", name) - want2 := fmt.Sprintf("shard, host: %s.20-40.master, host:\"20-40\" port_map: , retry: err", name) + want1 := fmt.Sprintf("shard, host: %s.-20.master, alias:<> 
hostname:\"-20\" port_map: , error: err", name) + want2 := fmt.Sprintf("shard, host: %s.20-40.master, alias:<> hostname:\"20-40\" port_map: , retry: err", name) want := []string{want1, want2} sort.Strings(want) if err == nil { @@ -234,11 +234,11 @@ func testResolverGeneric(t *testing.T, name string, action func(hc discovery.Hea sbc0 = &sandboxConn{mustFailRetry: 1} sbc1 = &sandboxConn{mustFailFatal: 1} hc.Reset() - hc.addTestEndPoint("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) _, err = action(hc) - want1 = fmt.Sprintf("shard, host: %s.-20.master, host:\"-20\" port_map: , retry: err", name) - want2 = fmt.Sprintf("shard, host: %s.20-40.master, host:\"20-40\" port_map: , fatal: err", name) + want1 = fmt.Sprintf("shard, host: %s.-20.master, alias:<> hostname:\"-20\" port_map: , retry: err", name) + want2 = fmt.Sprintf("shard, host: %s.20-40.master, alias:<> hostname:\"20-40\" port_map: , fatal: err", name) want = []string{want1, want2} sort.Strings(want) if err == nil { @@ -268,12 +268,12 @@ func testResolverGeneric(t *testing.T, name string, action func(hc discovery.Hea sbc0 = &sandboxConn{} sbc1 = &sandboxConn{} hc.Reset() - hc.addTestEndPoint("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) s0 := createSandbox(name + "ServedFrom0") // make sure we have a fresh copy s0.ShardSpec = 
"-80-" sbc2 := &sandboxConn{} - hc.addTestEndPoint("aa", "1.1.1.1", 1003, name+"ServedFrom0", "-80", topodatapb.TabletType_MASTER, true, 1, nil, sbc2) + hc.addTestTablet("aa", "1.1.1.1", 1003, name+"ServedFrom0", "-80", topodatapb.TabletType_MASTER, true, 1, nil, sbc2) _, err = action(hc) if err != nil { t.Errorf("want nil, got %v", err) @@ -303,15 +303,15 @@ func testResolverGeneric(t *testing.T, name string, action func(hc discovery.Hea sbc0 = &sandboxConn{} sbc1 = &sandboxConn{mustFailFatal: 1} hc.Reset() - hc.addTestEndPoint("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) i := 0 s.SrvKeyspaceCallback = func() { if i == 1 { addSandboxServedFrom(name, name+"ServedFrom") hc.Reset() - hc.addTestEndPoint("aa", "1.1.1.1", 1001, name+"ServedFrom", "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1.1.1.1", 1002, name+"ServedFrom", "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "1.1.1.1", 1001, name+"ServedFrom", "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1.1.1.1", 1002, name+"ServedFrom", "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) } i++ } @@ -336,15 +336,15 @@ func testResolverGeneric(t *testing.T, name string, action func(hc discovery.Hea sbc0 = &sandboxConn{} sbc1 = &sandboxConn{mustFailRetry: 1} hc.Reset() - hc.addTestEndPoint("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "1.1.1.1", 1001, name, "-20", 
topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) i = 0 s.SrvKeyspaceCallback = func() { if i == 1 { s.ShardSpec = "-20-30-40-60-80-a0-c0-e0-" hc.Reset() - hc.addTestEndPoint("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1.1.1.1", 1002, name, "20-30", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1.1.1.1", 1002, name, "20-30", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) } i++ } @@ -371,8 +371,8 @@ func testResolverStreamGeneric(t *testing.T, name string, action func(hc discove sbc0 := &sandboxConn{} sbc1 := &sandboxConn{} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) _, err := action(hc) if err != nil { t.Errorf("want nil, got %v", err) @@ -386,10 +386,10 @@ func testResolverStreamGeneric(t *testing.T, name string, action func(hc discove sbc0 = &sandboxConn{mustFailRetry: 1} sbc1 = &sandboxConn{} hc.Reset() - hc.addTestEndPoint("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) _, err = action(hc) - want := fmt.Sprintf("shard, 
host: %s.-20.master, host:\"-20\" port_map: , retry: err", name) + want := fmt.Sprintf("shard, host: %s.-20.master, alias:<> hostname:\"-20\" port_map: , retry: err", name) if err == nil || err.Error() != want { t.Errorf("want\n%s\ngot\n%v", want, err) } @@ -467,8 +467,8 @@ func TestResolverDmlOnMultipleKeyspaceIds(t *testing.T) { sbc0 := &sandboxConn{} sbc1 := &sandboxConn{} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "1.1.1.1", 1001, keyspace, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1.1.1.1", 1002, keyspace, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet("aa", "1.1.1.1", 1001, keyspace, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1.1.1.1", 1002, keyspace, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) res := NewResolver(hc, topo.Server{}, new(sandboxTopo), "", "aa", 0, nil) errStr := "DML should not span multiple keyspace_ids" @@ -490,7 +490,7 @@ func TestResolverExecBatchReresolve(t *testing.T) { createSandbox(keyspace) sbc := &sandboxConn{mustFailRetry: 20} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "0", 1, keyspace, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hc.addTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbc) res := NewResolver(hc, topo.Server{}, new(sandboxTopo), "", "aa", 0, nil) @@ -509,7 +509,7 @@ func TestResolverExecBatchReresolve(t *testing.T) { } _, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, false, nil, buildBatchRequest) - want := "shard, host: TestResolverExecBatchReresolve.0.master, host:\"0\" port_map: , retry: err" + want := "shard, host: TestResolverExecBatchReresolve.0.master, alias:<> hostname:\"0\" port_map: , retry: err" if err == nil || err.Error() != want { t.Errorf("want %s, got %v", want, err) } @@ -527,7 +527,7 @@ func TestResolverExecBatchAsTransaction(t *testing.T) { createSandbox(keyspace) sbc := 
&sandboxConn{mustFailRetry: 20} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "0", 1, keyspace, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hc.addTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbc) res := NewResolver(hc, topo.Server{}, new(sandboxTopo), "", "aa", 0, nil) @@ -546,7 +546,7 @@ func TestResolverExecBatchAsTransaction(t *testing.T) { } _, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, true, nil, buildBatchRequest) - want := "shard, host: TestResolverExecBatchAsTransaction.0.master, host:\"0\" port_map: , retry: err" + want := "shard, host: TestResolverExecBatchAsTransaction.0.master, alias:<> hostname:\"0\" port_map: , retry: err" if err == nil || err.Error() != want { t.Errorf("want %v, got %v", want, err) } @@ -568,8 +568,8 @@ func TestIsRetryableError(t *testing.T) { {fmt.Errorf("generic error"), false}, {&ScatterConnError{Retryable: true}, true}, {&ScatterConnError{Retryable: false}, false}, - {&ShardError{EndPointCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED}, true}, - {&ShardError{EndPointCode: vtrpcpb.ErrorCode_INTERNAL_ERROR}, false}, + {&ShardError{ErrorCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED}, true}, + {&ShardError{ErrorCode: vtrpcpb.ErrorCode_INTERNAL_ERROR}, false}, // tabletconn.ServerError will not come directly here, // they'll be wrapped in ScatterConnError or ShardConnError. // So they can't be retried as is. 
diff --git a/go/vt/vtgate/router_framework_test.go b/go/vt/vtgate/router_framework_test.go index 4911002ce1..bb1c2efca8 100644 --- a/go/vt/vtgate/router_framework_test.go +++ b/go/vt/vtgate/router_framework_test.go @@ -161,12 +161,12 @@ func createRouterEnv() (router *Router, sbc1, sbc2, sbclookup *sandboxConn) { s.VSchema = routerVSchema sbc1 = &sandboxConn{} sbc2 = &sandboxConn{} - hc.addTestEndPoint(cell, "-20", 1, "TestRouter", "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) - hc.addTestEndPoint(cell, "40-60", 1, "TestRouter", "40-60", topodatapb.TabletType_MASTER, true, 1, nil, sbc2) + hc.addTestTablet(cell, "-20", 1, "TestRouter", "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hc.addTestTablet(cell, "40-60", 1, "TestRouter", "40-60", topodatapb.TabletType_MASTER, true, 1, nil, sbc2) createSandbox(KsTestUnsharded) sbclookup = &sandboxConn{} - hc.addTestEndPoint(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbclookup) + hc.addTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbclookup) bad := createSandbox("TestBadSharding") bad.VSchema = badVSchema diff --git a/go/vt/vtgate/router_select_test.go b/go/vt/vtgate/router_select_test.go index 98d54ecec8..5800d3da01 100644 --- a/go/vt/vtgate/router_select_test.go +++ b/go/vt/vtgate/router_select_test.go @@ -605,7 +605,7 @@ func TestSelectScatter(t *testing.T) { for _, shard := range shards { sbc := &sandboxConn{} conns = append(conns, sbc) - hc.addTestEndPoint(cell, shard, 1, "TestRouter", shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hc.addTestTablet(cell, shard, 1, "TestRouter", shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) } serv := new(sandboxTopo) scatterConn := NewScatterConn(hc, topo.Server{}, serv, "", cell, 10, nil) @@ -638,7 +638,7 @@ func TestStreamSelectScatter(t *testing.T) { for _, shard := range shards { sbc := &sandboxConn{} conns = append(conns, sbc) - hc.addTestEndPoint(cell, shard, 
1, "TestRouter", shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hc.addTestTablet(cell, shard, 1, "TestRouter", shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) } serv := new(sandboxTopo) scatterConn := NewScatterConn(hc, topo.Server{}, serv, "", cell, 10, nil) @@ -681,7 +681,7 @@ func TestSelectScatterFail(t *testing.T) { for _, shard := range shards { sbc := &sandboxConn{} conns = append(conns, sbc) - hc.addTestEndPoint(cell, shard, 1, "TestRouter", shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hc.addTestTablet(cell, shard, 1, "TestRouter", shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) } serv := new(sandboxTopo) scatterConn := NewScatterConn(hc, topo.Server{}, serv, "", cell, 10, nil) diff --git a/go/vt/vtgate/sandbox_test.go b/go/vt/vtgate/sandbox_test.go index 859a829831..6e9d5c54df 100644 --- a/go/vt/vtgate/sandbox_test.go +++ b/go/vt/vtgate/sandbox_test.go @@ -254,7 +254,7 @@ func (sct *sandboxTopo) GetSrvShard(ctx context.Context, cell, keyspace, shard s return nil, fmt.Errorf("Unsupported") } -func sandboxDialer(ctx context.Context, endPoint *topodatapb.EndPoint, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { +func sandboxDialer(ctx context.Context, tablet *topodatapb.Tablet, keyspace, shard string, tabletType topodatapb.TabletType, timeout time.Duration) (tabletconn.TabletConn, error) { sand := getSandbox(keyspace) sand.sandmu.Lock() defer sand.sandmu.Unlock() @@ -269,7 +269,7 @@ func sandboxDialer(ctx context.Context, endPoint *topodatapb.EndPoint, keyspace, return nil, tabletconn.OperationalError(fmt.Sprintf("conn unreachable")) } sbc := &sandboxConn{} - sbc.endPoint = endPoint + sbc.tablet = tablet sbc.SetTarget(keyspace, shard, tabletType) return sbc, nil } @@ -279,7 +279,7 @@ type sandboxConn struct { keyspace string shard string tabletType topodatapb.TabletType - endPoint *topodatapb.EndPoint + tablet *topodatapb.Tablet mustFailRetry int 
mustFailFatal int @@ -512,8 +512,8 @@ func (sbc *sandboxConn) SetTarget(keyspace, shard string, tabletType topodatapb. return nil } -func (sbc *sandboxConn) EndPoint() *topodatapb.EndPoint { - return sbc.endPoint +func (sbc *sandboxConn) Tablet() *topodatapb.Tablet { + return sbc.tablet } func (sbc *sandboxConn) getNextResult() *sqltypes.Result { diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index afef2ec8a1..19ad11ce15 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -731,7 +731,7 @@ func (stc *ScatterConn) Close() error { } // GetGatewayCacheStatus returns a displayable version of the Gateway cache. -func (stc *ScatterConn) GetGatewayCacheStatus() GatewayEndPointCacheStatusList { +func (stc *ScatterConn) GetGatewayCacheStatus() GatewayTabletCacheStatusList { return stc.gateway.CacheStatus() } @@ -762,7 +762,7 @@ func (stc *ScatterConn) aggregateErrors(errors []error) error { allRetryableError := true for _, e := range errors { connError, ok := e.(*ShardError) - if !ok || (connError.EndPointCode != vtrpcpb.ErrorCode_QUERY_NOT_SERVED && connError.EndPointCode != vtrpcpb.ErrorCode_INTERNAL_ERROR) || connError.InTransaction { + if !ok || (connError.ErrorCode != vtrpcpb.ErrorCode_QUERY_NOT_SERVED && connError.ErrorCode != vtrpcpb.ErrorCode_INTERNAL_ERROR) || connError.InTransaction { allRetryableError = false break } diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 9369ed0bdd..3cc65c7d4e 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -135,9 +135,9 @@ func testScatterConnGeneric(t *testing.T, name string, f func(hc discovery.Healt // single shard s.Reset() sbc := &sandboxConn{mustFailServer: 1} - hc.addTestEndPoint("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc) + hc.addTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc) qr, err = f(hc, []string{"0"}) - want := 
fmt.Sprintf("shard, host: %v.0.replica, host:\"0\" port_map: , error: err", name) + want := fmt.Sprintf("shard, host: %v.0.replica, alias:<> hostname:\"0\" port_map: , error: err", name) // Verify server error string. if err == nil || err.Error() != want { t.Errorf("want %s, got %v", want, err) @@ -152,11 +152,11 @@ func testScatterConnGeneric(t *testing.T, name string, f func(hc discovery.Healt sbc0 := &sandboxConn{mustFailServer: 1} sbc1 := &sandboxConn{mustFailServer: 1} hc.Reset() - hc.addTestEndPoint("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + hc.addTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) _, err = f(hc, []string{"0", "1"}) // Verify server errors are consolidated. - want = fmt.Sprintf("shard, host: %v.0.replica, host:\"0\" port_map: , error: err\nshard, host: %v.1.replica, host:\"1\" port_map: , error: err", name, name) + want = fmt.Sprintf("shard, host: %v.0.replica, alias:<> hostname:\"0\" port_map: , error: err\nshard, host: %v.1.replica, alias:<> hostname:\"1\" port_map: , error: err", name, name) verifyScatterConnError(t, err, want, vtrpcpb.ErrorCode_BAD_INPUT) // Ensure that we tried only once. 
if execCount := sbc0.ExecCount.Get(); execCount != 1 { @@ -171,11 +171,11 @@ func testScatterConnGeneric(t *testing.T, name string, f func(hc discovery.Healt sbc0 = &sandboxConn{mustFailServer: 1} sbc1 = &sandboxConn{mustFailTxPool: 1} hc.Reset() - hc.addTestEndPoint("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + hc.addTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) _, err = f(hc, []string{"0", "1"}) // Verify server errors are consolidated. - want = fmt.Sprintf("shard, host: %v.0.replica, host:\"0\" port_map: , error: err\nshard, host: %v.1.replica, host:\"1\" port_map: , tx_pool_full: err", name, name) + want = fmt.Sprintf("shard, host: %v.0.replica, alias:<> hostname:\"0\" port_map: , error: err\nshard, host: %v.1.replica, alias:<> hostname:\"1\" port_map: , tx_pool_full: err", name, name) // We should only surface the higher priority error code verifyScatterConnError(t, err, want, vtrpcpb.ErrorCode_BAD_INPUT) // Ensure that we tried only once. @@ -190,7 +190,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(hc discovery.Healt s.Reset() sbc = &sandboxConn{} hc.Reset() - hc.addTestEndPoint("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc) + hc.addTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc) qr, err = f(hc, []string{"0", "0"}) // Ensure that we executed only once. 
if execCount := sbc.ExecCount.Get(); execCount != 1 { @@ -202,8 +202,8 @@ func testScatterConnGeneric(t *testing.T, name string, f func(hc discovery.Healt sbc0 = &sandboxConn{} sbc1 = &sandboxConn{} hc.Reset() - hc.addTestEndPoint("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + hc.addTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) qr, err = f(hc, []string{"0", "1"}) if err != nil { t.Errorf("want nil, got %v", err) @@ -227,8 +227,8 @@ func TestMultiExecs(t *testing.T) { sbc0 := &sandboxConn{} sbc1 := &sandboxConn{} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "0", 1, "TestMultiExecs", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, "TestMultiExecs", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + hc.addTestTablet("aa", "0", 1, "TestMultiExecs", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 1, "TestMultiExecs", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) stc := NewScatterConn(hc, topo.Server{}, new(sandboxTopo), "", "aa", retryCount, nil) shardVars := map[string]map[string]interface{}{ "0": { @@ -262,7 +262,7 @@ func TestScatterConnStreamExecuteSendError(t *testing.T) { createSandbox("TestScatterConnStreamExecuteSendError") sbc := &sandboxConn{} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "0", 1, "TestScatterConnStreamExecuteSendError", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc) + hc.addTestTablet("aa", "0", 1, "TestScatterConnStreamExecuteSendError", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc) stc := NewScatterConn(hc, topo.Server{}, new(sandboxTopo), "", "aa", retryCount, nil) err := stc.StreamExecute(context.Background(), "query", nil, 
"TestScatterConnStreamExecuteSendError", []string{"0"}, topodatapb.TabletType_REPLICA, func(*sqltypes.Result) error { return fmt.Errorf("send error") @@ -278,7 +278,7 @@ func TestScatterCommitRollbackIncorrectSession(t *testing.T) { createSandbox("TestScatterCommitRollbackIncorrectSession") sbc0 := &sandboxConn{} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "0", 1, "TestScatterCommitRollbackIncorrectSession", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "0", 1, "TestScatterCommitRollbackIncorrectSession", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) stc := NewScatterConn(hc, topo.Server{}, new(sandboxTopo), "", "aa", retryCount, nil) // nil session @@ -301,8 +301,8 @@ func TestScatterConnCommitSuccess(t *testing.T) { sbc0 := &sandboxConn{} sbc1 := &sandboxConn{} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "0", 1, "TestScatterConnCommitSuccess", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, "TestScatterConnCommitSuccess", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + hc.addTestTablet("aa", "0", 1, "TestScatterConnCommitSuccess", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 1, "TestScatterConnCommitSuccess", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) stc := NewScatterConn(hc, topo.Server{}, new(sandboxTopo), "", "aa", retryCount, nil) // Sequence the executes to ensure commit order @@ -366,8 +366,8 @@ func TestScatterConnRollback(t *testing.T) { sbc0 := &sandboxConn{} sbc1 := &sandboxConn{} hc := newFakeHealthCheck() - hc.addTestEndPoint("aa", "0", 1, "TestScatterConnRollback", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, "TestScatterConnRollback", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + hc.addTestTablet("aa", "0", 1, "TestScatterConnRollback", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 
1, "TestScatterConnRollback", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) stc := NewScatterConn(hc, topo.Server{}, new(sandboxTopo), "", "aa", retryCount, nil) // Sequence the executes to ensure commit order @@ -394,7 +394,7 @@ func TestScatterConnError(t *testing.T) { err := &ScatterConnError{ Retryable: false, Errs: []error{ - &ShardError{EndPointCode: vtrpcpb.ErrorCode_PERMISSION_DENIED, Err: &tabletconn.ServerError{Err: "tabletconn error"}}, + &ShardError{ErrorCode: vtrpcpb.ErrorCode_PERMISSION_DENIED, Err: &tabletconn.ServerError{Err: "tabletconn error"}}, fmt.Errorf("generic error"), tabletconn.ConnClosed, }, @@ -416,8 +416,8 @@ func TestScatterConnQueryNotInTransaction(t *testing.T) { sbc0 := &sandboxConn{} sbc1 := &sandboxConn{} hc.Reset() - hc.addTestEndPoint("aa", "0", 1, "TestScatterConnQueryNotInTransaction", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, "TestScatterConnQueryNotInTransaction", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + hc.addTestTablet("aa", "0", 1, "TestScatterConnQueryNotInTransaction", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 1, "TestScatterConnQueryNotInTransaction", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) stc := NewScatterConn(hc, topo.Server{}, new(sandboxTopo), "", "aa", retryCount, nil) session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) stc.Execute(context.Background(), "query1", nil, "TestScatterConnQueryNotInTransaction", []string{"0"}, topodatapb.TabletType_REPLICA, session, true) @@ -457,8 +457,8 @@ func TestScatterConnQueryNotInTransaction(t *testing.T) { sbc0 = &sandboxConn{} sbc1 = &sandboxConn{} hc.Reset() - hc.addTestEndPoint("aa", "0", 1, "TestScatterConnQueryNotInTransaction", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, "TestScatterConnQueryNotInTransaction", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + 
hc.addTestTablet("aa", "0", 1, "TestScatterConnQueryNotInTransaction", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 1, "TestScatterConnQueryNotInTransaction", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) stc = NewScatterConn(hc, topo.Server{}, new(sandboxTopo), "", "aa", retryCount, nil) session = NewSafeSession(&vtgatepb.Session{InTransaction: true}) stc.Execute(context.Background(), "query1", nil, "TestScatterConnQueryNotInTransaction", []string{"0"}, topodatapb.TabletType_REPLICA, session, false) @@ -498,8 +498,8 @@ func TestScatterConnQueryNotInTransaction(t *testing.T) { sbc0 = &sandboxConn{} sbc1 = &sandboxConn{} hc.Reset() - hc.addTestEndPoint("aa", "0", 1, "TestScatterConnQueryNotInTransaction", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) - hc.addTestEndPoint("aa", "1", 1, "TestScatterConnQueryNotInTransaction", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) + hc.addTestTablet("aa", "0", 1, "TestScatterConnQueryNotInTransaction", "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc0) + hc.addTestTablet("aa", "1", 1, "TestScatterConnQueryNotInTransaction", "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) stc = NewScatterConn(hc, topo.Server{}, new(sandboxTopo), "", "aa", retryCount, nil) session = NewSafeSession(&vtgatepb.Session{InTransaction: true}) stc.Execute(context.Background(), "query1", nil, "TestScatterConnQueryNotInTransaction", []string{"0"}, topodatapb.TabletType_REPLICA, session, false) diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 688322f69e..80dc44f95c 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -754,7 +754,7 @@ func (vtg *VTGate) GetSrvShard(ctx context.Context, keyspace, shard string) (*to } // GetGatewayCacheStatus returns a displayable version of the Gateway cache. 
-func (vtg *VTGate) GetGatewayCacheStatus() GatewayEndPointCacheStatusList { +func (vtg *VTGate) GetGatewayCacheStatus() GatewayTabletCacheStatusList { return vtg.resolver.GetGatewayCacheStatus() } diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index 0912783455..bfa55d3863 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -47,7 +47,7 @@ func TestVTGateExecute(t *testing.T) { createSandbox(KsTestUnsharded) sbc := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbc) qr, err := rpcVTGate.Execute(context.Background(), "select id from t1", nil, @@ -108,7 +108,7 @@ func TestVTGateExecuteWithKeyspace(t *testing.T) { createSandbox(KsTestUnsharded) sbc := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil, sbc) qr, err := rpcVTGate.Execute(context.Background(), "select id from none", nil, @@ -141,7 +141,7 @@ func TestVTGateExecuteShards(t *testing.T) { createSandbox(ks) sbc := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_REPLICA, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_REPLICA, true, 1, nil, sbc) qr, err := rpcVTGate.ExecuteShards(context.Background(), "query", nil, @@ -216,8 +216,8 @@ func TestVTGateExecuteKeyspaceIds(t *testing.T) { sbc1 := &sandboxConn{} sbc2 := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) - 
hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) // Test for successful execution qr, err := rpcVTGate.ExecuteKeyspaceIds(context.Background(), "query", @@ -289,8 +289,8 @@ func TestVTGateExecuteKeyRanges(t *testing.T) { sbc1 := &sandboxConn{} sbc2 := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) // Test for successful execution qr, err := rpcVTGate.ExecuteKeyRanges(context.Background(), "query", @@ -364,8 +364,8 @@ func TestVTGateExecuteEntityIds(t *testing.T) { sbc1 := &sandboxConn{} sbc2 := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) // Test for successful execution qr, err := rpcVTGate.ExecuteEntityIds(context.Background(), "query", @@ -463,8 +463,8 @@ func TestVTGateExecuteBatchShards(t *testing.T) { sbc1 := &sandboxConn{} sbc2 := &sandboxConn{} hcVTGateTest.Reset() - 
hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) qrl, err := rpcVTGate.ExecuteBatchShards(context.Background(), []*vtgatepb.BoundShardQuery{{ Query: &querypb.BoundQuery{ @@ -527,8 +527,8 @@ func TestVTGateExecuteBatchKeyspaceIds(t *testing.T) { sbc1 := &sandboxConn{} sbc2 := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc2) kid10 := []byte{0x10} kid30 := []byte{0x30} qrl, err := rpcVTGate.ExecuteBatchKeyspaceIds(context.Background(), @@ -591,7 +591,7 @@ func TestVTGateStreamExecute(t *testing.T) { createSandbox(ks) sbc := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) var qrs []*sqltypes.Result err := rpcVTGate.StreamExecute(context.Background(), "select id from t1", @@ -619,8 +619,8 @@ func TestVTGateStreamExecuteKeyspaceIds(t *testing.T) { sbc := &sandboxConn{} sbc1 := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, 
sbc) - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) // Test for successful execution var qrs []*sqltypes.Result err := rpcVTGate.StreamExecuteKeyspaceIds(context.Background(), @@ -684,8 +684,8 @@ func TestVTGateStreamExecuteKeyRanges(t *testing.T) { sbc := &sandboxConn{} sbc1 := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc) - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil, sbc1) // Test for successful execution var qrs []*sqltypes.Result err := rpcVTGate.StreamExecuteKeyRanges(context.Background(), @@ -728,7 +728,7 @@ func TestVTGateStreamExecuteShards(t *testing.T) { createSandbox(ks) sbc := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_MASTER, true, 1, nil, sbc) // Test for successful execution var qrs []*sqltypes.Result err := rpcVTGate.StreamExecuteShards(context.Background(), @@ -758,7 +758,7 @@ func TestVTGateSplitQuery(t *testing.T) { port := int32(1001) for _, kr := range keyranges { sbc := &sandboxConn{} - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", port, keyspace, key.KeyRangeString(kr), topodatapb.TabletType_RDONLY, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", port, 
keyspace, key.KeyRangeString(kr), topodatapb.TabletType_RDONLY, true, 1, nil, sbc) port++ } sql := "select col1, col2 from table" @@ -821,7 +821,7 @@ func TestVTGateSplitQueryV2Sharded(t *testing.T) { port := int32(1001) for _, kr := range keyranges { sbc := &sandboxConn{} - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", port, keyspace, key.KeyRangeString(kr), topodatapb.TabletType_RDONLY, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", port, keyspace, key.KeyRangeString(kr), topodatapb.TabletType_RDONLY, true, 1, nil, sbc) port++ } sql := "select col1, col2 from table" @@ -900,7 +900,7 @@ func TestVTGateSplitQueryV2Unsharded(t *testing.T) { createSandbox(keyspace) sbc := &sandboxConn{} hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "1.1.1.1", 1001, keyspace, "0", topodatapb.TabletType_RDONLY, true, 1, nil, sbc) + hcVTGateTest.addTestTablet("aa", "1.1.1.1", 1001, keyspace, "0", topodatapb.TabletType_RDONLY, true, 1, nil, sbc) sql := "select col1, col2 from table" bindVars := map[string]interface{}{"bv1": nil} splitColumns := []string{"sc1", "sc2"} @@ -1267,8 +1267,8 @@ func setUpSandboxWithTwoShards(keyspace string) (string, []*sandboxConn) { shards := []*sandboxConn{{}, {}} createSandbox(keyspace) hcVTGateTest.Reset() - hcVTGateTest.addTestEndPoint("aa", "-20", 1, keyspace, "-20", topodatapb.TabletType_MASTER, true, 1, nil, shards[0]) - hcVTGateTest.addTestEndPoint("aa", "20-40", 1, keyspace, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, shards[1]) + hcVTGateTest.addTestTablet("aa", "-20", 1, keyspace, "-20", topodatapb.TabletType_MASTER, true, 1, nil, shards[0]) + hcVTGateTest.addTestTablet("aa", "20-40", 1, keyspace, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, shards[1]) return keyspace, shards } diff --git a/go/vt/worker/clone_utils.go b/go/vt/worker/clone_utils.go index 88eccee825..5cb00a7879 100644 --- a/go/vt/worker/clone_utils.go +++ b/go/vt/worker/clone_utils.go @@ -251,19 +251,12 @@ func makeValueString(fields 
[]*querypb.Field, rows [][]sqltypes.Value) string { return buf.String() } -// endPointToTabletInfo converts an EndPointStats object from the discovery +// tabletStatsToTabletInfo converts a TabletStats object from the discovery // package into a TabletInfo object. The latter one is required by several // TabletManagerClient API calls. // Note that this is a best-effort conversion and won't result into the same // result as a call to topo.GetTablet(). // Note: We assume that "eps" is immutable and we can reference its data. -func endPointToTabletInfo(eps *discovery.EndPointStats) *topo.TabletInfo { - return topo.NewTabletInfo(&topodatapb.Tablet{ - Alias: eps.Alias(), - Hostname: eps.EndPoint.Host, - PortMap: eps.EndPoint.PortMap, - Keyspace: eps.Target.Keyspace, - Shard: eps.Target.Shard, - Type: eps.Target.TabletType, - }, -1 /* version */) +func tabletStatsToTabletInfo(eps *discovery.TabletStats) *topo.TabletInfo { + return topo.NewTabletInfo(eps.Tablet, -1 /* version */) } diff --git a/go/vt/worker/diff_utils.go b/go/vt/worker/diff_utils.go index 3724a716a0..b57ccb4520 100644 --- a/go/vt/worker/diff_utils.go +++ b/go/vt/worker/diff_utils.go @@ -44,12 +44,7 @@ func NewQueryResultReaderForTablet(ctx context.Context, ts topo.Server, tabletAl return nil, err } - endPoint, err := topo.TabletEndPoint(tablet.Tablet) - if err != nil { - return nil, err - } - - conn, err := tabletconn.GetDialer()(ctx, endPoint, tablet.Keyspace, tablet.Shard, tablet.Type, *remoteActionsTimeout) + conn, err := tabletconn.GetDialer()(ctx, tablet.Tablet, tablet.Keyspace, tablet.Shard, tablet.Type, *remoteActionsTimeout) if err != nil { return nil, err } diff --git a/go/vt/worker/executor.go b/go/vt/worker/executor.go index ab358e17e3..b21f1b0241 100644 --- a/go/vt/worker/executor.go +++ b/go/vt/worker/executor.go @@ -80,12 +80,12 @@ func (e *executor) fetchWithRetries(ctx context.Context, command string) error { // Is this current attempt a retry of a previous attempt? 
isRetry := false for { - var master *discovery.EndPointStats + var master *discovery.TabletStats var err error // Get the current master from the HealthCheck. masters := discovery.GetCurrentMaster( - e.healthCheck.GetEndPointStatsFromTarget(e.keyspace, e.shard, topodatapb.TabletType_MASTER)) + e.healthCheck.GetTabletStatsFromTarget(e.keyspace, e.shard, topodatapb.TabletType_MASTER)) if len(masters) == 0 { e.wr.Logger().Warningf("ExecuteFetch failed for keyspace/shard %v/%v because no MASTER is available; will retry until there is MASTER again", e.keyspace, e.shard) statsRetryCount.Add(1) @@ -110,7 +110,7 @@ func (e *executor) fetchWithRetries(ctx context.Context, command string) error { // new variables until the label is reached.) { tryCtx, cancel := context.WithTimeout(retryCtx, 2*time.Minute) - _, err = e.wr.TabletManagerClient().ExecuteFetchAsApp(tryCtx, endPointToTabletInfo(master), command, 0) + _, err = e.wr.TabletManagerClient().ExecuteFetchAsApp(tryCtx, tabletStatsToTabletInfo(master), command, 0) cancel() if err == nil { @@ -152,7 +152,7 @@ func (e *executor) fetchWithRetries(ctx context.Context, command string) error { // checkError returns true if the error can be ignored and the command // succeeded, false if the error is retryable and a non-nil error if the // command must not be retried. -func (e *executor) checkError(err error, isRetry bool, master *discovery.EndPointStats) (bool, error) { +func (e *executor) checkError(err error, isRetry bool, master *discovery.TabletStats) (bool, error) { tabletString := fmt.Sprintf("%v (%v/%v)", topoproto.TabletAliasString(master.Alias()), e.keyspace, e.shard) // If the ExecuteFetch call failed because of an application error, we will try to figure out why. // We need to extract the MySQL error number, and will attempt to retry if we think the error is recoverable. 
diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index c3dd168480..d62a97b53b 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -373,14 +373,14 @@ func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { for _, si := range scw.destinationShards { waitCtx, waitCancel := context.WithTimeout(ctx, 10*time.Second) defer waitCancel() - if err := discovery.WaitForEndPoints(waitCtx, scw.healthCheck, + if err := discovery.WaitForTablets(waitCtx, scw.healthCheck, scw.cell, si.Keyspace(), si.ShardName(), []topodatapb.TabletType{topodatapb.TabletType_MASTER}); err != nil { return fmt.Errorf("cannot find MASTER tablet for destination shard for %v/%v: %v", si.Keyspace(), si.ShardName(), err) } masters := discovery.GetCurrentMaster( - scw.healthCheck.GetEndPointStatsFromTarget(si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER)) + scw.healthCheck.GetTabletStatsFromTarget(si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER)) if len(masters) == 0 { - return fmt.Errorf("cannot find MASTER tablet for destination shard for %v/%v in HealthCheck: empty EndPointStats list", si.Keyspace(), si.ShardName()) + return fmt.Errorf("cannot find MASTER tablet for destination shard for %v/%v in HealthCheck: empty TabletStats list", si.Keyspace(), si.ShardName()) } master := masters[0] diff --git a/go/vt/worker/topo_utils.go b/go/vt/worker/topo_utils.go index 9d5b6a5f7a..59ad55c6c4 100644 --- a/go/vt/worker/topo_utils.go +++ b/go/vt/worker/topo_utils.go @@ -20,20 +20,20 @@ import ( ) var ( - // waitForHealthyEndPointsTimeout intends to wait for the + // waitForHealthyTabletsTimeout intends to wait for the // healthcheck to automatically return rdonly instances which // have been taken out by previous *Clone or *Diff runs. // Therefore, the default for this variable must be higher // than vttablet's -health_check_interval. 
- waitForHealthyEndPointsTimeout = flag.Duration("wait_for_healthy_rdonly_endpoints_timeout", 60*time.Second, "maximum time to wait if less than --min_healthy_rdonly_endpoints are available") + waitForHealthyTabletsTimeout = flag.Duration("wait_for_healthy_rdonly_endpoints_timeout", 60*time.Second, "maximum time to wait if less than --min_healthy_rdonly_endpoints are available") ) -// FindHealthyRdonlyEndPoint returns a random healthy endpoint. +// FindHealthyRdonlyTablet returns a random healthy endpoint. // Since we don't want to use them all, we require at least -// minHealthyEndPoints servers to be healthy. +// minHealthyTablets servers to be healthy. // May block up to -wait_for_healthy_rdonly_endpoints_timeout. -func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell, keyspace, shard string, minHealthyRdonlyEndPoints int) (*topodatapb.TabletAlias, error) { - busywaitCtx, busywaitCancel := context.WithTimeout(ctx, *waitForHealthyEndPointsTimeout) +func FindHealthyRdonlyTablet(ctx context.Context, wr *wrangler.Wrangler, cell, keyspace, shard string, minHealthyRdonlyTablets int) (*topodatapb.TabletAlias, error) { + busywaitCtx, busywaitCancel := context.WithTimeout(ctx, *waitForHealthyTabletsTimeout) defer busywaitCancel() // create a discovery healthcheck, wait for it to have one rdonly @@ -45,29 +45,29 @@ func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell, deadlineForLog, _ := busywaitCtx.Deadline() wr.Logger().Infof("Waiting for enough healthy rdonly endpoints to become available. 
required: %v Waiting up to %.1f seconds.", - minHealthyRdonlyEndPoints, deadlineForLog.Sub(time.Now()).Seconds()) - if err := discovery.WaitForEndPoints(busywaitCtx, healthCheck, cell, keyspace, shard, []topodatapb.TabletType{topodatapb.TabletType_RDONLY}); err != nil { + minHealthyRdonlyTablets, deadlineForLog.Sub(time.Now()).Seconds()) + if err := discovery.WaitForTablets(busywaitCtx, healthCheck, cell, keyspace, shard, []topodatapb.TabletType{topodatapb.TabletType_RDONLY}); err != nil { return nil, fmt.Errorf("error waiting for rdonly endpoints for (%v,%v/%v): %v", cell, keyspace, shard, err) } - var healthyEndpoints []*discovery.EndPointStats + var healthyEndpoints []*discovery.TabletStats for { select { case <-busywaitCtx.Done(): return nil, fmt.Errorf("not enough healthy rdonly endpoints to choose from in (%v,%v/%v), have %v healthy ones, need at least %v Context error: %v", - cell, keyspace, shard, len(healthyEndpoints), minHealthyRdonlyEndPoints, busywaitCtx.Err()) + cell, keyspace, shard, len(healthyEndpoints), minHealthyRdonlyTablets, busywaitCtx.Err()) default: } - healthyEndpoints = discovery.RemoveUnhealthyEndpoints( - healthCheck.GetEndPointStatsFromTarget(keyspace, shard, topodatapb.TabletType_RDONLY)) - if len(healthyEndpoints) >= minHealthyRdonlyEndPoints { + healthyEndpoints = discovery.RemoveUnhealthyTablets( + healthCheck.GetTabletStatsFromTarget(keyspace, shard, topodatapb.TabletType_RDONLY)) + if len(healthyEndpoints) >= minHealthyRdonlyTablets { break } deadlineForLog, _ := busywaitCtx.Deadline() wr.Logger().Infof("Waiting for enough healthy rdonly endpoints to become available. available: %v required: %v Waiting up to %.1f more seconds.", - len(healthyEndpoints), minHealthyRdonlyEndPoints, deadlineForLog.Sub(time.Now()).Seconds()) + len(healthyEndpoints), minHealthyRdonlyTablets, deadlineForLog.Sub(time.Now()).Seconds()) // Block for 1 second because 2 seconds is the -health_check_interval flag value in integration tests. 
timer := time.NewTimer(1 * time.Second) select { @@ -79,18 +79,15 @@ func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell, // random server in the list is what we want index := rand.Intn(len(healthyEndpoints)) - return &topodatapb.TabletAlias{ - Cell: cell, - Uid: healthyEndpoints[index].EndPoint.Uid, - }, nil + return healthyEndpoints[index].Tablet.Alias, nil } // FindWorkerTablet will: // - find a rdonly instance in the keyspace / shard // - mark it as worker // - tag it with our worker process -func FindWorkerTablet(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, cell, keyspace, shard string, minHealthyRdonlyEndPoints int) (*topodatapb.TabletAlias, error) { - tabletAlias, err := FindHealthyRdonlyEndPoint(ctx, wr, cell, keyspace, shard, minHealthyRdonlyEndPoints) +func FindWorkerTablet(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, cell, keyspace, shard string, minHealthyRdonlyTablets int) (*topodatapb.TabletAlias, error) { + tabletAlias, err := FindHealthyRdonlyTablet(ctx, wr, cell, keyspace, shard, minHealthyRdonlyTablets) if err != nil { return nil, err } diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index a311ad6216..4764a0e255 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -340,14 +340,14 @@ func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error { vscw.wr.Logger().Infof("Finding a MASTER tablet for each destination shard...") waitCtx, waitCancel := context.WithTimeout(ctx, 10*time.Second) defer waitCancel() - if err := discovery.WaitForEndPoints(waitCtx, vscw.healthCheck, + if err := discovery.WaitForTablets(waitCtx, vscw.healthCheck, vscw.cell, vscw.destinationKeyspace, vscw.destinationShard, []topodatapb.TabletType{topodatapb.TabletType_MASTER}); err != nil { return fmt.Errorf("cannot find MASTER tablet for destination shard for %v/%v: %v", vscw.destinationKeyspace, 
vscw.destinationShard, err) } masters := discovery.GetCurrentMaster( - vscw.healthCheck.GetEndPointStatsFromTarget(vscw.destinationKeyspace, vscw.destinationShard, topodatapb.TabletType_MASTER)) + vscw.healthCheck.GetTabletStatsFromTarget(vscw.destinationKeyspace, vscw.destinationShard, topodatapb.TabletType_MASTER)) if len(masters) == 0 { - return fmt.Errorf("cannot find MASTER tablet for destination shard for %v/%v in HealthCheck: empty EndPointStats list", vscw.destinationKeyspace, vscw.destinationShard) + return fmt.Errorf("cannot find MASTER tablet for destination shard for %v/%v in HealthCheck: empty TabletStats list", vscw.destinationKeyspace, vscw.destinationShard) } master := masters[0] diff --git a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index 2b14063003..7d4ed73079 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -488,7 +488,7 @@ func (wr *Wrangler) waitForDrainInCell(ctx context.Context, cell, keyspace, shar watcher := discovery.NewShardReplicationWatcher(wr.TopoServer(), hc, cell, keyspace, shard, healthCheckTopologyRefresh, discovery.DefaultTopoReadConcurrency) defer watcher.Stop() - if err := discovery.WaitForEndPoints(ctx, hc, cell, keyspace, shard, []topodatapb.TabletType{servedType}); err != nil { + if err := discovery.WaitForTablets(ctx, hc, cell, keyspace, shard, []topodatapb.TabletType{servedType}); err != nil { return fmt.Errorf("%v: error waiting for initial %v endpoints for %v/%v: %v", cell, servedType, keyspace, shard, err) } @@ -504,16 +504,16 @@ func (wr *Wrangler) waitForDrainInCell(ctx context.Context, cell, keyspace, shar startTime := time.Now() for { // map key: tablet uid - drainedHealthyTablets := make(map[uint32]*discovery.EndPointStats) - notDrainedHealtyTablets := make(map[uint32]*discovery.EndPointStats) + drainedHealthyTablets := make(map[uint32]*discovery.TabletStats) + notDrainedHealtyTablets := make(map[uint32]*discovery.TabletStats) - healthyTablets := 
discovery.RemoveUnhealthyEndpoints( - hc.GetEndPointStatsFromTarget(keyspace, shard, servedType)) + healthyTablets := discovery.RemoveUnhealthyTablets( + hc.GetTabletStatsFromTarget(keyspace, shard, servedType)) for _, eps := range healthyTablets { if eps.Stats.Qps == 0.0 { - drainedHealthyTablets[eps.EndPoint.Uid] = eps + drainedHealthyTablets[eps.Tablet.Alias.Uid] = eps } else { - notDrainedHealtyTablets[eps.EndPoint.Uid] = eps + notDrainedHealtyTablets[eps.Tablet.Alias.Uid] = eps } } @@ -549,10 +549,10 @@ func (wr *Wrangler) waitForDrainInCell(ctx context.Context, cell, keyspace, shar return nil } -func formatEndpointStats(eps *discovery.EndPointStats) string { +func formatEndpointStats(eps *discovery.TabletStats) string { webURL := "unknown http port" - if webPort, ok := eps.EndPoint.PortMap["vt"]; ok { - webURL = fmt.Sprintf("http://%v:%d/", eps.EndPoint.Host, webPort) + if webPort, ok := eps.Tablet.PortMap["vt"]; ok { + webURL = fmt.Sprintf("http://%v:%d/", eps.Tablet.Hostname, webPort) } return fmt.Sprintf("%v: %v stats: %v", topoproto.TabletAliasString(eps.Alias()), webURL, eps.Stats) } From 4a535f7a2f22dfc6c7b1b1baed883c221f942740 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 16 May 2016 09:52:04 -0700 Subject: [PATCH 09/27] TabletManager client now uses topodatapb.Tablet. Instead of TabletInfo. To be consistent with the other RPCs, and because it doesn't use the version anyway. 
--- go/cmd/vtcombo/tablet_map.go | 120 ++--- go/vt/schemamanager/schemamanager_test.go | 11 +- go/vt/schemamanager/tablet_executor.go | 29 +- .../agentrpctest/test_agent_rpc.go | 474 +++++++++--------- .../tabletmanager/faketmclient/fake_client.go | 79 ++- go/vt/tabletmanager/grpctmclient/client.go | 84 ++-- .../tabletmanager/grpctmserver/server_test.go | 7 +- go/vt/tabletmanager/rpc_external_reparent.go | 2 +- .../tabletmanager/tmclient/rpc_client_api.go | 79 ++- go/vt/topo/tablet.go | 25 - go/vt/vtctl/reparent.go | 2 +- go/vt/vtctl/vtctl.go | 24 +- go/vt/vtctld/vtctld.go | 4 +- go/vt/worker/clone_utils.go | 12 +- go/vt/worker/executor.go | 2 +- go/vt/worker/split_clone.go | 8 +- go/vt/worker/split_clone_test.go | 2 +- go/vt/worker/split_diff.go | 16 +- go/vt/worker/vertical_split_clone.go | 8 +- go/vt/worker/vertical_split_diff.go | 16 +- go/vt/wrangler/cleaner.go | 20 +- go/vt/wrangler/hook.go | 31 +- go/vt/wrangler/keyspace.go | 16 +- go/vt/wrangler/permissions.go | 4 +- go/vt/wrangler/reparent.go | 32 +- go/vt/wrangler/schema.go | 8 +- go/vt/wrangler/tablet.go | 6 +- go/vt/wrangler/testlib/fake_tablet.go | 2 +- .../testlib/reparent_external_test.go | 10 +- go/vt/wrangler/validator.go | 8 +- 30 files changed, 541 insertions(+), 600 deletions(-) diff --git a/go/cmd/vtcombo/tablet_map.go b/go/cmd/vtcombo/tablet_map.go index 8d32a5b7d4..7a7b5b1d9b 100644 --- a/go/cmd/vtcombo/tablet_map.go +++ b/go/cmd/vtcombo/tablet_map.go @@ -195,7 +195,7 @@ func initTabletMap(ts topo.Server, topology string, mysqld mysqlctl.MysqlDaemon, if err != nil { log.Fatalf("cannot find tablet: %+v", tablet.agent.TabletAlias) } - tmc.RunHealthCheck(ctx, tabletInfo) + tmc.RunHealthCheck(ctx, tabletInfo.Tablet) } } @@ -482,10 +482,10 @@ func (itc *internalTabletConn) StreamHealth(ctx context.Context) (tabletconn.Str // internalTabletManagerClient implements tmclient.TabletManagerClient type internalTabletManagerClient struct{} -func (itmc *internalTabletManagerClient) Ping(ctx 
context.Context, tablet *topo.TabletInfo) error { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) Ping(ctx context.Context, tablet *topodatapb.Tablet) error { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } return t.agent.RPCWrap(ctx, actionnode.TabletActionPing, nil, nil, func() error { t.agent.Ping(ctx, "payload") @@ -493,10 +493,10 @@ func (itmc *internalTabletManagerClient) Ping(ctx context.Context, tablet *topo. }) } -func (itmc *internalTabletManagerClient) GetSchema(ctx context.Context, tablet *topo.TabletInfo, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return nil, fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return nil, fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } var result *tabletmanagerdatapb.SchemaDefinition if err := t.agent.RPCWrap(ctx, actionnode.TabletActionGetSchema, nil, nil, func() error { @@ -511,10 +511,10 @@ func (itmc *internalTabletManagerClient) GetSchema(ctx context.Context, tablet * return result, nil } -func (itmc *internalTabletManagerClient) GetPermissions(ctx context.Context, tablet *topo.TabletInfo) (*tabletmanagerdatapb.Permissions, error) { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) GetPermissions(ctx context.Context, tablet *topodatapb.Tablet) (*tabletmanagerdatapb.Permissions, error) { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return nil, fmt.Errorf("tmclient: cannot find tablet %v", 
tablet.Tablet.Alias.Uid) + return nil, fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } var result *tabletmanagerdatapb.Permissions if err := t.agent.RPCWrap(ctx, actionnode.TabletActionGetPermissions, nil, nil, func() error { @@ -529,22 +529,22 @@ func (itmc *internalTabletManagerClient) GetPermissions(ctx context.Context, tab return result, nil } -func (itmc *internalTabletManagerClient) SetReadOnly(ctx context.Context, tablet *topo.TabletInfo) error { +func (itmc *internalTabletManagerClient) SetReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) SetReadWrite(ctx context.Context, tablet *topo.TabletInfo) error { +func (itmc *internalTabletManagerClient) SetReadWrite(ctx context.Context, tablet *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) ChangeType(ctx context.Context, tablet *topo.TabletInfo, dbType topodatapb.TabletType) error { +func (itmc *internalTabletManagerClient) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) Sleep(ctx context.Context, tablet *topo.TabletInfo, duration time.Duration) error { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) Sleep(ctx context.Context, tablet *topodatapb.Tablet, duration time.Duration) error { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } return t.agent.RPCWrapLockAction(ctx, actionnode.TabletActionSleep, nil, nil, true, func() error { t.agent.Sleep(ctx, duration) @@ -552,14 +552,14 @@ func (itmc *internalTabletManagerClient) Sleep(ctx context.Context, tablet *topo }) } -func (itmc 
*internalTabletManagerClient) ExecuteHook(ctx context.Context, tablet *topo.TabletInfo, hk *hook.Hook) (*hook.HookResult, error) { +func (itmc *internalTabletManagerClient) ExecuteHook(ctx context.Context, tablet *topodatapb.Tablet, hk *hook.Hook) (*hook.HookResult, error) { return nil, fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) RefreshState(ctx context.Context, tablet *topo.TabletInfo) error { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) RefreshState(ctx context.Context, tablet *topodatapb.Tablet) error { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } return t.agent.RPCWrapLockAction(ctx, actionnode.TabletActionRefreshState, nil, nil, true, func() error { t.agent.RefreshState(ctx) @@ -567,10 +567,10 @@ func (itmc *internalTabletManagerClient) RefreshState(ctx context.Context, table }) } -func (itmc *internalTabletManagerClient) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo) error { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) RunHealthCheck(ctx context.Context, tablet *topodatapb.Tablet) error { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } return t.agent.RPCWrap(ctx, actionnode.TabletActionRunHealthCheck, nil, nil, func() error { t.agent.RunHealthCheck(ctx) @@ -578,10 +578,10 @@ func (itmc *internalTabletManagerClient) RunHealthCheck(ctx context.Context, tab }) } -func (itmc *internalTabletManagerClient) IgnoreHealthError(ctx context.Context, tablet *topo.TabletInfo, pattern string) error { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) IgnoreHealthError(ctx context.Context, 
tablet *topodatapb.Tablet, pattern string) error { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } return t.agent.RPCWrap(ctx, actionnode.TabletActionIgnoreHealthError, nil, nil, func() error { t.agent.IgnoreHealthError(ctx, pattern) @@ -589,10 +589,10 @@ func (itmc *internalTabletManagerClient) IgnoreHealthError(ctx context.Context, }) } -func (itmc *internalTabletManagerClient) ReloadSchema(ctx context.Context, tablet *topo.TabletInfo) error { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) ReloadSchema(ctx context.Context, tablet *topodatapb.Tablet) error { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } return t.agent.RPCWrapLockAction(ctx, actionnode.TabletActionReloadSchema, nil, nil, true, func() error { t.agent.ReloadSchema(ctx) @@ -600,10 +600,10 @@ func (itmc *internalTabletManagerClient) ReloadSchema(ctx context.Context, table }) } -func (itmc *internalTabletManagerClient) PreflightSchema(ctx context.Context, tablet *topo.TabletInfo, change string) (*tmutils.SchemaChangeResult, error) { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) PreflightSchema(ctx context.Context, tablet *topodatapb.Tablet, change string) (*tmutils.SchemaChangeResult, error) { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return nil, fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return nil, fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } var result *tmutils.SchemaChangeResult if err := t.agent.RPCWrapLockAction(ctx, actionnode.TabletActionPreflightSchema, nil, nil, true, func() error { @@ -618,10 +618,10 @@ func (itmc *internalTabletManagerClient) 
PreflightSchema(ctx context.Context, ta return result, nil } -func (itmc *internalTabletManagerClient) ApplySchema(ctx context.Context, tablet *topo.TabletInfo, change *tmutils.SchemaChange) (*tmutils.SchemaChangeResult, error) { - t, ok := tabletMap[tablet.Tablet.Alias.Uid] +func (itmc *internalTabletManagerClient) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tmutils.SchemaChangeResult, error) { + t, ok := tabletMap[tablet.Alias.Uid] if !ok { - return nil, fmt.Errorf("tmclient: cannot find tablet %v", tablet.Tablet.Alias.Uid) + return nil, fmt.Errorf("tmclient: cannot find tablet %v", tablet.Alias.Uid) } var result *tmutils.SchemaChangeResult if err := t.agent.RPCWrapLockAction(ctx, actionnode.TabletActionApplySchema, nil, nil, true, func() error { @@ -636,103 +636,103 @@ func (itmc *internalTabletManagerClient) ApplySchema(ctx context.Context, tablet return result, nil } -func (itmc *internalTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { +func (itmc *internalTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { return nil, fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) ExecuteFetchAsApp(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int) (*querypb.QueryResult, error) { +func (itmc *internalTabletManagerClient) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int) (*querypb.QueryResult, error) { return nil, fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (*replicationdatapb.Status, error) { +func (itmc *internalTabletManagerClient) SlaveStatus(ctx context.Context, tablet 
*topodatapb.Tablet) (*replicationdatapb.Status, error) { return nil, fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) MasterPosition(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (itmc *internalTabletManagerClient) MasterPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return "", fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) StopSlave(ctx context.Context, tablet *topo.TabletInfo) error { +func (itmc *internalTabletManagerClient) StopSlave(ctx context.Context, tablet *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) StopSlaveMinimum(ctx context.Context, tablet *topo.TabletInfo, stopPos string, waitTime time.Duration) (string, error) { +func (itmc *internalTabletManagerClient) StopSlaveMinimum(ctx context.Context, tablet *topodatapb.Tablet, stopPos string, waitTime time.Duration) (string, error) { return "", fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) StartSlave(ctx context.Context, tablet *topo.TabletInfo) error { +func (itmc *internalTabletManagerClient) StartSlave(ctx context.Context, tablet *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) TabletExternallyReparented(ctx context.Context, tablet *topo.TabletInfo, externalID string) error { +func (itmc *internalTabletManagerClient) TabletExternallyReparented(ctx context.Context, tablet *topodatapb.Tablet, externalID string) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) GetSlaves(ctx context.Context, tablet *topo.TabletInfo) ([]string, error) { +func (itmc *internalTabletManagerClient) GetSlaves(ctx context.Context, tablet *topodatapb.Tablet) ([]string, error) { return nil, fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) 
WaitBlpPosition(ctx context.Context, tablet *topo.TabletInfo, blpPosition *tabletmanagerdatapb.BlpPosition, waitTime time.Duration) error { +func (itmc *internalTabletManagerClient) WaitBlpPosition(ctx context.Context, tablet *topodatapb.Tablet, blpPosition *tabletmanagerdatapb.BlpPosition, waitTime time.Duration) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) StopBlp(ctx context.Context, tablet *topo.TabletInfo) ([]*tabletmanagerdatapb.BlpPosition, error) { +func (itmc *internalTabletManagerClient) StopBlp(ctx context.Context, tablet *topodatapb.Tablet) ([]*tabletmanagerdatapb.BlpPosition, error) { return nil, fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) StartBlp(ctx context.Context, tablet *topo.TabletInfo) error { +func (itmc *internalTabletManagerClient) StartBlp(ctx context.Context, tablet *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) RunBlpUntil(ctx context.Context, tablet *topo.TabletInfo, positions []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) { +func (itmc *internalTabletManagerClient) RunBlpUntil(ctx context.Context, tablet *topodatapb.Tablet, positions []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) { return "", fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) ResetReplication(ctx context.Context, tablet *topo.TabletInfo) error { +func (itmc *internalTabletManagerClient) ResetReplication(ctx context.Context, tablet *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) InitMaster(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (itmc *internalTabletManagerClient) InitMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return "", fmt.Errorf("not implemented in vtcombo") } -func (itmc 
*internalTabletManagerClient) PopulateReparentJournal(ctx context.Context, tablet *topo.TabletInfo, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, pos string) error { +func (itmc *internalTabletManagerClient) PopulateReparentJournal(ctx context.Context, tablet *topodatapb.Tablet, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, pos string) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) InitSlave(ctx context.Context, tablet *topo.TabletInfo, parent *topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64) error { +func (itmc *internalTabletManagerClient) InitSlave(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) DemoteMaster(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (itmc *internalTabletManagerClient) DemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return "", fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topo.TabletInfo, pos string) (string, error) { +func (itmc *internalTabletManagerClient) PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topodatapb.Tablet, pos string) (string, error) { return "", fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) SlaveWasPromoted(ctx context.Context, tablet *topo.TabletInfo) error { +func (itmc *internalTabletManagerClient) SlaveWasPromoted(ctx context.Context, tablet *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { +func (itmc 
*internalTabletManagerClient) SetMaster(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) SlaveWasRestarted(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.SlaveWasRestartedArgs) error { +func (itmc *internalTabletManagerClient) SlaveWasRestarted(ctx context.Context, tablet *topodatapb.Tablet, args *actionnode.SlaveWasRestartedArgs) error { return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) StopReplicationAndGetStatus(ctx context.Context, tablet *topo.TabletInfo) (*replicationdatapb.Status, error) { +func (itmc *internalTabletManagerClient) StopReplicationAndGetStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) { return nil, fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) PromoteSlave(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (itmc *internalTabletManagerClient) PromoteSlave(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return "", fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) Backup(ctx context.Context, tablet *topo.TabletInfo, concurrency int) (logutil.EventStream, error) { +func (itmc *internalTabletManagerClient) Backup(ctx context.Context, tablet *topodatapb.Tablet, concurrency int) (logutil.EventStream, error) { return nil, fmt.Errorf("not implemented in vtcombo") } diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 6e90271a30..34b32de409 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -14,6 +14,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/test/faketopo" + 
"github.com/youtube/vitess/go/vt/topo/topoproto" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -243,7 +244,7 @@ func (client *fakeTabletManagerClient) AddSchemaDefinition( client.schemaDefinitions[dbName] = schemaDefinition } -func (client *fakeTabletManagerClient) PreflightSchema(ctx context.Context, tablet *topo.TabletInfo, change string) (*tmutils.SchemaChangeResult, error) { +func (client *fakeTabletManagerClient) PreflightSchema(ctx context.Context, tablet *topodatapb.Tablet, change string) (*tmutils.SchemaChangeResult, error) { result, ok := client.preflightSchemas[change] if !ok { var scr tmutils.SchemaChangeResult @@ -252,15 +253,15 @@ func (client *fakeTabletManagerClient) PreflightSchema(ctx context.Context, tabl return result, nil } -func (client *fakeTabletManagerClient) GetSchema(ctx context.Context, tablet *topo.TabletInfo, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { - result, ok := client.schemaDefinitions[tablet.DbName()] +func (client *fakeTabletManagerClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { + result, ok := client.schemaDefinitions[topoproto.TabletDbName(tablet)] if !ok { - return nil, fmt.Errorf("unknown database: %s", tablet.DbName()) + return nil, fmt.Errorf("unknown database: %s", topoproto.TabletDbName(tablet)) } return result, nil } -func (client *fakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { +func (client *fakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { if client.EnableExecuteFetchAsDbaError { return nil, fmt.Errorf("ExecuteFetchAsDba occur an unknown 
error") } diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index a527a8afff..6a298f1491 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -10,18 +10,21 @@ import ( "time" log "github.com/golang/glog" + "golang.org/x/net/context" + "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" - "golang.org/x/net/context" + + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) // TabletExecutor applies schema changes to all tablets. type TabletExecutor struct { tmClient tmclient.TabletManagerClient topoServer topo.Server - tabletInfos []*topo.TabletInfo + tablets []*topodatapb.Tablet schemaDiffs []*tmutils.SchemaChangeResult isClosed bool allowBigSchemaChange bool @@ -61,7 +64,7 @@ func (exec *TabletExecutor) Open(ctx context.Context, keyspace string) error { return fmt.Errorf("unable to get shard names for keyspace: %s, error: %v", keyspace, err) } log.Infof("Keyspace: %v, Shards: %v\n", keyspace, shardNames) - exec.tabletInfos = make([]*topo.TabletInfo, len(shardNames)) + exec.tablets = make([]*topodatapb.Tablet, len(shardNames)) for i, shardName := range shardNames { shardInfo, err := exec.topoServer.GetShard(ctx, keyspace, shardName) log.Infof("\tShard: %s, ShardInfo: %v\n", shardName, shardInfo) @@ -76,11 +79,11 @@ func (exec *TabletExecutor) Open(ctx context.Context, keyspace string) error { if err != nil { return fmt.Errorf("unable to get master tablet info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err) } - exec.tabletInfos[i] = tabletInfo + exec.tablets[i] = tabletInfo.Tablet log.Infof("\t\tTabletInfo: %+v\n", tabletInfo) } - if len(exec.tabletInfos) == 0 { + if len(exec.tablets) == 0 { return fmt.Errorf("keyspace: %s does not contain any master tablets", keyspace) } exec.isClosed = false @@ -117,9 +120,9 @@ func 
(exec *TabletExecutor) Validate(ctx context.Context, sqls []string) error { // 1. Alter more than 100,000 rows. // 2. Change a table with more than 2,000,000 rows (Drops are fine). func (exec *TabletExecutor) detectBigSchemaChanges(ctx context.Context, parsedDDLs []*sqlparser.DDL) (bool, error) { - // exec.tabletInfos is guaranteed to have at least one element; + // exec.tablets is guaranteed to have at least one element; // Otherwise, Open should fail and executor should fail. - masterTabletInfo := exec.tabletInfos[0] + masterTabletInfo := exec.tablets[0] // get database schema, excluding views. dbSchema, err := exec.tmClient.GetSchema( ctx, masterTabletInfo, []string{}, []string{}, false) @@ -153,7 +156,7 @@ func (exec *TabletExecutor) preflightSchemaChanges(ctx context.Context, sqls []s exec.schemaDiffs = make([]*tmutils.SchemaChangeResult, len(sqls)) for i := range sqls { schemaDiff, err := exec.tmClient.PreflightSchema( - ctx, exec.tabletInfos[0], sqls[i]) + ctx, exec.tablets[0], sqls[i]) if err != nil { return err } @@ -199,12 +202,12 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute func (exec *TabletExecutor) executeOnAllTablets(ctx context.Context, execResult *ExecuteResult, sql string) { var wg sync.WaitGroup - numOfMasterTablets := len(exec.tabletInfos) + numOfMasterTablets := len(exec.tablets) wg.Add(numOfMasterTablets) errChan := make(chan ShardWithError, numOfMasterTablets) successChan := make(chan ShardResult, numOfMasterTablets) - for i := range exec.tabletInfos { - go exec.executeOneTablet(ctx, &wg, exec.tabletInfos[i], sql, errChan, successChan) + for i := range exec.tablets { + go exec.executeOneTablet(ctx, &wg, exec.tablets[i], sql, errChan, successChan) } wg.Wait() close(errChan) @@ -222,7 +225,7 @@ func (exec *TabletExecutor) executeOnAllTablets(ctx context.Context, execResult func (exec *TabletExecutor) executeOneTablet( ctx context.Context, wg *sync.WaitGroup, - tabletInfo *topo.TabletInfo, + tabletInfo 
*topodatapb.Tablet, sql string, errChan chan ShardWithError, successChan chan ShardResult) { @@ -238,7 +241,7 @@ func (exec *TabletExecutor) executeOneTablet( // Close clears tablet executor states func (exec *TabletExecutor) Close() { if !exec.isClosed { - exec.tabletInfos = nil + exec.tablets = nil exec.isClosed = true } } diff --git a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go index db6557da9b..e498f5a013 100644 --- a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go +++ b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go @@ -23,7 +23,6 @@ import ( "github.com/youtube/vitess/go/vt/tabletmanager" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/topo" querypb "github.com/youtube/vitess/go/vt/proto/query" replicationdatapb "github.com/youtube/vitess/go/vt/proto/replicationdata" @@ -149,26 +148,26 @@ func (fra *fakeRPCAgent) Ping(ctx context.Context, args string) string { return args } -func agentRPCTestPing(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.Ping(ctx, ti) +func agentRPCTestPing(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.Ping(ctx, tablet) if err != nil { t.Errorf("Ping failed: %v", err) } } -func agentRPCTestPingPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.Ping(ctx, ti) +func agentRPCTestPingPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.Ping(ctx, tablet) expectRPCWrapPanic(t, err) } // agentRPCTestIsTimeoutErrorDialExpiredContext verifies that // client.IsTimeoutError() returns true for RPCs failed due to an expired // context before .Dial(). 
-func agentRPCTestIsTimeoutErrorDialExpiredContext(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { +func agentRPCTestIsTimeoutErrorDialExpiredContext(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { // Using a timeout of 0 here such that .Dial() will fail immediately. expiredCtx, cancel := context.WithTimeout(ctx, 0) defer cancel() - err := client.Ping(expiredCtx, ti) + err := client.Ping(expiredCtx, tablet) if err == nil { t.Fatal("agentRPCTestIsTimeoutErrorDialExpiredContext: RPC with expired context did not fail") } @@ -179,16 +178,15 @@ func agentRPCTestIsTimeoutErrorDialExpiredContext(ctx context.Context, t *testin // agentRPCTestIsTimeoutErrorDialTimeout verifies that client.IsTimeoutError() // returns true for RPCs failed due to a connect timeout during .Dial(). -func agentRPCTestIsTimeoutErrorDialTimeout(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { +func agentRPCTestIsTimeoutErrorDialTimeout(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { // Connect to a non-existing tablet. // For example, this provokes gRPC to return error grpc.ErrClientConnTimeout. 
- invalidTi := topo.NewTabletInfo(ti.Tablet, ti.Version()) - invalidTi.Tablet = proto.Clone(invalidTi.Tablet).(*topodatapb.Tablet) - invalidTi.Tablet.Hostname = "Non-Existent.Server" + invalidTablet := proto.Clone(tablet).(*topodatapb.Tablet) + invalidTablet.Hostname = "Non-Existent.Server" shortCtx, cancel := context.WithTimeout(ctx, time.Millisecond) defer cancel() - err := client.Ping(shortCtx, invalidTi) + err := client.Ping(shortCtx, invalidTablet) if err == nil { t.Fatal("agentRPCTestIsTimeoutErrorDialTimeout: connect to non-existant tablet did not fail") } @@ -199,7 +197,7 @@ func agentRPCTestIsTimeoutErrorDialTimeout(ctx context.Context, t *testing.T, cl // agentRPCTestIsTimeoutErrorRPC verifies that client.IsTimeoutError() returns // true for RPCs failed due to an expired context during RPC execution. -func agentRPCTestIsTimeoutErrorRPC(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, fakeAgent *fakeRPCAgent) { +func agentRPCTestIsTimeoutErrorRPC(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, fakeAgent *fakeRPCAgent) { // We must use a timeout > 0 such that the context deadline hasn't expired // yet in grpctmclient.Client.dial(). // NOTE: This might still race e.g. 
when test execution takes too long the @@ -209,7 +207,7 @@ func agentRPCTestIsTimeoutErrorRPC(ctx context.Context, t *testing.T, client tmc defer cancel() fakeAgent.setSlow(true) defer func() { fakeAgent.setSlow(false) }() - err := client.Ping(shortCtx, ti) + err := client.Ping(shortCtx, tablet) if err == nil { t.Fatal("agentRPCTestIsTimeoutErrorRPC: RPC with expired context did not fail") } @@ -255,13 +253,13 @@ func (fra *fakeRPCAgent) GetSchema(ctx context.Context, tables, excludeTables [] return testGetSchemaReply, nil } -func agentRPCTestGetSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - result, err := client.GetSchema(ctx, ti, testGetSchemaTables, testGetSchemaExcludeTables, true) +func agentRPCTestGetSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + result, err := client.GetSchema(ctx, tablet, testGetSchemaTables, testGetSchemaExcludeTables, true) compareError(t, "GetSchema", err, result, testGetSchemaReply) } -func agentRPCTestGetSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.GetSchema(ctx, ti, testGetSchemaTables, testGetSchemaExcludeTables, true) +func agentRPCTestGetSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.GetSchema(ctx, tablet, testGetSchemaTables, testGetSchemaExcludeTables, true) expectRPCWrapPanic(t, err) } @@ -297,13 +295,13 @@ func (fra *fakeRPCAgent) GetPermissions(ctx context.Context) (*tabletmanagerdata return testGetPermissionsReply, nil } -func agentRPCTestGetPermissions(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - result, err := client.GetPermissions(ctx, ti) +func agentRPCTestGetPermissions(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + result, err := 
client.GetPermissions(ctx, tablet) compareError(t, "GetPermissions", err, result, testGetPermissionsReply) } -func agentRPCTestGetPermissionsPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.GetPermissions(ctx, ti) +func agentRPCTestGetPermissionsPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.GetPermissions(ctx, tablet) expectRPCWrapPanic(t, err) } @@ -323,21 +321,21 @@ func (fra *fakeRPCAgent) SetReadOnly(ctx context.Context, rdonly bool) error { return nil } -func agentRPCTestSetReadOnly(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { +func agentRPCTestSetReadOnly(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { testSetReadOnlyExpectedValue = true - err := client.SetReadOnly(ctx, ti) + err := client.SetReadOnly(ctx, tablet) if err != nil { t.Errorf("SetReadOnly failed: %v", err) } testSetReadOnlyExpectedValue = false - err = client.SetReadWrite(ctx, ti) + err = client.SetReadWrite(ctx, tablet) if err != nil { t.Errorf("SetReadWrite failed: %v", err) } } -func agentRPCTestSetReadOnlyPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SetReadOnly(ctx, ti) +func agentRPCTestSetReadOnlyPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.SetReadOnly(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -351,15 +349,15 @@ func (fra *fakeRPCAgent) ChangeType(ctx context.Context, tabletType topodatapb.T return nil } -func agentRPCTestChangeType(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.ChangeType(ctx, ti, testChangeTypeValue) +func agentRPCTestChangeType(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, 
tablet *topodatapb.Tablet) { + err := client.ChangeType(ctx, tablet, testChangeTypeValue) if err != nil { t.Errorf("ChangeType failed: %v", err) } } -func agentRPCTestChangeTypePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.ChangeType(ctx, ti, testChangeTypeValue) +func agentRPCTestChangeTypePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.ChangeType(ctx, tablet, testChangeTypeValue) expectRPCWrapLockActionPanic(t, err) } @@ -372,15 +370,15 @@ func (fra *fakeRPCAgent) Sleep(ctx context.Context, duration time.Duration) { compare(fra.t, "Sleep duration", duration, testSleepDuration) } -func agentRPCTestSleep(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.Sleep(ctx, ti, testSleepDuration) +func agentRPCTestSleep(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.Sleep(ctx, tablet, testSleepDuration) if err != nil { t.Errorf("Sleep failed: %v", err) } } -func agentRPCTestSleepPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.Sleep(ctx, ti, testSleepDuration) +func agentRPCTestSleepPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.Sleep(ctx, tablet, testSleepDuration) expectRPCWrapLockActionPanic(t, err) } @@ -406,13 +404,13 @@ func (fra *fakeRPCAgent) ExecuteHook(ctx context.Context, hk *hook.Hook) *hook.H return testExecuteHookHookResult } -func agentRPCTestExecuteHook(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - hr, err := client.ExecuteHook(ctx, ti, testExecuteHookHook) +func agentRPCTestExecuteHook(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + hr, err 
:= client.ExecuteHook(ctx, tablet, testExecuteHookHook) compareError(t, "ExecuteHook", err, hr, testExecuteHookHookResult) } -func agentRPCTestExecuteHookPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.ExecuteHook(ctx, ti, testExecuteHookHook) +func agentRPCTestExecuteHookPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.ExecuteHook(ctx, tablet, testExecuteHookHook) expectRPCWrapLockActionPanic(t, err) } @@ -428,8 +426,8 @@ func (fra *fakeRPCAgent) RefreshState(ctx context.Context) { testRefreshStateCalled = true } -func agentRPCTestRefreshState(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.RefreshState(ctx, ti) +func agentRPCTestRefreshState(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.RefreshState(ctx, tablet) if err != nil { t.Errorf("RefreshState failed: %v", err) } @@ -438,8 +436,8 @@ func agentRPCTestRefreshState(ctx context.Context, t *testing.T, client tmclient } } -func agentRPCTestRefreshStatePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.RefreshState(ctx, ti) +func agentRPCTestRefreshStatePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.RefreshState(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -459,27 +457,27 @@ func (fra *fakeRPCAgent) IgnoreHealthError(ctx context.Context, pattern string) return nil } -func agentRPCTestRunHealthCheck(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.RunHealthCheck(ctx, ti) +func agentRPCTestRunHealthCheck(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := 
client.RunHealthCheck(ctx, tablet) if err != nil { t.Errorf("RunHealthCheck failed: %v", err) } } -func agentRPCTestRunHealthCheckPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.RunHealthCheck(ctx, ti) +func agentRPCTestRunHealthCheckPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.RunHealthCheck(ctx, tablet) expectRPCWrapPanic(t, err) } -func agentRPCTestIgnoreHealthError(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.IgnoreHealthError(ctx, ti, testIgnoreHealthErrorValue) +func agentRPCTestIgnoreHealthError(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.IgnoreHealthError(ctx, tablet, testIgnoreHealthErrorValue) if err != nil { t.Errorf("IgnoreHealthError failed: %v", err) } } -func agentRPCTestIgnoreHealthErrorPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.IgnoreHealthError(ctx, ti, testIgnoreHealthErrorValue) +func agentRPCTestIgnoreHealthErrorPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.IgnoreHealthError(ctx, tablet, testIgnoreHealthErrorValue) expectRPCWrapPanic(t, err) } @@ -495,8 +493,8 @@ func (fra *fakeRPCAgent) ReloadSchema(ctx context.Context) { testReloadSchemaCalled = true } -func agentRPCTestReloadSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.ReloadSchema(ctx, ti) +func agentRPCTestReloadSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.ReloadSchema(ctx, tablet) if err != nil { t.Errorf("ReloadSchema failed: %v", err) } @@ -505,8 +503,8 @@ func agentRPCTestReloadSchema(ctx context.Context, t 
*testing.T, client tmclient } } -func agentRPCTestReloadSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.ReloadSchema(ctx, ti) +func agentRPCTestReloadSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.ReloadSchema(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -524,13 +522,13 @@ func (fra *fakeRPCAgent) PreflightSchema(ctx context.Context, change string) (*t return testSchemaChangeResult, nil } -func agentRPCTestPreflightSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - scr, err := client.PreflightSchema(ctx, ti, testPreflightSchema) +func agentRPCTestPreflightSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + scr, err := client.PreflightSchema(ctx, tablet, testPreflightSchema) compareError(t, "PreflightSchema", err, scr, testSchemaChangeResult) } -func agentRPCTestPreflightSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.PreflightSchema(ctx, ti, testPreflightSchema) +func agentRPCTestPreflightSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.PreflightSchema(ctx, tablet, testPreflightSchema) expectRPCWrapLockActionPanic(t, err) } @@ -550,13 +548,13 @@ func (fra *fakeRPCAgent) ApplySchema(ctx context.Context, change *tmutils.Schema return testSchemaChangeResult, nil } -func agentRPCTestApplySchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - scr, err := client.ApplySchema(ctx, ti, testSchemaChange) +func agentRPCTestApplySchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + scr, err := client.ApplySchema(ctx, tablet, testSchemaChange) 
compareError(t, "ApplySchema", err, scr, testSchemaChangeResult) } -func agentRPCTestApplySchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.ApplySchema(ctx, ti, testSchemaChange) +func agentRPCTestApplySchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.ApplySchema(ctx, tablet, testSchemaChange) expectRPCWrapLockActionPanic(t, err) } @@ -609,18 +607,18 @@ func (fra *fakeRPCAgent) ExecuteFetchAsApp(ctx context.Context, query string, ma return testExecuteFetchResult, nil } -func agentRPCTestExecuteFetch(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - qr, err := client.ExecuteFetchAsDba(ctx, ti, testExecuteFetchQuery, testExecuteFetchMaxRows, true, true) +func agentRPCTestExecuteFetch(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + qr, err := client.ExecuteFetchAsDba(ctx, tablet, testExecuteFetchQuery, testExecuteFetchMaxRows, true, true) compareError(t, "ExecuteFetchAsDba", err, qr, testExecuteFetchResult) - qr, err = client.ExecuteFetchAsApp(ctx, ti, testExecuteFetchQuery, testExecuteFetchMaxRows) + qr, err = client.ExecuteFetchAsApp(ctx, tablet, testExecuteFetchQuery, testExecuteFetchMaxRows) compareError(t, "ExecuteFetchAsApp", err, qr, testExecuteFetchResult) } -func agentRPCTestExecuteFetchPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.ExecuteFetchAsDba(ctx, ti, testExecuteFetchQuery, testExecuteFetchMaxRows, true, false) +func agentRPCTestExecuteFetchPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.ExecuteFetchAsDba(ctx, tablet, testExecuteFetchQuery, testExecuteFetchMaxRows, true, false) expectRPCWrapPanic(t, err) - _, err = client.ExecuteFetchAsApp(ctx, ti, 
testExecuteFetchQuery, testExecuteFetchMaxRows) + _, err = client.ExecuteFetchAsApp(ctx, tablet, testExecuteFetchQuery, testExecuteFetchMaxRows) expectRPCWrapPanic(t, err) } @@ -645,13 +643,13 @@ func (fra *fakeRPCAgent) SlaveStatus(ctx context.Context) (*replicationdatapb.St return testReplicationStatus, nil } -func agentRPCTestSlaveStatus(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rs, err := client.SlaveStatus(ctx, ti) +func agentRPCTestSlaveStatus(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + rs, err := client.SlaveStatus(ctx, tablet) compareError(t, "SlaveStatus", err, rs, testReplicationStatus) } -func agentRPCTestSlaveStatusPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.SlaveStatus(ctx, ti) +func agentRPCTestSlaveStatusPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.SlaveStatus(ctx, tablet) expectRPCWrapPanic(t, err) } @@ -664,13 +662,13 @@ func (fra *fakeRPCAgent) MasterPosition(ctx context.Context) (string, error) { return testReplicationPosition, nil } -func agentRPCTestMasterPosition(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rs, err := client.MasterPosition(ctx, ti) +func agentRPCTestMasterPosition(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + rs, err := client.MasterPosition(ctx, tablet) compareError(t, "MasterPosition", err, rs, testReplicationPosition) } -func agentRPCTestMasterPositionPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.MasterPosition(ctx, ti) +func agentRPCTestMasterPositionPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := 
client.MasterPosition(ctx, tablet) expectRPCWrapPanic(t, err) } @@ -684,13 +682,13 @@ func (fra *fakeRPCAgent) StopSlave(ctx context.Context) error { return nil } -func agentRPCTestStopSlave(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.StopSlave(ctx, ti) +func agentRPCTestStopSlave(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.StopSlave(ctx, tablet) compareError(t, "StopSlave", err, true, testStopSlaveCalled) } -func agentRPCTestStopSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.StopSlave(ctx, ti) +func agentRPCTestStopSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.StopSlave(ctx, tablet) expectRPCWrapLockPanic(t, err) } @@ -705,13 +703,13 @@ func (fra *fakeRPCAgent) StopSlaveMinimum(ctx context.Context, position string, return testReplicationPositionReturned, nil } -func agentRPCTestStopSlaveMinimum(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - pos, err := client.StopSlaveMinimum(ctx, ti, testReplicationPosition, testStopSlaveMinimumWaitTime) +func agentRPCTestStopSlaveMinimum(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + pos, err := client.StopSlaveMinimum(ctx, tablet, testReplicationPosition, testStopSlaveMinimumWaitTime) compareError(t, "StopSlaveMinimum", err, pos, testReplicationPositionReturned) } -func agentRPCTestStopSlaveMinimumPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.StopSlaveMinimum(ctx, ti, testReplicationPosition, testStopSlaveMinimumWaitTime) +func agentRPCTestStopSlaveMinimumPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { 
+ _, err := client.StopSlaveMinimum(ctx, tablet, testReplicationPosition, testStopSlaveMinimumWaitTime) expectRPCWrapLockPanic(t, err) } @@ -725,13 +723,13 @@ func (fra *fakeRPCAgent) StartSlave(ctx context.Context) error { return nil } -func agentRPCTestStartSlave(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.StartSlave(ctx, ti) +func agentRPCTestStartSlave(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.StartSlave(ctx, tablet) compareError(t, "StartSlave", err, true, testStartSlaveCalled) } -func agentRPCTestStartSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.StartSlave(ctx, ti) +func agentRPCTestStartSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.StartSlave(ctx, tablet) expectRPCWrapLockPanic(t, err) } @@ -745,13 +743,13 @@ func (fra *fakeRPCAgent) TabletExternallyReparented(ctx context.Context, externa return nil } -func agentRPCTestTabletExternallyReparented(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.TabletExternallyReparented(ctx, ti, "") +func agentRPCTestTabletExternallyReparented(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.TabletExternallyReparented(ctx, tablet, "") compareError(t, "TabletExternallyReparented", err, true, testTabletExternallyReparentedCalled) } -func agentRPCTestTabletExternallyReparentedPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.TabletExternallyReparented(ctx, ti, "") +func agentRPCTestTabletExternallyReparentedPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := 
client.TabletExternallyReparented(ctx, tablet, "") expectRPCWrapLockPanic(t, err) } @@ -764,13 +762,13 @@ func (fra *fakeRPCAgent) GetSlaves(ctx context.Context) ([]string, error) { return testGetSlavesResult, nil } -func agentRPCTestGetSlaves(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - s, err := client.GetSlaves(ctx, ti) +func agentRPCTestGetSlaves(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + s, err := client.GetSlaves(ctx, tablet) compareError(t, "GetSlaves", err, s, testGetSlavesResult) } -func agentRPCTestGetSlavesPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.GetSlaves(ctx, ti) +func agentRPCTestGetSlavesPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.GetSlaves(ctx, tablet) expectRPCWrapPanic(t, err) } @@ -791,13 +789,13 @@ func (fra *fakeRPCAgent) WaitBlpPosition(ctx context.Context, blpPosition *table return nil } -func agentRPCTestWaitBlpPosition(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.WaitBlpPosition(ctx, ti, testBlpPosition, testWaitBlpPositionWaitTime) +func agentRPCTestWaitBlpPosition(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.WaitBlpPosition(ctx, tablet, testBlpPosition, testWaitBlpPositionWaitTime) compareError(t, "WaitBlpPosition", err, true, testWaitBlpPositionCalled) } -func agentRPCTestWaitBlpPositionPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.WaitBlpPosition(ctx, ti, testBlpPosition, testWaitBlpPositionWaitTime) +func agentRPCTestWaitBlpPositionPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := 
client.WaitBlpPosition(ctx, tablet, testBlpPosition, testWaitBlpPositionWaitTime) expectRPCWrapLockPanic(t, err) } @@ -815,13 +813,13 @@ func (fra *fakeRPCAgent) StopBlp(ctx context.Context) ([]*tabletmanagerdatapb.Bl return testBlpPositionList, nil } -func agentRPCTestStopBlp(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - bpl, err := client.StopBlp(ctx, ti) +func agentRPCTestStopBlp(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + bpl, err := client.StopBlp(ctx, tablet) compareError(t, "StopBlp", err, bpl, testBlpPositionList) } -func agentRPCTestStopBlpPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.StopBlp(ctx, ti) +func agentRPCTestStopBlpPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.StopBlp(ctx, tablet) expectRPCWrapLockPanic(t, err) } @@ -835,13 +833,13 @@ func (fra *fakeRPCAgent) StartBlp(ctx context.Context) error { return nil } -func agentRPCTestStartBlp(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.StartBlp(ctx, ti) +func agentRPCTestStartBlp(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.StartBlp(ctx, tablet) compareError(t, "StartBlp", err, true, testStartBlpCalled) } -func agentRPCTestStartBlpPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.StartBlp(ctx, ti) +func agentRPCTestStartBlpPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.StartBlp(ctx, tablet) expectRPCWrapLockPanic(t, err) } @@ -856,13 +854,13 @@ func (fra *fakeRPCAgent) RunBlpUntil(ctx context.Context, bpl []*tabletmanagerda return testReplicationPosition, nil } -func 
agentRPCTestRunBlpUntil(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rp, err := client.RunBlpUntil(ctx, ti, testBlpPositionList, testRunBlpUntilWaitTime) +func agentRPCTestRunBlpUntil(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + rp, err := client.RunBlpUntil(ctx, tablet, testBlpPositionList, testRunBlpUntilWaitTime) compareError(t, "RunBlpUntil", err, rp, testReplicationPosition) } -func agentRPCTestRunBlpUntilPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.RunBlpUntil(ctx, ti, testBlpPositionList, testRunBlpUntilWaitTime) +func agentRPCTestRunBlpUntilPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.RunBlpUntil(ctx, tablet, testBlpPositionList, testRunBlpUntilWaitTime) expectRPCWrapLockPanic(t, err) } @@ -880,13 +878,13 @@ func (fra *fakeRPCAgent) ResetReplication(ctx context.Context) error { return nil } -func agentRPCTestResetReplication(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.ResetReplication(ctx, ti) +func agentRPCTestResetReplication(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.ResetReplication(ctx, tablet) compareError(t, "ResetReplication", err, true, testResetReplicationCalled) } -func agentRPCTestResetReplicationPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.ResetReplication(ctx, ti) +func agentRPCTestResetReplicationPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.ResetReplication(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -897,13 +895,13 @@ func (fra *fakeRPCAgent) InitMaster(ctx context.Context) (string, 
error) { return testReplicationPosition, nil } -func agentRPCTestInitMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rp, err := client.InitMaster(ctx, ti) +func agentRPCTestInitMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + rp, err := client.InitMaster(ctx, tablet) compareError(t, "InitMaster", err, rp, testReplicationPosition) } -func agentRPCTestInitMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.InitMaster(ctx, ti) +func agentRPCTestInitMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.InitMaster(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -927,13 +925,13 @@ func (fra *fakeRPCAgent) PopulateReparentJournal(ctx context.Context, timeCreate return nil } -func agentRPCTestPopulateReparentJournal(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.PopulateReparentJournal(ctx, ti, testTimeCreatedNS, testActionName, testMasterAlias, testReplicationPosition) +func agentRPCTestPopulateReparentJournal(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.PopulateReparentJournal(ctx, tablet, testTimeCreatedNS, testActionName, testMasterAlias, testReplicationPosition) compareError(t, "PopulateReparentJournal", err, true, testPopulateReparentJournalCalled) } -func agentRPCTestPopulateReparentJournalPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.PopulateReparentJournal(ctx, ti, testTimeCreatedNS, testActionName, testMasterAlias, testReplicationPosition) +func agentRPCTestPopulateReparentJournalPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + 
err := client.PopulateReparentJournal(ctx, tablet, testTimeCreatedNS, testActionName, testMasterAlias, testReplicationPosition) expectRPCWrapPanic(t, err) } @@ -950,13 +948,13 @@ func (fra *fakeRPCAgent) InitSlave(ctx context.Context, parent *topodatapb.Table return nil } -func agentRPCTestInitSlave(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.InitSlave(ctx, ti, testMasterAlias, testReplicationPosition, testTimeCreatedNS) +func agentRPCTestInitSlave(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.InitSlave(ctx, tablet, testMasterAlias, testReplicationPosition, testTimeCreatedNS) compareError(t, "InitSlave", err, true, testInitSlaveCalled) } -func agentRPCTestInitSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.InitSlave(ctx, ti, testMasterAlias, testReplicationPosition, testTimeCreatedNS) +func agentRPCTestInitSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.InitSlave(ctx, tablet, testMasterAlias, testReplicationPosition, testTimeCreatedNS) expectRPCWrapLockActionPanic(t, err) } @@ -967,13 +965,13 @@ func (fra *fakeRPCAgent) DemoteMaster(ctx context.Context) (string, error) { return testReplicationPosition, nil } -func agentRPCTestDemoteMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rp, err := client.DemoteMaster(ctx, ti) +func agentRPCTestDemoteMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + rp, err := client.DemoteMaster(ctx, tablet) compareError(t, "DemoteMaster", err, rp, testReplicationPosition) } -func agentRPCTestDemoteMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.DemoteMaster(ctx, 
ti) +func agentRPCTestDemoteMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.DemoteMaster(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -987,13 +985,13 @@ func (fra *fakeRPCAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, position return testReplicationPositionReturned, nil } -func agentRPCTestPromoteSlaveWhenCaughtUp(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rp, err := client.PromoteSlaveWhenCaughtUp(ctx, ti, testReplicationPosition) +func agentRPCTestPromoteSlaveWhenCaughtUp(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + rp, err := client.PromoteSlaveWhenCaughtUp(ctx, tablet, testReplicationPosition) compareError(t, "PromoteSlaveWhenCaughtUp", err, rp, testReplicationPositionReturned) } -func agentRPCTestPromoteSlaveWhenCaughtUpPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.PromoteSlaveWhenCaughtUp(ctx, ti, testReplicationPosition) +func agentRPCTestPromoteSlaveWhenCaughtUpPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.PromoteSlaveWhenCaughtUp(ctx, tablet, testReplicationPosition) expectRPCWrapLockActionPanic(t, err) } @@ -1007,13 +1005,13 @@ func (fra *fakeRPCAgent) SlaveWasPromoted(ctx context.Context) error { return nil } -func agentRPCTestSlaveWasPromoted(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SlaveWasPromoted(ctx, ti) +func agentRPCTestSlaveWasPromoted(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.SlaveWasPromoted(ctx, tablet) compareError(t, "SlaveWasPromoted", err, true, testSlaveWasPromotedCalled) } -func agentRPCTestSlaveWasPromotedPanic(ctx context.Context, t 
*testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SlaveWasPromoted(ctx, ti) +func agentRPCTestSlaveWasPromotedPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.SlaveWasPromoted(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -1031,13 +1029,13 @@ func (fra *fakeRPCAgent) SetMaster(ctx context.Context, parent *topodatapb.Table return nil } -func agentRPCTestSetMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SetMaster(ctx, ti, testMasterAlias, testTimeCreatedNS, testForceStartSlave) +func agentRPCTestSetMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.SetMaster(ctx, tablet, testMasterAlias, testTimeCreatedNS, testForceStartSlave) compareError(t, "SetMaster", err, true, testSetMasterCalled) } -func agentRPCTestSetMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SetMaster(ctx, ti, testMasterAlias, testTimeCreatedNS, testForceStartSlave) +func agentRPCTestSetMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.SetMaster(ctx, tablet, testMasterAlias, testTimeCreatedNS, testForceStartSlave) expectRPCWrapLockActionPanic(t, err) } @@ -1058,13 +1056,13 @@ func (fra *fakeRPCAgent) SlaveWasRestarted(ctx context.Context, swrd *actionnode return nil } -func agentRPCTestSlaveWasRestarted(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SlaveWasRestarted(ctx, ti, testSlaveWasRestartedArgs) +func agentRPCTestSlaveWasRestarted(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.SlaveWasRestarted(ctx, tablet, testSlaveWasRestartedArgs) compareError(t, 
"SlaveWasRestarted", err, true, testSlaveWasRestartedCalled) } -func agentRPCTestSlaveWasRestartedPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SlaveWasRestarted(ctx, ti, testSlaveWasRestartedArgs) +func agentRPCTestSlaveWasRestartedPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.SlaveWasRestarted(ctx, tablet, testSlaveWasRestartedArgs) expectRPCWrapLockActionPanic(t, err) } @@ -1075,13 +1073,13 @@ func (fra *fakeRPCAgent) StopReplicationAndGetStatus(ctx context.Context) (*repl return testReplicationStatus, nil } -func agentRPCTestStopReplicationAndGetStatus(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rp, err := client.StopReplicationAndGetStatus(ctx, ti) +func agentRPCTestStopReplicationAndGetStatus(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + rp, err := client.StopReplicationAndGetStatus(ctx, tablet) compareError(t, "StopReplicationAndGetStatus", err, rp, testReplicationStatus) } -func agentRPCTestStopReplicationAndGetStatusPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.StopReplicationAndGetStatus(ctx, ti) +func agentRPCTestStopReplicationAndGetStatusPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.StopReplicationAndGetStatus(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -1092,13 +1090,13 @@ func (fra *fakeRPCAgent) PromoteSlave(ctx context.Context) (string, error) { return testReplicationPosition, nil } -func agentRPCTestPromoteSlave(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rp, err := client.PromoteSlave(ctx, ti) +func agentRPCTestPromoteSlave(ctx context.Context, t *testing.T, client 
tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + rp, err := client.PromoteSlave(ctx, tablet) compareError(t, "PromoteSlave", err, rp, testReplicationPosition) } -func agentRPCTestPromoteSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.PromoteSlave(ctx, ti) +func agentRPCTestPromoteSlavePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + _, err := client.PromoteSlave(ctx, tablet) expectRPCWrapLockActionPanic(t, err) } @@ -1119,8 +1117,8 @@ func (fra *fakeRPCAgent) Backup(ctx context.Context, concurrency int, logger log return nil } -func agentRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - stream, err := client.Backup(ctx, ti, testBackupConcurrency) +func agentRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + stream, err := client.Backup(ctx, tablet, testBackupConcurrency) if err != nil { t.Fatalf("Backup failed: %v", err) } @@ -1128,8 +1126,8 @@ func agentRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.Table compareError(t, "Backup", err, true, testBackupCalled) } -func agentRPCTestBackupPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - stream, err := client.Backup(ctx, ti, testBackupConcurrency) +func agentRPCTestBackupPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + stream, err := client.Backup(ctx, tablet, testBackupConcurrency) if err != nil { t.Fatalf("Backup failed: %v", err) } @@ -1180,60 +1178,60 @@ func (fra *fakeRPCAgent) RPCWrapLockAction(ctx context.Context, name string, arg // the provided tablet. 
Tablet's vt address needs to be configured so // the client will connect to a server backed by our RPCAgent (returned // by NewFakeRPCAgent) -func Run(t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, fakeAgent tabletmanager.RPCAgent) { +func Run(t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, fakeAgent tabletmanager.RPCAgent) { ctx := context.Background() // Test RPC specific methods of the interface. - agentRPCTestIsTimeoutErrorDialExpiredContext(ctx, t, client, ti) - agentRPCTestIsTimeoutErrorDialTimeout(ctx, t, client, ti) - agentRPCTestIsTimeoutErrorRPC(ctx, t, client, ti, fakeAgent.(*fakeRPCAgent)) + agentRPCTestIsTimeoutErrorDialExpiredContext(ctx, t, client, tablet) + agentRPCTestIsTimeoutErrorDialTimeout(ctx, t, client, tablet) + agentRPCTestIsTimeoutErrorRPC(ctx, t, client, tablet, fakeAgent.(*fakeRPCAgent)) // Various read-only methods - agentRPCTestPing(ctx, t, client, ti) - agentRPCTestGetSchema(ctx, t, client, ti) - agentRPCTestGetPermissions(ctx, t, client, ti) + agentRPCTestPing(ctx, t, client, tablet) + agentRPCTestGetSchema(ctx, t, client, tablet) + agentRPCTestGetPermissions(ctx, t, client, tablet) // Various read-write methods - agentRPCTestSetReadOnly(ctx, t, client, ti) - agentRPCTestChangeType(ctx, t, client, ti) - agentRPCTestSleep(ctx, t, client, ti) - agentRPCTestExecuteHook(ctx, t, client, ti) - agentRPCTestRefreshState(ctx, t, client, ti) - agentRPCTestRunHealthCheck(ctx, t, client, ti) - agentRPCTestIgnoreHealthError(ctx, t, client, ti) - agentRPCTestReloadSchema(ctx, t, client, ti) - agentRPCTestPreflightSchema(ctx, t, client, ti) - agentRPCTestApplySchema(ctx, t, client, ti) - agentRPCTestExecuteFetch(ctx, t, client, ti) + agentRPCTestSetReadOnly(ctx, t, client, tablet) + agentRPCTestChangeType(ctx, t, client, tablet) + agentRPCTestSleep(ctx, t, client, tablet) + agentRPCTestExecuteHook(ctx, t, client, tablet) + agentRPCTestRefreshState(ctx, t, client, tablet) + 
agentRPCTestRunHealthCheck(ctx, t, client, tablet) + agentRPCTestIgnoreHealthError(ctx, t, client, tablet) + agentRPCTestReloadSchema(ctx, t, client, tablet) + agentRPCTestPreflightSchema(ctx, t, client, tablet) + agentRPCTestApplySchema(ctx, t, client, tablet) + agentRPCTestExecuteFetch(ctx, t, client, tablet) // Replication related methods - agentRPCTestSlaveStatus(ctx, t, client, ti) - agentRPCTestMasterPosition(ctx, t, client, ti) - agentRPCTestStopSlave(ctx, t, client, ti) - agentRPCTestStopSlaveMinimum(ctx, t, client, ti) - agentRPCTestStartSlave(ctx, t, client, ti) - agentRPCTestTabletExternallyReparented(ctx, t, client, ti) - agentRPCTestGetSlaves(ctx, t, client, ti) - agentRPCTestWaitBlpPosition(ctx, t, client, ti) - agentRPCTestStopBlp(ctx, t, client, ti) - agentRPCTestStartBlp(ctx, t, client, ti) - agentRPCTestRunBlpUntil(ctx, t, client, ti) + agentRPCTestSlaveStatus(ctx, t, client, tablet) + agentRPCTestMasterPosition(ctx, t, client, tablet) + agentRPCTestStopSlave(ctx, t, client, tablet) + agentRPCTestStopSlaveMinimum(ctx, t, client, tablet) + agentRPCTestStartSlave(ctx, t, client, tablet) + agentRPCTestTabletExternallyReparented(ctx, t, client, tablet) + agentRPCTestGetSlaves(ctx, t, client, tablet) + agentRPCTestWaitBlpPosition(ctx, t, client, tablet) + agentRPCTestStopBlp(ctx, t, client, tablet) + agentRPCTestStartBlp(ctx, t, client, tablet) + agentRPCTestRunBlpUntil(ctx, t, client, tablet) // Reparenting related functions - agentRPCTestResetReplication(ctx, t, client, ti) - agentRPCTestInitMaster(ctx, t, client, ti) - agentRPCTestPopulateReparentJournal(ctx, t, client, ti) - agentRPCTestInitSlave(ctx, t, client, ti) - agentRPCTestDemoteMaster(ctx, t, client, ti) - agentRPCTestPromoteSlaveWhenCaughtUp(ctx, t, client, ti) - agentRPCTestSlaveWasPromoted(ctx, t, client, ti) - agentRPCTestSetMaster(ctx, t, client, ti) - agentRPCTestSlaveWasRestarted(ctx, t, client, ti) - agentRPCTestStopReplicationAndGetStatus(ctx, t, client, ti) - 
agentRPCTestPromoteSlave(ctx, t, client, ti) + agentRPCTestResetReplication(ctx, t, client, tablet) + agentRPCTestInitMaster(ctx, t, client, tablet) + agentRPCTestPopulateReparentJournal(ctx, t, client, tablet) + agentRPCTestInitSlave(ctx, t, client, tablet) + agentRPCTestDemoteMaster(ctx, t, client, tablet) + agentRPCTestPromoteSlaveWhenCaughtUp(ctx, t, client, tablet) + agentRPCTestSlaveWasPromoted(ctx, t, client, tablet) + agentRPCTestSetMaster(ctx, t, client, tablet) + agentRPCTestSlaveWasRestarted(ctx, t, client, tablet) + agentRPCTestStopReplicationAndGetStatus(ctx, t, client, tablet) + agentRPCTestPromoteSlave(ctx, t, client, tablet) // Backup / restore related methods - agentRPCTestBackup(ctx, t, client, ti) + agentRPCTestBackup(ctx, t, client, tablet) // // Tests panic handling everywhere now @@ -1241,48 +1239,48 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, fakeAgent.(*fakeRPCAgent).panics = true // Various read-only methods - agentRPCTestPingPanic(ctx, t, client, ti) - agentRPCTestGetSchemaPanic(ctx, t, client, ti) - agentRPCTestGetPermissionsPanic(ctx, t, client, ti) + agentRPCTestPingPanic(ctx, t, client, tablet) + agentRPCTestGetSchemaPanic(ctx, t, client, tablet) + agentRPCTestGetPermissionsPanic(ctx, t, client, tablet) // Various read-write methods - agentRPCTestSetReadOnlyPanic(ctx, t, client, ti) - agentRPCTestChangeTypePanic(ctx, t, client, ti) - agentRPCTestSleepPanic(ctx, t, client, ti) - agentRPCTestExecuteHookPanic(ctx, t, client, ti) - agentRPCTestRefreshStatePanic(ctx, t, client, ti) - agentRPCTestRunHealthCheckPanic(ctx, t, client, ti) - agentRPCTestReloadSchemaPanic(ctx, t, client, ti) - agentRPCTestPreflightSchemaPanic(ctx, t, client, ti) - agentRPCTestApplySchemaPanic(ctx, t, client, ti) - agentRPCTestExecuteFetchPanic(ctx, t, client, ti) + agentRPCTestSetReadOnlyPanic(ctx, t, client, tablet) + agentRPCTestChangeTypePanic(ctx, t, client, tablet) + agentRPCTestSleepPanic(ctx, t, client, tablet) + 
agentRPCTestExecuteHookPanic(ctx, t, client, tablet) + agentRPCTestRefreshStatePanic(ctx, t, client, tablet) + agentRPCTestRunHealthCheckPanic(ctx, t, client, tablet) + agentRPCTestReloadSchemaPanic(ctx, t, client, tablet) + agentRPCTestPreflightSchemaPanic(ctx, t, client, tablet) + agentRPCTestApplySchemaPanic(ctx, t, client, tablet) + agentRPCTestExecuteFetchPanic(ctx, t, client, tablet) // Replication related methods - agentRPCTestSlaveStatusPanic(ctx, t, client, ti) - agentRPCTestMasterPositionPanic(ctx, t, client, ti) - agentRPCTestStopSlavePanic(ctx, t, client, ti) - agentRPCTestStopSlaveMinimumPanic(ctx, t, client, ti) - agentRPCTestStartSlavePanic(ctx, t, client, ti) - agentRPCTestTabletExternallyReparentedPanic(ctx, t, client, ti) - agentRPCTestGetSlavesPanic(ctx, t, client, ti) - agentRPCTestWaitBlpPositionPanic(ctx, t, client, ti) - agentRPCTestStopBlpPanic(ctx, t, client, ti) - agentRPCTestStartBlpPanic(ctx, t, client, ti) - agentRPCTestRunBlpUntilPanic(ctx, t, client, ti) + agentRPCTestSlaveStatusPanic(ctx, t, client, tablet) + agentRPCTestMasterPositionPanic(ctx, t, client, tablet) + agentRPCTestStopSlavePanic(ctx, t, client, tablet) + agentRPCTestStopSlaveMinimumPanic(ctx, t, client, tablet) + agentRPCTestStartSlavePanic(ctx, t, client, tablet) + agentRPCTestTabletExternallyReparentedPanic(ctx, t, client, tablet) + agentRPCTestGetSlavesPanic(ctx, t, client, tablet) + agentRPCTestWaitBlpPositionPanic(ctx, t, client, tablet) + agentRPCTestStopBlpPanic(ctx, t, client, tablet) + agentRPCTestStartBlpPanic(ctx, t, client, tablet) + agentRPCTestRunBlpUntilPanic(ctx, t, client, tablet) // Reparenting related functions - agentRPCTestResetReplicationPanic(ctx, t, client, ti) - agentRPCTestInitMasterPanic(ctx, t, client, ti) - agentRPCTestPopulateReparentJournalPanic(ctx, t, client, ti) - agentRPCTestInitSlavePanic(ctx, t, client, ti) - agentRPCTestDemoteMasterPanic(ctx, t, client, ti) - agentRPCTestPromoteSlaveWhenCaughtUpPanic(ctx, t, client, ti) - 
agentRPCTestSlaveWasPromotedPanic(ctx, t, client, ti) - agentRPCTestSetMasterPanic(ctx, t, client, ti) - agentRPCTestSlaveWasRestartedPanic(ctx, t, client, ti) - agentRPCTestStopReplicationAndGetStatusPanic(ctx, t, client, ti) - agentRPCTestPromoteSlavePanic(ctx, t, client, ti) + agentRPCTestResetReplicationPanic(ctx, t, client, tablet) + agentRPCTestInitMasterPanic(ctx, t, client, tablet) + agentRPCTestPopulateReparentJournalPanic(ctx, t, client, tablet) + agentRPCTestInitSlavePanic(ctx, t, client, tablet) + agentRPCTestDemoteMasterPanic(ctx, t, client, tablet) + agentRPCTestPromoteSlaveWhenCaughtUpPanic(ctx, t, client, tablet) + agentRPCTestSlaveWasPromotedPanic(ctx, t, client, tablet) + agentRPCTestSetMasterPanic(ctx, t, client, tablet) + agentRPCTestSlaveWasRestartedPanic(ctx, t, client, tablet) + agentRPCTestStopReplicationAndGetStatusPanic(ctx, t, client, tablet) + agentRPCTestPromoteSlavePanic(ctx, t, client, tablet) // Backup / restore related methods - agentRPCTestBackupPanic(ctx, t, client, ti) + agentRPCTestBackupPanic(ctx, t, client, tablet) } diff --git a/go/vt/tabletmanager/faketmclient/fake_client.go b/go/vt/tabletmanager/faketmclient/fake_client.go index 5dae33e4e2..8ef037bf07 100644 --- a/go/vt/tabletmanager/faketmclient/fake_client.go +++ b/go/vt/tabletmanager/faketmclient/fake_client.go @@ -20,7 +20,6 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/topo" logutilpb "github.com/youtube/vitess/go/vt/proto/logutil" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -59,28 +58,28 @@ type FakeTabletManagerClient struct { // // Ping is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) Ping(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) Ping(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // Sleep is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) Sleep(ctx context.Context, tablet *topo.TabletInfo, duration time.Duration) error { +func (client *FakeTabletManagerClient) Sleep(ctx context.Context, tablet *topodatapb.Tablet, duration time.Duration) error { return nil } // ExecuteHook is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) ExecuteHook(ctx context.Context, tablet *topo.TabletInfo, hk *hook.Hook) (*hook.HookResult, error) { +func (client *FakeTabletManagerClient) ExecuteHook(ctx context.Context, tablet *topodatapb.Tablet, hk *hook.Hook) (*hook.HookResult, error) { var hr hook.HookResult return &hr, nil } // GetSchema is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) GetSchema(ctx context.Context, tablet *topo.TabletInfo, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { +func (client *FakeTabletManagerClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { return client.tmc.GetSchema(ctx, tablet, tables, excludeTables, includeViews) } // GetPermissions is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) GetPermissions(ctx context.Context, tablet *topo.TabletInfo) (*tabletmanagerdatapb.Permissions, error) { +func (client *FakeTabletManagerClient) GetPermissions(ctx context.Context, tablet *topodatapb.Tablet) (*tabletmanagerdatapb.Permissions, error) { return &tabletmanagerdatapb.Permissions{}, nil } @@ -89,57 +88,57 @@ func (client *FakeTabletManagerClient) GetPermissions(ctx context.Context, table // // SetReadOnly is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) SetReadOnly(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) SetReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // SetReadWrite is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) SetReadWrite(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) SetReadWrite(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // ChangeType is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) ChangeType(ctx context.Context, tablet *topo.TabletInfo, dbType topodatapb.TabletType) error { +func (client *FakeTabletManagerClient) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType) error { return nil } // RefreshState is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) RefreshState(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) RefreshState(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // RunHealthCheck is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) RunHealthCheck(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // IgnoreHealthError is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) IgnoreHealthError(ctx context.Context, tablet *topo.TabletInfo, pattern string) error { +func (client *FakeTabletManagerClient) IgnoreHealthError(ctx context.Context, tablet *topodatapb.Tablet, pattern string) error { return nil } // ReloadSchema is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) ReloadSchema(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) ReloadSchema(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // PreflightSchema is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) PreflightSchema(ctx context.Context, tablet *topo.TabletInfo, change string) (*tmutils.SchemaChangeResult, error) { +func (client *FakeTabletManagerClient) PreflightSchema(ctx context.Context, tablet *topodatapb.Tablet, change string) (*tmutils.SchemaChangeResult, error) { return &tmutils.SchemaChangeResult{}, nil } // ApplySchema is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) ApplySchema(ctx context.Context, tablet *topo.TabletInfo, change *tmutils.SchemaChange) (*tmutils.SchemaChangeResult, error) { +func (client *FakeTabletManagerClient) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tmutils.SchemaChangeResult, error) { return &tmutils.SchemaChangeResult{}, nil } // ExecuteFetchAsDba is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { +func (client *FakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { return &querypb.QueryResult{}, nil } // ExecuteFetchAsApp is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) ExecuteFetchAsApp(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int) (*querypb.QueryResult, error) { +func (client *FakeTabletManagerClient) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int) (*querypb.QueryResult, error) { return &querypb.QueryResult{}, nil } @@ -148,47 +147,47 @@ func (client *FakeTabletManagerClient) ExecuteFetchAsApp(ctx context.Context, ta // // SlaveStatus is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (*replicationdatapb.Status, error) { +func (client *FakeTabletManagerClient) SlaveStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) { return &replicationdatapb.Status{}, nil } // MasterPosition is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) MasterPosition(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (client *FakeTabletManagerClient) MasterPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return "", nil } // StopSlave is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) StopSlave(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) StopSlave(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // StopSlaveMinimum is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) StopSlaveMinimum(ctx context.Context, tablet *topo.TabletInfo, minPos string, waitTime time.Duration) (string, error) { +func (client *FakeTabletManagerClient) StopSlaveMinimum(ctx context.Context, tablet *topodatapb.Tablet, minPos string, waitTime time.Duration) (string, error) { return "", nil } // StartSlave is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) StartSlave(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) StartSlave(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // TabletExternallyReparented is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) TabletExternallyReparented(ctx context.Context, tablet *topo.TabletInfo, externalID string) error { +func (client *FakeTabletManagerClient) TabletExternallyReparented(ctx context.Context, tablet *topodatapb.Tablet, externalID string) error { return nil } // GetSlaves is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) GetSlaves(ctx context.Context, tablet *topo.TabletInfo) ([]string, error) { +func (client *FakeTabletManagerClient) GetSlaves(ctx context.Context, tablet *topodatapb.Tablet) ([]string, error) { return nil, nil } // WaitBlpPosition is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) WaitBlpPosition(ctx context.Context, tablet *topo.TabletInfo, blpPosition *tabletmanagerdatapb.BlpPosition, waitTime time.Duration) error { +func (client *FakeTabletManagerClient) WaitBlpPosition(ctx context.Context, tablet *topodatapb.Tablet, blpPosition *tabletmanagerdatapb.BlpPosition, waitTime time.Duration) error { return nil } // StopBlp is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) StopBlp(ctx context.Context, tablet *topo.TabletInfo) ([]*tabletmanagerdatapb.BlpPosition, error) { +func (client *FakeTabletManagerClient) StopBlp(ctx context.Context, tablet *topodatapb.Tablet) ([]*tabletmanagerdatapb.BlpPosition, error) { // TODO(aaijazi): this works because all tests so far only need to rely on Uid 0. // Ideally, this should turn into a full mock, where the caller can configure the exact // return value. @@ -201,12 +200,12 @@ func (client *FakeTabletManagerClient) StopBlp(ctx context.Context, tablet *topo } // StartBlp is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) StartBlp(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) StartBlp(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // RunBlpUntil is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) RunBlpUntil(ctx context.Context, tablet *topo.TabletInfo, positions []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) { +func (client *FakeTabletManagerClient) RunBlpUntil(ctx context.Context, tablet *topodatapb.Tablet, positions []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) { return "", nil } @@ -215,57 +214,57 @@ func (client *FakeTabletManagerClient) RunBlpUntil(ctx context.Context, tablet * // // ResetReplication is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) ResetReplication(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) ResetReplication(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // InitMaster is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) InitMaster(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (client *FakeTabletManagerClient) InitMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return "", nil } // PopulateReparentJournal is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) PopulateReparentJournal(ctx context.Context, tablet *topo.TabletInfo, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, position string) error { +func (client *FakeTabletManagerClient) PopulateReparentJournal(ctx context.Context, tablet *topodatapb.Tablet, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, position string) error { return nil } // InitSlave is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) InitSlave(ctx context.Context, tablet *topo.TabletInfo, parent *topodatapb.TabletAlias, position string, timeCreatedNS int64) error { +func (client *FakeTabletManagerClient) InitSlave(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, position string, timeCreatedNS int64) error { return nil } // DemoteMaster is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) DemoteMaster(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (client *FakeTabletManagerClient) DemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return "", nil } // PromoteSlaveWhenCaughtUp is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topo.TabletInfo, position string) (string, error) { +func (client *FakeTabletManagerClient) PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topodatapb.Tablet, position string) (string, error) { return "", nil } // SlaveWasPromoted is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) SlaveWasPromoted(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *FakeTabletManagerClient) SlaveWasPromoted(ctx context.Context, tablet *topodatapb.Tablet) error { return nil } // SetMaster is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { +func (client *FakeTabletManagerClient) SetMaster(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { return nil } // SlaveWasRestarted is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) SlaveWasRestarted(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.SlaveWasRestartedArgs) error { +func (client *FakeTabletManagerClient) SlaveWasRestarted(ctx context.Context, tablet *topodatapb.Tablet, args *actionnode.SlaveWasRestartedArgs) error { return nil } // StopReplicationAndGetStatus is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) StopReplicationAndGetStatus(ctx context.Context, tablet *topo.TabletInfo) (*replicationdatapb.Status, error) { +func (client *FakeTabletManagerClient) StopReplicationAndGetStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) { return &replicationdatapb.Status{}, nil } // PromoteSlave is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) PromoteSlave(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (client *FakeTabletManagerClient) PromoteSlave(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return "", nil } @@ -280,7 +279,7 @@ func (e *eofEventStream) Recv() (*logutilpb.Event, error) { } // Backup is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) Backup(ctx context.Context, tablet *topo.TabletInfo, concurrency int) (logutil.EventStream, error) { +func (client *FakeTabletManagerClient) Backup(ctx context.Context, tablet *topodatapb.Tablet, concurrency int) (logutil.EventStream, error) { return &eofEventStream{}, nil } diff --git a/go/vt/tabletmanager/grpctmclient/client.go b/go/vt/tabletmanager/grpctmclient/client.go index a4d135a67a..9198b42add 100644 --- a/go/vt/tabletmanager/grpctmclient/client.go +++ b/go/vt/tabletmanager/grpctmclient/client.go @@ -19,7 +19,7 @@ import ( "github.com/youtube/vitess/go/vt/servenv/grpcutils" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/topo/topoproto" "golang.org/x/net/context" logutilpb "github.com/youtube/vitess/go/vt/proto/logutil" @@ -51,7 +51,7 @@ func init() { type Client struct{} // dial returns a client to use -func (client *Client) dial(ctx context.Context, tablet *topo.TabletInfo) (*grpc.ClientConn, tabletmanagerservicepb.TabletManagerClient, error) { +func (client *Client) dial(ctx context.Context, tablet *topodatapb.Tablet) (*grpc.ClientConn, tabletmanagerservicepb.TabletManagerClient, error) { // create the RPC client, using ctx.Deadline if set, or no timeout. var connectTimeout time.Duration deadline, ok := ctx.Deadline() @@ -83,7 +83,7 @@ func (client *Client) dial(ctx context.Context, tablet *topo.TabletInfo) (*grpc. // // Ping is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) Ping(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) Ping(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -102,7 +102,7 @@ func (client *Client) Ping(ctx context.Context, tablet *topo.TabletInfo) error { } // Sleep is part of the tmclient.TabletManagerClient interface. -func (client *Client) Sleep(ctx context.Context, tablet *topo.TabletInfo, duration time.Duration) error { +func (client *Client) Sleep(ctx context.Context, tablet *topodatapb.Tablet, duration time.Duration) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -115,7 +115,7 @@ func (client *Client) Sleep(ctx context.Context, tablet *topo.TabletInfo, durati } // ExecuteHook is part of the tmclient.TabletManagerClient interface. -func (client *Client) ExecuteHook(ctx context.Context, tablet *topo.TabletInfo, hk *hook.Hook) (*hook.HookResult, error) { +func (client *Client) ExecuteHook(ctx context.Context, tablet *topodatapb.Tablet, hk *hook.Hook) (*hook.HookResult, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -137,7 +137,7 @@ func (client *Client) ExecuteHook(ctx context.Context, tablet *topo.TabletInfo, } // GetSchema is part of the tmclient.TabletManagerClient interface. -func (client *Client) GetSchema(ctx context.Context, tablet *topo.TabletInfo, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { +func (client *Client) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -155,7 +155,7 @@ func (client *Client) GetSchema(ctx context.Context, tablet *topo.TabletInfo, ta } // GetPermissions is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) GetPermissions(ctx context.Context, tablet *topo.TabletInfo) (*tabletmanagerdatapb.Permissions, error) { +func (client *Client) GetPermissions(ctx context.Context, tablet *topodatapb.Tablet) (*tabletmanagerdatapb.Permissions, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -173,7 +173,7 @@ func (client *Client) GetPermissions(ctx context.Context, tablet *topo.TabletInf // // SetReadOnly is part of the tmclient.TabletManagerClient interface. -func (client *Client) SetReadOnly(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) SetReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -184,7 +184,7 @@ func (client *Client) SetReadOnly(ctx context.Context, tablet *topo.TabletInfo) } // SetReadWrite is part of the tmclient.TabletManagerClient interface. -func (client *Client) SetReadWrite(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) SetReadWrite(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -195,7 +195,7 @@ func (client *Client) SetReadWrite(ctx context.Context, tablet *topo.TabletInfo) } // ChangeType is part of the tmclient.TabletManagerClient interface. -func (client *Client) ChangeType(ctx context.Context, tablet *topo.TabletInfo, dbType topodatapb.TabletType) error { +func (client *Client) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -208,7 +208,7 @@ func (client *Client) ChangeType(ctx context.Context, tablet *topo.TabletInfo, d } // RefreshState is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) RefreshState(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) RefreshState(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -219,7 +219,7 @@ func (client *Client) RefreshState(ctx context.Context, tablet *topo.TabletInfo) } // RunHealthCheck is part of the tmclient.TabletManagerClient interface. -func (client *Client) RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) RunHealthCheck(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -230,7 +230,7 @@ func (client *Client) RunHealthCheck(ctx context.Context, tablet *topo.TabletInf } // IgnoreHealthError is part of the tmclient.TabletManagerClient interface. -func (client *Client) IgnoreHealthError(ctx context.Context, tablet *topo.TabletInfo, pattern string) error { +func (client *Client) IgnoreHealthError(ctx context.Context, tablet *topodatapb.Tablet, pattern string) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -243,7 +243,7 @@ func (client *Client) IgnoreHealthError(ctx context.Context, tablet *topo.Tablet } // ReloadSchema is part of the tmclient.TabletManagerClient interface. -func (client *Client) ReloadSchema(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) ReloadSchema(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -254,7 +254,7 @@ func (client *Client) ReloadSchema(ctx context.Context, tablet *topo.TabletInfo) } // PreflightSchema is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) PreflightSchema(ctx context.Context, tablet *topo.TabletInfo, change string) (*tmutils.SchemaChangeResult, error) { +func (client *Client) PreflightSchema(ctx context.Context, tablet *topodatapb.Tablet, change string) (*tmutils.SchemaChangeResult, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -273,7 +273,7 @@ func (client *Client) PreflightSchema(ctx context.Context, tablet *topo.TabletIn } // ApplySchema is part of the tmclient.TabletManagerClient interface. -func (client *Client) ApplySchema(ctx context.Context, tablet *topo.TabletInfo, change *tmutils.SchemaChange) (*tmutils.SchemaChangeResult, error) { +func (client *Client) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tmutils.SchemaChangeResult, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -296,7 +296,7 @@ func (client *Client) ApplySchema(ctx context.Context, tablet *topo.TabletInfo, } // ExecuteFetchAsDba is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { +func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -304,7 +304,7 @@ func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topo.Tablet defer cc.Close() response, err := c.ExecuteFetchAsDba(ctx, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ Query: query, - DbName: tablet.DbName(), + DbName: topoproto.TabletDbName(tablet), MaxRows: uint64(maxRows), DisableBinlogs: disableBinlogs, ReloadSchema: reloadSchema, @@ -316,7 +316,7 @@ func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topo.Tablet } // ExecuteFetchAsApp is part of the tmclient.TabletManagerClient interface. -func (client *Client) ExecuteFetchAsApp(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int) (*querypb.QueryResult, error) { +func (client *Client) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int) (*querypb.QueryResult, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -337,7 +337,7 @@ func (client *Client) ExecuteFetchAsApp(ctx context.Context, tablet *topo.Tablet // // SlaveStatus is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (*replicationdatapb.Status, error) { +func (client *Client) SlaveStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -351,7 +351,7 @@ func (client *Client) SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) } // MasterPosition is part of the tmclient.TabletManagerClient interface. -func (client *Client) MasterPosition(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (client *Client) MasterPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return "", err @@ -365,7 +365,7 @@ func (client *Client) MasterPosition(ctx context.Context, tablet *topo.TabletInf } // StopSlave is part of the tmclient.TabletManagerClient interface. -func (client *Client) StopSlave(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) StopSlave(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -376,7 +376,7 @@ func (client *Client) StopSlave(ctx context.Context, tablet *topo.TabletInfo) er } // StopSlaveMinimum is part of the tmclient.TabletManagerClient interface. -func (client *Client) StopSlaveMinimum(ctx context.Context, tablet *topo.TabletInfo, minPos string, waitTime time.Duration) (string, error) { +func (client *Client) StopSlaveMinimum(ctx context.Context, tablet *topodatapb.Tablet, minPos string, waitTime time.Duration) (string, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return "", err @@ -393,7 +393,7 @@ func (client *Client) StopSlaveMinimum(ctx context.Context, tablet *topo.TabletI } // StartSlave is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) StartSlave(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) StartSlave(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -404,7 +404,7 @@ func (client *Client) StartSlave(ctx context.Context, tablet *topo.TabletInfo) e } // TabletExternallyReparented is part of the tmclient.TabletManagerClient interface. -func (client *Client) TabletExternallyReparented(ctx context.Context, tablet *topo.TabletInfo, externalID string) error { +func (client *Client) TabletExternallyReparented(ctx context.Context, tablet *topodatapb.Tablet, externalID string) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -417,7 +417,7 @@ func (client *Client) TabletExternallyReparented(ctx context.Context, tablet *to } // GetSlaves is part of the tmclient.TabletManagerClient interface. -func (client *Client) GetSlaves(ctx context.Context, tablet *topo.TabletInfo) ([]string, error) { +func (client *Client) GetSlaves(ctx context.Context, tablet *topodatapb.Tablet) ([]string, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -431,7 +431,7 @@ func (client *Client) GetSlaves(ctx context.Context, tablet *topo.TabletInfo) ([ } // WaitBlpPosition is part of the tmclient.TabletManagerClient interface. -func (client *Client) WaitBlpPosition(ctx context.Context, tablet *topo.TabletInfo, blpPosition *tabletmanagerdatapb.BlpPosition, waitTime time.Duration) error { +func (client *Client) WaitBlpPosition(ctx context.Context, tablet *topodatapb.Tablet, blpPosition *tabletmanagerdatapb.BlpPosition, waitTime time.Duration) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -445,7 +445,7 @@ func (client *Client) WaitBlpPosition(ctx context.Context, tablet *topo.TabletIn } // StopBlp is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) StopBlp(ctx context.Context, tablet *topo.TabletInfo) ([]*tabletmanagerdatapb.BlpPosition, error) { +func (client *Client) StopBlp(ctx context.Context, tablet *topodatapb.Tablet) ([]*tabletmanagerdatapb.BlpPosition, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -459,7 +459,7 @@ func (client *Client) StopBlp(ctx context.Context, tablet *topo.TabletInfo) ([]* } // StartBlp is part of the tmclient.TabletManagerClient interface. -func (client *Client) StartBlp(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) StartBlp(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -470,7 +470,7 @@ func (client *Client) StartBlp(ctx context.Context, tablet *topo.TabletInfo) err } // RunBlpUntil is part of the tmclient.TabletManagerClient interface. -func (client *Client) RunBlpUntil(ctx context.Context, tablet *topo.TabletInfo, positions []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) { +func (client *Client) RunBlpUntil(ctx context.Context, tablet *topodatapb.Tablet, positions []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return "", err @@ -491,7 +491,7 @@ func (client *Client) RunBlpUntil(ctx context.Context, tablet *topo.TabletInfo, // // ResetReplication is part of the tmclient.TabletManagerClient interface. -func (client *Client) ResetReplication(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) ResetReplication(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -502,7 +502,7 @@ func (client *Client) ResetReplication(ctx context.Context, tablet *topo.TabletI } // InitMaster is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) InitMaster(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (client *Client) InitMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return "", err @@ -516,7 +516,7 @@ func (client *Client) InitMaster(ctx context.Context, tablet *topo.TabletInfo) ( } // PopulateReparentJournal is part of the tmclient.TabletManagerClient interface. -func (client *Client) PopulateReparentJournal(ctx context.Context, tablet *topo.TabletInfo, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, pos string) error { +func (client *Client) PopulateReparentJournal(ctx context.Context, tablet *topodatapb.Tablet, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, pos string) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -532,7 +532,7 @@ func (client *Client) PopulateReparentJournal(ctx context.Context, tablet *topo. } // InitSlave is part of the tmclient.TabletManagerClient interface. -func (client *Client) InitSlave(ctx context.Context, tablet *topo.TabletInfo, parent *topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64) error { +func (client *Client) InitSlave(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -547,7 +547,7 @@ func (client *Client) InitSlave(ctx context.Context, tablet *topo.TabletInfo, pa } // DemoteMaster is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) DemoteMaster(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (client *Client) DemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return "", err @@ -561,7 +561,7 @@ func (client *Client) DemoteMaster(ctx context.Context, tablet *topo.TabletInfo) } // PromoteSlaveWhenCaughtUp is part of the tmclient.TabletManagerClient interface. -func (client *Client) PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topo.TabletInfo, pos string) (string, error) { +func (client *Client) PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topodatapb.Tablet, pos string) (string, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return "", err @@ -577,7 +577,7 @@ func (client *Client) PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topo } // SlaveWasPromoted is part of the tmclient.TabletManagerClient interface. -func (client *Client) SlaveWasPromoted(ctx context.Context, tablet *topo.TabletInfo) error { +func (client *Client) SlaveWasPromoted(ctx context.Context, tablet *topodatapb.Tablet) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -588,7 +588,7 @@ func (client *Client) SlaveWasPromoted(ctx context.Context, tablet *topo.TabletI } // SetMaster is part of the tmclient.TabletManagerClient interface. -func (client *Client) SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { +func (client *Client) SetMaster(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -603,7 +603,7 @@ func (client *Client) SetMaster(ctx context.Context, tablet *topo.TabletInfo, pa } // SlaveWasRestarted is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) SlaveWasRestarted(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.SlaveWasRestartedArgs) error { +func (client *Client) SlaveWasRestarted(ctx context.Context, tablet *topodatapb.Tablet, args *actionnode.SlaveWasRestartedArgs) error { cc, c, err := client.dial(ctx, tablet) if err != nil { return err @@ -616,7 +616,7 @@ func (client *Client) SlaveWasRestarted(ctx context.Context, tablet *topo.Tablet } // StopReplicationAndGetStatus is part of the tmclient.TabletManagerClient interface. -func (client *Client) StopReplicationAndGetStatus(ctx context.Context, tablet *topo.TabletInfo) (*replicationdatapb.Status, error) { +func (client *Client) StopReplicationAndGetStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err @@ -630,7 +630,7 @@ func (client *Client) StopReplicationAndGetStatus(ctx context.Context, tablet *t } // PromoteSlave is part of the tmclient.TabletManagerClient interface. -func (client *Client) PromoteSlave(ctx context.Context, tablet *topo.TabletInfo) (string, error) { +func (client *Client) PromoteSlave(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return "", err @@ -661,7 +661,7 @@ func (e *eventStreamAdapter) Recv() (*logutilpb.Event, error) { } // Backup is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) Backup(ctx context.Context, tablet *topo.TabletInfo, concurrency int) (logutil.EventStream, error) { +func (client *Client) Backup(ctx context.Context, tablet *topodatapb.Tablet, concurrency int) (logutil.EventStream, error) { cc, c, err := client.dial(ctx, tablet) if err != nil { return nil, err diff --git a/go/vt/tabletmanager/grpctmserver/server_test.go b/go/vt/tabletmanager/grpctmserver/server_test.go index 696fa11d7d..7a355797a0 100644 --- a/go/vt/tabletmanager/grpctmserver/server_test.go +++ b/go/vt/tabletmanager/grpctmserver/server_test.go @@ -10,7 +10,6 @@ import ( "github.com/youtube/vitess/go/vt/tabletmanager/agentrpctest" "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" - "github.com/youtube/vitess/go/vt/topo" "google.golang.org/grpc" tabletmanagerservicepb "github.com/youtube/vitess/go/vt/proto/tabletmanagerservice" @@ -36,7 +35,7 @@ func TestGRPCTMServer(t *testing.T) { // Create a gRPC client to talk to the fake tablet. client := &grpctmclient.Client{} - ti := topo.NewTabletInfo(&topodatapb.Tablet{ + tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "test", Uid: 123, @@ -45,8 +44,8 @@ func TestGRPCTMServer(t *testing.T) { PortMap: map[string]int32{ "grpc": port, }, - }, 0) + } // and run the test suite - agentrpctest.Run(t, client, ti, fakeAgent) + agentrpctest.Run(t, client, tablet, fakeAgent) } diff --git a/go/vt/tabletmanager/rpc_external_reparent.go b/go/vt/tabletmanager/rpc_external_reparent.go index 1efdf0ad35..cca83ec47f 100644 --- a/go/vt/tabletmanager/rpc_external_reparent.go +++ b/go/vt/tabletmanager/rpc_external_reparent.go @@ -174,7 +174,7 @@ func (agent *ActionAgent) finalizeTabletExternallyReparented(ctx context.Context // Tell the old master to re-read its tablet record and change its state. // We don't need to wait for it. 
tmc := tmclient.NewTabletManagerClient() - tmc.RefreshState(ctx, topo.NewTabletInfo(oldMasterTablet, -1)) + tmc.RefreshState(ctx, oldMasterTablet) }() } diff --git a/go/vt/tabletmanager/tmclient/rpc_client_api.go b/go/vt/tabletmanager/tmclient/rpc_client_api.go index fbcf9a7811..6bd742b5d0 100644 --- a/go/vt/tabletmanager/tmclient/rpc_client_api.go +++ b/go/vt/tabletmanager/tmclient/rpc_client_api.go @@ -13,7 +13,6 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" - "github.com/youtube/vitess/go/vt/topo" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -33,76 +32,76 @@ type TabletManagerClient interface { // // Ping will try to ping the remote tablet - Ping(ctx context.Context, tablet *topo.TabletInfo) error + Ping(ctx context.Context, tablet *topodatapb.Tablet) error // GetSchema asks the remote tablet for its database schema - GetSchema(ctx context.Context, tablet *topo.TabletInfo, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) + GetSchema(ctx context.Context, tablet *topodatapb.Tablet, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) // GetPermissions asks the remote tablet for its permissions list - GetPermissions(ctx context.Context, tablet *topo.TabletInfo) (*tabletmanagerdatapb.Permissions, error) + GetPermissions(ctx context.Context, tablet *topodatapb.Tablet) (*tabletmanagerdatapb.Permissions, error) // // Various read-write methods // // SetReadOnly makes the mysql instance read-only - SetReadOnly(ctx context.Context, tablet *topo.TabletInfo) error + SetReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error // SetReadWrite makes the mysql instance read-write - SetReadWrite(ctx context.Context, tablet *topo.TabletInfo) error + SetReadWrite(ctx context.Context, tablet *topodatapb.Tablet) error // 
ChangeType asks the remote tablet to change its type - ChangeType(ctx context.Context, tablet *topo.TabletInfo, dbType topodatapb.TabletType) error + ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType) error // Sleep will sleep for a duration (used for tests) - Sleep(ctx context.Context, tablet *topo.TabletInfo, duration time.Duration) error + Sleep(ctx context.Context, tablet *topodatapb.Tablet, duration time.Duration) error // ExecuteHook executes the provided hook remotely - ExecuteHook(ctx context.Context, tablet *topo.TabletInfo, hk *hook.Hook) (*hook.HookResult, error) + ExecuteHook(ctx context.Context, tablet *topodatapb.Tablet, hk *hook.Hook) (*hook.HookResult, error) // RefreshState asks the remote tablet to reload its tablet record - RefreshState(ctx context.Context, tablet *topo.TabletInfo) error + RefreshState(ctx context.Context, tablet *topodatapb.Tablet) error // RunHealthCheck asks the remote tablet to run a health check cycle - RunHealthCheck(ctx context.Context, tablet *topo.TabletInfo) error + RunHealthCheck(ctx context.Context, tablet *topodatapb.Tablet) error // IgnoreHealthError sets the regexp for health errors to ignore. 
- IgnoreHealthError(ctx context.Context, tablet *topo.TabletInfo, pattern string) error + IgnoreHealthError(ctx context.Context, tablet *topodatapb.Tablet, pattern string) error // ReloadSchema asks the remote tablet to reload its schema - ReloadSchema(ctx context.Context, tablet *topo.TabletInfo) error + ReloadSchema(ctx context.Context, tablet *topodatapb.Tablet) error // PreflightSchema will test a schema change - PreflightSchema(ctx context.Context, tablet *topo.TabletInfo, change string) (*tmutils.SchemaChangeResult, error) + PreflightSchema(ctx context.Context, tablet *topodatapb.Tablet, change string) (*tmutils.SchemaChangeResult, error) // ApplySchema will apply a schema change - ApplySchema(ctx context.Context, tablet *topo.TabletInfo, change *tmutils.SchemaChange) (*tmutils.SchemaChangeResult, error) + ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tmutils.SchemaChangeResult, error) // ExecuteFetchAsDba executes a query remotely using the DBA pool - ExecuteFetchAsDba(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) + ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) // ExecuteFetchAsApp executes a query remotely using the App pool - ExecuteFetchAsApp(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int) (*querypb.QueryResult, error) + ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, query string, maxRows int) (*querypb.QueryResult, error) // // Replication related methods // // SlaveStatus returns the tablet's mysql slave status. 
- SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (*replicationdatapb.Status, error) + SlaveStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) // MasterPosition returns the tablet's master position - MasterPosition(ctx context.Context, tablet *topo.TabletInfo) (string, error) + MasterPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) // StopSlave stops the mysql replication - StopSlave(ctx context.Context, tablet *topo.TabletInfo) error + StopSlave(ctx context.Context, tablet *topodatapb.Tablet) error // StopSlaveMinimum stops the mysql replication after it reaches // the provided minimum point - StopSlaveMinimum(ctx context.Context, tablet *topo.TabletInfo, stopPos string, waitTime time.Duration) (string, error) + StopSlaveMinimum(ctx context.Context, tablet *topodatapb.Tablet, stopPos string, waitTime time.Duration) (string, error) // StartSlave starts the mysql replication - StartSlave(ctx context.Context, tablet *topo.TabletInfo) error + StartSlave(ctx context.Context, tablet *topodatapb.Tablet) error // TabletExternallyReparented tells a tablet it is now the master, after an // external tool has already promoted the underlying mysqld to master and @@ -110,25 +109,25 @@ type TabletManagerClient interface { // // externalID is an optional string provided by the external tool that // vttablet will emit in logs to facilitate cross-referencing. 
- TabletExternallyReparented(ctx context.Context, tablet *topo.TabletInfo, externalID string) error + TabletExternallyReparented(ctx context.Context, tablet *topodatapb.Tablet, externalID string) error // GetSlaves returns the addresses of the slaves - GetSlaves(ctx context.Context, tablet *topo.TabletInfo) ([]string, error) + GetSlaves(ctx context.Context, tablet *topodatapb.Tablet) ([]string, error) // WaitBlpPosition asks the tablet to wait until it reaches that // position in replication - WaitBlpPosition(ctx context.Context, tablet *topo.TabletInfo, blpPosition *tabletmanagerdatapb.BlpPosition, waitTime time.Duration) error + WaitBlpPosition(ctx context.Context, tablet *topodatapb.Tablet, blpPosition *tabletmanagerdatapb.BlpPosition, waitTime time.Duration) error // StopBlp asks the tablet to stop all its binlog players, // and returns the current position for all of them - StopBlp(ctx context.Context, tablet *topo.TabletInfo) ([]*tabletmanagerdatapb.BlpPosition, error) + StopBlp(ctx context.Context, tablet *topodatapb.Tablet) ([]*tabletmanagerdatapb.BlpPosition, error) // StartBlp asks the tablet to restart its binlog players - StartBlp(ctx context.Context, tablet *topo.TabletInfo) error + StartBlp(ctx context.Context, tablet *topodatapb.Tablet) error // RunBlpUntil asks the tablet to restart its binlog players until // it reaches the given positions, if not there yet. - RunBlpUntil(ctx context.Context, tablet *topo.TabletInfo, positions []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) + RunBlpUntil(ctx context.Context, tablet *topodatapb.Tablet, positions []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) // // Reparenting related functions @@ -137,53 +136,53 @@ type TabletManagerClient interface { // ResetReplication tells a tablet to completely reset its // replication. All binary and relay logs are flushed. All // replication positions are reset. 
- ResetReplication(ctx context.Context, tablet *topo.TabletInfo) error + ResetReplication(ctx context.Context, tablet *topodatapb.Tablet) error // InitMaster tells a tablet to make itself the new master, // and return the replication position the slaves should use to // reparent to it. - InitMaster(ctx context.Context, tablet *topo.TabletInfo) (string, error) + InitMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) // PopulateReparentJournal asks the master to insert a row in // its reparent_journal table. - PopulateReparentJournal(ctx context.Context, tablet *topo.TabletInfo, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, pos string) error + PopulateReparentJournal(ctx context.Context, tablet *topodatapb.Tablet, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, pos string) error // InitSlave tells a tablet to make itself a slave to the // passed in master tablet alias, and wait for the row in the // reparent_journal table. - InitSlave(ctx context.Context, tablet *topo.TabletInfo, parent *topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64) error + InitSlave(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64) error // DemoteMaster tells the soon-to-be-former master it's gonna change, // and it should go read-only and return its current position. - DemoteMaster(ctx context.Context, tablet *topo.TabletInfo) (string, error) + DemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) // PromoteSlaveWhenCaughtUp transforms the tablet from a slave to a master. 
- PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topo.TabletInfo, pos string) (string, error) + PromoteSlaveWhenCaughtUp(ctx context.Context, tablet *topodatapb.Tablet, pos string) (string, error) // SlaveWasPromoted tells the remote tablet it is now the master - SlaveWasPromoted(ctx context.Context, tablet *topo.TabletInfo) error + SlaveWasPromoted(ctx context.Context, tablet *topodatapb.Tablet) error // SetMaster tells a tablet to make itself a slave to the // passed in master tablet alias, and wait for the row in the // reparent_journal table (if timeCreatedNS is non-zero). - SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error + SetMaster(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error // SlaveWasRestarted tells the remote tablet its master has changed - SlaveWasRestarted(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.SlaveWasRestartedArgs) error + SlaveWasRestarted(ctx context.Context, tablet *topodatapb.Tablet, args *actionnode.SlaveWasRestartedArgs) error // StopReplicationAndGetStatus stops replication and returns the // current position. 
- StopReplicationAndGetStatus(ctx context.Context, tablet *topo.TabletInfo) (*replicationdatapb.Status, error) + StopReplicationAndGetStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) // PromoteSlave makes the tablet the new master - PromoteSlave(ctx context.Context, tablet *topo.TabletInfo) (string, error) + PromoteSlave(ctx context.Context, tablet *topodatapb.Tablet) (string, error) // // Backup / restore related methods // // Backup creates a database backup - Backup(ctx context.Context, tablet *topo.TabletInfo, concurrency int) (logutil.EventStream, error) + Backup(ctx context.Context, tablet *topodatapb.Tablet, concurrency int) (logutil.EventStream, error) // // RPC related methods diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index 1ff32fbdd1..2e3dad4d96 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -101,31 +101,6 @@ func IsSlaveType(tt topodatapb.TabletType) bool { return true } -// TabletValidatePortMap returns an error if the tablet's portmap doesn't -// contain all the necessary ports for the tablet to be fully -// operational. We only care about vt port now, as mysql may not even -// be running. -func TabletValidatePortMap(tablet *topodatapb.Tablet) error { - if _, ok := tablet.PortMap["vt"]; !ok { - return fmt.Errorf("no vt port available") - } - return nil -} - -// TabletEndPoint returns an EndPoint associated with the tablet record -func TabletEndPoint(tablet *topodatapb.Tablet) (*topodatapb.EndPoint, error) { - if err := TabletValidatePortMap(tablet); err != nil { - return nil, err - } - - entry := NewEndPoint(tablet.Alias.Uid, tablet.Hostname) - for name, port := range tablet.PortMap { - entry.PortMap[name] = int32(port) - } - - return entry, nil -} - // TabletComplete validates and normalizes the tablet. If the shard name // contains a '-' it is going to try to infer the keyrange from it. 
func TabletComplete(tablet *topodatapb.Tablet) error { diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index f76574ad34..f738ea9d0c 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -65,7 +65,7 @@ func commandDemoteMaster(ctx context.Context, wr *wrangler.Wrangler, subFlags *f if err != nil { return err } - _, err = wr.TabletManagerClient().DemoteMaster(ctx, tabletInfo) + _, err = wr.TabletManagerClient().DemoteMaster(ctx, tabletInfo.Tablet) return err } diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 2c5d10fe2f..6e37bed97f 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -741,7 +741,7 @@ func commandSetReadOnly(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } - return wr.TabletManagerClient().SetReadOnly(ctx, ti) + return wr.TabletManagerClient().SetReadOnly(ctx, ti.Tablet) } func commandSetReadWrite(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -760,7 +760,7 @@ func commandSetReadWrite(ctx context.Context, wr *wrangler.Wrangler, subFlags *f if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } - return wr.TabletManagerClient().SetReadWrite(ctx, ti) + return wr.TabletManagerClient().SetReadWrite(ctx, ti.Tablet) } func commandStartSlave(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -779,7 +779,7 @@ func commandStartSlave(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } - return wr.TabletManagerClient().StartSlave(ctx, ti) + return wr.TabletManagerClient().StartSlave(ctx, ti.Tablet) } func commandStopSlave(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -798,7 +798,7 @@ func commandStopSlave(ctx context.Context, wr *wrangler.Wrangler, subFlags 
*flag if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } - return wr.TabletManagerClient().StopSlave(ctx, ti) + return wr.TabletManagerClient().StopSlave(ctx, ti.Tablet) } func commandChangeSlaveType(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -850,7 +850,7 @@ func commandPing(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Flag if err != nil { return err } - return wr.TabletManagerClient().Ping(ctx, tabletInfo) + return wr.TabletManagerClient().Ping(ctx, tabletInfo.Tablet) } func commandRefreshState(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -868,7 +868,7 @@ func commandRefreshState(ctx context.Context, wr *wrangler.Wrangler, subFlags *f if err != nil { return err } - return wr.TabletManagerClient().RefreshState(ctx, tabletInfo) + return wr.TabletManagerClient().RefreshState(ctx, tabletInfo.Tablet) } func commandRunHealthCheck(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -886,7 +886,7 @@ func commandRunHealthCheck(ctx context.Context, wr *wrangler.Wrangler, subFlags if err != nil { return err } - return wr.TabletManagerClient().RunHealthCheck(ctx, tabletInfo) + return wr.TabletManagerClient().RunHealthCheck(ctx, tabletInfo.Tablet) } func commandIgnoreHealthError(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -905,7 +905,7 @@ func commandIgnoreHealthError(ctx context.Context, wr *wrangler.Wrangler, subFla if err != nil { return err } - return wr.TabletManagerClient().IgnoreHealthError(ctx, tabletInfo, pattern) + return wr.TabletManagerClient().IgnoreHealthError(ctx, tabletInfo.Tablet, pattern) } func commandWaitForDrain(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -958,7 +958,7 @@ func commandSleep(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fla if err != nil { 
return err } - return wr.TabletManagerClient().Sleep(ctx, ti, duration) + return wr.TabletManagerClient().Sleep(ctx, ti.Tablet, duration) } func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -978,7 +978,7 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fl if err != nil { return err } - stream, err := wr.TabletManagerClient().Backup(ctx, tabletInfo, *concurrency) + stream, err := wr.TabletManagerClient().Backup(ctx, tabletInfo.Tablet, *concurrency) if err != nil { return err } @@ -1134,7 +1134,7 @@ func commandTabletExternallyReparented(ctx context.Context, wr *wrangler.Wrangle if err != nil { return err } - return wr.TabletManagerClient().TabletExternallyReparented(ctx, ti, "") + return wr.TabletManagerClient().TabletExternallyReparented(ctx, ti.Tablet, "") } func commandValidateShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -1398,7 +1398,7 @@ func commandWaitForFilteredReplication(ctx context.Context, wr *wrangler.Wrangle // Always run an explicit healthcheck first to make sure we don't see any outdated values. // This is especially true for tests and automation where there is no pause of multiple seconds // between commands and the periodic healthcheck did not run again yet. 
- if err := wr.TabletManagerClient().RunHealthCheck(ctx, tabletInfo); err != nil { + if err := wr.TabletManagerClient().RunHealthCheck(ctx, tabletInfo.Tablet); err != nil { return fmt.Errorf("failed to run explicit healthcheck on tablet: %v err: %v", tabletInfo, err) } diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go index 83f565aeef..313c650f84 100644 --- a/go/vt/vtctld/vtctld.go +++ b/go/vt/vtctld/vtctld.go @@ -76,7 +76,7 @@ func InitVtctld(ts topo.Server) { if err != nil { return "", err } - return "", wr.TabletManagerClient().Ping(ctx, ti) + return "", wr.TabletManagerClient().Ping(ctx, ti.Tablet) }) actionRepo.RegisterTabletAction("RefreshState", acl.ADMIN, @@ -85,7 +85,7 @@ func InitVtctld(ts topo.Server) { if err != nil { return "", err } - return "", wr.TabletManagerClient().RefreshState(ctx, ti) + return "", wr.TabletManagerClient().RefreshState(ctx, ti.Tablet) }) actionRepo.RegisterTabletAction("DeleteTablet", acl.ADMIN, diff --git a/go/vt/worker/clone_utils.go b/go/vt/worker/clone_utils.go index 5cb00a7879..d7a56ccb5a 100644 --- a/go/vt/worker/clone_utils.go +++ b/go/vt/worker/clone_utils.go @@ -104,7 +104,7 @@ func FindChunks(ctx context.Context, wr *wrangler.Wrangler, ti *topo.TabletInfo, // get the min and max of the leading column of the primary key query := fmt.Sprintf("SELECT MIN(%v), MAX(%v) FROM %v.%v", td.PrimaryKeyColumns[0], td.PrimaryKeyColumns[0], ti.DbName(), td.Name) shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - qr, err := wr.TabletManagerClient().ExecuteFetchAsApp(shortCtx, ti, query, 1) + qr, err := wr.TabletManagerClient().ExecuteFetchAsApp(shortCtx, ti.Tablet, query, 1) cancel() if err != nil { return nil, fmt.Errorf("ExecuteFetchAsApp: %v", err) @@ -250,13 +250,3 @@ func makeValueString(fields []*querypb.Field, rows [][]sqltypes.Value) string { } return buf.String() } - -// tabletStatsToTabletInfo converts a TabletStats object from the discovery -// package into a TabletInfo object. 
The latter one is required by several -// TabletManagerClient API calls. -// Note that this is a best-effort conversion and won't result into the same -// result as a call to topo.GetTablet(). -// Note: We assume that "eps" is immutable and we can reference its data. -func tabletStatsToTabletInfo(eps *discovery.TabletStats) *topo.TabletInfo { - return topo.NewTabletInfo(eps.Tablet, -1 /* version */) -} diff --git a/go/vt/worker/executor.go b/go/vt/worker/executor.go index b21f1b0241..ad5cc173ee 100644 --- a/go/vt/worker/executor.go +++ b/go/vt/worker/executor.go @@ -110,7 +110,7 @@ func (e *executor) fetchWithRetries(ctx context.Context, command string) error { // new variables until the label is reached.) { tryCtx, cancel := context.WithTimeout(retryCtx, 2*time.Minute) - _, err = e.wr.TabletManagerClient().ExecuteFetchAsApp(tryCtx, tabletStatsToTabletInfo(master), command, 0) + _, err = e.wr.TabletManagerClient().ExecuteFetchAsApp(tryCtx, master.Tablet, command, 0) cancel() if err == nil { diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index d62a97b53b..d1dcb32b5d 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -350,13 +350,13 @@ func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { } shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - err := scw.wr.TabletManagerClient().StopSlave(shortCtx, scw.sourceTablets[i]) + err := scw.wr.TabletManagerClient().StopSlave(shortCtx, scw.sourceTablets[i].Tablet) cancel() if err != nil { return fmt.Errorf("cannot stop replication on tablet %v", topoproto.TabletAliasString(alias)) } - wrangler.RecordStartSlaveAction(scw.cleaner, scw.sourceTablets[i]) + wrangler.RecordStartSlaveAction(scw.cleaner, scw.sourceTablets[i].Tablet) } // Initialize healthcheck and add destination shards to it. 
@@ -604,7 +604,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { // get the current position from the sources for shardIndex := range scw.sourceShards { shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - status, err := scw.wr.TabletManagerClient().SlaveStatus(shortCtx, scw.sourceTablets[shardIndex]) + status, err := scw.wr.TabletManagerClient().SlaveStatus(shortCtx, scw.sourceTablets[shardIndex].Tablet) cancel() if err != nil { return err @@ -662,7 +662,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { defer destinationWaitGroup.Done() scw.wr.Logger().Infof("Reloading schema on tablet %v", ti.AliasString()) shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - err := scw.wr.TabletManagerClient().ReloadSchema(shortCtx, ti) + err := scw.wr.TabletManagerClient().ReloadSchema(shortCtx, ti.Tablet) cancel() if err != nil { processError("ReloadSchema failed on tablet %v: %v", ti.AliasString(), err) diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 5f540b9854..2dcaccf3e9 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -390,7 +390,7 @@ func TestSplitCloneV2_RetryDueToReparent(t *testing.T) { t.Fatalf("GetTablet failed: %v", err) } tmc := tmclient.NewTabletManagerClient() - if err := tmc.TabletExternallyReparented(context.Background(), ti, "wait id 1"); err != nil { + if err := tmc.TabletExternallyReparented(context.Background(), ti.Tablet, "wait id 1"); err != nil { t.Fatalf("TabletExternallyReparented(replica) failed: %v", err) } diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go index 56c87fc72c..9155d3f2aa 100644 --- a/go/vt/worker/split_diff.go +++ b/go/vt/worker/split_diff.go @@ -249,12 +249,12 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // 1 - stop the master binlog replication, get its current position sdw.wr.Logger().Infof("Stopping master binlog replication on 
%v", sdw.shardInfo.MasterAlias) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - blpPositionList, err := sdw.wr.TabletManagerClient().StopBlp(shortCtx, masterInfo) + blpPositionList, err := sdw.wr.TabletManagerClient().StopBlp(shortCtx, masterInfo.Tablet) cancel() if err != nil { return fmt.Errorf("StopBlp for %v failed: %v", sdw.shardInfo.MasterAlias, err) } - wrangler.RecordStartBlpAction(sdw.cleaner, masterInfo) + wrangler.RecordStartBlpAction(sdw.cleaner, masterInfo.Tablet) // 2 - stop the source tablet at a binlog position // higher than the destination master @@ -276,7 +276,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // stop replication sdw.wr.Logger().Infof("Stopping slave %v at a minimum of %v", sdw.sourceAlias, blpPos.Position) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - stoppedAt, err := sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet, blpPos.Position, *remoteActionsTimeout) + stoppedAt, err := sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet.Tablet, blpPos.Position, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", sdw.sourceAlias, blpPos.Position, err) @@ -290,13 +290,13 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // change the cleaner actions from ChangeSlaveType(rdonly) // to StartSlave() + ChangeSlaveType(spare) - wrangler.RecordStartSlaveAction(sdw.cleaner, sourceTablet) + wrangler.RecordStartSlaveAction(sdw.cleaner, sourceTablet.Tablet) // 3 - ask the master of the destination shard to resume filtered // replication up to the new list of positions sdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", sdw.shardInfo.MasterAlias, stopPositionList) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - masterPos, err := sdw.wr.TabletManagerClient().RunBlpUntil(shortCtx, masterInfo, 
stopPositionList, *remoteActionsTimeout) + masterPos, err := sdw.wr.TabletManagerClient().RunBlpUntil(shortCtx, masterInfo.Tablet, stopPositionList, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("RunBlpUntil for %v until %v failed: %v", sdw.shardInfo.MasterAlias, stopPositionList, err) @@ -312,17 +312,17 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { return err } shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - _, err = sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, destinationTablet, masterPos, *remoteActionsTimeout) + _, err = sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, destinationTablet.Tablet, masterPos, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("StopSlaveMinimum for %v at %v failed: %v", sdw.destinationAlias, masterPos, err) } - wrangler.RecordStartSlaveAction(sdw.cleaner, destinationTablet) + wrangler.RecordStartSlaveAction(sdw.cleaner, destinationTablet.Tablet) // 5 - restart filtered replication on destination master sdw.wr.Logger().Infof("Restarting filtered replication on master %v", sdw.shardInfo.MasterAlias) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - err = sdw.wr.TabletManagerClient().StartBlp(shortCtx, masterInfo) + err = sdw.wr.TabletManagerClient().StartBlp(shortCtx, masterInfo.Tablet) if err := sdw.cleaner.RemoveActionByName(wrangler.StartBlpActionName, topoproto.TabletAliasString(sdw.shardInfo.MasterAlias)); err != nil { sdw.wr.Logger().Warningf("Cannot find cleaning action %v/%v: %v", wrangler.StartBlpActionName, topoproto.TabletAliasString(sdw.shardInfo.MasterAlias), err) } diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index 4764a0e255..27f50a83ca 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -321,13 +321,13 @@ func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error { // stop replication on 
it shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - err = vscw.wr.TabletManagerClient().StopSlave(shortCtx, vscw.sourceTablet) + err = vscw.wr.TabletManagerClient().StopSlave(shortCtx, vscw.sourceTablet.Tablet) cancel() if err != nil { return fmt.Errorf("cannot stop replication on tablet %v", topoproto.TabletAliasString(vscw.sourceAlias)) } - wrangler.RecordStartSlaveAction(vscw.cleaner, vscw.sourceTablet) + wrangler.RecordStartSlaveAction(vscw.cleaner, vscw.sourceTablet.Tablet) // Initialize healthcheck and add destination shards to it. vscw.healthCheck = discovery.NewHealthCheck(*remoteActionsTimeout, *healthcheckRetryDelay, *healthCheckTimeout, "" /* statsSuffix */) @@ -504,7 +504,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { } else { // get the current position from the source shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - status, err := vscw.wr.TabletManagerClient().SlaveStatus(shortCtx, vscw.sourceTablet) + status, err := vscw.wr.TabletManagerClient().SlaveStatus(shortCtx, vscw.sourceTablet.Tablet) cancel() if err != nil { return err @@ -553,7 +553,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { defer destinationWaitGroup.Done() vscw.wr.Logger().Infof("Reloading schema on tablet %v", ti.AliasString()) shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - err := vscw.wr.TabletManagerClient().ReloadSchema(shortCtx, ti) + err := vscw.wr.TabletManagerClient().ReloadSchema(shortCtx, ti.Tablet) cancel() if err != nil { processError("ReloadSchema failed on tablet %v: %v", ti.AliasString(), err) diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index cd9384f389..ae3ad90dd7 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -234,12 +234,12 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // 1 - stop the master binlog replication, get its 
current position vsdw.wr.Logger().Infof("Stopping master binlog replication on %v", topoproto.TabletAliasString(vsdw.shardInfo.MasterAlias)) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - blpPositionList, err := vsdw.wr.TabletManagerClient().StopBlp(shortCtx, masterInfo) + blpPositionList, err := vsdw.wr.TabletManagerClient().StopBlp(shortCtx, masterInfo.Tablet) cancel() if err != nil { return fmt.Errorf("StopBlp on master %v failed: %v", topoproto.TabletAliasString(vsdw.shardInfo.MasterAlias), err) } - wrangler.RecordStartBlpAction(vsdw.cleaner, masterInfo) + wrangler.RecordStartBlpAction(vsdw.cleaner, masterInfo.Tablet) // 2 - stop the source tablet at a binlog position // higher than the destination master @@ -260,7 +260,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) return err } shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - stoppedAt, err := vsdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet, blpPos.Position, *remoteActionsTimeout) + stoppedAt, err := vsdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet.Tablet, blpPos.Position, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", topoproto.TabletAliasString(vsdw.sourceAlias), blpPos.Position, err) @@ -272,13 +272,13 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // change the cleaner actions from ChangeSlaveType(rdonly) // to StartSlave() + ChangeSlaveType(spare) - wrangler.RecordStartSlaveAction(vsdw.cleaner, sourceTablet) + wrangler.RecordStartSlaveAction(vsdw.cleaner, sourceTablet.Tablet) // 3 - ask the master of the destination shard to resume filtered // replication up to the new list of positions vsdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", topoproto.TabletAliasString(vsdw.shardInfo.MasterAlias), stopPositionList) shortCtx, cancel = 
context.WithTimeout(ctx, *remoteActionsTimeout) - masterPos, err := vsdw.wr.TabletManagerClient().RunBlpUntil(shortCtx, masterInfo, stopPositionList, *remoteActionsTimeout) + masterPos, err := vsdw.wr.TabletManagerClient().RunBlpUntil(shortCtx, masterInfo.Tablet, stopPositionList, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("RunBlpUntil on %v until %v failed: %v", topoproto.TabletAliasString(vsdw.shardInfo.MasterAlias), stopPositionList, err) @@ -294,17 +294,17 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) return err } shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - _, err = vsdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, destinationTablet, masterPos, *remoteActionsTimeout) + _, err = vsdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, destinationTablet.Tablet, masterPos, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("StopSlaveMinimum on %v at %v failed: %v", topoproto.TabletAliasString(vsdw.destinationAlias), masterPos, err) } - wrangler.RecordStartSlaveAction(vsdw.cleaner, destinationTablet) + wrangler.RecordStartSlaveAction(vsdw.cleaner, destinationTablet.Tablet) // 5 - restart filtered replication on destination master vsdw.wr.Logger().Infof("Restarting filtered replication on master %v", topoproto.TabletAliasString(vsdw.shardInfo.MasterAlias)) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - err = vsdw.wr.TabletManagerClient().StartBlp(shortCtx, masterInfo) + err = vsdw.wr.TabletManagerClient().StartBlp(shortCtx, masterInfo.Tablet) if err := vsdw.cleaner.RemoveActionByName(wrangler.StartBlpActionName, topoproto.TabletAliasString(vsdw.shardInfo.MasterAlias)); err != nil { vsdw.wr.Logger().Warningf("Cannot find cleaning action %v/%v: %v", wrangler.StartBlpActionName, topoproto.TabletAliasString(vsdw.shardInfo.MasterAlias), err) } diff --git a/go/vt/wrangler/cleaner.go b/go/vt/wrangler/cleaner.go index fbe6cbc1a5..19ac405238 
100644 --- a/go/vt/wrangler/cleaner.go +++ b/go/vt/wrangler/cleaner.go @@ -214,7 +214,7 @@ func (tta TabletTagAction) CleanUp(ctx context.Context, wr *Wrangler) error { // StartSlaveAction will restart binlog replication on a server type StartSlaveAction struct { - TabletInfo *topo.TabletInfo + Tablet *topodatapb.Tablet } // StartSlaveActionName is the name of the slave start action @@ -222,15 +222,15 @@ const StartSlaveActionName = "StartSlaveAction" // RecordStartSlaveAction records a new StartSlaveAction // into the specified Cleaner -func RecordStartSlaveAction(cleaner *Cleaner, tabletInfo *topo.TabletInfo) { - cleaner.Record(StartSlaveActionName, topoproto.TabletAliasString(tabletInfo.Alias), &StartSlaveAction{ - TabletInfo: tabletInfo, +func RecordStartSlaveAction(cleaner *Cleaner, tablet *topodatapb.Tablet) { + cleaner.Record(StartSlaveActionName, topoproto.TabletAliasString(tablet.Alias), &StartSlaveAction{ + Tablet: tablet, }) } // CleanUp is part of CleanerAction interface. func (sba StartSlaveAction) CleanUp(ctx context.Context, wr *Wrangler) error { - return wr.TabletManagerClient().StartSlave(ctx, sba.TabletInfo) + return wr.TabletManagerClient().StartSlave(ctx, sba.Tablet) } // @@ -239,7 +239,7 @@ func (sba StartSlaveAction) CleanUp(ctx context.Context, wr *Wrangler) error { // StartBlpAction will restart binlog replication on a server type StartBlpAction struct { - TabletInfo *topo.TabletInfo + Tablet *topodatapb.Tablet } // StartBlpActionName is the name of the action to start binlog player @@ -247,13 +247,13 @@ const StartBlpActionName = "StartBlpAction" // RecordStartBlpAction records a new StartBlpAction // into the specified Cleaner -func RecordStartBlpAction(cleaner *Cleaner, tabletInfo *topo.TabletInfo) { - cleaner.Record(StartBlpActionName, topoproto.TabletAliasString(tabletInfo.Alias), &StartBlpAction{ - TabletInfo: tabletInfo, +func RecordStartBlpAction(cleaner *Cleaner, tablet *topodatapb.Tablet) { + cleaner.Record(StartBlpActionName, 
topoproto.TabletAliasString(tablet.Alias), &StartBlpAction{ + Tablet: tablet, }) } // CleanUp is part of CleanerAction interface. func (sba StartBlpAction) CleanUp(ctx context.Context, wr *Wrangler) error { - return wr.TabletManagerClient().StartBlp(ctx, sba.TabletInfo) + return wr.TabletManagerClient().StartBlp(ctx, sba.Tablet) } diff --git a/go/vt/wrangler/hook.go b/go/vt/wrangler/hook.go index 465118d081..af79c944c6 100644 --- a/go/vt/wrangler/hook.go +++ b/go/vt/wrangler/hook.go @@ -8,9 +8,7 @@ import ( "fmt" "strings" - log "github.com/golang/glog" hk "github.com/youtube/vitess/go/vt/hook" - "github.com/youtube/vitess/go/vt/topo" "golang.org/x/net/context" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -25,31 +23,10 @@ func (wr *Wrangler) ExecuteHook(ctx context.Context, tabletAlias *topodatapb.Tab if err != nil { return nil, err } - return wr.ExecuteTabletInfoHook(ctx, ti, hook) + return wr.ExecuteTabletHook(ctx, ti.Tablet, hook) } -// ExecuteTabletInfoHook will run the hook on the tablet described by -// TabletInfo -func (wr *Wrangler) ExecuteTabletInfoHook(ctx context.Context, ti *topo.TabletInfo, hook *hk.Hook) (hookResult *hk.HookResult, err error) { - return wr.tmc.ExecuteHook(ctx, ti, hook) -} - -// ExecuteOptionalTabletInfoHook executes a hook and returns an error -// only if the hook failed, not if the hook doesn't exist. -func (wr *Wrangler) ExecuteOptionalTabletInfoHook(ctx context.Context, ti *topo.TabletInfo, hook *hk.Hook) (err error) { - hr, err := wr.ExecuteTabletInfoHook(ctx, ti, hook) - if err != nil { - return err - } - - if hr.ExitStatus == hk.HOOK_DOES_NOT_EXIST { - log.Infof("Hook %v doesn't exist on tablet %v", hook.Name, ti.AliasString()) - return nil - } - - if hr.ExitStatus != hk.HOOK_SUCCESS { - return fmt.Errorf("Hook %v failed(%v): %v", hook.Name, hr.ExitStatus, hr.Stderr) - } - - return nil +// ExecuteTabletHook will run the hook on the provided tablet. 
+func (wr *Wrangler) ExecuteTabletHook(ctx context.Context, tablet *topodatapb.Tablet, hook *hk.Hook) (hookResult *hk.HookResult, err error) { + return wr.tmc.ExecuteHook(ctx, tablet, hook) } diff --git a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index 7d4ed73079..f77ab249bd 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -210,7 +210,7 @@ func (wr *Wrangler) getMastersPosition(ctx context.Context, shards []*topo.Shard return } - pos, err := wr.tmc.MasterPosition(ctx, ti) + pos, err := wr.tmc.MasterPosition(ctx, ti.Tablet) if err != nil { rec.RecordError(err) return @@ -248,13 +248,13 @@ func (wr *Wrangler) waitForFilteredReplication(ctx context.Context, sourcePositi // and wait for it wr.Logger().Infof("Waiting for %v to catch up", topoproto.TabletAliasString(si.MasterAlias)) - tablet, err := wr.ts.GetTablet(ctx, si.MasterAlias) + ti, err := wr.ts.GetTablet(ctx, si.MasterAlias) if err != nil { rec.RecordError(err) return } - if err := wr.tmc.WaitBlpPosition(ctx, tablet, blpPosition, waitTime); err != nil { + if err := wr.tmc.WaitBlpPosition(ctx, ti.Tablet, blpPosition, waitTime); err != nil { rec.RecordError(err) } else { wr.Logger().Infof("%v caught up", topoproto.TabletAliasString(si.MasterAlias)) @@ -281,7 +281,7 @@ func (wr *Wrangler) refreshMasters(ctx context.Context, shards []*topo.ShardInfo return } - if err := wr.tmc.RefreshState(ctx, ti); err != nil { + if err := wr.tmc.RefreshState(ctx, ti.Tablet); err != nil { rec.RecordError(err) } else { wr.Logger().Infof("%v responded", topoproto.TabletAliasString(si.MasterAlias)) @@ -738,20 +738,20 @@ func (wr *Wrangler) masterMigrateServedFrom(ctx context.Context, ki *topo.Keyspa // Now refresh the blacklisted table list on the source master event.DispatchUpdate(ev, "refreshing source master so it updates its blacklisted tables") - if err := wr.tmc.RefreshState(ctx, sourceMasterTabletInfo); err != nil { + if err := wr.tmc.RefreshState(ctx, sourceMasterTabletInfo.Tablet); 
err != nil { return err } // get the position event.DispatchUpdate(ev, "getting master position") - masterPosition, err := wr.tmc.MasterPosition(ctx, sourceMasterTabletInfo) + masterPosition, err := wr.tmc.MasterPosition(ctx, sourceMasterTabletInfo.Tablet) if err != nil { return err } // wait for it event.DispatchUpdate(ev, "waiting for destination master to catch up to source master") - if err := wr.tmc.WaitBlpPosition(ctx, destinationMasterTabletInfo, &tabletmanagerdatapb.BlpPosition{ + if err := wr.tmc.WaitBlpPosition(ctx, destinationMasterTabletInfo.Tablet, &tabletmanagerdatapb.BlpPosition{ Uid: 0, Position: masterPosition, }, filteredReplicationWaitTime); err != nil { @@ -834,7 +834,7 @@ func (wr *Wrangler) RefreshTablesByShard(ctx context.Context, si *topo.ShardInfo // Using 60 seconds because RefreshState should not take more than 30 seconds. // (RefreshState will restart the tablet's QueryService and most time will be spent on the shutdown, i.e. waiting up to 30 seconds on transactions (see Config.TransactionTimeout)). 
ctx, cancel := context.WithTimeout(ctx, 60*time.Second) - if err := wr.tmc.RefreshState(ctx, ti); err != nil { + if err := wr.tmc.RefreshState(ctx, ti.Tablet); err != nil { wr.Logger().Warningf("RefreshTablesByShard: failed to refresh %v: %v", ti.AliasString(), err) } cancel() diff --git a/go/vt/wrangler/permissions.go b/go/vt/wrangler/permissions.go index d2eba0b262..aeb991e55c 100644 --- a/go/vt/wrangler/permissions.go +++ b/go/vt/wrangler/permissions.go @@ -21,12 +21,12 @@ import ( // GetPermissions returns the permissions set on a remote tablet func (wr *Wrangler) GetPermissions(ctx context.Context, tabletAlias *topodatapb.TabletAlias) (*tabletmanagerdatapb.Permissions, error) { - tablet, err := wr.ts.GetTablet(ctx, tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } - return wr.tmc.GetPermissions(ctx, tablet) + return wr.tmc.GetPermissions(ctx, ti.Tablet) } // diffPermissions is a helper method to asynchronously diff a permissions diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 59261bbc42..db5346bc89 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -87,7 +87,7 @@ func (wr *Wrangler) tabletReplicationStatuses(ctx context.Context, tablets []*to wg.Add(1) go func(i int, ti *topo.TabletInfo) { defer wg.Done() - pos, err := wr.tmc.MasterPosition(ctx, ti) + pos, err := wr.tmc.MasterPosition(ctx, ti.Tablet) if err != nil { rec.RecordError(fmt.Errorf("MasterPosition(%v) failed: %v", ti.AliasString(), err)) return @@ -100,7 +100,7 @@ func (wr *Wrangler) tabletReplicationStatuses(ctx context.Context, tablets []*to wg.Add(1) go func(i int, ti *topo.TabletInfo) { defer wg.Done() - status, err := wr.tmc.SlaveStatus(ctx, ti) + status, err := wr.tmc.SlaveStatus(ctx, ti.Tablet) if err != nil { rec.RecordError(fmt.Errorf("SlaveStatus(%v) failed: %v", ti.AliasString(), err)) return @@ -148,7 +148,7 @@ func (wr *Wrangler) ReparentTablet(ctx context.Context, tabletAlias *topodatapb. 
} // and do the remote command - return wr.TabletManagerClient().SetMaster(ctx, ti, shardInfo.MasterAlias, 0, false) + return wr.tmc.SetMaster(ctx, ti.Tablet, shardInfo.MasterAlias, 0, false) } // InitShardMaster will make the provided tablet the master for the shard. @@ -234,7 +234,7 @@ func (wr *Wrangler) initShardMasterLocked(ctx context.Context, ev *events.Repare go func(alias topodatapb.TabletAlias, tabletInfo *topo.TabletInfo) { defer wg.Done() wr.logger.Infof("resetting replication on tablet %v", topoproto.TabletAliasString(&alias)) - if err := wr.TabletManagerClient().ResetReplication(ctx, tabletInfo); err != nil { + if err := wr.tmc.ResetReplication(ctx, tabletInfo.Tablet); err != nil { rec.RecordError(fmt.Errorf("Tablet %v ResetReplication failed (either fix it, or Scrap it): %v", topoproto.TabletAliasString(&alias), err)) } }(alias, tabletInfo) @@ -248,7 +248,7 @@ func (wr *Wrangler) initShardMasterLocked(ctx context.Context, ev *events.Repare // position wr.logger.Infof("initializing master on %v", topoproto.TabletAliasString(masterElectTabletAlias)) event.DispatchUpdate(ev, "initializing master") - rp, err := wr.TabletManagerClient().InitMaster(ctx, masterElectTabletInfo) + rp, err := wr.tmc.InitMaster(ctx, masterElectTabletInfo.Tablet) if err != nil { return err } @@ -270,14 +270,14 @@ func (wr *Wrangler) initShardMasterLocked(ctx context.Context, ev *events.Repare go func(alias topodatapb.TabletAlias, tabletInfo *topo.TabletInfo) { defer wgMaster.Done() wr.logger.Infof("populating reparent journal on new master %v", topoproto.TabletAliasString(&alias)) - masterErr = wr.TabletManagerClient().PopulateReparentJournal(ctx, tabletInfo, now, initShardMasterOperation, &alias, rp) + masterErr = wr.tmc.PopulateReparentJournal(ctx, tabletInfo.Tablet, now, initShardMasterOperation, &alias, rp) }(alias, tabletInfo) } else { wgSlaves.Add(1) go func(alias topodatapb.TabletAlias, tabletInfo *topo.TabletInfo) { defer wgSlaves.Done() wr.logger.Infof("initializing 
slave %v", topoproto.TabletAliasString(&alias)) - if err := wr.TabletManagerClient().InitSlave(ctx, tabletInfo, masterElectTabletAlias, rp, now); err != nil { + if err := wr.tmc.InitSlave(ctx, tabletInfo.Tablet, masterElectTabletAlias, rp, now); err != nil { rec.RecordError(fmt.Errorf("Tablet %v InitSlave failed: %v", topoproto.TabletAliasString(&alias), err)) } }(alias, tabletInfo) @@ -316,7 +316,7 @@ func (wr *Wrangler) initShardMasterLocked(ctx context.Context, ev *events.Repare // If the database doesn't exist, it means the user intends for these tablets // to begin serving with no data (i.e. first time initialization). createDB := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", topoproto.TabletDbName(masterElectTabletInfo.Tablet)) - if _, err := wr.TabletManagerClient().ExecuteFetchAsDba(ctx, masterElectTabletInfo, createDB, 1, false, true); err != nil { + if _, err := wr.tmc.ExecuteFetchAsDba(ctx, masterElectTabletInfo.Tablet, createDB, 1, false, true); err != nil { return fmt.Errorf("failed to create database: %v", err) } @@ -383,7 +383,7 @@ func (wr *Wrangler) plannedReparentShardLocked(ctx context.Context, ev *events.R // Demote the current master, get its replication position wr.logger.Infof("demote current master %v", shardInfo.MasterAlias) event.DispatchUpdate(ev, "demoting old master") - rp, err := wr.tmc.DemoteMaster(ctx, oldMasterTabletInfo) + rp, err := wr.tmc.DemoteMaster(ctx, oldMasterTabletInfo.Tablet) if err != nil { return fmt.Errorf("old master tablet %v DemoteMaster failed: %v", topoproto.TabletAliasString(shardInfo.MasterAlias), err) } @@ -392,7 +392,7 @@ func (wr *Wrangler) plannedReparentShardLocked(ctx context.Context, ev *events.R // then promote it wr.logger.Infof("promote slave %v", topoproto.TabletAliasString(masterElectTabletAlias)) event.DispatchUpdate(ev, "promoting slave") - rp, err = wr.tmc.PromoteSlaveWhenCaughtUp(ctx, masterElectTabletInfo, rp) + rp, err = wr.tmc.PromoteSlaveWhenCaughtUp(ctx, masterElectTabletInfo.Tablet, rp) 
if err != nil { return fmt.Errorf("master-elect tablet %v failed to catch up with replication or be upgraded to master: %v", topoproto.TabletAliasString(masterElectTabletAlias), err) } @@ -412,7 +412,7 @@ func (wr *Wrangler) plannedReparentShardLocked(ctx context.Context, ev *events.R go func(alias topodatapb.TabletAlias, tabletInfo *topo.TabletInfo) { defer wgMaster.Done() wr.logger.Infof("populating reparent journal on new master %v", topoproto.TabletAliasString(&alias)) - masterErr = wr.TabletManagerClient().PopulateReparentJournal(ctx, tabletInfo, now, plannedReparentShardOperation, &alias, rp) + masterErr = wr.tmc.PopulateReparentJournal(ctx, tabletInfo.Tablet, now, plannedReparentShardOperation, &alias, rp) }(alias, tabletInfo) } else { wgSlaves.Add(1) @@ -421,7 +421,7 @@ func (wr *Wrangler) plannedReparentShardLocked(ctx context.Context, ev *events.R wr.logger.Infof("setting new master on slave %v", topoproto.TabletAliasString(&alias)) // also restart replication on old master forceStartSlave := topoproto.TabletAliasEqual(&alias, oldMasterTabletInfo.Alias) - if err := wr.TabletManagerClient().SetMaster(ctx, tabletInfo, masterElectTabletAlias, now, forceStartSlave); err != nil { + if err := wr.tmc.SetMaster(ctx, tabletInfo.Tablet, masterElectTabletAlias, now, forceStartSlave); err != nil { rec.RecordError(fmt.Errorf("Tablet %v SetMaster failed: %v", topoproto.TabletAliasString(&alias), err)) return } @@ -550,7 +550,7 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events wr.logger.Infof("getting replication position from %v", topoproto.TabletAliasString(&alias)) ctx, cancel := context.WithTimeout(ctx, waitSlaveTimeout) defer cancel() - rp, err := wr.TabletManagerClient().StopReplicationAndGetStatus(ctx, tabletInfo) + rp, err := wr.tmc.StopReplicationAndGetStatus(ctx, tabletInfo.Tablet) if err != nil { wr.logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(&alias), err) 
return @@ -587,7 +587,7 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events // Promote the masterElect wr.logger.Infof("promote slave %v", topoproto.TabletAliasString(masterElectTabletAlias)) event.DispatchUpdate(ev, "promoting slave") - rp, err := wr.tmc.PromoteSlave(ctx, masterElectTabletInfo) + rp, err := wr.tmc.PromoteSlave(ctx, masterElectTabletInfo.Tablet) if err != nil { return fmt.Errorf("master-elect tablet %v failed to be upgraded to master: %v", topoproto.TabletAliasString(masterElectTabletAlias), err) } @@ -609,7 +609,7 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events go func(alias topodatapb.TabletAlias, tabletInfo *topo.TabletInfo) { defer wgMaster.Done() wr.logger.Infof("populating reparent journal on new master %v", topoproto.TabletAliasString(&alias)) - masterErr = wr.TabletManagerClient().PopulateReparentJournal(ctx, tabletInfo, now, emergencyReparentShardOperation, &alias, rp) + masterErr = wr.tmc.PopulateReparentJournal(ctx, tabletInfo.Tablet, now, emergencyReparentShardOperation, &alias, rp) }(alias, tabletInfo) } else { wgSlaves.Add(1) @@ -620,7 +620,7 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events if status, ok := statusMap[alias]; ok { forceStartSlave = status.SlaveIoRunning || status.SlaveSqlRunning } - if err := wr.TabletManagerClient().SetMaster(ctx, tabletInfo, masterElectTabletAlias, now, forceStartSlave); err != nil { + if err := wr.tmc.SetMaster(ctx, tabletInfo.Tablet, masterElectTabletAlias, now, forceStartSlave); err != nil { rec.RecordError(fmt.Errorf("Tablet %v SetMaster failed: %v", topoproto.TabletAliasString(&alias), err)) } }(alias, tabletInfo) diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index 5900013415..ed30617265 100644 --- a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -33,7 +33,7 @@ func (wr *Wrangler) GetSchema(ctx context.Context, tabletAlias *topodatapb.Table return nil, err } - 
return wr.tmc.GetSchema(ctx, ti, tables, excludeTables, includeViews) + return wr.tmc.GetSchema(ctx, ti.Tablet, tables, excludeTables, includeViews) } // ReloadSchema forces the remote tablet to reload its schema. @@ -43,7 +43,7 @@ func (wr *Wrangler) ReloadSchema(ctx context.Context, tabletAlias *topodatapb.Ta return err } - return wr.tmc.ReloadSchema(ctx, ti) + return wr.tmc.ReloadSchema(ctx, ti.Tablet) } // helper method to asynchronously diff a schema @@ -191,7 +191,7 @@ func (wr *Wrangler) PreflightSchema(ctx context.Context, tabletAlias *topodatapb if err != nil { return nil, err } - return wr.tmc.PreflightSchema(ctx, ti, change) + return wr.tmc.PreflightSchema(ctx, ti.Tablet, change) } // ApplySchemaKeyspace applies a schema change to an entire keyspace. @@ -309,7 +309,7 @@ func (wr *Wrangler) applySQLShard(ctx context.Context, tabletInfo *topo.TabletIn ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() // Need to make sure that we enable binlog, since we're only applying the statement on masters. 
- _, err = wr.tmc.ExecuteFetchAsDba(ctx, tabletInfo, filledChange, 0, false, reloadSchema) + _, err = wr.tmc.ExecuteFetchAsDba(ctx, tabletInfo.Tablet, filledChange, 0, false, reloadSchema) return err } diff --git a/go/vt/wrangler/tablet.go b/go/vt/wrangler/tablet.go index 8ea18a6dd0..aa5023bafa 100644 --- a/go/vt/wrangler/tablet.go +++ b/go/vt/wrangler/tablet.go @@ -169,7 +169,7 @@ func (wr *Wrangler) ChangeSlaveType(ctx context.Context, tabletAlias *topodatapb } // ask the tablet to make the change - if err := wr.tmc.ChangeType(ctx, ti, tabletType); err != nil { + if err := wr.tmc.ChangeType(ctx, ti.Tablet, tabletType); err != nil { return err } @@ -193,7 +193,7 @@ func (wr *Wrangler) changeTypeInternal(ctx context.Context, tabletAlias *topodat rebuildRequired := ti.IsInServingGraph() // change the type - if err := wr.tmc.ChangeType(ctx, ti, dbType); err != nil { + if err := wr.tmc.ChangeType(ctx, ti.Tablet, dbType); err != nil { return err } @@ -213,5 +213,5 @@ func (wr *Wrangler) ExecuteFetchAsDba(ctx context.Context, tabletAlias *topodata if err != nil { return nil, err } - return wr.tmc.ExecuteFetchAsDba(ctx, ti, query, maxRows, disableBinlogs, reloadSchema) + return wr.tmc.ExecuteFetchAsDba(ctx, ti.Tablet, query, maxRows, disableBinlogs, reloadSchema) } diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index ebf1ef3a67..23a82864fe 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -189,7 +189,7 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) { c := tmclient.NewTabletManagerClient() for timeout >= 0 { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - err := c.Ping(ctx, topo.NewTabletInfo(ft.Agent.Tablet(), -1)) + err := c.Ping(ctx, ft.Agent.Tablet()) cancel() if err == nil { break diff --git a/go/vt/wrangler/testlib/reparent_external_test.go b/go/vt/wrangler/testlib/reparent_external_test.go index 735dc317a0..e77e1e23de 
100644 --- a/go/vt/wrangler/testlib/reparent_external_test.go +++ b/go/vt/wrangler/testlib/reparent_external_test.go @@ -134,7 +134,7 @@ func TestTabletExternallyReparented(t *testing.T) { t.Fatalf("GetTablet failed: %v", err) } waitID := makeWaitID() - if err := tmc.TabletExternallyReparented(context.Background(), ti, waitID); err != nil { + if err := tmc.TabletExternallyReparented(context.Background(), ti.Tablet, waitID); err != nil { t.Fatalf("TabletExternallyReparented(slave) error: %v", err) } waitForExternalReparent(t, waitID) @@ -146,7 +146,7 @@ func TestTabletExternallyReparented(t *testing.T) { t.Fatalf("GetTablet failed: %v", err) } waitID = makeWaitID() - if err := tmc.TabletExternallyReparented(context.Background(), ti, waitID); err != nil { + if err := tmc.TabletExternallyReparented(context.Background(), ti.Tablet, waitID); err != nil { t.Fatalf("TabletExternallyReparented(replica) failed: %v", err) } waitForExternalReparent(t, waitID) @@ -196,7 +196,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { t.Fatalf("GetTablet failed: %v", err) } waitID := makeWaitID() - if err := tmc.TabletExternallyReparented(context.Background(), ti, waitID); err != nil { + if err := tmc.TabletExternallyReparented(context.Background(), ti.Tablet, waitID); err != nil { t.Fatalf("TabletExternallyReparented(replica) failed: %v", err) } waitForExternalReparent(t, waitID) @@ -241,7 +241,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedMaster(t *testing.T) { t.Fatalf("GetTablet failed: %v", err) } waitID := makeWaitID() - if err := tmc.TabletExternallyReparented(context.Background(), ti, waitID); err != nil { + if err := tmc.TabletExternallyReparented(context.Background(), ti.Tablet, waitID); err != nil { t.Fatalf("TabletExternallyReparented(replica) failed: %v", err) } waitForExternalReparent(t, waitID) @@ -284,7 +284,7 @@ func TestTabletExternallyReparentedFailedOldMaster(t *testing.T) { t.Fatalf("GetTablet failed: %v", err) } waitID := 
makeWaitID() - if err := tmc.TabletExternallyReparented(context.Background(), ti, waitID); err != nil { + if err := tmc.TabletExternallyReparented(context.Background(), ti.Tablet, waitID); err != nil { t.Fatalf("TabletExternallyReparented(replica) failed: %v", err) } waitForExternalReparent(t, waitID) diff --git a/go/vt/wrangler/validator.go b/go/vt/wrangler/validator.go index cfe774180e..3d30a21d1d 100644 --- a/go/vt/wrangler/validator.go +++ b/go/vt/wrangler/validator.go @@ -178,15 +178,15 @@ func normalizeIP(ip string) string { } func (wr *Wrangler) validateReplication(ctx context.Context, shardInfo *topo.ShardInfo, tabletMap map[topodatapb.TabletAlias]*topo.TabletInfo, results chan<- error) { - masterTablet, ok := tabletMap[*shardInfo.MasterAlias] + masterTabletInfo, ok := tabletMap[*shardInfo.MasterAlias] if !ok { results <- fmt.Errorf("master %v not in tablet map", topoproto.TabletAliasString(shardInfo.MasterAlias)) return } - slaveList, err := wr.tmc.GetSlaves(ctx, masterTablet) + slaveList, err := wr.tmc.GetSlaves(ctx, masterTabletInfo.Tablet) if err != nil { - results <- fmt.Errorf("GetSlaves(%v) failed: %v", masterTablet, err) + results <- fmt.Errorf("GetSlaves(%v) failed: %v", masterTabletInfo, err) return } if len(slaveList) == 0 { @@ -226,7 +226,7 @@ func (wr *Wrangler) pingTablets(ctx context.Context, tabletMap map[topodatapb.Ta go func(tabletAlias topodatapb.TabletAlias, tabletInfo *topo.TabletInfo) { defer wg.Done() - if err := wr.tmc.Ping(ctx, tabletInfo); err != nil { + if err := wr.tmc.Ping(ctx, tabletInfo.Tablet); err != nil { results <- fmt.Errorf("Ping(%v) failed: %v tablet hostname: %v", topoproto.TabletAliasString(&tabletAlias), err, tabletInfo.Hostname) } }(tabletAlias, tabletInfo) From c3c077db14f0069d5ab3c7779e846f06a6b79197 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 16 May 2016 10:06:51 -0700 Subject: [PATCH 10/27] Removing duplicate Cell. Now that we have Tablet.Alias.Cell we can use. 
--- go/vt/discovery/healthcheck.go | 9 +----- go/vt/discovery/healthcheck_test.go | 11 ++------ go/vt/discovery/healthcheck_wait_test.go | 2 +- go/vt/discovery/replicationlag_test.go | 36 ++++++++++++------------ go/vt/topo/tablet.go | 7 +++-- go/vt/vtgate/discoverygateway.go | 36 ++++++++++++------------ go/vt/vtgate/fakehealthcheck_test.go | 9 ++++-- go/vt/vtgate/resolver_test.go | 14 ++++----- go/vt/vtgate/scatter_conn_test.go | 6 ++-- 9 files changed, 60 insertions(+), 70 deletions(-) diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index a86f1ef946..7351298f40 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -67,7 +67,6 @@ type HealthCheckStatsListener interface { type TabletStats struct { Tablet *topodatapb.Tablet Name string // name is an optional tag (e.g. alternative address) - Cell string Target *querypb.Target Up bool // whether the tablet is added Serving bool // whether the server is serving @@ -238,7 +237,6 @@ func (hc *HealthCheckImpl) checkConn(hcc *healthCheckConn, cell, name string, ta hcc.lastError = err ts := &TabletStats{ Tablet: tablet, - Cell: hcc.cell, Name: hcc.name, Target: hcc.target, Up: hcc.up, @@ -341,7 +339,6 @@ func (hcc *healthCheckConn) processResponse(hc *HealthCheckImpl, tablet *topodat hcc.mu.RLock() ts := &TabletStats{ Tablet: tablet, - Cell: hcc.cell, Name: hcc.name, Target: hcc.target, Up: hcc.up, @@ -407,7 +404,6 @@ func (hc *HealthCheckImpl) checkHealthCheckTimeout() { hcc.lastError = fmt.Errorf("healthcheck timed out (latest %v)", hcc.lastResponseTimestamp) ts := &TabletStats{ Tablet: hcc.tablet, - Cell: hcc.cell, Name: hcc.name, Target: hcc.target, Up: hcc.up, @@ -506,7 +502,6 @@ func (hc *HealthCheckImpl) GetTabletStatsFromKeyspaceShard(keyspace, shard strin hcc.mu.RLock() ts := &TabletStats{ Tablet: ep, - Cell: hcc.cell, Name: hcc.name, Target: hcc.target, Up: hcc.up, @@ -549,7 +544,6 @@ func (hc *HealthCheckImpl) GetTabletStatsFromTarget(keyspace, shard 
string, tabl hcc.mu.RLock() ts := &TabletStats{ Tablet: ep, - Cell: hcc.cell, Name: hcc.name, Target: hcc.target, Up: hcc.up, @@ -731,12 +725,11 @@ func (hc *HealthCheckImpl) CacheStatus() TabletsCacheStatusList { epcsMap[key] = epcs } stats := &TabletStats{ - Cell: hcc.cell, + Tablet: hcc.tablet, Name: hcc.name, Target: hcc.target, Up: hcc.up, Serving: hcc.serving, - Tablet: hcc.tablet, Stats: hcc.stats, TabletExternallyReparentedTimestamp: hcc.tabletExternallyReparentedTimestamp, LastError: hcc.lastError, diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index de5da2326f..bea93cea6c 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -26,7 +26,7 @@ func init() { } func TestHealthCheck(t *testing.T) { - ep := topo.NewTablet(0, "a") + ep := topo.NewTablet(0, "cell", "a") ep.PortMap["vt"] = 1 input := make(chan *querypb.StreamHealthResponse) fakeConn := createFakeConn(ep, input) @@ -52,7 +52,6 @@ func TestHealthCheck(t *testing.T) { } want := &TabletStats{ Tablet: ep, - Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, Up: true, Serving: true, @@ -75,7 +74,6 @@ func TestHealthCheck(t *testing.T) { Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, TabletsStats: TabletStatsList{{ Tablet: ep, - Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, Up: true, Serving: true, @@ -96,7 +94,6 @@ func TestHealthCheck(t *testing.T) { } want = &TabletStats{ Tablet: ep, - Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Up: true, Serving: true, @@ -123,7 +120,6 @@ func TestHealthCheck(t *testing.T) { } want = &TabletStats{ Tablet: ep, - Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Up: true, Serving: false, @@ -146,7 +142,6 @@ 
func TestHealthCheck(t *testing.T) { } want = &TabletStats{ Tablet: ep, - Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Up: true, Serving: false, @@ -167,7 +162,6 @@ func TestHealthCheck(t *testing.T) { t.Logf(`hc.RemoveTablet({Host: "a", PortMap: {"vt": 1}})`) want = &TabletStats{ Tablet: ep, - Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Up: false, Serving: false, @@ -189,7 +183,7 @@ func TestHealthCheck(t *testing.T) { func TestHealthCheckTimeout(t *testing.T) { timeout := 500 * time.Millisecond - ep := topo.NewTablet(0, "a") + ep := topo.NewTablet(0, "cell", "a") ep.PortMap["vt"] = 1 input := make(chan *querypb.StreamHealthResponse) createFakeConn(ep, input) @@ -209,7 +203,6 @@ func TestHealthCheckTimeout(t *testing.T) { } want := &TabletStats{ Tablet: ep, - Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, Up: true, Serving: true, diff --git a/go/vt/discovery/healthcheck_wait_test.go b/go/vt/discovery/healthcheck_wait_test.go index 68ae90efe1..9ab4eaa2e7 100644 --- a/go/vt/discovery/healthcheck_wait_test.go +++ b/go/vt/discovery/healthcheck_wait_test.go @@ -109,7 +109,7 @@ func TestWaitForTablets(t *testing.T) { defer shortCancel() waitAvailableTabletInterval = 20 * time.Millisecond - ep := topo.NewTablet(0, "a") + ep := topo.NewTablet(0, "cell", "a") ep.PortMap["vt"] = 1 input := make(chan *querypb.StreamHealthResponse) createFakeConn(ep, input) diff --git a/go/vt/discovery/replicationlag_test.go b/go/vt/discovery/replicationlag_test.go index c157d7ad59..8fac56781f 100644 --- a/go/vt/discovery/replicationlag_test.go +++ b/go/vt/discovery/replicationlag_test.go @@ -16,12 +16,12 @@ func TestFilterByReplicationLag(t *testing.T) { } // 1 serving tablet ts1 := &TabletStats{ - Tablet: topo.NewTablet(1, "host1"), + Tablet: topo.NewTablet(1, "cell", "host1"), Serving: true, Stats: 
&querypb.RealtimeStats{}, } ts2 := &TabletStats{ - Tablet: topo.NewTablet(2, "host2"), + Tablet: topo.NewTablet(2, "cell", "host2"), Serving: false, Stats: &querypb.RealtimeStats{}, } @@ -34,22 +34,22 @@ func TestFilterByReplicationLag(t *testing.T) { } // lags of (1s, 1s, 1s, 30s) ts1 = &TabletStats{ - Tablet: topo.NewTablet(1, "host1"), + Tablet: topo.NewTablet(1, "cell", "host1"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, } ts2 = &TabletStats{ - Tablet: topo.NewTablet(2, "host2"), + Tablet: topo.NewTablet(2, "cell", "host2"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, } ts3 := &TabletStats{ - Tablet: topo.NewTablet(3, "host3"), + Tablet: topo.NewTablet(3, "cell", "host3"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1}, } ts4 := &TabletStats{ - Tablet: topo.NewTablet(4, "host4"), + Tablet: topo.NewTablet(4, "cell", "host4"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 30}, } @@ -59,22 +59,22 @@ func TestFilterByReplicationLag(t *testing.T) { } // lags of (5s, 10s, 15s, 120s) ts1 = &TabletStats{ - Tablet: topo.NewTablet(1, "host1"), + Tablet: topo.NewTablet(1, "cell", "host1"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 5}, } ts2 = &TabletStats{ - Tablet: topo.NewTablet(2, "host2"), + Tablet: topo.NewTablet(2, "cell", "host2"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 10}, } ts3 = &TabletStats{ - Tablet: topo.NewTablet(3, "host3"), + Tablet: topo.NewTablet(3, "cell", "host3"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 15}, } ts4 = &TabletStats{ - Tablet: topo.NewTablet(4, "host4"), + Tablet: topo.NewTablet(4, "cell", "host4"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 120}, } @@ -84,22 +84,22 @@ func TestFilterByReplicationLag(t *testing.T) { } // lags of (30m, 35m, 40m, 45m) ts1 = &TabletStats{ - Tablet: topo.NewTablet(1, "host1"), + Tablet: topo.NewTablet(1, "cell", 
"host1"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 30 * 60}, } ts2 = &TabletStats{ - Tablet: topo.NewTablet(2, "host2"), + Tablet: topo.NewTablet(2, "cell", "host2"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 35 * 60}, } ts3 = &TabletStats{ - Tablet: topo.NewTablet(3, "host3"), + Tablet: topo.NewTablet(3, "cell", "host3"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 40 * 60}, } ts4 = &TabletStats{ - Tablet: topo.NewTablet(4, "host4"), + Tablet: topo.NewTablet(4, "cell", "host4"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 45 * 60}, } @@ -109,12 +109,12 @@ func TestFilterByReplicationLag(t *testing.T) { } // lags of (1m, 100m) - return at least 2 items to avoid overloading if the 2nd one is not delayed too much ts1 = &TabletStats{ - Tablet: topo.NewTablet(1, "host1"), + Tablet: topo.NewTablet(1, "cell", "host1"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60}, } ts2 = &TabletStats{ - Tablet: topo.NewTablet(2, "host2"), + Tablet: topo.NewTablet(2, "cell", "host2"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 100 * 60}, } @@ -124,12 +124,12 @@ func TestFilterByReplicationLag(t *testing.T) { } // lags of (1m, 3h) - return 1 if the 2nd one is delayed too much ts1 = &TabletStats{ - Tablet: topo.NewTablet(1, "host1"), + Tablet: topo.NewTablet(1, "cell", "host1"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60}, } ts2 = &TabletStats{ - Tablet: topo.NewTablet(2, "host2"), + Tablet: topo.NewTablet(2, "cell", "host2"), Serving: true, Stats: &querypb.RealtimeStats{SecondsBehindMaster: 3 * 60 * 60}, } diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index 2e3dad4d96..11c87640ae 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -113,11 +113,12 @@ func TabletComplete(tablet *topodatapb.Tablet) error { return nil } -// NewTablet create a new Tablet record with the given Hostname and id. 
-func NewTablet(uid uint32, host string) *topodatapb.Tablet { +// NewTablet create a new Tablet record with the given id, cell, and hostname. +func NewTablet(uid uint32, cell, host string) *topodatapb.Tablet { return &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ - Uid: uid, + Cell: cell, + Uid: uid, }, Hostname: host, PortMap: make(map[string]int32), diff --git a/go/vt/vtgate/discoverygateway.go b/go/vt/vtgate/discoverygateway.go index a7af84a060..5c6c7699a1 100644 --- a/go/vt/vtgate/discoverygateway.go +++ b/go/vt/vtgate/discoverygateway.go @@ -355,42 +355,42 @@ func shuffleTablets(tablets []*topodatapb.Tablet) { // replica - return all from local cell. // TODO(liang): select replica by replication lag. func (dg *discoveryGateway) getTablets(keyspace, shard string, tabletType topodatapb.TabletType) []*topodatapb.Tablet { - epsList := dg.hc.GetTabletStatsFromTarget(keyspace, shard, tabletType) + tsList := dg.hc.GetTabletStatsFromTarget(keyspace, shard, tabletType) // for master, use any cells and return the one with max reparent timestamp. if tabletType == topodatapb.TabletType_MASTER { var maxTimestamp int64 - var ep *topodatapb.Tablet - for _, eps := range epsList { - if eps.LastError != nil || !eps.Serving { + var t *topodatapb.Tablet + for _, ts := range tsList { + if ts.LastError != nil || !ts.Serving { continue } - if eps.TabletExternallyReparentedTimestamp >= maxTimestamp { - maxTimestamp = eps.TabletExternallyReparentedTimestamp - ep = eps.Tablet + if ts.TabletExternallyReparentedTimestamp >= maxTimestamp { + maxTimestamp = ts.TabletExternallyReparentedTimestamp + t = ts.Tablet } } - if ep == nil { + if t == nil { return nil } - return []*topodatapb.Tablet{ep} + return []*topodatapb.Tablet{t} } // for non-master, use only tablets from local cell and filter by replication lag. 
- list := make([]*discovery.TabletStats, 0, len(epsList)) - for _, eps := range epsList { - if eps.LastError != nil || !eps.Serving { + list := make([]*discovery.TabletStats, 0, len(tsList)) + for _, ts := range tsList { + if ts.LastError != nil || !ts.Serving { continue } - if dg.localCell != eps.Cell { + if dg.localCell != ts.Tablet.Alias.Cell { continue } - list = append(list, eps) + list = append(list, ts) } list = discovery.FilterByReplicationLag(list) - epList := make([]*topodatapb.Tablet, 0, len(list)) - for _, eps := range list { - epList = append(epList, eps.Tablet) + tList := make([]*topodatapb.Tablet, 0, len(list)) + for _, ts := range list { + tList = append(tList, ts.Tablet) } - return epList + return tList } // WrapError returns ShardError which preserves the original error code if possible, diff --git a/go/vt/vtgate/fakehealthcheck_test.go b/go/vt/vtgate/fakehealthcheck_test.go index 4846d50437..df6c5fa04a 100644 --- a/go/vt/vtgate/fakehealthcheck_test.go +++ b/go/vt/vtgate/fakehealthcheck_test.go @@ -45,7 +45,6 @@ func (fhc *fakeHealthCheck) AddTablet(cell, name string, tablet *topodatapb.Tabl item := &fhcItem{ ts: &discovery.TabletStats{ Tablet: tablet, - Cell: cell, Name: name, }, } @@ -112,7 +111,7 @@ func (fhc *fakeHealthCheck) addTestTablet(cell, host string, port int32, keyspac if conn != nil { conn.SetTarget(keyspace, shard, tabletType) } - ep := topo.NewTablet(0, host) + ep := topo.NewTablet(0, cell, host) ep.PortMap["vt"] = port key := discovery.TabletToMapKey(ep) item := fhc.items[key] @@ -120,7 +119,11 @@ func (fhc *fakeHealthCheck) addTestTablet(cell, host string, port int32, keyspac fhc.AddTablet(cell, "", ep) item = fhc.items[key] } - item.ts.Target = &querypb.Target{Keyspace: keyspace, Shard: shard, TabletType: tabletType} + item.ts.Target = &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + } item.ts.Serving = serving item.ts.TabletExternallyReparentedTimestamp = reparentTS item.ts.Stats = 
&querypb.RealtimeStats{} diff --git a/go/vt/vtgate/resolver_test.go b/go/vt/vtgate/resolver_test.go index fc838f65c7..dec5f0c2fc 100644 --- a/go/vt/vtgate/resolver_test.go +++ b/go/vt/vtgate/resolver_test.go @@ -204,8 +204,8 @@ func testResolverGeneric(t *testing.T, name string, action func(hc discovery.Hea hc.addTestTablet("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) hc.addTestTablet("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) _, err = action(hc) - want1 := fmt.Sprintf("shard, host: %s.-20.master, alias:<> hostname:\"-20\" port_map: , error: err", name) - want2 := fmt.Sprintf("shard, host: %s.20-40.master, alias:<> hostname:\"20-40\" port_map: , retry: err", name) + want1 := fmt.Sprintf("shard, host: %s.-20.master, alias: hostname:\"-20\" port_map: , error: err", name) + want2 := fmt.Sprintf("shard, host: %s.20-40.master, alias: hostname:\"20-40\" port_map: , retry: err", name) want := []string{want1, want2} sort.Strings(want) if err == nil { @@ -237,8 +237,8 @@ func testResolverGeneric(t *testing.T, name string, action func(hc discovery.Hea hc.addTestTablet("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil, sbc0) hc.addTestTablet("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) _, err = action(hc) - want1 = fmt.Sprintf("shard, host: %s.-20.master, alias:<> hostname:\"-20\" port_map: , retry: err", name) - want2 = fmt.Sprintf("shard, host: %s.20-40.master, alias:<> hostname:\"20-40\" port_map: , fatal: err", name) + want1 = fmt.Sprintf("shard, host: %s.-20.master, alias: hostname:\"-20\" port_map: , retry: err", name) + want2 = fmt.Sprintf("shard, host: %s.20-40.master, alias: hostname:\"20-40\" port_map: , fatal: err", name) want = []string{want1, want2} sort.Strings(want) if err == nil { @@ -389,7 +389,7 @@ func testResolverStreamGeneric(t *testing.T, name string, action func(hc discove hc.addTestTablet("aa", "-20", 1, name, "-20", 
topodatapb.TabletType_MASTER, true, 1, nil, sbc0) hc.addTestTablet("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil, sbc1) _, err = action(hc) - want := fmt.Sprintf("shard, host: %s.-20.master, alias:<> hostname:\"-20\" port_map: , retry: err", name) + want := fmt.Sprintf("shard, host: %s.-20.master, alias: hostname:\"-20\" port_map: , retry: err", name) if err == nil || err.Error() != want { t.Errorf("want\n%s\ngot\n%v", want, err) } @@ -509,7 +509,7 @@ func TestResolverExecBatchReresolve(t *testing.T) { } _, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, false, nil, buildBatchRequest) - want := "shard, host: TestResolverExecBatchReresolve.0.master, alias:<> hostname:\"0\" port_map: , retry: err" + want := "shard, host: TestResolverExecBatchReresolve.0.master, alias: hostname:\"0\" port_map: , retry: err" if err == nil || err.Error() != want { t.Errorf("want %s, got %v", want, err) } @@ -546,7 +546,7 @@ func TestResolverExecBatchAsTransaction(t *testing.T) { } _, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, true, nil, buildBatchRequest) - want := "shard, host: TestResolverExecBatchAsTransaction.0.master, alias:<> hostname:\"0\" port_map: , retry: err" + want := "shard, host: TestResolverExecBatchAsTransaction.0.master, alias: hostname:\"0\" port_map: , retry: err" if err == nil || err.Error() != want { t.Errorf("want %v, got %v", want, err) } diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 3cc65c7d4e..1e801c4c67 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -137,7 +137,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(hc discovery.Healt sbc := &sandboxConn{mustFailServer: 1} hc.addTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil, sbc) qr, err = f(hc, []string{"0"}) - want := fmt.Sprintf("shard, host: %v.0.replica, alias:<> hostname:\"0\" port_map: 
, error: err", name) + want := fmt.Sprintf("shard, host: %v.0.replica, alias: hostname:\"0\" port_map: , error: err", name) // Verify server error string. if err == nil || err.Error() != want { t.Errorf("want %s, got %v", want, err) @@ -156,7 +156,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(hc discovery.Healt hc.addTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) _, err = f(hc, []string{"0", "1"}) // Verify server errors are consolidated. - want = fmt.Sprintf("shard, host: %v.0.replica, alias:<> hostname:\"0\" port_map: , error: err\nshard, host: %v.1.replica, alias:<> hostname:\"1\" port_map: , error: err", name, name) + want = fmt.Sprintf("shard, host: %v.0.replica, alias: hostname:\"0\" port_map: , error: err\nshard, host: %v.1.replica, alias: hostname:\"1\" port_map: , error: err", name, name) verifyScatterConnError(t, err, want, vtrpcpb.ErrorCode_BAD_INPUT) // Ensure that we tried only once. if execCount := sbc0.ExecCount.Get(); execCount != 1 { @@ -175,7 +175,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(hc discovery.Healt hc.addTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil, sbc1) _, err = f(hc, []string{"0", "1"}) // Verify server errors are consolidated. - want = fmt.Sprintf("shard, host: %v.0.replica, alias:<> hostname:\"0\" port_map: , error: err\nshard, host: %v.1.replica, alias:<> hostname:\"1\" port_map: , tx_pool_full: err", name, name) + want = fmt.Sprintf("shard, host: %v.0.replica, alias: hostname:\"0\" port_map: , error: err\nshard, host: %v.1.replica, alias: hostname:\"1\" port_map: , tx_pool_full: err", name, name) // We should only surface the higher priority error code verifyScatterConnError(t, err, want, vtrpcpb.ErrorCode_BAD_INPUT) // Ensure that we tried only once. 
From c32483d205b45b4ebe04d58acb84826d58d3a858 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 16 May 2016 10:26:53 -0700 Subject: [PATCH 11/27] Removing Endpoint from proto. --- doc/vtctlReference.md | 6 - go/vt/proto/topodata/topodata.pb.go | 177 +++++++--------- go/vt/topo/naming.go | 59 ------ php/src/Vitess/Proto/Topodata/EndPoint.php | 190 ------------------ .../Proto/Topodata/EndPoint/PortMapEntry.php | 121 ----------- proto/topodata.proto | 16 -- py/vtproto/topodata_pb2.py | 132 ++---------- 7 files changed, 90 insertions(+), 611 deletions(-) delete mode 100644 go/vt/topo/naming.go delete mode 100644 php/src/Vitess/Proto/Topodata/EndPoint.php delete mode 100644 php/src/Vitess/Proto/Topodata/EndPoint/PortMapEntry.php diff --git a/doc/vtctlReference.md b/doc/vtctlReference.md index ea8b8d338d..535098b380 100644 --- a/doc/vtctlReference.md +++ b/doc/vtctlReference.md @@ -649,7 +649,6 @@ Starts a transaction on the provided server. #### Errors * the <tablet_alias> argument is required for the <VtTabletBegin> command This error occurs if the command is not called with exactly one argument. -* cannot get EndPoint from tablet record: %v * cannot connect to tablet %v: %v * Begin failed: %v @@ -683,7 +682,6 @@ Commits a transaction on the provided server. #### Errors * the <tablet_alias> and <transaction_id> arguments are required for the <VtTabletCommit> command This error occurs if the command is not called with exactly 2 arguments. -* cannot get EndPoint from tablet record: %v * cannot connect to tablet %v: %v @@ -718,7 +716,6 @@ Executes the given query on the given tablet. #### Errors * the <tablet_alias> and <sql> arguments are required for the <VtTabletExecute> command This error occurs if the command is not called with exactly 2 arguments. -* cannot get EndPoint from tablet record: %v * cannot connect to tablet %v: %v * Execute failed: %v @@ -752,7 +749,6 @@ Rollbacks a transaction on the provided server. 
#### Errors * the <tablet_alias> and <transaction_id> arguments are required for the <VtTabletRollback> command This error occurs if the command is not called with exactly 2 arguments. -* cannot get EndPoint from tablet record: %v * cannot connect to tablet %v: %v @@ -780,7 +776,6 @@ Executes the StreamHealth streaming query to a vttablet process. Will stop after #### Errors * The <tablet alias> argument is required for the <VtTabletStreamHealth> command. This error occurs if the command is not called with exactly one argument. -* cannot get EndPoint from tablet record: %v * cannot connect to tablet %v: %v * stream ended early: %v @@ -1624,7 +1619,6 @@ Blocks until the specified shard has caught up with the filtered replication of * The <keyspace/shard> argument is required for the <WaitForFilteredReplication> command. This error occurs if the command is not called with exactly one argument. * shard %v/%v has no source shard * shard %v/%v has no master -* cannot get EndPoint for master tablet record: %v record: %v * failed to run explicit healthcheck on tablet: %v err: %v * cannot connect to tablet %v: %v * could not stream health records from tablet: %v err: %v diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index ac752efac9..3ce7caf7a3 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -15,7 +15,6 @@ It has these top-level messages: Shard Keyspace ShardReplication - EndPoint SrvShard ShardReference SrvKeyspace @@ -414,28 +413,6 @@ func (m *ShardReplication_Node) GetTabletAlias() *TabletAlias { return nil } -// EndPoint corresponds to a single tablet. -type EndPoint struct { - // The uid of the tablet. - Uid uint32 `protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"` - // The host the tablet is running on (FQDN). - Host string `protobuf:"bytes,2,opt,name=host" json:"host,omitempty"` - // The ports opened for service. 
- PortMap map[string]int32 `protobuf:"bytes,3,rep,name=port_map,json=portMap" json:"port_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` -} - -func (m *EndPoint) Reset() { *m = EndPoint{} } -func (m *EndPoint) String() string { return proto.CompactTextString(m) } -func (*EndPoint) ProtoMessage() {} -func (*EndPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *EndPoint) GetPortMap() map[string]int32 { - if m != nil { - return m.PortMap - } - return nil -} - // SrvShard is a rollup node for the shard itself. type SrvShard struct { // Copied from Shard. @@ -448,7 +425,7 @@ type SrvShard struct { func (m *SrvShard) Reset() { *m = SrvShard{} } func (m *SrvShard) String() string { return proto.CompactTextString(m) } func (*SrvShard) ProtoMessage() {} -func (*SrvShard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*SrvShard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *SrvShard) GetKeyRange() *KeyRange { if m != nil { @@ -467,7 +444,7 @@ type ShardReference struct { func (m *ShardReference) Reset() { *m = ShardReference{} } func (m *ShardReference) String() string { return proto.CompactTextString(m) } func (*ShardReference) ProtoMessage() {} -func (*ShardReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*ShardReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *ShardReference) GetKeyRange() *KeyRange { if m != nil { @@ -490,7 +467,7 @@ type SrvKeyspace struct { func (m *SrvKeyspace) Reset() { *m = SrvKeyspace{} } func (m *SrvKeyspace) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace) ProtoMessage() {} -func (*SrvKeyspace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*SrvKeyspace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *SrvKeyspace) GetPartitions() []*SrvKeyspace_KeyspacePartition { if m != 
nil { @@ -517,7 +494,7 @@ func (m *SrvKeyspace_KeyspacePartition) Reset() { *m = SrvKeyspace_Keysp func (m *SrvKeyspace_KeyspacePartition) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {} func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{9, 0} + return fileDescriptor0, []int{8, 0} } func (m *SrvKeyspace_KeyspacePartition) GetShardReferences() []*ShardReference { @@ -539,7 +516,7 @@ type SrvKeyspace_ServedFrom struct { func (m *SrvKeyspace_ServedFrom) Reset() { *m = SrvKeyspace_ServedFrom{} } func (m *SrvKeyspace_ServedFrom) String() string { return proto.CompactTextString(m) } func (*SrvKeyspace_ServedFrom) ProtoMessage() {} -func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 1} } +func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 1} } func init() { proto.RegisterType((*KeyRange)(nil), "topodata.KeyRange") @@ -553,7 +530,6 @@ func init() { proto.RegisterType((*Keyspace_ServedFrom)(nil), "topodata.Keyspace.ServedFrom") proto.RegisterType((*ShardReplication)(nil), "topodata.ShardReplication") proto.RegisterType((*ShardReplication_Node)(nil), "topodata.ShardReplication.Node") - proto.RegisterType((*EndPoint)(nil), "topodata.EndPoint") proto.RegisterType((*SrvShard)(nil), "topodata.SrvShard") proto.RegisterType((*ShardReference)(nil), "topodata.ShardReference") proto.RegisterType((*SrvKeyspace)(nil), "topodata.SrvKeyspace") @@ -564,77 +540,74 @@ func init() { } var fileDescriptor0 = []byte{ - // 1143 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x2e, 0x29, 0x51, 0x96, 0x46, 0x8a, 0xc2, 0x6c, 0x9d, 0x82, 0x60, 0x51, 0xc4, 0xd0, 0xa5, - 0x81, 0x8b, 0xaa, 0x85, 0xd3, 0x1f, 0xc3, 0x40, 0x81, 0xc8, 0xaa, 0xd2, 0x3a, 0xb6, 0x65, 0x75, - 0x25, 0x23, 0xf5, 0x89, 0xa0, 0xa4, 0x8d, 
0x43, 0x44, 0x22, 0x59, 0xee, 0x4a, 0x80, 0x9e, 0x21, - 0x87, 0xde, 0xfb, 0x10, 0x3d, 0xf6, 0xda, 0x27, 0xea, 0x23, 0x14, 0xe8, 0xee, 0x2c, 0x29, 0x51, - 0xf2, 0x4f, 0x9d, 0xd6, 0x27, 0xcf, 0xec, 0xfc, 0xec, 0xcc, 0xec, 0xf7, 0x0d, 0x65, 0xa8, 0x8b, - 0x28, 0x8e, 0xc6, 0xbe, 0xf0, 0x9b, 0x71, 0x12, 0x89, 0x88, 0x94, 0x33, 0xbd, 0xb1, 0x07, 0xe5, - 0x63, 0xb6, 0xa0, 0x7e, 0x78, 0xc9, 0xc8, 0x36, 0x58, 0x5c, 0xf8, 0x89, 0x70, 0x8c, 0x1d, 0xe3, - 0x69, 0x8d, 0x6a, 0x85, 0xd8, 0x50, 0x60, 0xe1, 0xd8, 0x31, 0xf1, 0x4c, 0x89, 0x8d, 0x67, 0x50, - 0x1d, 0xf8, 0xc3, 0x09, 0x13, 0xad, 0x49, 0xe0, 0x73, 0x42, 0xa0, 0x38, 0x62, 0x93, 0x09, 0x46, - 0x55, 0x28, 0xca, 0x2a, 0x68, 0x16, 0xe8, 0xa0, 0x07, 0x54, 0x89, 0x8d, 0xbf, 0x0b, 0x50, 0xd2, - 0x51, 0xe4, 0x33, 0xb0, 0x7c, 0x15, 0x89, 0x11, 0xd5, 0xbd, 0xc7, 0xcd, 0x65, 0x75, 0xb9, 0xb4, - 0x54, 0xfb, 0x10, 0x17, 0xca, 0x6f, 0x22, 0x2e, 0x42, 0x7f, 0xca, 0x30, 0x5d, 0x85, 0x2e, 0x75, - 0x52, 0x07, 0x33, 0x88, 0x9d, 0x02, 0x9e, 0x4a, 0x89, 0xec, 0x43, 0x39, 0x8e, 0x12, 0xe1, 0x4d, - 0xfd, 0xd8, 0x29, 0xee, 0x14, 0x64, 0xee, 0x4f, 0x36, 0x73, 0x37, 0x7b, 0xd2, 0xe1, 0xd4, 0x8f, - 0x3b, 0xa1, 0x48, 0x16, 0x74, 0x2b, 0xd6, 0x9a, 0xba, 0xe5, 0x2d, 0x5b, 0xf0, 0xd8, 0x1f, 0x31, - 0xc7, 0xd2, 0xb7, 0x64, 0x3a, 0x8e, 0xe5, 0x8d, 0x9f, 0x8c, 0x9d, 0x12, 0x1a, 0xb4, 0x42, 0xbe, - 0x80, 0x8a, 0xf4, 0xf0, 0x12, 0x35, 0x39, 0x67, 0x0b, 0x1b, 0x21, 0xab, 0xcb, 0xb2, 0x99, 0x62, - 0x1a, 0x3d, 0xdd, 0xa7, 0x50, 0x14, 0x8b, 0x98, 0x39, 0x65, 0xe9, 0x5b, 0xdf, 0xdb, 0xde, 0x2c, - 0x6c, 0x20, 0x6d, 0x14, 0x3d, 0xa4, 0xa7, 0x3d, 0x1e, 0x7a, 0xaa, 0x43, 0x2f, 0x9a, 0xb3, 0x24, - 0x09, 0xc6, 0xcc, 0xa9, 0xe0, 0xdd, 0xf5, 0xf1, 0xb0, 0x2b, 0x8f, 0xcf, 0xd2, 0x53, 0xd2, 0x94, - 0x39, 0xfd, 0x4b, 0xee, 0x00, 0x36, 0xeb, 0x5e, 0x69, 0x76, 0x20, 0x8d, 0xba, 0x53, 0xf4, 0x73, - 0x0f, 0xa0, 0x96, 0xef, 0x5f, 0x3d, 0x93, 0xac, 0x2f, 0x7d, 0x39, 0x25, 0xaa, 0x66, 0xe7, 0xfe, - 0x64, 0xa6, 0x67, 0x6d, 0x51, 0xad, 0x1c, 0x98, 0xfb, 0x86, 0xfb, 0x2d, 0x54, 
0x96, 0xe9, 0xfe, - 0x2d, 0xb0, 0x92, 0x0b, 0x7c, 0x59, 0x2c, 0x57, 0xed, 0x5a, 0xe3, 0x5d, 0x09, 0xac, 0x3e, 0x4e, - 0x6e, 0x1f, 0x6a, 0x53, 0x9f, 0x0b, 0x96, 0x78, 0x77, 0x40, 0x41, 0x55, 0xbb, 0x6a, 0xa4, 0xad, - 0xcd, 0xdc, 0xbc, 0xc3, 0xcc, 0xbf, 0x83, 0x1a, 0x67, 0xc9, 0x9c, 0x8d, 0x3d, 0x35, 0x58, 0x2e, - 0xa1, 0xb2, 0x31, 0x27, 0xac, 0xa8, 0xd9, 0x47, 0x1f, 0x7c, 0x81, 0x2a, 0x5f, 0xca, 0x9c, 0x3c, - 0x87, 0x07, 0x3c, 0x9a, 0x25, 0x23, 0xe6, 0xe1, 0x9b, 0xf3, 0x14, 0x54, 0x1f, 0x5f, 0x89, 0x47, - 0x27, 0x94, 0x69, 0x8d, 0xaf, 0x14, 0xae, 0xa6, 0xa2, 0xf8, 0xc0, 0x25, 0xa8, 0x0a, 0x6a, 0x2a, - 0xa8, 0x90, 0x17, 0xf0, 0x50, 0x60, 0x8f, 0xde, 0x28, 0x92, 0xe3, 0x8c, 0xa4, 0xbd, 0xb4, 0x09, - 0x57, 0x9d, 0x59, 0x8f, 0xa2, 0xad, 0xbd, 0x68, 0x5d, 0xe4, 0x55, 0xee, 0x5e, 0x00, 0xac, 0x4a, - 0x27, 0x5f, 0x43, 0x35, 0xcd, 0x8a, 0x38, 0x33, 0x6e, 0xc1, 0x19, 0x88, 0xa5, 0xbc, 0x2a, 0xd1, - 0xcc, 0x95, 0xe8, 0xfe, 0x66, 0x40, 0x35, 0xd7, 0x56, 0x46, 0x68, 0x63, 0x49, 0xe8, 0x35, 0xca, - 0x98, 0x37, 0x51, 0xa6, 0x70, 0x23, 0x65, 0x8a, 0x77, 0x78, 0xbe, 0x8f, 0xa0, 0x84, 0x85, 0x66, - 0xe3, 0x4b, 0x35, 0xf7, 0x4f, 0x03, 0x1e, 0xac, 0x4d, 0xe6, 0x5e, 0x7b, 0x27, 0x7b, 0xf0, 0x78, - 0x1c, 0x70, 0xe5, 0xe5, 0xfd, 0x32, 0x63, 0xc9, 0xc2, 0x53, 0x98, 0x08, 0x64, 0x9b, 0xaa, 0x9b, - 0x32, 0xfd, 0x30, 0x35, 0xfe, 0xa4, 0x6c, 0x7d, 0x6d, 0x22, 0x9f, 0x03, 0x19, 0x4e, 0xfc, 0xd1, - 0xdb, 0x49, 0x20, 0xe1, 0x2a, 0xe1, 0xa6, 0xcb, 0x2e, 0x62, 0xda, 0x47, 0x39, 0x0b, 0x16, 0xc2, - 0x1b, 0x7f, 0x99, 0xb8, 0x77, 0xf5, 0xb4, 0xbe, 0x84, 0x6d, 0x1c, 0x50, 0x10, 0x5e, 0x4a, 0x40, - 0x4c, 0x66, 0xd3, 0x10, 0xc9, 0x9f, 0xb2, 0x8b, 0x64, 0xb6, 0x36, 0x9a, 0x14, 0xff, 0xc9, 0xcb, - 0xab, 0x11, 0xd8, 0xb7, 0x89, 0x7d, 0x3b, 0x6b, 0x43, 0xc5, 0x3b, 0x8e, 0x34, 0xba, 0x37, 0x72, - 0xe1, 0x0c, 0x76, 0xe1, 0x11, 0x8f, 0x27, 0x81, 0xd0, 0x18, 0x97, 0xe9, 0x66, 0xa1, 0xc0, 0x4e, - 0x2d, 0xfa, 0x10, 0x0d, 0x08, 0x80, 0xb6, 0x3a, 0x96, 0x84, 0xc8, 0xf8, 0xf4, 0x3a, 0x89, 0xa6, - 0xfc, 0xea, 0x92, 
0xcd, 0xee, 0x4b, 0x29, 0xf5, 0x42, 0x7a, 0x65, 0x94, 0x52, 0x32, 0x77, 0x67, - 0x19, 0x64, 0x95, 0x7a, 0xbf, 0xcf, 0x96, 0x07, 0x64, 0x61, 0x1d, 0x90, 0x8d, 0x77, 0x06, 0xd8, - 0x9a, 0x9f, 0x4c, 0xb6, 0x34, 0xf2, 0x45, 0x10, 0x85, 0xf2, 0x76, 0x2b, 0x8c, 0xc6, 0x4c, 0x6d, - 0x20, 0xd5, 0xc6, 0x93, 0x0d, 0xf2, 0xe5, 0x5c, 0x9b, 0x5d, 0xe9, 0x47, 0xb5, 0xb7, 0xfb, 0x1c, - 0x8a, 0x4a, 0x55, 0x7b, 0x2c, 0x2d, 0xfe, 0x2e, 0x7b, 0x4c, 0xac, 0x94, 0xc6, 0xef, 0x06, 0x94, - 0x3b, 0xe1, 0xb8, 0x17, 0x05, 0xa1, 0xb8, 0x86, 0x59, 0xf2, 0x83, 0xaa, 0x3e, 0x71, 0x29, 0xab, - 0x50, 0x26, 0x07, 0xb9, 0x4f, 0x5b, 0x61, 0xb3, 0xdc, 0x2c, 0xd7, 0xf5, 0x1f, 0xb7, 0xff, 0xb3, - 0xf5, 0xe5, 0xf2, 0x2e, 0xda, 0x56, 0x23, 0x86, 0x72, 0x3f, 0x99, 0xeb, 0x4d, 0x20, 0xab, 0xcb, - 0xa1, 0x13, 0xe5, 0xf7, 0x5f, 0xcc, 0x4f, 0x20, 0x5d, 0xec, 0x1e, 0xfe, 0x74, 0xd0, 0xcf, 0x05, - 0xfa, 0xa8, 0x2d, 0x4f, 0x1a, 0xe7, 0x50, 0x4f, 0x1f, 0xe1, 0x35, 0x4b, 0x58, 0x28, 0x59, 0x72, - 0x1f, 0xf7, 0x36, 0xfe, 0x28, 0xca, 0xb5, 0x96, 0xcc, 0x97, 0xd4, 0xfb, 0x01, 0x20, 0x96, 0x3f, - 0x72, 0x02, 0xf5, 0xc8, 0x19, 0x0e, 0x3e, 0xcd, 0xe1, 0x60, 0xe5, 0xba, 0x84, 0x76, 0x2f, 0xf3, - 0xa7, 0xb9, 0xd0, 0x1b, 0x39, 0x6c, 0xbe, 0x37, 0x87, 0x0b, 0xff, 0x81, 0xc3, 0x2d, 0xa8, 0xe6, - 0x78, 0x99, 0xd2, 0x72, 0xe7, 0xfa, 0x3e, 0x72, 0xcc, 0x84, 0x15, 0x33, 0xaf, 0x5f, 0x03, 0xd6, - 0xb5, 0x6b, 0xc0, 0xfd, 0xd5, 0x80, 0x47, 0x57, 0xc6, 0xa1, 0xc8, 0x9c, 0xfb, 0xd8, 0xde, 0x4e, - 0xe6, 0xd5, 0x57, 0x96, 0xb4, 0xc1, 0xd6, 0x57, 0x26, 0xd9, 0x53, 0x6b, 0x5e, 0x57, 0xf3, 0x33, - 0x58, 0xc7, 0x82, 0xac, 0x68, 0x4d, 0xe7, 0xae, 0x77, 0x1f, 0x6b, 0xe5, 0x96, 0x2f, 0xda, 0xee, - 0x1e, 0xd4, 0xd7, 0xdf, 0x81, 0x54, 0xc0, 0x3a, 0xef, 0xf6, 0x3b, 0x03, 0xfb, 0x03, 0x02, 0x50, - 0x3a, 0x3f, 0xea, 0x0e, 0xbe, 0xf9, 0xca, 0x36, 0xd4, 0xf1, 0xe1, 0xc5, 0xa0, 0xd3, 0xb7, 0xcd, - 0x5d, 0x39, 0x26, 0x58, 0x5d, 0x45, 0xaa, 0xb0, 0x75, 0xde, 0x3d, 0xee, 0x9e, 0xbd, 0xea, 0xea, - 0x90, 0xd3, 0x56, 0x7f, 0xd0, 0xa1, 0x32, 0x44, 0x1a, 
0x68, 0xa7, 0x77, 0x72, 0xd4, 0x6e, 0xd9, - 0xa6, 0x32, 0xd0, 0xef, 0xcf, 0xba, 0x27, 0x17, 0x76, 0x01, 0x73, 0xb5, 0x06, 0xed, 0x1f, 0xb5, - 0xd8, 0xef, 0xb5, 0x68, 0xc7, 0x2e, 0x4a, 0xfa, 0xd6, 0x3a, 0x3f, 0xf7, 0x3a, 0xf4, 0xe8, 0xb4, - 0xd3, 0x1d, 0xb4, 0x4e, 0x6c, 0x4b, 0xc5, 0x1c, 0xb6, 0xda, 0xc7, 0xe7, 0x3d, 0xbb, 0xa4, 0x93, - 0xf5, 0x07, 0x67, 0xd2, 0x75, 0x4b, 0x19, 0x5e, 0x9d, 0xd1, 0x63, 0x79, 0x4b, 0xd9, 0x35, 0x6d, - 0xe3, 0xd0, 0x05, 0x67, 0x14, 0x4d, 0x9b, 0x8b, 0x68, 0x26, 0x66, 0x43, 0xd6, 0x9c, 0x07, 0x82, - 0x71, 0xae, 0xff, 0x27, 0x18, 0x96, 0xf0, 0xcf, 0xb3, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe4, - 0x42, 0x70, 0x73, 0x2c, 0x0c, 0x00, 0x00, + // 1097 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0xe2, 0x46, + 0x14, 0xae, 0xcd, 0x4f, 0xe0, 0x40, 0x58, 0x67, 0x9a, 0xad, 0x2c, 0x57, 0xd5, 0x46, 0xdc, 0x74, + 0x95, 0xaa, 0xb4, 0xca, 0xf6, 0x27, 0x5a, 0xa9, 0xd2, 0x12, 0xca, 0xb6, 0xd9, 0x24, 0x84, 0x0e, + 0x8e, 0xb6, 0xb9, 0xb2, 0x0c, 0xcc, 0x66, 0xad, 0x05, 0xdb, 0xf5, 0x0c, 0x48, 0x3c, 0xc3, 0x5e, + 0xf4, 0xbe, 0x0f, 0xd2, 0xdb, 0x3e, 0x51, 0x1f, 0xa1, 0x52, 0x67, 0xce, 0xd8, 0x60, 0xc8, 0x4f, + 0xb3, 0x55, 0xae, 0x98, 0x33, 0xe7, 0x67, 0xce, 0xf9, 0xe6, 0xfb, 0xc6, 0x40, 0x43, 0x44, 0x71, + 0x34, 0xf6, 0x85, 0xdf, 0x8a, 0x93, 0x48, 0x44, 0xa4, 0x92, 0xd9, 0xcd, 0x03, 0xa8, 0x9c, 0xb0, + 0x05, 0xf5, 0xc3, 0x2b, 0x46, 0x76, 0xa1, 0xc4, 0x85, 0x9f, 0x08, 0xdb, 0xd8, 0x33, 0x9e, 0xd6, + 0xa9, 0x36, 0x88, 0x05, 0x05, 0x16, 0x8e, 0x6d, 0x13, 0xf7, 0xd4, 0xb2, 0xf9, 0x0c, 0x6a, 0xae, + 0x3f, 0x9c, 0x30, 0xd1, 0x9e, 0x04, 0x3e, 0x27, 0x04, 0x8a, 0x23, 0x36, 0x99, 0x60, 0x56, 0x95, + 0xe2, 0x5a, 0x25, 0xcd, 0x02, 0x9d, 0xb4, 0x4d, 0xd5, 0xb2, 0xf9, 0x4f, 0x01, 0xca, 0x3a, 0x8b, + 0x7c, 0x01, 0x25, 0x5f, 0x65, 0x62, 0x46, 0xed, 0xe0, 0x71, 0x6b, 0xd9, 0x5d, 0xae, 0x2c, 0xd5, + 0x31, 0xc4, 0x81, 0xca, 0xdb, 0x88, 0x8b, 0xd0, 0x9f, 0x32, 0x2c, 0x57, 0xa5, 0x4b, 0x9b, 0x34, + 
0xc0, 0x0c, 0x62, 0xbb, 0x80, 0xbb, 0x72, 0x45, 0x0e, 0xa1, 0x12, 0x47, 0x89, 0xf0, 0xa6, 0x7e, + 0x6c, 0x17, 0xf7, 0x0a, 0xb2, 0xf6, 0x67, 0x9b, 0xb5, 0x5b, 0x7d, 0x19, 0x70, 0xe6, 0xc7, 0xdd, + 0x50, 0x24, 0x0b, 0xba, 0x15, 0x6b, 0x4b, 0x9d, 0xf2, 0x8e, 0x2d, 0x78, 0xec, 0x8f, 0x98, 0x5d, + 0xd2, 0xa7, 0x64, 0x36, 0xc2, 0xf2, 0xd6, 0x4f, 0xc6, 0x76, 0x19, 0x1d, 0xda, 0x20, 0x5f, 0x41, + 0x55, 0x46, 0x78, 0x89, 0x42, 0xce, 0xde, 0xc2, 0x41, 0xc8, 0xea, 0xb0, 0x0c, 0x53, 0x2c, 0xa3, + 0xd1, 0x7d, 0x0a, 0x45, 0xb1, 0x88, 0x99, 0x5d, 0x91, 0xb1, 0x8d, 0x83, 0xdd, 0xcd, 0xc6, 0x5c, + 0xe9, 0xa3, 0x18, 0x21, 0x23, 0xad, 0xf1, 0xd0, 0x53, 0x13, 0x7a, 0xd1, 0x9c, 0x25, 0x49, 0x30, + 0x66, 0x76, 0x15, 0xcf, 0x6e, 0x8c, 0x87, 0x3d, 0xb9, 0x7d, 0x9e, 0xee, 0x92, 0x96, 0xac, 0xe9, + 0x5f, 0x71, 0x1b, 0x70, 0x58, 0xe7, 0xda, 0xb0, 0xae, 0x74, 0xea, 0x49, 0x31, 0xce, 0x79, 0x0e, + 0xf5, 0xfc, 0xfc, 0xea, 0x9a, 0x64, 0x7f, 0xe9, 0xcd, 0xa9, 0xa5, 0x1a, 0x76, 0xee, 0x4f, 0x66, + 0x1a, 0xeb, 0x12, 0xd5, 0xc6, 0x73, 0xf3, 0xd0, 0x70, 0xbe, 0x87, 0xea, 0xb2, 0xdc, 0x7f, 0x25, + 0x56, 0x73, 0x89, 0xaf, 0x8a, 0x95, 0x9a, 0x55, 0x6f, 0xbe, 0x2f, 0x43, 0x69, 0x80, 0xc8, 0x1d, + 0x42, 0x7d, 0xea, 0x73, 0xc1, 0x12, 0xef, 0x1e, 0x2c, 0xa8, 0xe9, 0x50, 0xcd, 0xb4, 0x35, 0xcc, + 0xcd, 0x7b, 0x60, 0xfe, 0x03, 0xd4, 0x39, 0x4b, 0xe6, 0x6c, 0xec, 0x29, 0x60, 0xb9, 0xa4, 0xca, + 0x06, 0x4e, 0xd8, 0x51, 0x6b, 0x80, 0x31, 0x78, 0x03, 0x35, 0xbe, 0x5c, 0x73, 0xf2, 0x02, 0xb6, + 0x79, 0x34, 0x4b, 0x46, 0xcc, 0xc3, 0x3b, 0xe7, 0x29, 0xa9, 0x3e, 0xbd, 0x96, 0x8f, 0x41, 0xb8, + 0xa6, 0x75, 0xbe, 0x32, 0xb8, 0x42, 0x45, 0xe9, 0x81, 0x4b, 0x52, 0x15, 0x14, 0x2a, 0x68, 0x90, + 0x97, 0xf0, 0x48, 0xe0, 0x8c, 0xde, 0x28, 0x92, 0x70, 0x46, 0xd2, 0x5f, 0xde, 0xa4, 0xab, 0xae, + 0xac, 0xa1, 0xe8, 0xe8, 0x28, 0xda, 0x10, 0x79, 0x93, 0x3b, 0x97, 0x00, 0xab, 0xd6, 0xc9, 0xb7, + 0x50, 0x4b, 0xab, 0x22, 0xcf, 0x8c, 0x3b, 0x78, 0x06, 0x62, 0xb9, 0x5e, 0xb5, 0x68, 0xe6, 0x5a, + 0x74, 0xfe, 0x30, 0xa0, 0x96, 0x1b, 
0x2b, 0x13, 0xb4, 0xb1, 0x14, 0xf4, 0x9a, 0x64, 0xcc, 0xdb, + 0x24, 0x53, 0xb8, 0x55, 0x32, 0xc5, 0x7b, 0x5c, 0xdf, 0x27, 0x50, 0xc6, 0x46, 0x33, 0xf8, 0x52, + 0xcb, 0xf9, 0xcb, 0x80, 0xed, 0x35, 0x64, 0x1e, 0x74, 0x76, 0x72, 0x00, 0x8f, 0xc7, 0x01, 0x57, + 0x51, 0xde, 0x6f, 0x33, 0x96, 0x2c, 0x3c, 0xc5, 0x89, 0x40, 0x8e, 0xa9, 0xa6, 0xa9, 0xd0, 0x8f, + 0x53, 0xe7, 0x2f, 0xca, 0x37, 0xd0, 0x2e, 0xf2, 0x25, 0x90, 0xe1, 0xc4, 0x1f, 0xbd, 0x9b, 0x04, + 0x92, 0xae, 0x92, 0x6e, 0xba, 0xed, 0x22, 0x96, 0xdd, 0xc9, 0x79, 0xb0, 0x11, 0xde, 0xfc, 0xdb, + 0xc4, 0x77, 0x57, 0xa3, 0xf5, 0x35, 0xec, 0x22, 0x40, 0x41, 0x78, 0x25, 0x09, 0x31, 0x99, 0x4d, + 0x43, 0x14, 0x7f, 0xaa, 0x2e, 0x92, 0xf9, 0x3a, 0xe8, 0x52, 0xfa, 0x27, 0xaf, 0xae, 0x67, 0xe0, + 0xdc, 0x26, 0xce, 0x6d, 0xaf, 0x81, 0x8a, 0x67, 0x1c, 0x6b, 0x76, 0x6f, 0xd4, 0x42, 0x0c, 0xf6, + 0x61, 0x87, 0xc7, 0x93, 0x40, 0x68, 0x8e, 0xcb, 0x72, 0xb3, 0x50, 0xe0, 0xa4, 0x25, 0xfa, 0x08, + 0x1d, 0x48, 0x80, 0x8e, 0xda, 0x96, 0x82, 0xc8, 0xf4, 0xf4, 0x26, 0x89, 0xa6, 0xfc, 0xfa, 0x23, + 0x9b, 0x9d, 0x97, 0x4a, 0xea, 0xa5, 0x8c, 0xca, 0x24, 0xa5, 0xd6, 0xdc, 0x99, 0x65, 0x94, 0x55, + 0xe6, 0xc3, 0x5e, 0x5b, 0x9e, 0x90, 0x85, 0x75, 0x42, 0x36, 0xdf, 0x1b, 0x60, 0x69, 0x7d, 0x32, + 0x39, 0xd2, 0xc8, 0x17, 0x41, 0x14, 0xca, 0xd3, 0x4b, 0x61, 0x34, 0x66, 0xea, 0x05, 0x52, 0x63, + 0x3c, 0xd9, 0x10, 0x5f, 0x2e, 0xb4, 0xd5, 0x93, 0x71, 0x54, 0x47, 0x3b, 0x2f, 0xa0, 0xa8, 0x4c, + 0xf5, 0x8e, 0xa5, 0xcd, 0xdf, 0xe7, 0x1d, 0x13, 0x2b, 0xa3, 0x19, 0x43, 0x65, 0x90, 0xcc, 0xb5, + 0xb0, 0xe4, 0xd7, 0x33, 0x77, 0xd9, 0xb8, 0xfe, 0xf0, 0x77, 0xee, 0x09, 0xa4, 0xef, 0xa4, 0x87, + 0x5f, 0x62, 0x3d, 0x3d, 0xe8, 0xad, 0x8e, 0xdc, 0x69, 0x5e, 0x40, 0x23, 0x9d, 0xe9, 0x0d, 0x4b, + 0x58, 0x28, 0x49, 0xf7, 0x10, 0xe7, 0x36, 0xff, 0x2c, 0xca, 0x57, 0x22, 0x99, 0x2f, 0x99, 0xfc, + 0x13, 0x40, 0x2c, 0xff, 0x33, 0x04, 0x0a, 0xb3, 0x0c, 0xd6, 0xcf, 0x73, 0xb0, 0xae, 0x42, 0x97, + 0x4c, 0xe9, 0x67, 0xf1, 0x34, 0x97, 0x7a, 0xab, 0x24, 0xcc, 0x0f, 0x96, 
0x44, 0xe1, 0x7f, 0x48, + 0xa2, 0x0d, 0xb5, 0x1c, 0xcd, 0x53, 0x96, 0xef, 0xdd, 0x3c, 0x47, 0x8e, 0xe8, 0xb0, 0x22, 0xfa, + 0xcd, 0xaa, 0x2a, 0xdd, 0xa8, 0x2a, 0xe7, 0x77, 0x03, 0x76, 0xae, 0xc1, 0xa1, 0xb4, 0x91, 0xfb, + 0x76, 0xdd, 0xad, 0x8d, 0xd5, 0x47, 0x8b, 0x74, 0xc0, 0xd2, 0x47, 0x26, 0xd9, 0x55, 0x6b, 0x99, + 0xd4, 0xf2, 0x18, 0xac, 0x73, 0x41, 0x76, 0xb4, 0x66, 0x73, 0xc7, 0x7b, 0x08, 0x95, 0xde, 0xf1, + 0x81, 0xd8, 0x3f, 0x80, 0xc6, 0xfa, 0x3d, 0x90, 0x2a, 0x94, 0x2e, 0x7a, 0x83, 0xae, 0x6b, 0x7d, + 0x44, 0x00, 0xca, 0x17, 0xc7, 0x3d, 0xf7, 0xbb, 0x6f, 0x2c, 0x43, 0x6d, 0x1f, 0x5d, 0xba, 0xdd, + 0x81, 0x65, 0xee, 0x4b, 0x98, 0x60, 0x75, 0x14, 0xa9, 0xc1, 0xd6, 0x45, 0xef, 0xa4, 0x77, 0xfe, + 0xba, 0xa7, 0x53, 0xce, 0xda, 0x03, 0xb7, 0x4b, 0x65, 0x8a, 0x74, 0xd0, 0x6e, 0xff, 0xf4, 0xb8, + 0xd3, 0xb6, 0x4c, 0xe5, 0xa0, 0x3f, 0x9e, 0xf7, 0x4e, 0x2f, 0xad, 0x02, 0xd6, 0x6a, 0xbb, 0x9d, + 0x9f, 0xf5, 0x72, 0xd0, 0x6f, 0xd3, 0xae, 0x55, 0x94, 0x5f, 0xb6, 0x7a, 0xf7, 0xd7, 0x7e, 0x97, + 0x1e, 0x9f, 0x75, 0x7b, 0x6e, 0xfb, 0xd4, 0x2a, 0xa9, 0x9c, 0xa3, 0x76, 0xe7, 0xe4, 0xa2, 0x6f, + 0x95, 0x75, 0xb1, 0x81, 0x7b, 0x2e, 0x43, 0xb7, 0x94, 0xe3, 0xf5, 0x39, 0x3d, 0x91, 0xa7, 0x54, + 0x1c, 0xd3, 0x32, 0x8e, 0x1c, 0xb0, 0x47, 0xd1, 0xb4, 0xb5, 0x88, 0x66, 0x62, 0x36, 0x64, 0xad, + 0x79, 0x20, 0x18, 0xe7, 0xfa, 0x2f, 0xf6, 0xb0, 0x8c, 0x3f, 0xcf, 0xfe, 0x0d, 0x00, 0x00, 0xff, + 0xff, 0x9d, 0x05, 0xe2, 0x27, 0x7b, 0x0b, 0x00, 0x00, } diff --git a/go/vt/topo/naming.go b/go/vt/topo/naming.go deleted file mode 100644 index 38567bda4b..0000000000 --- a/go/vt/topo/naming.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -/* -Handle logical name resolution - sort of like DNS but tailored to -vt and using the topology server. - -Naming is disconnected from the backend discovery and is used for -front end clients. 
- -The common query is "resolve keyspace.shard.db_type" and return a list -of host:port tuples that export our default server (vttablet). You can -get all shards with "keyspace.*.db_type". - -In zk, this is in /zk/local/vt/ns/// -*/ - -import topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - -const ( - // DefaultPortName is the port named used by SrvEntries - // if "" is given as the named port. - DefaultPortName = "vt" -) - -// NewEndPoint returns a new empty EndPoint -func NewEndPoint(uid uint32, host string) *topodatapb.EndPoint { - return &topodatapb.EndPoint{ - Uid: uid, - Host: host, - PortMap: make(map[string]int32), - } -} - -// EndPointEquality returns true iff two EndPoint are representing the same data -func EndPointEquality(left, right *topodatapb.EndPoint) bool { - if left.Uid != right.Uid { - return false - } - if left.Host != right.Host { - return false - } - if len(left.PortMap) != len(right.PortMap) { - return false - } - for key, lvalue := range left.PortMap { - rvalue, ok := right.PortMap[key] - if !ok { - return false - } - if lvalue != rvalue { - return false - } - } - return true -} diff --git a/php/src/Vitess/Proto/Topodata/EndPoint.php b/php/src/Vitess/Proto/Topodata/EndPoint.php deleted file mode 100644 index 6b4e20cbed..0000000000 --- a/php/src/Vitess/Proto/Topodata/EndPoint.php +++ /dev/null @@ -1,190 +0,0 @@ -number = 1; - $f->name = "uid"; - $f->type = \DrSlump\Protobuf::TYPE_UINT32; - $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $descriptor->addField($f); - - // OPTIONAL STRING host = 2 - $f = new \DrSlump\Protobuf\Field(); - $f->number = 2; - $f->name = "host"; - $f->type = \DrSlump\Protobuf::TYPE_STRING; - $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $descriptor->addField($f); - - // REPEATED MESSAGE port_map = 3 - $f = new \DrSlump\Protobuf\Field(); - $f->number = 3; - $f->name = "port_map"; - $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; - $f->rule = \DrSlump\Protobuf::RULE_REPEATED; - $f->reference = 
'\Vitess\Proto\Topodata\EndPoint\PortMapEntry'; - $descriptor->addField($f); - - foreach (self::$__extensions as $cb) { - $descriptor->addField($cb(), true); - } - - return $descriptor; - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasUid(){ - return $this->_has(1); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\EndPoint - */ - public function clearUid(){ - return $this->_clear(1); - } - - /** - * Get value - * - * @return int - */ - public function getUid(){ - return $this->_get(1); - } - - /** - * Set value - * - * @param int $value - * @return \Vitess\Proto\Topodata\EndPoint - */ - public function setUid( $value){ - return $this->_set(1, $value); - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasHost(){ - return $this->_has(2); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\EndPoint - */ - public function clearHost(){ - return $this->_clear(2); - } - - /** - * Get value - * - * @return string - */ - public function getHost(){ - return $this->_get(2); - } - - /** - * Set value - * - * @param string $value - * @return \Vitess\Proto\Topodata\EndPoint - */ - public function setHost( $value){ - return $this->_set(2, $value); - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasPortMap(){ - return $this->_has(3); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\EndPoint - */ - public function clearPortMap(){ - return $this->_clear(3); - } - - /** - * Get value - * - * @param int $idx - * @return \Vitess\Proto\Topodata\EndPoint\PortMapEntry - */ - public function getPortMap($idx = NULL){ - return $this->_get(3, $idx); - } - - /** - * Set value - * - * @param \Vitess\Proto\Topodata\EndPoint\PortMapEntry $value - * @return \Vitess\Proto\Topodata\EndPoint - */ - public function setPortMap(\Vitess\Proto\Topodata\EndPoint\PortMapEntry $value, $idx = NULL){ - return $this->_set(3, $value, $idx); - } - - /** - * 
Get all elements of - * - * @return \Vitess\Proto\Topodata\EndPoint\PortMapEntry[] - */ - public function getPortMapList(){ - return $this->_get(3); - } - - /** - * Add a new element to - * - * @param \Vitess\Proto\Topodata\EndPoint\PortMapEntry $value - * @return \Vitess\Proto\Topodata\EndPoint - */ - public function addPortMap(\Vitess\Proto\Topodata\EndPoint\PortMapEntry $value){ - return $this->_add(3, $value); - } - } -} - diff --git a/php/src/Vitess/Proto/Topodata/EndPoint/PortMapEntry.php b/php/src/Vitess/Proto/Topodata/EndPoint/PortMapEntry.php deleted file mode 100644 index d23aabfc38..0000000000 --- a/php/src/Vitess/Proto/Topodata/EndPoint/PortMapEntry.php +++ /dev/null @@ -1,121 +0,0 @@ -number = 1; - $f->name = "key"; - $f->type = \DrSlump\Protobuf::TYPE_STRING; - $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $descriptor->addField($f); - - // OPTIONAL INT32 value = 2 - $f = new \DrSlump\Protobuf\Field(); - $f->number = 2; - $f->name = "value"; - $f->type = \DrSlump\Protobuf::TYPE_INT32; - $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $descriptor->addField($f); - - foreach (self::$__extensions as $cb) { - $descriptor->addField($cb(), true); - } - - return $descriptor; - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasKey(){ - return $this->_has(1); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\EndPoint\PortMapEntry - */ - public function clearKey(){ - return $this->_clear(1); - } - - /** - * Get value - * - * @return string - */ - public function getKey(){ - return $this->_get(1); - } - - /** - * Set value - * - * @param string $value - * @return \Vitess\Proto\Topodata\EndPoint\PortMapEntry - */ - public function setKey( $value){ - return $this->_set(1, $value); - } - - /** - * Check if has a value - * - * @return boolean - */ - public function hasValue(){ - return $this->_has(2); - } - - /** - * Clear value - * - * @return \Vitess\Proto\Topodata\EndPoint\PortMapEntry - */ - public function 
clearValue(){ - return $this->_clear(2); - } - - /** - * Get value - * - * @return int - */ - public function getValue(){ - return $this->_get(2); - } - - /** - * Set value - * - * @param int $value - * @return \Vitess\Proto\Topodata\EndPoint\PortMapEntry - */ - public function setValue( $value){ - return $this->_set(2, $value); - } - } -} - diff --git a/proto/topodata.proto b/proto/topodata.proto index 38d5b3c09f..a1b4d1b84f 100644 --- a/proto/topodata.proto +++ b/proto/topodata.proto @@ -238,22 +238,6 @@ message ShardReplication { // Serving graph information -// EndPoint corresponds to a single tablet. -message EndPoint { - // The uid of the tablet. - uint32 uid = 1; - - // The host the tablet is running on (FQDN). - string host = 2; - - // The ports opened for service. - map port_map = 3; - - // OBSOLETE: The health entries. - // map health_map = 4; - reserved 4; -} - // SrvShard is a rollup node for the shard itself. message SrvShard { // Copied from Shard. diff --git a/py/vtproto/topodata_pb2.py b/py/vtproto/topodata_pb2.py index 746ad92aeb..3ae4df7f8c 100644 --- a/py/vtproto/topodata_pb2.py +++ b/py/vtproto/topodata_pb2.py @@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='topodata.proto', package='topodata', syntax='proto3', - serialized_pb=_b('\n\x0etopodata.proto\x12\x08topodata\"&\n\x08KeyRange\x12\r\n\x05start\x18\x01 \x01(\x0c\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x0c\"(\n\x0bTabletAlias\x12\x0c\n\x04\x63\x65ll\x18\x01 \x01(\t\x12\x0b\n\x03uid\x18\x02 \x01(\r\"\x90\x03\n\x06Tablet\x12$\n\x05\x61lias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12\n\n\x02ip\x18\x03 \x01(\t\x12/\n\x08port_map\x18\x04 \x03(\x0b\x32\x1d.topodata.Tablet.PortMapEntry\x12\x10\n\x08keyspace\x18\x05 \x01(\t\x12\r\n\x05shard\x18\x06 \x01(\t\x12%\n\tkey_range\x18\x07 \x01(\x0b\x32\x12.topodata.KeyRange\x12\"\n\x04type\x18\x08 \x01(\x0e\x32\x14.topodata.TabletType\x12\x18\n\x10\x64\x62_name_override\x18\t 
\x01(\t\x12(\n\x04tags\x18\n \x03(\x0b\x32\x1a.topodata.Tablet.TagsEntry\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x0b\x10\x0c\"\xcb\x04\n\x05Shard\x12+\n\x0cmaster_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x30\n\x0cserved_types\x18\x03 \x03(\x0b\x32\x1a.topodata.Shard.ServedType\x12\x32\n\rsource_shards\x18\x04 \x03(\x0b\x32\x1b.topodata.Shard.SourceShard\x12\r\n\x05\x63\x65lls\x18\x05 \x03(\t\x12\x36\n\x0ftablet_controls\x18\x06 \x03(\x0b\x32\x1d.topodata.Shard.TabletControl\x1a\x46\n\nServedType\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x1ar\n\x0bSourceShard\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x10\n\x08keyspace\x18\x02 \x01(\t\x12\r\n\x05shard\x18\x03 \x01(\t\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x1a\x84\x01\n\rTabletControl\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x1d\n\x15\x64isable_query_service\x18\x03 \x01(\x08\x12\x1a\n\x12\x62lacklisted_tables\x18\x04 \x03(\t\"\x8a\x02\n\x08Keyspace\x12\x1c\n\x14sharding_column_name\x18\x01 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x02 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x19\n\x11split_shard_count\x18\x03 \x01(\x05\x12\x33\n\x0cserved_froms\x18\x04 \x03(\x0b\x32\x1d.topodata.Keyspace.ServedFrom\x1aX\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x10\n\x08keyspace\x18\x03 \x01(\t\"w\n\x10ShardReplication\x12.\n\x05nodes\x18\x01 \x03(\x0b\x32\x1f.topodata.ShardReplication.Node\x1a\x33\n\x04Node\x12+\n\x0ctablet_alias\x18\x01 
\x01(\x0b\x32\x15.topodata.TabletAlias\"\x8e\x01\n\x08\x45ndPoint\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\x31\n\x08port_map\x18\x03 \x03(\x0b\x32\x1f.topodata.EndPoint.PortMapEntry\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05\"T\n\x08SrvShard\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x13\n\x0bmaster_cell\x18\x03 \x01(\t\"E\n\x0eShardReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\"\xb1\x03\n\x0bSrvKeyspace\x12;\n\npartitions\x18\x01 \x03(\x0b\x32\'.topodata.SrvKeyspace.KeyspacePartition\x12\x1c\n\x14sharding_column_name\x18\x02 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x03 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x35\n\x0bserved_from\x18\x04 \x03(\x0b\x32 .topodata.SrvKeyspace.ServedFrom\x12\x19\n\x11split_shard_count\x18\x05 \x01(\x05\x1ar\n\x11KeyspacePartition\x12)\n\x0bserved_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x32\n\x10shard_references\x18\x02 \x03(\x0b\x32\x18.topodata.ShardReference\x1aI\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x10\n\x08keyspace\x18\x02 \x01(\t*2\n\x0eKeyspaceIdType\x12\t\n\x05UNSET\x10\x00\x12\n\n\x06UINT64\x10\x01\x12\t\n\x05\x42YTES\x10\x02*\x8f\x01\n\nTabletType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07REPLICA\x10\x02\x12\n\n\x06RDONLY\x10\x03\x12\t\n\x05\x42\x41TCH\x10\x03\x12\t\n\x05SPARE\x10\x04\x12\x10\n\x0c\x45XPERIMENTAL\x10\x05\x12\n\n\x06\x42\x41\x43KUP\x10\x06\x12\x0b\n\x07RESTORE\x10\x07\x12\n\n\x06WORKER\x10\x08\x1a\x02\x10\x01\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') + serialized_pb=_b('\n\x0etopodata.proto\x12\x08topodata\"&\n\x08KeyRange\x12\r\n\x05start\x18\x01 \x01(\x0c\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x0c\"(\n\x0bTabletAlias\x12\x0c\n\x04\x63\x65ll\x18\x01 
\x01(\t\x12\x0b\n\x03uid\x18\x02 \x01(\r\"\x90\x03\n\x06Tablet\x12$\n\x05\x61lias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12\n\n\x02ip\x18\x03 \x01(\t\x12/\n\x08port_map\x18\x04 \x03(\x0b\x32\x1d.topodata.Tablet.PortMapEntry\x12\x10\n\x08keyspace\x18\x05 \x01(\t\x12\r\n\x05shard\x18\x06 \x01(\t\x12%\n\tkey_range\x18\x07 \x01(\x0b\x32\x12.topodata.KeyRange\x12\"\n\x04type\x18\x08 \x01(\x0e\x32\x14.topodata.TabletType\x12\x18\n\x10\x64\x62_name_override\x18\t \x01(\t\x12(\n\x04tags\x18\n \x03(\x0b\x32\x1a.topodata.Tablet.TagsEntry\x1a.\n\x0cPortMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x0b\x10\x0c\"\xcb\x04\n\x05Shard\x12+\n\x0cmaster_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x30\n\x0cserved_types\x18\x03 \x03(\x0b\x32\x1a.topodata.Shard.ServedType\x12\x32\n\rsource_shards\x18\x04 \x03(\x0b\x32\x1b.topodata.Shard.SourceShard\x12\r\n\x05\x63\x65lls\x18\x05 \x03(\t\x12\x36\n\x0ftablet_controls\x18\x06 \x03(\x0b\x32\x1d.topodata.Shard.TabletControl\x1a\x46\n\nServedType\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x1ar\n\x0bSourceShard\x12\x0b\n\x03uid\x18\x01 \x01(\r\x12\x10\n\x08keyspace\x18\x02 \x01(\t\x12\r\n\x05shard\x18\x03 \x01(\t\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x1a\x84\x01\n\rTabletControl\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x1d\n\x15\x64isable_query_service\x18\x03 \x01(\x08\x12\x1a\n\x12\x62lacklisted_tables\x18\x04 \x03(\t\"\x8a\x02\n\x08Keyspace\x12\x1c\n\x14sharding_column_name\x18\x01 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x02 
\x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x19\n\x11split_shard_count\x18\x03 \x01(\x05\x12\x33\n\x0cserved_froms\x18\x04 \x03(\x0b\x32\x1d.topodata.Keyspace.ServedFrom\x1aX\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\t\x12\x10\n\x08keyspace\x18\x03 \x01(\t\"w\n\x10ShardReplication\x12.\n\x05nodes\x18\x01 \x03(\x0b\x32\x1f.topodata.ShardReplication.Node\x1a\x33\n\x04Node\x12+\n\x0ctablet_alias\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"T\n\x08SrvShard\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x13\n\x0bmaster_cell\x18\x03 \x01(\t\"E\n\x0eShardReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\"\xb1\x03\n\x0bSrvKeyspace\x12;\n\npartitions\x18\x01 \x03(\x0b\x32\'.topodata.SrvKeyspace.KeyspacePartition\x12\x1c\n\x14sharding_column_name\x18\x02 \x01(\t\x12\x36\n\x14sharding_column_type\x18\x03 \x01(\x0e\x32\x18.topodata.KeyspaceIdType\x12\x35\n\x0bserved_from\x18\x04 \x03(\x0b\x32 .topodata.SrvKeyspace.ServedFrom\x12\x19\n\x11split_shard_count\x18\x05 \x01(\x05\x1ar\n\x11KeyspacePartition\x12)\n\x0bserved_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x32\n\x10shard_references\x18\x02 \x03(\x0b\x32\x18.topodata.ShardReference\x1aI\n\nServedFrom\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\x12\x10\n\x08keyspace\x18\x02 \x01(\t*2\n\x0eKeyspaceIdType\x12\t\n\x05UNSET\x10\x00\x12\n\n\x06UINT64\x10\x01\x12\t\n\x05\x42YTES\x10\x02*\x8f\x01\n\nTabletType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07REPLICA\x10\x02\x12\n\n\x06RDONLY\x10\x03\x12\t\n\x05\x42\x41TCH\x10\x03\x12\t\n\x05SPARE\x10\x04\x12\x10\n\x0c\x45XPERIMENTAL\x10\x05\x12\n\n\x06\x42\x41\x43KUP\x10\x06\x12\x0b\n\x07RESTORE\x10\x07\x12\n\n\x06WORKER\x10\x08\x1a\x02\x10\x01\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) 
@@ -45,8 +45,8 @@ _KEYSPACEIDTYPE = _descriptor.EnumDescriptor( ], containing_type=None, options=None, - serialized_start=2231, - serialized_end=2281, + serialized_start=2086, + serialized_end=2136, ) _sym_db.RegisterEnumDescriptor(_KEYSPACEIDTYPE) @@ -100,8 +100,8 @@ _TABLETTYPE = _descriptor.EnumDescriptor( ], containing_type=None, options=_descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001')), - serialized_start=2284, - serialized_end=2427, + serialized_start=2139, + serialized_end=2282, ) _sym_db.RegisterEnumDescriptor(_TABLETTYPE) @@ -735,88 +735,6 @@ _SHARDREPLICATION = _descriptor.Descriptor( ) -_ENDPOINT_PORTMAPENTRY = _descriptor.Descriptor( - name='PortMapEntry', - full_name='topodata.EndPoint.PortMapEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='topodata.EndPoint.PortMapEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='topodata.EndPoint.PortMapEntry.value', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=414, - serialized_end=460, -) - -_ENDPOINT = _descriptor.Descriptor( - name='EndPoint', - full_name='topodata.EndPoint', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='uid', full_name='topodata.EndPoint.uid', index=0, - number=1, 
type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='host', full_name='topodata.EndPoint.host', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='port_map', full_name='topodata.EndPoint.port_map', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_ENDPOINT_PORTMAPENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1494, - serialized_end=1636, -) - - _SRVSHARD = _descriptor.Descriptor( name='SrvShard', full_name='topodata.SrvShard', @@ -857,8 +775,8 @@ _SRVSHARD = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1638, - serialized_end=1722, + serialized_start=1493, + serialized_end=1577, ) @@ -895,8 +813,8 @@ _SHARDREFERENCE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1724, - serialized_end=1793, + serialized_start=1579, + serialized_end=1648, ) @@ -933,8 +851,8 @@ _SRVKEYSPACE_KEYSPACEPARTITION = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2040, - serialized_end=2154, + serialized_start=1895, + serialized_end=2009, ) _SRVKEYSPACE_SERVEDFROM = _descriptor.Descriptor( @@ -970,8 +888,8 @@ _SRVKEYSPACE_SERVEDFROM = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=2156, - serialized_end=2229, + serialized_start=2011, + serialized_end=2084, ) _SRVKEYSPACE = 
_descriptor.Descriptor( @@ -1028,8 +946,8 @@ _SRVKEYSPACE = _descriptor.Descriptor( extension_ranges=[], oneofs=[ ], - serialized_start=1796, - serialized_end=2229, + serialized_start=1651, + serialized_end=2084, ) _TABLET_PORTMAPENTRY.containing_type = _TABLET @@ -1057,8 +975,6 @@ _KEYSPACE.fields_by_name['served_froms'].message_type = _KEYSPACE_SERVEDFROM _SHARDREPLICATION_NODE.fields_by_name['tablet_alias'].message_type = _TABLETALIAS _SHARDREPLICATION_NODE.containing_type = _SHARDREPLICATION _SHARDREPLICATION.fields_by_name['nodes'].message_type = _SHARDREPLICATION_NODE -_ENDPOINT_PORTMAPENTRY.containing_type = _ENDPOINT -_ENDPOINT.fields_by_name['port_map'].message_type = _ENDPOINT_PORTMAPENTRY _SRVSHARD.fields_by_name['key_range'].message_type = _KEYRANGE _SHARDREFERENCE.fields_by_name['key_range'].message_type = _KEYRANGE _SRVKEYSPACE_KEYSPACEPARTITION.fields_by_name['served_type'].enum_type = _TABLETTYPE @@ -1075,7 +991,6 @@ DESCRIPTOR.message_types_by_name['Tablet'] = _TABLET DESCRIPTOR.message_types_by_name['Shard'] = _SHARD DESCRIPTOR.message_types_by_name['Keyspace'] = _KEYSPACE DESCRIPTOR.message_types_by_name['ShardReplication'] = _SHARDREPLICATION -DESCRIPTOR.message_types_by_name['EndPoint'] = _ENDPOINT DESCRIPTOR.message_types_by_name['SrvShard'] = _SRVSHARD DESCRIPTOR.message_types_by_name['ShardReference'] = _SHARDREFERENCE DESCRIPTOR.message_types_by_name['SrvKeyspace'] = _SRVKEYSPACE @@ -1180,21 +1095,6 @@ ShardReplication = _reflection.GeneratedProtocolMessageType('ShardReplication', _sym_db.RegisterMessage(ShardReplication) _sym_db.RegisterMessage(ShardReplication.Node) -EndPoint = _reflection.GeneratedProtocolMessageType('EndPoint', (_message.Message,), dict( - - PortMapEntry = _reflection.GeneratedProtocolMessageType('PortMapEntry', (_message.Message,), dict( - DESCRIPTOR = _ENDPOINT_PORTMAPENTRY, - __module__ = 'topodata_pb2' - # @@protoc_insertion_point(class_scope:topodata.EndPoint.PortMapEntry) - )) - , - DESCRIPTOR = _ENDPOINT, - 
__module__ = 'topodata_pb2' - # @@protoc_insertion_point(class_scope:topodata.EndPoint) - )) -_sym_db.RegisterMessage(EndPoint) -_sym_db.RegisterMessage(EndPoint.PortMapEntry) - SrvShard = _reflection.GeneratedProtocolMessageType('SrvShard', (_message.Message,), dict( DESCRIPTOR = _SRVSHARD, __module__ = 'topodata_pb2' @@ -1241,8 +1141,6 @@ _TABLET_PORTMAPENTRY.has_options = True _TABLET_PORTMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _TABLET_TAGSENTRY.has_options = True _TABLET_TAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -_ENDPOINT_PORTMAPENTRY.has_options = True -_ENDPOINT_PORTMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) import abc from grpc.beta import implementations as beta_implementations from grpc.framework.common import cardinality From e36f9db5553d76374902c6608b7128793e8979ba Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 16 May 2016 14:06:06 -0700 Subject: [PATCH 12/27] Fixing review comments. removing a somewhat useless method, using proper variable names. 
--- go/cmd/vtgate/status.go | 12 +-- go/vt/discovery/healthcheck.go | 94 +++++++++++------------- go/vt/discovery/healthcheck_test.go | 40 +++++----- go/vt/discovery/healthcheck_wait.go | 8 +- go/vt/discovery/healthcheck_wait_test.go | 8 +- go/vt/vtgate/discoverygateway.go | 6 +- go/vt/vtgate/discoverygateway_test.go | 12 +-- go/vt/vtgate/fakehealthcheck_test.go | 10 +-- go/vt/vtgate/gateway.go | 14 ++-- go/vt/worker/executor.go | 4 +- go/vt/worker/split_clone.go | 6 +- go/vt/worker/vertical_split_clone.go | 6 +- go/vt/wrangler/keyspace.go | 20 ++--- 13 files changed, 117 insertions(+), 123 deletions(-) diff --git a/go/cmd/vtgate/status.go b/go/cmd/vtgate/status.go index 2b0601e19c..c84cfd0461 100644 --- a/go/cmd/vtgate/status.go +++ b/go/cmd/vtgate/status.go @@ -235,13 +235,13 @@ google.setOnLoadCallback(function() { TabletType EndPointsStats - {{range $i, $eps := .}} + {{range $i, $ts := .}} - {{github_com_youtube_vitess_vtctld_srv_cell $eps.Cell}} - {{github_com_youtube_vitess_vtctld_srv_keyspace $eps.Cell $eps.Target.Keyspace}} - {{github_com_youtube_vitess_vtctld_srv_shard $eps.Cell $eps.Target.Keyspace $eps.Target.Shard}} - {{github_com_youtube_vitess_vtctld_srv_type $eps.Cell $eps.Target.Keyspace $eps.Target.Shard $eps.Target.TabletType}} - {{$eps.StatusAsHTML}} + {{github_com_youtube_vitess_vtctld_srv_cell $ts.Cell}} + {{github_com_youtube_vitess_vtctld_srv_keyspace $ts.Cell $ts.Target.Keyspace}} + {{github_com_youtube_vitess_vtctld_srv_shard $ts.Cell $ts.Target.Keyspace $ts.Target.Shard}} + {{github_com_youtube_vitess_vtctld_srv_type $ts.Cell $ts.Target.Keyspace $ts.Target.Shard $ts.Target.TabletType}} + {{$ts.StatusAsHTML}} {{end}} diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index 7351298f40..24828196a0 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -75,12 +75,6 @@ type TabletStats struct { LastError error } -// Alias returns the alias of the tablet. 
-// The return value can be used e.g. to generate the input for the topo API. -func (e *TabletStats) Alias() *topodatapb.TabletAlias { - return e.Tablet.Alias -} - // String is defined because we want to print a []*TabletStats array nicely. func (e *TabletStats) String() string { return fmt.Sprint(*e) @@ -325,7 +319,7 @@ func (hcc *healthCheckConn) processResponse(hc *HealthCheckImpl, tablet *topodat hc.mu.Unlock() } else if hcc.target.TabletType != shr.Target.TabletType { // tablet type changed for the tablet - log.Infof("HealthCheckUpdate(Type Change): %v, EP: %v/%+v, target %+v => %+v, reparent time: %v", hcc.name, hcc.cell, tablet, hcc.target, shr.Target, shr.TabletExternallyReparentedTimestamp) + log.Infof("HealthCheckUpdate(Type Change): %v, tablet: %v/%+v, target %+v => %+v, reparent time: %v", hcc.name, hcc.cell, tablet, hcc.target, shr.Target, shr.TabletExternallyReparentedTimestamp) hc.mu.Lock() hc.deleteTabletFromTargetProtected(hcc.target, tablet) hcc.update(shr, serving, healthErr, true) @@ -492,16 +486,16 @@ func (hc *HealthCheckImpl) GetTabletStatsFromKeyspaceShard(keyspace, shard strin return nil } res := make([]*TabletStats, 0, 1) - for _, epList := range ttMap { - for _, ep := range epList { - key := TabletToMapKey(ep) + for _, tList := range ttMap { + for _, t := range tList { + key := TabletToMapKey(t) hcc, ok := hc.addrToConns[key] if !ok { continue } hcc.mu.RLock() ts := &TabletStats{ - Tablet: ep, + Tablet: t, Name: hcc.name, Target: hcc.target, Up: hcc.up, @@ -530,20 +524,20 @@ func (hc *HealthCheckImpl) GetTabletStatsFromTarget(keyspace, shard string, tabl if !ok { return nil } - epList, ok := ttMap[tabletType] + tList, ok := ttMap[tabletType] if !ok { return nil } res := make([]*TabletStats, 0, 1) - for _, ep := range epList { - key := TabletToMapKey(ep) + for _, t := range tList { + key := TabletToMapKey(t) hcc, ok := hc.addrToConns[key] if !ok { continue } hcc.mu.RLock() ts := &TabletStats{ - Tablet: ep, + Tablet: t, Name: hcc.name, 
Target: hcc.target, Up: hcc.up, @@ -584,17 +578,17 @@ func (hc *HealthCheckImpl) addTabletToTargetProtected(target *querypb.Target, ta ttMap = make(map[topodatapb.TabletType][]*topodatapb.Tablet) shardMap[target.Shard] = ttMap } - epList, ok := ttMap[target.TabletType] + tList, ok := ttMap[target.TabletType] if !ok { - epList = make([]*topodatapb.Tablet, 0, 1) + tList = make([]*topodatapb.Tablet, 0, 1) } - for _, ep := range epList { - if topo.TabletEquality(ep, tablet) { + for _, t := range tList { + if topo.TabletEquality(t, tablet) { log.Warningf("tablet is already added: %+v", tablet) return } } - ttMap[target.TabletType] = append(epList, tablet) + ttMap[target.TabletType] = append(tList, tablet) } // deleteTabletFromTargetProtected deletes the tablet for the given target. @@ -608,14 +602,14 @@ func (hc *HealthCheckImpl) deleteTabletFromTargetProtected(target *querypb.Targe if !ok { return } - epList, ok := ttMap[target.TabletType] + tList, ok := ttMap[target.TabletType] if !ok { return } - for i, ep := range epList { - if topo.TabletEquality(ep, tablet) { - epList = append(epList[:i], epList[i+1:]...) - ttMap[target.TabletType] = epList + for i, t := range tList { + if topo.TabletEquality(t, tablet) { + tList = append(tList[:i], tList[i+1:]...) + ttMap[target.TabletType] = tList return } } @@ -656,12 +650,12 @@ func (tsl TabletStatsList) Swap(i, j int) { } // StatusAsHTML returns an HTML version of the status. 
-func (epcs *TabletsCacheStatus) StatusAsHTML() template.HTML { - epLinks := make([]string, 0, 1) - if epcs.TabletsStats != nil { - sort.Sort(epcs.TabletsStats) +func (tcs *TabletsCacheStatus) StatusAsHTML() template.HTML { + tLinks := make([]string, 0, 1) + if tcs.TabletsStats != nil { + sort.Sort(tcs.TabletsStats) } - for _, ts := range epcs.TabletsStats { + for _, ts := range tcs.TabletsStats { vtPort := ts.Tablet.PortMap["vt"] color := "green" extra := "" @@ -684,45 +678,45 @@ func (epcs *TabletsCacheStatus) StatusAsHTML() template.HTML { if name == "" { name = addr } - epLinks = append(epLinks, fmt.Sprintf(`%v%v`, addr, color, name, extra)) + tLinks = append(tLinks, fmt.Sprintf(`%v%v`, addr, color, name, extra)) } - return template.HTML(strings.Join(epLinks, "
")) + return template.HTML(strings.Join(tLinks, "
")) } // TabletsCacheStatusList is used for sorting. type TabletsCacheStatusList []*TabletsCacheStatus // Len is part of sort.Interface. -func (epcsl TabletsCacheStatusList) Len() int { - return len(epcsl) +func (tcsl TabletsCacheStatusList) Len() int { + return len(tcsl) } // Less is part of sort.Interface -func (epcsl TabletsCacheStatusList) Less(i, j int) bool { - return epcsl[i].Cell+"."+epcsl[i].Target.Keyspace+"."+epcsl[i].Target.Shard+"."+string(epcsl[i].Target.TabletType) < - epcsl[j].Cell+"."+epcsl[j].Target.Keyspace+"."+epcsl[j].Target.Shard+"."+string(epcsl[j].Target.TabletType) +func (tcsl TabletsCacheStatusList) Less(i, j int) bool { + return tcsl[i].Cell+"."+tcsl[i].Target.Keyspace+"."+tcsl[i].Target.Shard+"."+string(tcsl[i].Target.TabletType) < + tcsl[j].Cell+"."+tcsl[j].Target.Keyspace+"."+tcsl[j].Target.Shard+"."+string(tcsl[j].Target.TabletType) } // Swap is part of sort.Interface -func (epcsl TabletsCacheStatusList) Swap(i, j int) { - epcsl[i], epcsl[j] = epcsl[j], epcsl[i] +func (tcsl TabletsCacheStatusList) Swap(i, j int) { + tcsl[i], tcsl[j] = tcsl[j], tcsl[i] } // CacheStatus returns a displayable version of the cache. 
func (hc *HealthCheckImpl) CacheStatus() TabletsCacheStatusList { - epcsMap := make(map[string]*TabletsCacheStatus) + tcsMap := make(map[string]*TabletsCacheStatus) hc.mu.RLock() for _, hcc := range hc.addrToConns { hcc.mu.RLock() key := fmt.Sprintf("%v.%v.%v.%v", hcc.cell, hcc.target.Keyspace, hcc.target.Shard, string(hcc.target.TabletType)) - var epcs *TabletsCacheStatus + var tcs *TabletsCacheStatus var ok bool - if epcs, ok = epcsMap[key]; !ok { - epcs = &TabletsCacheStatus{ + if tcs, ok = tcsMap[key]; !ok { + tcs = &TabletsCacheStatus{ Cell: hcc.cell, Target: hcc.target, } - epcsMap[key] = epcs + tcsMap[key] = tcs } stats := &TabletStats{ Tablet: hcc.tablet, @@ -735,15 +729,15 @@ func (hc *HealthCheckImpl) CacheStatus() TabletsCacheStatusList { LastError: hcc.lastError, } hcc.mu.RUnlock() - epcs.TabletsStats = append(epcs.TabletsStats, stats) + tcs.TabletsStats = append(tcs.TabletsStats, stats) } hc.mu.RUnlock() - epcsl := make(TabletsCacheStatusList, 0, len(epcsMap)) - for _, epcs := range epcsMap { - epcsl = append(epcsl, epcs) + tcsl := make(TabletsCacheStatusList, 0, len(tcsMap)) + for _, tcs := range tcsMap { + tcsl = append(tcsl, tcs) } - sort.Sort(epcsl) - return epcsl + sort.Sort(tcsl) + return tcsl } // Close stops the healthcheck. 
diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index bea93cea6c..0a69a8be59 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -26,15 +26,15 @@ func init() { } func TestHealthCheck(t *testing.T) { - ep := topo.NewTablet(0, "cell", "a") - ep.PortMap["vt"] = 1 + tablet := topo.NewTablet(0, "cell", "a") + tablet.PortMap["vt"] = 1 input := make(chan *querypb.StreamHealthResponse) - fakeConn := createFakeConn(ep, input) + fakeConn := createFakeConn(tablet, input) t.Logf(`createFakeConn({Host: "a", PortMap: {"vt": 1}}, c)`) l := newListener() hc := NewHealthCheck(1*time.Millisecond, 1*time.Millisecond, time.Hour, "" /* statsSuffix */).(*HealthCheckImpl) hc.SetListener(l) - hc.AddTablet("cell", "", ep) + hc.AddTablet("cell", "", tablet) t.Logf(`hc = HealthCheck(); hc.AddTablet("cell", "", {Host: "a", PortMap: {"vt": 1}})`) // no tablet before getting first StreamHealthResponse @@ -51,7 +51,7 @@ func TestHealthCheck(t *testing.T) { RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, } want := &TabletStats{ - Tablet: ep, + Tablet: tablet, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, Up: true, Serving: true, @@ -68,12 +68,12 @@ func TestHealthCheck(t *testing.T) { if len(tsList) != 1 || !reflect.DeepEqual(tsList[0], want) { t.Errorf(`hc.GetTabletStatsFromKeyspaceShard("k", "s") = %+v; want %+v`, tsList, want) } - epcsl := hc.CacheStatus() - epcslWant := TabletsCacheStatusList{{ + tcsl := hc.CacheStatus() + tcslWant := TabletsCacheStatusList{{ Cell: "cell", Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, TabletsStats: TabletStatsList{{ - Tablet: ep, + Tablet: tablet, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, Up: true, Serving: true, @@ -81,8 +81,8 @@ func TestHealthCheck(t *testing.T) { TabletExternallyReparentedTimestamp: 
10, }}, }} - if !reflect.DeepEqual(epcsl, epcslWant) { - t.Errorf(`hc.CacheStatus() = %+v; want %+v`, epcsl, epcslWant) + if !reflect.DeepEqual(tcsl, tcslWant) { + t.Errorf(`hc.CacheStatus() = %+v; want %+v`, tcsl, tcslWant) } // TabletType changed @@ -93,7 +93,7 @@ func TestHealthCheck(t *testing.T) { RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.5}, } want = &TabletStats{ - Tablet: ep, + Tablet: tablet, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Up: true, Serving: true, @@ -119,7 +119,7 @@ func TestHealthCheck(t *testing.T) { RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.3}, } want = &TabletStats{ - Tablet: ep, + Tablet: tablet, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Up: true, Serving: false, @@ -141,7 +141,7 @@ func TestHealthCheck(t *testing.T) { RealtimeStats: &querypb.RealtimeStats{HealthError: "some error", SecondsBehindMaster: 1, CpuUsage: 0.3}, } want = &TabletStats{ - Tablet: ep, + Tablet: tablet, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Up: true, Serving: false, @@ -157,11 +157,11 @@ func TestHealthCheck(t *testing.T) { } // remove tablet - hc.deleteConn(ep) + hc.deleteConn(tablet) close(fakeConn.hcChan) t.Logf(`hc.RemoveTablet({Host: "a", PortMap: {"vt": 1}})`) want = &TabletStats{ - Tablet: ep, + Tablet: tablet, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Up: false, Serving: false, @@ -183,15 +183,15 @@ func TestHealthCheck(t *testing.T) { func TestHealthCheckTimeout(t *testing.T) { timeout := 500 * time.Millisecond - ep := topo.NewTablet(0, "cell", "a") - ep.PortMap["vt"] = 1 + tablet := topo.NewTablet(0, "cell", "a") + tablet.PortMap["vt"] = 1 input := make(chan *querypb.StreamHealthResponse) - createFakeConn(ep, input) + createFakeConn(tablet, input) t.Logf(`createFakeConn({Host: "a", 
PortMap: {"vt": 1}}, c)`) l := newListener() hc := NewHealthCheck(1*time.Millisecond, 1*time.Millisecond, timeout, "" /* statsSuffix */).(*HealthCheckImpl) hc.SetListener(l) - hc.AddTablet("cell", "", ep) + hc.AddTablet("cell", "", tablet) t.Logf(`hc = HealthCheck(); hc.AddTablet("cell", "", {Host: "a", PortMap: {"vt": 1}})`) // one tablet after receiving a StreamHealthResponse @@ -202,7 +202,7 @@ func TestHealthCheckTimeout(t *testing.T) { RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 1, CpuUsage: 0.2}, } want := &TabletStats{ - Tablet: ep, + Tablet: tablet, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, Up: true, Serving: true, diff --git a/go/vt/discovery/healthcheck_wait.go b/go/vt/discovery/healthcheck_wait.go index 9e4aae29fb..3774648110 100644 --- a/go/vt/discovery/healthcheck_wait.go +++ b/go/vt/discovery/healthcheck_wait.go @@ -111,11 +111,11 @@ RetryLoop: for ks := range keyspaceShards { allPresent := true for _, tt := range types { - epl := hc.GetTabletStatsFromTarget(ks.keyspace, ks.shard, tt) + tl := hc.GetTabletStatsFromTarget(ks.keyspace, ks.shard, tt) if requireServing { hasServingEP := false - for _, eps := range epl { - if eps.LastError == nil && eps.Serving { + for _, t := range tl { + if t.LastError == nil && t.Serving { hasServingEP = true break } @@ -125,7 +125,7 @@ RetryLoop: break } } else { - if len(epl) == 0 { + if len(tl) == 0 { allPresent = false break } diff --git a/go/vt/discovery/healthcheck_wait_test.go b/go/vt/discovery/healthcheck_wait_test.go index 9ab4eaa2e7..d1f6ff49fd 100644 --- a/go/vt/discovery/healthcheck_wait_test.go +++ b/go/vt/discovery/healthcheck_wait_test.go @@ -109,13 +109,13 @@ func TestWaitForTablets(t *testing.T) { defer shortCancel() waitAvailableTabletInterval = 20 * time.Millisecond - ep := topo.NewTablet(0, "cell", "a") - ep.PortMap["vt"] = 1 + tablet := topo.NewTablet(0, "cell", "a") + tablet.PortMap["vt"] = 1 input := make(chan 
*querypb.StreamHealthResponse) - createFakeConn(ep, input) + createFakeConn(tablet, input) hc := NewHealthCheck(1*time.Millisecond, 1*time.Millisecond, 1*time.Hour, "" /* statsSuffix */) - hc.AddTablet("cell", "", ep) + hc.AddTablet("cell", "", tablet) // this should time out if err := WaitForTablets(shortCtx, hc, "cell", "keyspace", "shard", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err != ErrWaitForTabletsTimeout { diff --git a/go/vt/vtgate/discoverygateway.go b/go/vt/vtgate/discoverygateway.go index 5c6c7699a1..919c7a63b7 100644 --- a/go/vt/vtgate/discoverygateway.go +++ b/go/vt/vtgate/discoverygateway.go @@ -262,9 +262,9 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, keyspace, shard strin shuffleTablets(tablets) // skip tablets we tried before - for _, ep := range tablets { - if _, ok := invalidTablets[discovery.TabletToMapKey(ep)]; !ok { - tablet = ep + for _, t := range tablets { + if _, ok := invalidTablets[discovery.TabletToMapKey(t)]; !ok { + tablet = t break } } diff --git a/go/vt/vtgate/discoverygateway_test.go b/go/vt/vtgate/discoverygateway_test.go index bd7df3d25b..2479afb307 100644 --- a/go/vt/vtgate/discoverygateway_test.go +++ b/go/vt/vtgate/discoverygateway_test.go @@ -90,18 +90,18 @@ func TestDiscoveryGatewayGetTablets(t *testing.T) { hc.Reset() hc.addTestTablet("remote", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil, nil) ep1 := hc.addTestTablet("local", "2.2.2.2", 1001, keyspace, shard, topodatapb.TabletType_REPLICA, true, 10, nil, nil) - eps := dg.getTablets(keyspace, shard, topodatapb.TabletType_REPLICA) - if len(eps) != 1 || !topo.TabletEquality(eps[0], ep1) { - t.Errorf("want %+v, got %+v", ep1, eps) + tsl := dg.getTablets(keyspace, shard, topodatapb.TabletType_REPLICA) + if len(tsl) != 1 || !topo.TabletEquality(tsl[0], ep1) { + t.Errorf("want %+v, got %+v", ep1, tsl) } // master should use the one with newer timestamp regardless of cell hc.Reset() hc.addTestTablet("remote", 
"1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_MASTER, true, 5, nil, nil) ep1 = hc.addTestTablet("remote", "2.2.2.2", 1001, keyspace, shard, topodatapb.TabletType_MASTER, true, 10, nil, nil) - eps = dg.getTablets(keyspace, shard, topodatapb.TabletType_MASTER) - if len(eps) != 1 || !topo.TabletEquality(eps[0], ep1) { - t.Errorf("want %+v, got %+v", ep1, eps) + tsl = dg.getTablets(keyspace, shard, topodatapb.TabletType_MASTER) + if len(tsl) != 1 || !topo.TabletEquality(tsl[0], ep1) { + t.Errorf("want %+v, got %+v", ep1, tsl) } } diff --git a/go/vt/vtgate/fakehealthcheck_test.go b/go/vt/vtgate/fakehealthcheck_test.go index df6c5fa04a..9c6e5c96b4 100644 --- a/go/vt/vtgate/fakehealthcheck_test.go +++ b/go/vt/vtgate/fakehealthcheck_test.go @@ -111,12 +111,12 @@ func (fhc *fakeHealthCheck) addTestTablet(cell, host string, port int32, keyspac if conn != nil { conn.SetTarget(keyspace, shard, tabletType) } - ep := topo.NewTablet(0, cell, host) - ep.PortMap["vt"] = port - key := discovery.TabletToMapKey(ep) + t := topo.NewTablet(0, cell, host) + t.PortMap["vt"] = port + key := discovery.TabletToMapKey(t) item := fhc.items[key] if item == nil { - fhc.AddTablet(cell, "", ep) + fhc.AddTablet(cell, "", t) item = fhc.items[key] } item.ts.Target = &querypb.Target{ @@ -129,5 +129,5 @@ func (fhc *fakeHealthCheck) addTestTablet(cell, host string, port int32, keyspac item.ts.Stats = &querypb.RealtimeStats{} item.ts.LastError = err item.conn = conn - return ep + return t } diff --git a/go/vt/vtgate/gateway.go b/go/vt/vtgate/gateway.go index bff0a91df5..46343543a7 100644 --- a/go/vt/vtgate/gateway.go +++ b/go/vt/vtgate/gateway.go @@ -147,20 +147,20 @@ func (e *ShardError) VtErrorCode() vtrpcpb.ErrorCode { type GatewayTabletCacheStatusList []*GatewayTabletCacheStatus // Len is part of sort.Interface. 
-func (gepcsl GatewayTabletCacheStatusList) Len() int { - return len(gepcsl) +func (gtcsl GatewayTabletCacheStatusList) Len() int { + return len(gtcsl) } // Less is part of sort.Interface. -func (gepcsl GatewayTabletCacheStatusList) Less(i, j int) bool { - iKey := strings.Join([]string{gepcsl[i].Keyspace, gepcsl[i].Shard, string(gepcsl[i].TabletType), gepcsl[i].Name}, ".") - jKey := strings.Join([]string{gepcsl[j].Keyspace, gepcsl[j].Shard, string(gepcsl[j].TabletType), gepcsl[j].Name}, ".") +func (gtcsl GatewayTabletCacheStatusList) Less(i, j int) bool { + iKey := strings.Join([]string{gtcsl[i].Keyspace, gtcsl[i].Shard, string(gtcsl[i].TabletType), gtcsl[i].Name}, ".") + jKey := strings.Join([]string{gtcsl[j].Keyspace, gtcsl[j].Shard, string(gtcsl[j].TabletType), gtcsl[j].Name}, ".") return iKey < jKey } // Swap is part of sort.Interface. -func (gepcsl GatewayTabletCacheStatusList) Swap(i, j int) { - gepcsl[i], gepcsl[j] = gepcsl[j], gepcsl[i] +func (gtcsl GatewayTabletCacheStatusList) Swap(i, j int) { + gtcsl[i], gtcsl[j] = gtcsl[j], gtcsl[i] } // GatewayTabletCacheStatus contains the status per tablet for a gateway. diff --git a/go/vt/worker/executor.go b/go/vt/worker/executor.go index ad5cc173ee..db6e2dc286 100644 --- a/go/vt/worker/executor.go +++ b/go/vt/worker/executor.go @@ -132,7 +132,7 @@ func (e *executor) fetchWithRetries(ctx context.Context, command string) error { retry: masterAlias := "no-master-was-available" if master != nil { - masterAlias = topoproto.TabletAliasString(master.Alias()) + masterAlias = topoproto.TabletAliasString(master.Tablet.Alias) } tabletString := fmt.Sprintf("%v (%v/%v)", masterAlias, e.keyspace, e.shard) @@ -153,7 +153,7 @@ func (e *executor) fetchWithRetries(ctx context.Context, command string) error { // succeeded, false if the error is retryable and a non-nil error if the // command must not be retried. 
func (e *executor) checkError(err error, isRetry bool, master *discovery.TabletStats) (bool, error) { - tabletString := fmt.Sprintf("%v (%v/%v)", topoproto.TabletAliasString(master.Alias()), e.keyspace, e.shard) + tabletString := fmt.Sprintf("%v (%v/%v)", topoproto.TabletAliasString(master.Tablet.Alias), e.keyspace, e.shard) // If the ExecuteFetch call failed because of an application error, we will try to figure out why. // We need to extract the MySQL error number, and will attempt to retry if we think the error is recoverable. match := errExtract.FindStringSubmatch(err.Error()) diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index d1dcb32b5d..938c12f03b 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -386,15 +386,15 @@ func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { // Get the MySQL database name of the tablet. shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - ti, err := scw.wr.TopoServer().GetTablet(shortCtx, master.Alias()) + ti, err := scw.wr.TopoServer().GetTablet(shortCtx, master.Tablet.Alias) cancel() if err != nil { - return fmt.Errorf("cannot get the TabletInfo for destination master (%v) to find out its db name: %v", topoproto.TabletAliasString(master.Alias()), err) + return fmt.Errorf("cannot get the TabletInfo for destination master (%v) to find out its db name: %v", topoproto.TabletAliasString(master.Tablet.Alias), err) } keyspaceAndShard := topoproto.KeyspaceShardString(si.Keyspace(), si.ShardName()) scw.destinationDbNames[keyspaceAndShard] = ti.DbName() - scw.wr.Logger().Infof("Using tablet %v as destination master for %v/%v", topoproto.TabletAliasString(master.Alias()), si.Keyspace(), si.ShardName()) + scw.wr.Logger().Infof("Using tablet %v as destination master for %v/%v", topoproto.TabletAliasString(master.Tablet.Alias), si.Keyspace(), si.ShardName()) } scw.wr.Logger().Infof("NOTE: The used master of a destination shard might change over the course of 
the copy e.g. due to a reparent. The HealthCheck module will track and log master changes and any error message will always refer the actually used master address.") diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index 27f50a83ca..ddd95e1da7 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -353,15 +353,15 @@ func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error { // Get the MySQL database name of the tablet. shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - ti, err := vscw.wr.TopoServer().GetTablet(shortCtx, master.Alias()) + ti, err := vscw.wr.TopoServer().GetTablet(shortCtx, master.Tablet.Alias) cancel() if err != nil { - return fmt.Errorf("cannot get the TabletInfo for destination master (%v) to find out its db name: %v", topoproto.TabletAliasString(master.Alias()), err) + return fmt.Errorf("cannot get the TabletInfo for destination master (%v) to find out its db name: %v", topoproto.TabletAliasString(master.Tablet.Alias), err) } keyspaceAndShard := topoproto.KeyspaceShardString(vscw.destinationKeyspace, vscw.destinationShard) vscw.destinationDbNames[keyspaceAndShard] = ti.DbName() - vscw.wr.Logger().Infof("Using tablet %v as destination master for %v/%v", topoproto.TabletAliasString(master.Alias()), vscw.destinationKeyspace, vscw.destinationShard) + vscw.wr.Logger().Infof("Using tablet %v as destination master for %v/%v", topoproto.TabletAliasString(master.Tablet.Alias), vscw.destinationKeyspace, vscw.destinationShard) vscw.wr.Logger().Infof("NOTE: The used master of a destination shard might change over the course of the copy e.g. due to a reparent. The HealthCheck module will track and log master changes and any error message will always refer the actually used master address.") // Set up the throttler for the destination shard. 
diff --git a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index f77ab249bd..51797431dc 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -509,11 +509,11 @@ func (wr *Wrangler) waitForDrainInCell(ctx context.Context, cell, keyspace, shar healthyTablets := discovery.RemoveUnhealthyTablets( hc.GetTabletStatsFromTarget(keyspace, shard, servedType)) - for _, eps := range healthyTablets { - if eps.Stats.Qps == 0.0 { - drainedHealthyTablets[eps.Tablet.Alias.Uid] = eps + for _, ts := range healthyTablets { + if ts.Stats.Qps == 0.0 { + drainedHealthyTablets[ts.Tablet.Alias.Uid] = ts } else { - notDrainedHealtyTablets[eps.Tablet.Alias.Uid] = eps + notDrainedHealtyTablets[ts.Tablet.Alias.Uid] = ts } } @@ -537,8 +537,8 @@ func (wr *Wrangler) waitForDrainInCell(ctx context.Context, cell, keyspace, shar timer.Stop() var l []string - for _, eps := range notDrainedHealtyTablets { - l = append(l, formatEndpointStats(eps)) + for _, ts := range notDrainedHealtyTablets { + l = append(l, formatEndpointStats(ts)) } return fmt.Errorf("%v: WaitForDrain failed for %v tablets in %v/%v. Only %d/%d tablets were drained. 
err: %v List of tablets which were not drained: %v", cell, servedType, keyspace, shard, len(drainedHealthyTablets), len(healthyTablets), ctx.Err(), strings.Join(l, ";")) @@ -549,12 +549,12 @@ func (wr *Wrangler) waitForDrainInCell(ctx context.Context, cell, keyspace, shar return nil } -func formatEndpointStats(eps *discovery.TabletStats) string { +func formatEndpointStats(ts *discovery.TabletStats) string { webURL := "unknown http port" - if webPort, ok := eps.Tablet.PortMap["vt"]; ok { - webURL = fmt.Sprintf("http://%v:%d/", eps.Tablet.Hostname, webPort) + if webPort, ok := ts.Tablet.PortMap["vt"]; ok { + webURL = fmt.Sprintf("http://%v:%d/", ts.Tablet.Hostname, webPort) } - return fmt.Sprintf("%v: %v stats: %v", topoproto.TabletAliasString(eps.Alias()), webURL, eps.Stats) + return fmt.Sprintf("%v: %v stats: %v", topoproto.TabletAliasString(ts.Tablet.Alias), webURL, ts.Stats) } // MigrateServedFrom is used during vertical splits to migrate a From 06e272012822bd72e17bed68ee089c21af1d5785 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 17 May 2016 07:00:45 -0700 Subject: [PATCH 13/27] Renaming endpoint in html page. --- go/cmd/vtgate/status.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go/cmd/vtgate/status.go b/go/cmd/vtgate/status.go index c84cfd0461..c903664546 100644 --- a/go/cmd/vtgate/status.go +++ b/go/cmd/vtgate/status.go @@ -226,14 +226,14 @@ google.setOnLoadCallback(function() { - + - + {{range $i, $ts := .}} From d96111dccfbf86186bfff93d53aa26f8514eec40 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 17 May 2016 09:48:38 -0700 Subject: [PATCH 14/27] Protecting vtctl queries with a flag. The reason is these queries can be a security problem, as they use vtctld process to query vtgate and vttablet. Adding an 'enable_queries' flag to vtctl. Had to hook in servenv in cmd/vtctl, cleaner. Enabling the flag by default in our integration tests. 
--- go/cmd/vtctl/vtctl.go | 13 +--- go/vt/servenv/run.go | 7 ++ go/vt/vtctl/query.go | 107 +++++++++++++++------------ go/vt/vtctl/reparent.go | 58 ++++++++------- go/vt/wrangler/testlib/vtctl_pipe.go | 10 +++ test/utils.py | 6 +- 6 files changed, 117 insertions(+), 84 deletions(-) diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go index 87756be44b..fbfec9cfa9 100644 --- a/go/cmd/vtctl/vtctl.go +++ b/go/cmd/vtctl/vtctl.go @@ -17,6 +17,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/exit" "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtctl" @@ -51,12 +52,6 @@ func installSignalHandlers(cancel func()) { }() } -// hooks to register plug-ins after flag init - -type initFunc func() - -var initFuncs []initFunc - func main() { defer exit.RecoverAll() defer logutil.Flush() @@ -77,6 +72,8 @@ func main() { log.Warningf("cannot connect to syslog: %v", err) } + servenv.FireRunHooks() + topoServer := topo.GetServer() defer topo.CloseServers() @@ -84,10 +81,6 @@ func main() { wr := wrangler.New(logutil.NewConsoleLogger(), topoServer, tmclient.NewTabletManagerClient()) installSignalHandlers(cancel) - for _, f := range initFuncs { - f() - } - err := vtctl.RunCommand(ctx, wr, args) cancel() switch err { diff --git a/go/vt/servenv/run.go b/go/vt/servenv/run.go index 3b5f34bfa8..3967ab27bf 100644 --- a/go/vt/servenv/run.go +++ b/go/vt/servenv/run.go @@ -48,6 +48,13 @@ func Run(port int) { Close() } +// FireRunHooks fires the hooks registered by OnHook. +// Use this in a non-server to run the hooks registered +// by servenv.OnRun(). +func FireRunHooks() { + onRunHooks.Fire() +} + // Close runs any registered exit hooks in parallel. 
func Close() { onCloseHooks.Fire() diff --git a/go/vt/vtctl/query.go b/go/vt/vtctl/query.go index d861afed5a..b8e2eb0d5d 100644 --- a/go/vt/vtctl/query.go +++ b/go/vt/vtctl/query.go @@ -17,6 +17,7 @@ import ( "github.com/olekukonko/tablewriter" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" @@ -30,57 +31,67 @@ import ( const queriesGroupName = "Queries" +var ( + enableQueries = flag.Bool("enable_queries", false, "if set, allows vtgate and vttablet queries. May have security implications, as the queries will be run from this process.") +) + func init() { - addCommandGroup(queriesGroupName) + servenv.OnRun(func() { + if !*enableQueries { + return + } - // VtGate commands - addCommand(queriesGroupName, command{ - "VtGateExecute", - commandVtGateExecute, - "-server [-bind_variables ] [-connect_timeout ] [-tablet_type ] [-json] ", - "Executes the given SQL query with the provided bound variables against the vtgate server."}) - addCommand(queriesGroupName, command{ - "VtGateExecuteShards", - commandVtGateExecuteShards, - "-server -keyspace -shards ,,... [-bind_variables ] [-connect_timeout ] [-tablet_type ] [-json] ", - "Executes the given SQL query with the provided bound variables against the vtgate server. It is routed to the provided shards."}) - addCommand(queriesGroupName, command{ - "VtGateExecuteKeyspaceIds", - commandVtGateExecuteKeyspaceIds, - "-server -keyspace -keyspace_ids ,,... [-bind_variables ] [-connect_timeout ] [-tablet_type ] [-json] ", - "Executes the given SQL query with the provided bound variables against the vtgate server. 
It is routed to the shards that contain the provided keyspace ids."}) - addCommand(queriesGroupName, command{ - "VtGateSplitQuery", - commandVtGateSplitQuery, - "-server -keyspace [-split_column ] -split_count [-bind_variables ] [-connect_timeout ] ", - "Executes the SplitQuery computation for the given SQL query with the provided bound variables against the vtgate server (this is the base query for Map-Reduce workloads, and is provided here for debug / test purposes)."}) + addCommandGroup(queriesGroupName) - // VtTablet commands - addCommand(queriesGroupName, command{ - "VtTabletExecute", - commandVtTabletExecute, - "[-bind_variables ] [-connect_timeout ] [-transaction_id ] [-tablet_type ] [-json] -keyspace -shard ", - "Executes the given query on the given tablet."}) - addCommand(queriesGroupName, command{ - "VtTabletBegin", - commandVtTabletBegin, - "[-connect_timeout ] [-tablet_type ] -keyspace -shard ", - "Starts a transaction on the provided server."}) - addCommand(queriesGroupName, command{ - "VtTabletCommit", - commandVtTabletCommit, - "[-connect_timeout ] [-tablet_type ] -keyspace -shard ", - "Commits a transaction on the provided server."}) - addCommand(queriesGroupName, command{ - "VtTabletRollback", - commandVtTabletRollback, - "[-connect_timeout ] [-tablet_type ] -keyspace -shard ", - "Rollbacks a transaction on the provided server."}) - addCommand(queriesGroupName, command{ - "VtTabletStreamHealth", - commandVtTabletStreamHealth, - "[-count ] [-connect_timeout ] ", - "Executes the StreamHealth streaming query to a vttablet process. 
Will stop after getting answers."}) + // VtGate commands + addCommand(queriesGroupName, command{ + "VtGateExecute", + commandVtGateExecute, + "-server [-bind_variables ] [-connect_timeout ] [-tablet_type ] [-json] ", + "Executes the given SQL query with the provided bound variables against the vtgate server."}) + addCommand(queriesGroupName, command{ + "VtGateExecuteShards", + commandVtGateExecuteShards, + "-server -keyspace -shards ,,... [-bind_variables ] [-connect_timeout ] [-tablet_type ] [-json] ", + "Executes the given SQL query with the provided bound variables against the vtgate server. It is routed to the provided shards."}) + addCommand(queriesGroupName, command{ + "VtGateExecuteKeyspaceIds", + commandVtGateExecuteKeyspaceIds, + "-server -keyspace -keyspace_ids ,,... [-bind_variables ] [-connect_timeout ] [-tablet_type ] [-json] ", + "Executes the given SQL query with the provided bound variables against the vtgate server. It is routed to the shards that contain the provided keyspace ids."}) + addCommand(queriesGroupName, command{ + "VtGateSplitQuery", + commandVtGateSplitQuery, + "-server -keyspace [-split_column ] -split_count [-bind_variables ] [-connect_timeout ] ", + "Executes the SplitQuery computation for the given SQL query with the provided bound variables against the vtgate server (this is the base query for Map-Reduce workloads, and is provided here for debug / test purposes)."}) + + // VtTablet commands + addCommand(queriesGroupName, command{ + "VtTabletExecute", + commandVtTabletExecute, + "[-bind_variables ] [-connect_timeout ] [-transaction_id ] [-tablet_type ] [-json] -keyspace -shard ", + "Executes the given query on the given tablet."}) + addCommand(queriesGroupName, command{ + "VtTabletBegin", + commandVtTabletBegin, + "[-connect_timeout ] [-tablet_type ] -keyspace -shard ", + "Starts a transaction on the provided server."}) + addCommand(queriesGroupName, command{ + "VtTabletCommit", + commandVtTabletCommit, + "[-connect_timeout ] 
[-tablet_type ] -keyspace -shard ", + "Commits a transaction on the provided server."}) + addCommand(queriesGroupName, command{ + "VtTabletRollback", + commandVtTabletRollback, + "[-connect_timeout ] [-tablet_type ] -keyspace -shard ", + "Rollbacks a transaction on the provided server."}) + addCommand(queriesGroupName, command{ + "VtTabletStreamHealth", + commandVtTabletStreamHealth, + "[-count ] [-connect_timeout ] ", + "Executes the StreamHealth streaming query to a vttablet process. Will stop after getting answers."}) + }) } type bindvars map[string]interface{} diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index f738ea9d0c..943fa4365a 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -9,6 +9,7 @@ import ( "fmt" "time" + "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/wrangler" "golang.org/x/net/context" @@ -19,31 +20,38 @@ var ( ) func init() { - addCommand("Tablets", command{ - "DemoteMaster", - commandDemoteMaster, - "", - "Demotes a master tablet."}) - addCommand("Tablets", command{ - "ReparentTablet", - commandReparentTablet, - "", - "Reparent a tablet to the current master in the shard. This only works if the current slave position matches the last known reparent action."}) - addCommand("Shards", command{ - "InitShardMaster", - commandInitShardMaster, - "[-force] [-wait_slave_timeout=] ", - "Sets the initial master for a shard. Will make all other tablets in the shard slaves of the provided master. WARNING: this could cause data loss on an already replicating shard. PlannedReparentShard or EmergencyReparentShard should be used instead."}) - addCommand("Shards", command{ - "PlannedReparentShard", - commandPlannedReparentShard, - " ", - "Reparents the shard to the new master. 
Both old and new master need to be up and running."}) - addCommand("Shards", command{ - "EmergencyReparentShard", - commandEmergencyReparentShard, - " ", - "Reparents the shard to the new master. Assumes the old master is dead and not responsding."}) + servenv.OnRun(func() { + if *disableActiveReparents { + return + } + + addCommand("Tablets", command{ + "DemoteMaster", + commandDemoteMaster, + "", + "Demotes a master tablet."}) + addCommand("Tablets", command{ + "ReparentTablet", + commandReparentTablet, + "", + "Reparent a tablet to the current master in the shard. This only works if the current slave position matches the last known reparent action."}) + + addCommand("Shards", command{ + "InitShardMaster", + commandInitShardMaster, + "[-force] [-wait_slave_timeout=] ", + "Sets the initial master for a shard. Will make all other tablets in the shard slaves of the provided master. WARNING: this could cause data loss on an already replicating shard. PlannedReparentShard or EmergencyReparentShard should be used instead."}) + addCommand("Shards", command{ + "PlannedReparentShard", + commandPlannedReparentShard, + " ", + "Reparents the shard to the new master. Both old and new master need to be up and running."}) + addCommand("Shards", command{ + "EmergencyReparentShard", + commandEmergencyReparentShard, + " ", + "Reparents the shard to the new master. 
Assumes the old master is dead and not responsding."}) + }) } func commandDemoteMaster(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { diff --git a/go/vt/wrangler/testlib/vtctl_pipe.go b/go/vt/wrangler/testlib/vtctl_pipe.go index 17c9ceb06f..80ae4ce53b 100644 --- a/go/vt/wrangler/testlib/vtctl_pipe.go +++ b/go/vt/wrangler/testlib/vtctl_pipe.go @@ -15,6 +15,7 @@ import ( "google.golang.org/grpc" "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtctl/grpcvtctlserver" "github.com/youtube/vitess/go/vt/vtctl/vtctlclient" @@ -25,6 +26,8 @@ import ( _ "github.com/youtube/vitess/go/vt/vtctl/grpcvtctlclient" ) +var servenvInitialized = false + func init() { // make sure we use the right protocol flag.Set("vtctl_client_protocol", "grpc") @@ -40,6 +43,13 @@ type VtctlPipe struct { // NewVtctlPipe creates a new VtctlPipe based on the given topo server. 
func NewVtctlPipe(t *testing.T, ts topo.Server) *VtctlPipe { + // Register all vtctl commands + if !servenvInitialized { + flag.Set("enable_queries", "true") + servenv.FireRunHooks() + servenvInitialized = true + } + // Listen on a random port listener, err := net.Listen("tcp", ":0") if err != nil { diff --git a/test/utils.py b/test/utils.py index 471c8ed8ff..13883c6975 100644 --- a/test/utils.py +++ b/test/utils.py @@ -727,7 +727,10 @@ def run_vtctl(clargs, auto_log=False, expect_fail=False, def run_vtctl_vtctl(clargs, auto_log=False, expect_fail=False, **kwargs): - args = environment.binary_args('vtctl') + ['-log_dir', environment.vtlogroot] + args = environment.binary_args('vtctl') + [ + '-log_dir', environment.vtlogroot, + '-enable_queries', + ] args.extend(environment.topo_server().flags()) args.extend(['-tablet_manager_protocol', protocols_flavor().tablet_manager_protocol()]) @@ -1088,6 +1091,7 @@ class Vtctld(object): def start(self): args = environment.binary_args('vtctld') + [ + '-enable_queries', '-web_dir', environment.vttop + '/web/vtctld', '--log_dir', environment.vtlogroot, '--port', str(self.port), From f49b30d11a1998ef3be1239fa513a7537ef8183a Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 17 May 2016 09:52:01 -0700 Subject: [PATCH 15/27] Fixing extra checks. 
--- go/vt/vtctl/reparent.go | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index 943fa4365a..9136dc9e9c 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -55,10 +55,6 @@ func init() { } func commandDemoteMaster(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if *disableActiveReparents { - return fmt.Errorf("active reparent actions disable in this cluster") - } - if err := subFlags.Parse(args); err != nil { return err } @@ -78,10 +74,6 @@ func commandDemoteMaster(ctx context.Context, wr *wrangler.Wrangler, subFlags *f } func commandReparentTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if *disableActiveReparents { - return fmt.Errorf("active reparent actions disable in this cluster") - } - if err := subFlags.Parse(args); err != nil { return err } @@ -96,10 +88,6 @@ func commandReparentTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags } func commandInitShardMaster(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if *disableActiveReparents { - return fmt.Errorf("active reparent actions disable in this cluster") - } - force := subFlags.Bool("force", false, "will force the reparent even if the provided tablet is not a master or the shard master") waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time to wait for slaves to catch up in reparenting") if err := subFlags.Parse(args); err != nil { @@ -120,10 +108,6 @@ func commandInitShardMaster(ctx context.Context, wr *wrangler.Wrangler, subFlags } func commandPlannedReparentShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if *disableActiveReparents { - return fmt.Errorf("active reparent actions disable in this cluster") - } - waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time 
to wait for slaves to catch up in reparenting") if err := subFlags.Parse(args); err != nil { return err @@ -144,10 +128,6 @@ func commandPlannedReparentShard(ctx context.Context, wr *wrangler.Wrangler, sub } func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if *disableActiveReparents { - return fmt.Errorf("active reparent actions disable in this cluster") - } - waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time to wait for slaves to catch up in reparenting") if err := subFlags.Parse(args); err != nil { return err From 9ad31d1444732eafed71d84201d71ca1d16019f4 Mon Sep 17 00:00:00 2001 From: Rasta Date: Tue, 17 May 2016 18:59:06 +0200 Subject: [PATCH 16/27] composer.json fix - google/auth package set to the same version as defined in grpc/grpc --- composer.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/composer.json b/composer.json index 7cc4778921..7f0d60c59e 100644 --- a/composer.json +++ b/composer.json @@ -14,7 +14,7 @@ "require": { "php": ">=5.5.0", "datto/protobuf-php": "dev-master", - "google/auth": "dev-master", + "google/auth": "v0.7", "grpc/grpc": "dev-release-0_13" }, "autoload": { From 7196ce6547389b5bd4531ab59aa20110de60fdde Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 17 May 2016 11:20:57 -0700 Subject: [PATCH 17/27] Refactoring options for vttest local cluster. Using the same style as gRPC Dial options, for instance. 
--- go/vt/tabletserver/endtoend/main_test.go | 2 +- go/vt/vttest/local_cluster.go | 232 ++++++++++++++--------- go/vt/vttest/local_cluster_test.go | 4 +- 3 files changed, 147 insertions(+), 91 deletions(-) diff --git a/go/vt/tabletserver/endtoend/main_test.go b/go/vt/tabletserver/endtoend/main_test.go index 2034487b3a..1a19e5b7ec 100644 --- a/go/vt/tabletserver/endtoend/main_test.go +++ b/go/vt/tabletserver/endtoend/main_test.go @@ -35,7 +35,7 @@ func TestMain(m *testing.M) { tabletserver.Init() exitCode := func() int { - hdl, err := vttest.LaunchMySQL("vttest", testSchema, testing.Verbose()) + hdl, err := vttest.LaunchVitess(vttest.MySQLOnly("vttest"), vttest.Schema(testSchema), vttest.Verbose(testing.Verbose())) if err != nil { fmt.Fprintf(os.Stderr, "could not launch mysql: %v\n", err) return 1 diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index bc5bca851e..8629fc3121 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -17,6 +17,7 @@ import ( "os/exec" "path" "strconv" + "strings" "time" log "github.com/golang/glog" @@ -34,6 +35,112 @@ type Handle struct { dbname string } +// VitessOption is the type for generic options to be passed in to LaunchVitess. +type VitessOption struct { + beforeRun func(*Handle) error + afterRun func() +} + +// Verbose makes the underlying local_cluster verbose. +func Verbose(verbose bool) VitessOption { + return VitessOption{ + beforeRun: func(hdl *Handle) error { + if verbose { + hdl.cmd.Args = append(hdl.cmd.Args, "--verbose") + } + return nil + }, + } +} + +// SchemaDirectory is used to specify a directory to read schema from. +// It conflicts with Schema / MySQLOnly. +func SchemaDirectory(dir string) VitessOption { + return VitessOption{ + beforeRun: func(hdl *Handle) error { + if dir != "" { + hdl.cmd.Args = append(hdl.cmd.Args, "--schema_dir", dir) + } + return nil + }, + } +} + +// Topology is used to pass in the topology string. +// It conflicts with MySQLOnly. 
+func Topology(topo string) VitessOption { + return VitessOption{ + beforeRun: func(hdl *Handle) error { + hdl.cmd.Args = append(hdl.cmd.Args, "--topology", topo) + return nil + }, + } +} + +// MySQLOnly is used to launch only a mysqld instance, with the specified db name. +// Use it before Schema option. +// It is incompativle with the Topology option. +func MySQLOnly(dbName string) VitessOption { + return VitessOption{ + beforeRun: func(hdl *Handle) error { + hdl.dbname = dbName + hdl.cmd.Args = append(hdl.cmd.Args, + "--topology", fmt.Sprintf("%s/0:%s", dbName, dbName), + "--mysql_only") + return nil + }, + } +} + +// Schema is used to specify SQL commands to run at startup. +// It conflicts with SchemaDirectory +func Schema(schema string) VitessOption { + schemaDir := "" + return VitessOption{ + beforeRun: func(hdl *Handle) error { + if schema == "" { + return nil + } + if hdl.dbname == "" { + return fmt.Errorf("Schema option requires a previously passed MySQLOnly option") + } + var err error + schemaDir, err = ioutil.TempDir("", "vt") + if err != nil { + return err + } + ksDir := path.Join(schemaDir, hdl.dbname) + err = os.Mkdir(ksDir, os.ModeDir|0775) + if err != nil { + return err + } + fileName := path.Join(ksDir, "schema.sql") + f, err := os.Create(fileName) + if err != nil { + return err + } + n, err := f.WriteString(schema) + if n != len(schema) { + return errors.New("short write") + } + if err != nil { + return err + } + err = f.Close() + if err != nil { + return err + } + hdl.cmd.Args = append(hdl.cmd.Args, "--schema_dir", schemaDir) + return nil + }, + afterRun: func() { + if schemaDir != "" { + os.RemoveAll(schemaDir) + } + }, + } +} + // InitDataOptions contain the command line arguments that configure // initialization of vttest with random data. 
See the documentation of // the corresponding command line flags in py/vttest/run_local_database.py @@ -47,60 +154,34 @@ type InitDataOptions struct { nullProbability *float64 } -// LaunchVitess launches a vitess test cluster. -func LaunchVitess( - topo, schemaDir string, verbose bool, initDataOptions *InitDataOptions, -) (hdl *Handle, err error) { - hdl = &Handle{} - err = hdl.run(randomPort(), topo, schemaDir, false, verbose, initDataOptions) - if err != nil { - return nil, err +// InitData returns a VitessOption that sets the InitDataOptions parameters. +func InitData(i *InitDataOptions) VitessOption { + return VitessOption{ + beforeRun: func(hdl *Handle) error { + hdl.cmd.Args = append(hdl.cmd.Args, "--initialize_with_random_data") + if i.rngSeed != nil { + hdl.cmd.Args = append(hdl.cmd.Args, "--rng_seed", fmt.Sprintf("%v", *i.rngSeed)) + } + if i.minTableShardSize != nil { + hdl.cmd.Args = append(hdl.cmd.Args, "--min_table_shard_size", fmt.Sprintf("%v", *i.minTableShardSize)) + } + if i.maxTableShardSize != nil { + hdl.cmd.Args = append(hdl.cmd.Args, "--max_table_shard_size", fmt.Sprintf("%v", *i.maxTableShardSize)) + } + if i.nullProbability != nil { + hdl.cmd.Args = append(hdl.cmd.Args, "--null_probability", fmt.Sprintf("%v", *i.nullProbability)) + } + return nil + }, } - return hdl, nil } -// LaunchMySQL launches just a MySQL instance with the specified db name. The schema -// is specified as a string instead of a file. 
-func LaunchMySQL(dbName, schema string, verbose bool) (hdl *Handle, err error) { - hdl = &Handle{ - dbname: dbName, - } - var schemaDir string - if schema != "" { - schemaDir, err = ioutil.TempDir("", "vt") - if err != nil { - return nil, err - } - defer os.RemoveAll(schemaDir) - ksDir := path.Join(schemaDir, dbName) - err = os.Mkdir(ksDir, os.ModeDir|0775) - if err != nil { - return nil, err - } - fileName := path.Join(ksDir, "schema.sql") - f, err := os.Create(fileName) - if err != nil { - return nil, err - } - n, err := f.WriteString(schema) - if n != len(schema) { - return nil, errors.New("short write") - } - if err != nil { - return nil, err - } - err = f.Close() - if err != nil { - return nil, err - } - } - err = hdl.run( - randomPort(), - fmt.Sprintf("%s/0:%s", dbName, dbName), - schemaDir, - true, - verbose, - nil /* initDataOptions */) +// LaunchVitess launches a vitess test cluster. +func LaunchVitess( + options ...VitessOption, +) (hdl *Handle, err error) { + hdl = &Handle{} + err = hdl.run(options...) 
if err != nil { return nil, err } @@ -167,36 +248,28 @@ func (hdl *Handle) MySQLConnParams() (sqldb.ConnParams, error) { } func (hdl *Handle) run( - port int, - topo string, - schemaDir string, - mysqlOnly bool, - verbose bool, - initDataOptions *InitDataOptions, + options ...VitessOption, ) error { launcher, err := launcherPath() if err != nil { return err } - log.Infof("executing: %v --port %v --topology %v", - launcher, strconv.Itoa(port), topo) + port := randomPort() hdl.cmd = exec.Command( launcher, "--port", strconv.Itoa(port), - "--topology", topo, ) - if schemaDir != "" { - hdl.cmd.Args = append(hdl.cmd.Args, "--schema_dir", schemaDir) - } - if mysqlOnly { - hdl.cmd.Args = append(hdl.cmd.Args, "--mysql_only") - } - if verbose { - hdl.cmd.Args = append(hdl.cmd.Args, "--verbose") - } - if initDataOptions != nil { - hdl.cmd.Args = initDataOptions.appendArgs(hdl.cmd.Args) + for _, option := range options { + if err := option.beforeRun(hdl); err != nil { + return err + } + if option.afterRun != nil { + defer option.afterRun() + } } + + log.Infof("executing: %v", strings.Join(hdl.cmd.Args, " ")) + hdl.cmd.Stderr = os.Stderr stdout, err := hdl.cmd.StdoutPipe() if err != nil { @@ -219,23 +292,6 @@ func (hdl *Handle) run( return err } -func (i *InitDataOptions) appendArgs(args []string) []string { - args = append(args, "--initialize_with_random_data") - if i.rngSeed != nil { - args = append(args, "--rng_seed", fmt.Sprintf("%v", *i.rngSeed)) - } - if i.minTableShardSize != nil { - args = append(args, "--min_table_shard_size", fmt.Sprintf("%v", *i.minTableShardSize)) - } - if i.maxTableShardSize != nil { - args = append(args, "--max_table_shard_size", fmt.Sprintf("%v", *i.maxTableShardSize)) - } - if i.nullProbability != nil { - args = append(args, "--null_probability", fmt.Sprintf("%v", *i.nullProbability)) - } - return args -} - // randomPort returns a random number between 10k & 30k. 
func randomPort() int { v := rand.Int31n(20000) diff --git a/go/vt/vttest/local_cluster_test.go b/go/vt/vttest/local_cluster_test.go index 966f27e821..591d159e2a 100644 --- a/go/vt/vttest/local_cluster_test.go +++ b/go/vt/vttest/local_cluster_test.go @@ -17,7 +17,7 @@ import ( ) func TestVitess(t *testing.T) { - hdl, err := LaunchVitess("test_keyspace/0:test_keyspace", "", false, nil /* initDataOptions */) + hdl, err := LaunchVitess(Topology("test_keyspace/0:test_keyspace")) if err != nil { t.Error(err) return @@ -57,7 +57,7 @@ func TestVitess(t *testing.T) { } func TestMySQL(t *testing.T) { - hdl, err := LaunchMySQL("vttest", "create table a(id int, name varchar(128), primary key(id))", false) + hdl, err := LaunchVitess(MySQLOnly("vttest"), Schema("create table a(id int, name varchar(128), primary key(id))")) if err != nil { t.Error(err) return From 60b8981ff9df5b607267bb2b1f7f4acb2da21473 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Wed, 11 May 2016 23:16:18 -0700 Subject: [PATCH 18/27] Fix golint issues in cephbackupstorage. --- go/vt/mysqlctl/cephbackupstorage/ceph.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/go/vt/mysqlctl/cephbackupstorage/ceph.go b/go/vt/mysqlctl/cephbackupstorage/ceph.go index 1346f6c07f..3e4bfa8522 100644 --- a/go/vt/mysqlctl/cephbackupstorage/ceph.go +++ b/go/vt/mysqlctl/cephbackupstorage/ceph.go @@ -1,4 +1,4 @@ -// Package Cephbackupstorage implements the BackupStorage interface +// Package cephbackupstorage implements the BackupStorage interface // for Ceph Cloud Storage. 
package cephbackupstorage @@ -12,7 +12,7 @@ import ( "strings" "sync" - "github.com/minio/minio-go" + minio "github.com/minio/minio-go" "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" ) @@ -25,7 +25,7 @@ var ( "Path to JSON config file for ceph backup storage") ) -var StorageConfig struct { +var storageConfig struct { AccessKey string `json:"accessKey"` SecretKey string `json:"secretKey"` EndPoint string `json:"endPoint"` @@ -220,20 +220,20 @@ func (bs *CephBackupStorage) client() (*minio.Client, error) { } defer configFile.Close() jsonParser := json.NewDecoder(configFile) - if err = jsonParser.Decode(&StorageConfig); err != nil { + if err = jsonParser.Decode(&storageConfig); err != nil { return nil, fmt.Errorf("Error parsing the json file : %v", err) } - bucket = StorageConfig.Bucket - accessKey := StorageConfig.AccessKey - secretKey := StorageConfig.SecretKey - url := StorageConfig.EndPoint + bucket = storageConfig.Bucket + accessKey := storageConfig.AccessKey + secretKey := storageConfig.SecretKey + url := storageConfig.EndPoint - ceph_client, err := minio.NewV2(url, accessKey, secretKey, true) + client, err := minio.NewV2(url, accessKey, secretKey, true) if err != nil { return nil, err } - bs._client = ceph_client + bs._client = client } return bs._client, nil } From 05ef93a1591a323c871654a81606e68f85cfcde4 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Thu, 12 May 2016 17:31:12 -0700 Subject: [PATCH 19/27] mysqlctl: Make TabletDir accessible through MysqlDaemon interface. 
--- go/vt/mysqlctl/mysql_daemon.go | 7 +++++++ go/vt/mysqlctl/mysqld.go | 22 ++++++++++++++-------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 7eddc37f44..bad6f18aab 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -26,6 +26,8 @@ import ( type MysqlDaemon interface { // Cnf returns the underlying mycnf Cnf() *Mycnf + // TabletDir returns the tablet directory. + TabletDir() string // methods related to mysql running or not Start(ctx context.Context) error @@ -221,6 +223,11 @@ func (fmd *FakeMysqlDaemon) Cnf() *Mycnf { return fmd.Mycnf } +// TabletDir is part of the MysqlDaemon interface. +func (fmd *FakeMysqlDaemon) TabletDir() string { + return "" +} + // Start is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) Start(ctx context.Context) error { if fmd.Running { diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index ca41a3abe5..95cdb5e343 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -59,7 +59,7 @@ type Mysqld struct { appPool *dbconnpool.ConnectionPool replParams *sqldb.ConnParams dbaMysqlStats *stats.Timings - TabletDir string + tabletDir string SnapshotDir string // mutex protects the fields below. @@ -107,7 +107,7 @@ func NewMysqld(dbaName, appName string, config *Mycnf, dba, app, repl *sqldb.Con appPool: appPool, replParams: repl, dbaMysqlStats: dbaMysqlStats, - TabletDir: TabletDir(config.ServerID), + tabletDir: TabletDir(config.ServerID), SnapshotDir: SnapshotDir(config.ServerID), } } @@ -117,6 +117,12 @@ func (mysqld *Mysqld) Cnf() *Mycnf { return mysqld.config } +// TabletDir returns the main tablet directory. +// It's a method so it can be accessed through the MysqlDaemon interface. +func (mysqld *Mysqld) TabletDir() string { + return mysqld.tabletDir +} + // RunMysqlUpgrade will run the mysql_upgrade program on the current install. // Will not be called when mysqld is running. 
func (mysqld *Mysqld) RunMysqlUpgrade() error { @@ -482,8 +488,8 @@ func (mysqld *Mysqld) initConfig(root string) error { } func (mysqld *Mysqld) createDirs() error { - log.Infof("creating directory %s", mysqld.TabletDir) - if err := os.MkdirAll(mysqld.TabletDir, os.ModePerm); err != nil { + log.Infof("creating directory %s", mysqld.tabletDir) + if err := os.MkdirAll(mysqld.tabletDir, os.ModePerm); err != nil { return err } for _, dir := range TopLevelDirs() { @@ -509,19 +515,19 @@ func (mysqld *Mysqld) createDirs() error { // /vt/data is present, it will create the following structure: // /vt/data/vt_xxxx /vt/vt_xxxx/data -> /vt/data/vt_xxxx func (mysqld *Mysqld) createTopDir(dir string) error { - vtname := path.Base(mysqld.TabletDir) + vtname := path.Base(mysqld.tabletDir) target := path.Join(vtenv.VtDataRoot(), dir) _, err := os.Lstat(target) if err != nil { if os.IsNotExist(err) { - topdir := path.Join(mysqld.TabletDir, dir) + topdir := path.Join(mysqld.tabletDir, dir) log.Infof("creating directory %s", topdir) return os.MkdirAll(topdir, os.ModePerm) } return err } linkto := path.Join(target, vtname) - source := path.Join(mysqld.TabletDir, dir) + source := path.Join(mysqld.tabletDir, dir) log.Infof("creating directory %s", linkto) err = os.MkdirAll(linkto, os.ModePerm) if err != nil { @@ -542,7 +548,7 @@ func (mysqld *Mysqld) Teardown(ctx context.Context, force bool) error { } var removalErr error for _, dir := range TopLevelDirs() { - qdir := path.Join(mysqld.TabletDir, dir) + qdir := path.Join(mysqld.tabletDir, dir) if err := deleteTopDir(qdir); err != nil { removalErr = err } From aa7f60ec6d79b3ce386e40a3f06d884bebc7c846 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Thu, 12 May 2016 17:31:57 -0700 Subject: [PATCH 20/27] mysqlctl: Distinguish failed query from empty result in SlaveStatus(). A failed query indicates, e.g., a connection problem. An empty result for "SHOW SLAVE STATUS" means mysqld is running, but there is no slave config entered. 
--- go/vt/mysqlctl/mysql_flavor_mariadb.go | 5 +++++ go/vt/mysqlctl/mysql_flavor_mysql56.go | 5 +++++ go/vt/mysqlctl/query.go | 9 +++++++-- 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/go/vt/mysqlctl/mysql_flavor_mariadb.go b/go/vt/mysqlctl/mysql_flavor_mariadb.go index 14a6683a8f..1859cc4b61 100644 --- a/go/vt/mysqlctl/mysql_flavor_mariadb.go +++ b/go/vt/mysqlctl/mysql_flavor_mariadb.go @@ -42,6 +42,11 @@ func (flavor *mariaDB10) MasterPosition(mysqld *Mysqld) (rp replication.Position func (flavor *mariaDB10) SlaveStatus(mysqld *Mysqld) (replication.Status, error) { fields, err := mysqld.fetchSuperQueryMap("SHOW ALL SLAVES STATUS") if err != nil { + return replication.Status{}, err + } + if len(fields) == 0 { + // The query returned no data, meaning the server + // is not configured as a slave. return replication.Status{}, ErrNotSlave } status := parseSlaveStatus(fields) diff --git a/go/vt/mysqlctl/mysql_flavor_mysql56.go b/go/vt/mysqlctl/mysql_flavor_mysql56.go index c09bd1e172..02fb2c315a 100644 --- a/go/vt/mysqlctl/mysql_flavor_mysql56.go +++ b/go/vt/mysqlctl/mysql_flavor_mysql56.go @@ -43,6 +43,11 @@ func (flavor *mysql56) MasterPosition(mysqld *Mysqld) (rp replication.Position, func (flavor *mysql56) SlaveStatus(mysqld *Mysqld) (replication.Status, error) { fields, err := mysqld.fetchSuperQueryMap("SHOW SLAVE STATUS") if err != nil { + return replication.Status{}, err + } + if len(fields) == 0 { + // The query returned no data, meaning the server + // is not configured as a slave. return replication.Status{}, ErrNotSlave } status := parseSlaveStatus(fields) diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go index 93f1e924ff..d46a5595a0 100644 --- a/go/vt/mysqlctl/query.go +++ b/go/vt/mysqlctl/query.go @@ -72,13 +72,18 @@ func (mysqld *Mysqld) FetchSuperQuery(query string) (*sqltypes.Result, error) { } // fetchSuperQueryMap returns a map from column names to cell data for a query -// that should return exactly 1 row. 
+// that should return either 0 or 1 row. If the query returns zero rows, this +// will return a nil map and nil error. func (mysqld *Mysqld) fetchSuperQueryMap(query string) (map[string]string, error) { qr, err := mysqld.FetchSuperQuery(query) if err != nil { return nil, err } - if len(qr.Rows) != 1 { + if len(qr.Rows) == 0 { + // The query succeeded, but there is no data. + return nil, nil + } + if len(qr.Rows) > 1 { return nil, fmt.Errorf("query %#v returned %d rows, expected 1", query, len(qr.Rows)) } if len(qr.Fields) != len(qr.Rows[0]) { From 7109bc13396a4e3449575c43c1bc14a79e1e5261 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Thu, 12 May 2016 17:39:02 -0700 Subject: [PATCH 21/27] tabletmanager: Remember if we've been told to stop replicating. We remember the setting on the file system, because we want it to work regardless of whether mysqld is running, and because if that tablet dir is lost, the particular position at which we're stopped is also lost. Therefore, the setting to stay stopped should not persist across loss of the tablet dir (e.g. it should not be stored in topology). --- go/vt/tabletmanager/action_agent.go | 59 +++++++++++++++++++++++++- go/vt/tabletmanager/rpc_replication.go | 14 ++++-- 2 files changed, 68 insertions(+), 5 deletions(-) diff --git a/go/vt/tabletmanager/action_agent.go b/go/vt/tabletmanager/action_agent.go index 0e920f87f7..3b779da082 100644 --- a/go/vt/tabletmanager/action_agent.go +++ b/go/vt/tabletmanager/action_agent.go @@ -28,6 +28,8 @@ import ( "fmt" "io/ioutil" "net" + "os" + "path" "regexp" "sync" "time" @@ -56,8 +58,15 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) -// Query rules from keyrange -const keyrangeQueryRules string = "KeyrangeQueryRules" +const ( + // keyrangeQueryRules is the QueryRuleSource name for rules that are based + // on the key range. 
+ keyrangeQueryRules = "KeyrangeQueryRules" + + // slaveStoppedFile is the file name for the file whose existence informs + // vttablet to NOT try to repair replication. + slaveStoppedFile = "do_not_replicate" +) var ( tabletHostname = flag.String("tablet_hostname", "", "if not empty, this hostname will be assumed instead of trying to resolve it") @@ -159,6 +168,10 @@ type ActionAgent struct { // _ignoreHealthErrorExpr can be set by RPC to selectively disable certain // healthcheck errors. It should only be accessed while holding actionMutex. _ignoreHealthErrorExpr *regexp.Regexp + + // _slaveStopped remembers if we've been told to stop replicating. + // If it's nil, we'll try to check for the slaveStoppedFile. + _slaveStopped *bool } func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride { @@ -408,6 +421,48 @@ func (agent *ActionAgent) EnableUpdateStream() bool { return agent._enableUpdateStream } +func (agent *ActionAgent) slaveStopped() bool { + agent.mutex.Lock() + defer agent.mutex.Unlock() + + // If we already know the value, don't bother checking the file. + if agent._slaveStopped != nil { + return *agent._slaveStopped + } + + // If the marker file exists, we're stopped. + // Treat any read error as if the file doesn't exist. + _, err := os.Stat(path.Join(agent.MysqlDaemon.TabletDir(), slaveStoppedFile)) + slaveStopped := err == nil + agent._slaveStopped = &slaveStopped + return slaveStopped +} + +func (agent *ActionAgent) setSlaveStopped(slaveStopped bool) { + agent.mutex.Lock() + defer agent.mutex.Unlock() + + agent._slaveStopped = &slaveStopped + + // Make a best-effort attempt to persist the value across tablet restarts. + // We store a marker in the filesystem so it works regardless of whether + // mysqld is running, and so it's tied to this particular instance of the + // tablet data dir (the one that's paused at a known replication position). 
+ tabletDir := agent.MysqlDaemon.TabletDir() + if tabletDir == "" { + return + } + markerFile := path.Join(tabletDir, slaveStoppedFile) + if slaveStopped { + file, err := os.Create(markerFile) + if err == nil { + file.Close() + } + } else { + os.Remove(markerFile) + } +} + func (agent *ActionAgent) setServicesDesiredState(disallowQueryService string, enableUpdateStream bool) { agent.mutex.Lock() agent._disallowQueryService = disallowQueryService diff --git a/go/vt/tabletmanager/rpc_replication.go b/go/vt/tabletmanager/rpc_replication.go index 141ef54a53..0d15bb29cd 100644 --- a/go/vt/tabletmanager/rpc_replication.go +++ b/go/vt/tabletmanager/rpc_replication.go @@ -51,6 +51,10 @@ func (agent *ActionAgent) MasterPosition(ctx context.Context) (string, error) { // replication or not (using hook if not). // Should be called under RPCWrapLock. func (agent *ActionAgent) StopSlave(ctx context.Context) error { + // Remember that we were told to stop, so we don't try to + // restart ourselves (in replication_reporter). + agent.setSlaveStopped(true) + return mysqlctl.StopSlave(agent.MysqlDaemon, agent.hookExtraEnv()) } @@ -65,7 +69,7 @@ func (agent *ActionAgent) StopSlaveMinimum(ctx context.Context, position string, if err := agent.MysqlDaemon.WaitMasterPos(pos, waitTime); err != nil { return "", err } - if err := mysqlctl.StopSlave(agent.MysqlDaemon, agent.hookExtraEnv()); err != nil { + if err := agent.StopSlave(ctx); err != nil { return "", err } pos, err = agent.MysqlDaemon.MasterPosition() @@ -79,6 +83,8 @@ func (agent *ActionAgent) StopSlaveMinimum(ctx context.Context, position string, // replication or not (using hook if not). // Should be called under RPCWrapLock. 
func (agent *ActionAgent) StartSlave(ctx context.Context) error { + agent.setSlaveStopped(false) + if *enableSemiSync { if err := agent.enableSemiSync(false); err != nil { return err @@ -100,7 +106,7 @@ func (agent *ActionAgent) ResetReplication(ctx context.Context) error { if err != nil { return err } - + agent.setSlaveStopped(true) return agent.MysqlDaemon.ExecuteSuperQueryList(cmds) } @@ -169,6 +175,8 @@ func (agent *ActionAgent) InitSlave(ctx context.Context, parent *topodatapb.Tabl return err } + agent.setSlaveStopped(false) + // If using semi-sync, we need to enable it before connecting to master. if *enableSemiSync { if err := agent.enableSemiSync(false); err != nil { @@ -394,7 +402,7 @@ func (agent *ActionAgent) StopReplicationAndGetStatus(ctx context.Context) (*rep // no replication is running, just return what we got return replication.StatusToProto(rs), nil } - if err := mysqlctl.StopSlave(agent.MysqlDaemon, agent.hookExtraEnv()); err != nil { + if err := agent.StopSlave(ctx); err != nil { return nil, fmt.Errorf("stop slave failed: %v", err) } // now patch in the current position From ebcb5b386eb947dfc8608baf5757ea73fddaf8ae Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Thu, 12 May 2016 17:42:16 -0700 Subject: [PATCH 22/27] healthcheck: Try to heal replication automatically. This allows tablets (consisting of vttablet+mysqld) to be restarted without having to manually re-point slaves at the master and tell them to start replicating again. This is important for rollout flows, where each slave is restarted in turn. We can't just check at vttablet startup, because then we'd miss the case when only mysqld is restarted. Instead, we check at each healthcheck interval so replication becomes self-healing. The replication lag check had to be moved from mysqlctl to tabletmanager and integrated with this healing, to make sure the healing attempt is synchronized with the slave status check. 
This is also needed for Orchestrator integration, because we are responsible for giving the slave a master in the first place so Orchestrator knows which replication tree the slave belongs to. --- config/mycnf/default.cnf | 6 +- go/cmd/vttablet/health.go | 18 --- go/cmd/vttablet/vttablet.go | 1 - go/vt/mysqlctl/health.go | 66 ----------- go/vt/tabletmanager/healthcheck.go | 2 + go/vt/tabletmanager/replication_reporter.go | 109 ++++++++++++++++++ .../replication_reporter_test.go} | 51 ++++---- test/tablet.py | 4 +- test/tabletmanager.py | 30 ++++- 9 files changed, 172 insertions(+), 115 deletions(-) delete mode 100644 go/cmd/vttablet/health.go delete mode 100644 go/vt/mysqlctl/health.go create mode 100644 go/vt/tabletmanager/replication_reporter.go rename go/vt/{mysqlctl/health_test.go => tabletmanager/replication_reporter_test.go} (58%) diff --git a/config/mycnf/default.cnf b/config/mycnf/default.cnf index 8697e0a2d2..8eecb584ac 100644 --- a/config/mycnf/default.cnf +++ b/config/mycnf/default.cnf @@ -42,11 +42,9 @@ read_buffer_size = 1M read_rnd_buffer_size = 1M server-id = {{.ServerID}} skip-name-resolve -# we now need networking for replication. this is a tombstone to simpler times. -#skip_networking # all db instances should skip the slave startup - that way we can do any -# out-of-bounds checking before we restart everything - in case we need to do -# some extra work to skip mangled transactions or fudge the slave start +# additional configuration (like enabling semi-sync) before we connect to +# the master. 
skip_slave_start slave_net_timeout = 60 slave_load_tmpdir = {{.SlaveLoadTmpDir}} diff --git a/go/cmd/vttablet/health.go b/go/cmd/vttablet/health.go deleted file mode 100644 index b7772f869f..0000000000 --- a/go/cmd/vttablet/health.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "flag" - - "github.com/youtube/vitess/go/vt/health" - "github.com/youtube/vitess/go/vt/mysqlctl" -) - -var ( - enableReplicationLagCheck = flag.Bool("enable_replication_lag_check", false, "will register the mysql health check module that directly calls mysql") -) - -func registerHealthReporter(mysqld *mysqlctl.Mysqld) { - if *enableReplicationLagCheck { - health.DefaultAggregator.Register("replication_reporter", mysqlctl.MySQLReplicationLag(mysqld)) - } -} diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index 1347c1c116..b577188e19 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -117,7 +117,6 @@ func main() { // done by the agent has the right reporter) mysqld := mysqlctl.NewMysqld("Dba", "App", mycnf, &dbcfgs.Dba, &dbcfgs.App.ConnParams, &dbcfgs.Repl) servenv.OnClose(mysqld.Close) - registerHealthReporter(mysqld) // Depends on both query and updateStream. gRPCPort := int32(0) diff --git a/go/vt/mysqlctl/health.go b/go/vt/mysqlctl/health.go deleted file mode 100644 index 9145ad0b65..0000000000 --- a/go/vt/mysqlctl/health.go +++ /dev/null @@ -1,66 +0,0 @@ -package mysqlctl - -import ( - "html/template" - "time" - - "github.com/youtube/vitess/go/vt/health" -) - -// mysqlReplicationLag implements health.Reporter -type mysqlReplicationLag struct { - // set at construction time - mysqld MysqlDaemon - now func() time.Time - - // store the last time we successfully got the lag, so if we - // can't get the lag any more, we can extrapolate. 
- lastKnownValue time.Duration - lastKnownTime time.Time -} - -// Report is part of the health.Reporter interface -func (mrl *mysqlReplicationLag) Report(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) { - if !isSlaveType { - return 0, nil - } - - slaveStatus, err := mrl.mysqld.SlaveStatus() - if err != nil { - // mysqld is not running. We can't report healthy. - return 0, err - } - if !slaveStatus.SlaveRunning() { - // mysqld is running, but slave is not replicating (most likely, - // replication has been stopped). See if we can extrapolate. - if mrl.lastKnownTime.IsZero() { - // we can't. - return 0, health.ErrSlaveNotRunning - } - - // we can extrapolate with the worst possible - // value (that is we made no replication - // progress since last time, and just fell more behind). - elapsed := mrl.now().Sub(mrl.lastKnownTime) - return elapsed + mrl.lastKnownValue, nil - } - - // we got a real value, save it. - mrl.lastKnownValue = time.Duration(slaveStatus.SecondsBehindMaster) * time.Second - mrl.lastKnownTime = mrl.now() - return mrl.lastKnownValue, nil -} - -// HTMLName is part of the health.Reporter interface -func (mrl *mysqlReplicationLag) HTMLName() template.HTML { - return template.HTML("MySQLReplicationLag") -} - -// MySQLReplicationLag lag returns a reporter that reports the MySQL -// replication lag. -func MySQLReplicationLag(mysqld MysqlDaemon) health.Reporter { - return &mysqlReplicationLag{ - mysqld: mysqld, - now: time.Now, - } -} diff --git a/go/vt/tabletmanager/healthcheck.go b/go/vt/tabletmanager/healthcheck.go index 5f0d9c636a..6b825b43f6 100644 --- a/go/vt/tabletmanager/healthcheck.go +++ b/go/vt/tabletmanager/healthcheck.go @@ -128,6 +128,8 @@ func ConfigHTML() template.HTML { // and configure the healthcheck shutdown. It is only run by NewActionAgent // for real vttablet agents (not by tests, nor vtcombo). 
func (agent *ActionAgent) initHealthCheck() { + registerReplicationReporter(agent) + log.Infof("Starting periodic health check every %v", *healthCheckInterval) t := timer.NewTimer(*healthCheckInterval) servenv.OnTermSync(func() { diff --git a/go/vt/tabletmanager/replication_reporter.go b/go/vt/tabletmanager/replication_reporter.go new file mode 100644 index 0000000000..4a3f8cbf6a --- /dev/null +++ b/go/vt/tabletmanager/replication_reporter.go @@ -0,0 +1,109 @@ +package tabletmanager + +import ( + "flag" + "fmt" + "html/template" + "time" + + log "github.com/golang/glog" + "golang.org/x/net/context" + + "github.com/youtube/vitess/go/vt/health" +) + +var ( + enableReplicationReporter = flag.Bool("enable_replication_reporter", false, "Register the health check module that monitors MySQL replication") +) + +// replicationReporter implements health.Reporter +type replicationReporter struct { + // set at construction time + agent *ActionAgent + now func() time.Time + + // store the last time we successfully got the lag, so if we + // can't get the lag any more, we can extrapolate. + lastKnownValue time.Duration + lastKnownTime time.Time +} + +// Report is part of the health.Reporter interface +func (r *replicationReporter) Report(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) { + if !isSlaveType { + return 0, nil + } + + status, statusErr := r.agent.MysqlDaemon.SlaveStatus() + if statusErr == nil && !status.SlaveSQLRunning && !status.SlaveIORunning { + // Slave is configured, but not running. + // Both SQL and IO threads are stopped, so it's probably either + // stopped on purpose, or stopped because of a mysqld restart. + if !r.agent.slaveStopped() { + // As far as we've been told, it isn't stopped on purpose, + // so let's try to start it. + log.Infof("Slave is stopped. 
Trying to reconnect to master...") + ctx, cancel := context.WithTimeout(r.agent.batchCtx, 5*time.Second) + if err := repairReplication(ctx, r.agent); err != nil { + log.Infof("Failed to reconnect to master: %v", err) + } + cancel() + // Check status again. + status, statusErr = r.agent.MysqlDaemon.SlaveStatus() + } + } + if statusErr != nil { + // mysqld is not running or slave is not configured. + // We can't report healthy. + return 0, statusErr + } + if !status.SlaveRunning() { + // mysqld is running, but slave is not replicating (most likely, + // replication has been stopped). See if we can extrapolate. + if r.lastKnownTime.IsZero() { + // we can't. + return 0, health.ErrSlaveNotRunning + } + + // we can extrapolate with the worst possible + // value (that is we made no replication + // progress since last time, and just fell more behind). + elapsed := r.now().Sub(r.lastKnownTime) + return elapsed + r.lastKnownValue, nil + } + + // we got a real value, save it. + r.lastKnownValue = time.Duration(status.SecondsBehindMaster) * time.Second + r.lastKnownTime = r.now() + return r.lastKnownValue, nil +} + +// HTMLName is part of the health.Reporter interface +func (r *replicationReporter) HTMLName() template.HTML { + return template.HTML("MySQLReplicationLag") +} + +// repairReplication tries to connect this slave to whoever is +// the current master of the shard, and start replicating. 
+func repairReplication(ctx context.Context, agent *ActionAgent) error { + ts := agent.TopoServer + tablet := agent.Tablet() + si, err := ts.GetShard(ctx, tablet.Keyspace, tablet.Shard) + if err != nil { + return err + } + if !si.HasMaster() { + return fmt.Errorf("no master tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) + } + return agent.SetMaster(ctx, si.MasterAlias, 0, true) +} + +func registerReplicationReporter(agent *ActionAgent) { + if *enableReplicationReporter { + health.DefaultAggregator.Register("replication_reporter", + &replicationReporter{ + agent: agent, + now: time.Now, + }) + } +} diff --git a/go/vt/mysqlctl/health_test.go b/go/vt/tabletmanager/replication_reporter_test.go similarity index 58% rename from go/vt/mysqlctl/health_test.go rename to go/vt/tabletmanager/replication_reporter_test.go index f400fb24a5..894386b336 100644 --- a/go/vt/mysqlctl/health_test.go +++ b/go/vt/tabletmanager/replication_reporter_test.go @@ -1,4 +1,4 @@ -package mysqlctl +package tabletmanager import ( "errors" @@ -6,50 +6,54 @@ import ( "time" "github.com/youtube/vitess/go/vt/health" + "github.com/youtube/vitess/go/vt/mysqlctl" ) func TestBasicMySQLReplicationLag(t *testing.T) { - mysqld := NewFakeMysqlDaemon(nil) + mysqld := mysqlctl.NewFakeMysqlDaemon(nil) mysqld.Replicating = true mysqld.SecondsBehindMaster = 10 + slaveStopped := true - lag := &mysqlReplicationLag{ - mysqld: mysqld, - now: time.Now, + rep := &replicationReporter{ + agent: &ActionAgent{MysqlDaemon: mysqld, _slaveStopped: &slaveStopped}, + now: time.Now, } - dur, err := lag.Report(true, true) + dur, err := rep.Report(true, true) if err != nil || dur != 10*time.Second { t.Fatalf("wrong Report result: %v %v", dur, err) } } func TestNoKnownMySQLReplicationLag(t *testing.T) { - mysqld := NewFakeMysqlDaemon(nil) + mysqld := mysqlctl.NewFakeMysqlDaemon(nil) mysqld.Replicating = false + slaveStopped := true - lag := &mysqlReplicationLag{ - mysqld: mysqld, - now: time.Now, + rep := 
&replicationReporter{ + agent: &ActionAgent{MysqlDaemon: mysqld, _slaveStopped: &slaveStopped}, + now: time.Now, } - dur, err := lag.Report(true, true) + dur, err := rep.Report(true, true) if err != health.ErrSlaveNotRunning { t.Fatalf("wrong Report result: %v %v", dur, err) } } func TestExtrapolatedMySQLReplicationLag(t *testing.T) { - mysqld := NewFakeMysqlDaemon(nil) + mysqld := mysqlctl.NewFakeMysqlDaemon(nil) mysqld.Replicating = true mysqld.SecondsBehindMaster = 10 + slaveStopped := true now := time.Now() - lag := &mysqlReplicationLag{ - mysqld: mysqld, - now: func() time.Time { return now }, + rep := &replicationReporter{ + agent: &ActionAgent{MysqlDaemon: mysqld, _slaveStopped: &slaveStopped}, + now: func() time.Time { return now }, } // seed the last known value with a good value - dur, err := lag.Report(true, true) + dur, err := rep.Report(true, true) if err != nil || dur != 10*time.Second { t.Fatalf("wrong Report result: %v %v", dur, err) } @@ -58,25 +62,26 @@ func TestExtrapolatedMySQLReplicationLag(t *testing.T) { // we should get 20 more seconds in lag now = now.Add(20 * time.Second) mysqld.Replicating = false - dur, err = lag.Report(true, true) + dur, err = rep.Report(true, true) if err != nil || dur != 30*time.Second { t.Fatalf("wrong Report result: %v %v", dur, err) } } func TestNoExtrapolatedMySQLReplicationLag(t *testing.T) { - mysqld := NewFakeMysqlDaemon(nil) + mysqld := mysqlctl.NewFakeMysqlDaemon(nil) mysqld.Replicating = true mysqld.SecondsBehindMaster = 10 + slaveStopped := true now := time.Now() - lag := &mysqlReplicationLag{ - mysqld: mysqld, - now: func() time.Time { return now }, + rep := &replicationReporter{ + agent: &ActionAgent{MysqlDaemon: mysqld, _slaveStopped: &slaveStopped}, + now: func() time.Time { return now }, } // seed the last known value with a good value - dur, err := lag.Report(true, true) + dur, err := rep.Report(true, true) if err != nil || dur != 10*time.Second { t.Fatalf("wrong Report result: %v %v", dur, err) } @@ 
-84,7 +89,7 @@ func TestNoExtrapolatedMySQLReplicationLag(t *testing.T) { // now 20 seconds later, mysqld is down now = now.Add(20 * time.Second) mysqld.SlaveStatusError = errors.New("mysql is down") - dur, err = lag.Report(true, true) + dur, err = rep.Report(true, true) if err != mysqld.SlaveStatusError { t.Fatalf("wrong Report error: %v", err) } diff --git a/test/tablet.py b/test/tablet.py index 9500fb233d..f5e51a7598 100644 --- a/test/tablet.py +++ b/test/tablet.py @@ -429,9 +429,9 @@ class Tablet(object): args.extend(['-binlog_player_healthcheck_retry_delay', '1s']) args.extend(['-binlog_player_retry_delay', '1s']) args.extend(['-pid_file', os.path.join(self.tablet_dir, 'vttablet.pid')]) - # always enable_replication_lag_check with somewhat short values for tests + # always enable_replication_reporter with somewhat short values for tests args.extend(['-health_check_interval', '2s']) - args.extend(['-enable_replication_lag_check']) + args.extend(['-enable_replication_reporter']) args.extend(['-degraded_threshold', '5s']) if enable_semi_sync: args.append('-enable_semi_sync') diff --git a/test/tabletmanager.py b/test/tabletmanager.py index 3a5693e8cc..bbcea7927d 100755 --- a/test/tabletmanager.py +++ b/test/tabletmanager.py @@ -374,6 +374,24 @@ class TestTabletManager(unittest.TestCase): self.assertEqual(ti['type'], topodata_pb2.MASTER, 'unexpected master type: %s' % ti['type']) + # stop replication at the mysql level. + tablet_62044.mquery('', 'stop slave') + # vttablet replication_reporter should restart it. + utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias]) + # insert something on the master and wait for it on the slave. 
+ tablet_62344.mquery('vt_test_keyspace', [ + 'create table repl_test_table (id int)', + 'insert into repl_test_table values (123)'], write=True) + timeout = 10.0 + while True: + result = tablet_62044.mquery('vt_test_keyspace', + 'select * from repl_test_table') + if result: + self.assertEqual(result[0][0], 123L) + break + timeout = utils.wait_step( + 'slave replication repaired by replication_reporter', timeout) + # stop replication, make sure we don't go unhealthy. # (we have a baseline as well, so the time should be good). utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias]) @@ -530,6 +548,16 @@ class TestTabletManager(unittest.TestCase): t.wait_for_vttablet_state('NOT_SERVING') self.check_healthz(t, False) + # Tell slave to not try to repair replication in healthcheck. + # The StopSlave will ultimately fail because mysqld is not running, + # But vttablet should remember that it's not supposed to fix replication. + utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias], expect_fail=True) + + # The above notice to not fix replication should survive tablet restart. + tablet_62044.kill_vttablet() + tablet_62044.start_vttablet(wait_for_state='NOT_SERVING', + full_mycnf_args=True, include_mysql_port=False) + # restart mysqld start_procs = [ tablet_62344.start_mysql(), @@ -558,7 +586,7 @@ class TestTabletManager(unittest.TestCase): # restart replication, wait until health check goes small # (a value of zero is default and won't be in structure) - tablet_62044.mquery('', ['START SLAVE']) + utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias]) timeout = 10 while True: utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias], From 4cbb02c96089d7ffa027d48f0e877df7863416f5 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Thu, 12 May 2016 17:57:21 -0700 Subject: [PATCH 23/27] Enable replication reporter in examples. 
--- examples/kubernetes/vttablet-pod-benchmarking-template.yaml | 1 + examples/kubernetes/vttablet-pod-template.yaml | 1 + examples/local/vttablet-up.sh | 1 + 3 files changed, 3 insertions(+) diff --git a/examples/kubernetes/vttablet-pod-benchmarking-template.yaml b/examples/kubernetes/vttablet-pod-benchmarking-template.yaml index af1a0feafe..58008a8245 100644 --- a/examples/kubernetes/vttablet-pod-benchmarking-template.yaml +++ b/examples/kubernetes/vttablet-pod-benchmarking-template.yaml @@ -72,6 +72,7 @@ spec: -queryserver-config-schema-reload-time 1 -queryserver-config-pool-size 100 -enable-rowcache + -enable_replication_reporter -rowcache-bin /usr/bin/memcached -rowcache-socket $VTDATAROOT/{{tablet_subdir}}/memcache.sock" vitess env: diff --git a/examples/kubernetes/vttablet-pod-template.yaml b/examples/kubernetes/vttablet-pod-template.yaml index 535c4d1acb..aba3296f9c 100644 --- a/examples/kubernetes/vttablet-pod-template.yaml +++ b/examples/kubernetes/vttablet-pod-template.yaml @@ -77,6 +77,7 @@ spec: -db-config-filtered-charset utf8 -enable-rowcache -enable_semi_sync + -enable_replication_reporter -rowcache-bin /usr/bin/memcached -rowcache-socket $VTDATAROOT/{{tablet_subdir}}/memcache.sock -restore_from_backup {{backup_flags}}" vitess diff --git a/examples/local/vttablet-up.sh b/examples/local/vttablet-up.sh index 627a7885b1..cffa6716d1 100755 --- a/examples/local/vttablet-up.sh +++ b/examples/local/vttablet-up.sh @@ -109,6 +109,7 @@ for uid_index in $uids; do -health_check_interval 5s \ -enable-rowcache \ -enable_semi_sync \ + -enable_replication_reporter \ -rowcache-bin $memcached_path \ -rowcache-socket $VTDATAROOT/$tablet_dir/memcache.sock \ -backup_storage_implementation file \ From 5edd9f9f9d10077d31e491fa9453eba0d3e65ec9 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 17 May 2016 14:16:19 -0700 Subject: [PATCH 24/27] Fixing comments. 
--- go/vt/vttest/local_cluster.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index 8629fc3121..ab597dbfb8 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -37,8 +37,13 @@ type Handle struct { // VitessOption is the type for generic options to be passed in to LaunchVitess. type VitessOption struct { + // beforeRun is executed before we start run_local_database.py. beforeRun func(*Handle) error - afterRun func() + + // afterRun is executed after run_local_database.py has been + // started and is running (and is done reading and applying + // the schema). + afterRun func() } // Verbose makes the underlying local_cluster verbose. @@ -54,7 +59,7 @@ func Verbose(verbose bool) VitessOption { } // SchemaDirectory is used to specify a directory to read schema from. -// It conflicts with Schema / MySQLOnly. +// It cannot be used at the same time as Schema. func SchemaDirectory(dir string) VitessOption { return VitessOption{ beforeRun: func(hdl *Handle) error { @@ -67,7 +72,7 @@ func SchemaDirectory(dir string) VitessOption { } // Topology is used to pass in the topology string. -// It conflicts with MySQLOnly. +// It cannot be used at the same time as MySQLOnly. func Topology(topo string) VitessOption { return VitessOption{ beforeRun: func(hdl *Handle) error { @@ -79,7 +84,7 @@ func Topology(topo string) VitessOption { // MySQLOnly is used to launch only a mysqld instance, with the specified db name. // Use it before Schema option. -// It is incompativle with the Topology option. +// It cannot be used at the same time as Topology. func MySQLOnly(dbName string) VitessOption { return VitessOption{ beforeRun: func(hdl *Handle) error { @@ -198,7 +203,7 @@ func (hdl *Handle) TearDown() error { } // MySQLConnParams builds the MySQL connection params. -// It's valid only if you used LaunchMySQL. +// It's valid only if you used MySQLOnly option. 
func (hdl *Handle) MySQLConnParams() (sqldb.ConnParams, error) { params := sqldb.ConnParams{ Charset: "utf8", From 6df4754ae206a1954fe757d3bdc420f35d3ee388 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 17 May 2016 13:31:32 -0700 Subject: [PATCH 25/27] Reworking cleaner code. To be easier to use, and simpler. Adding a check for the ChangeSlaveType to only do it if the original type is what we expect. For the worker tests to pass, I then need a better TabletManagerClient that actually does something with 'ChangeType', so adding that too. --- go/vt/worker/split_diff_test.go | 3 +- go/vt/worker/topo_utils.go | 2 +- go/vt/worker/utils_test.go | 28 +++++ go/vt/worker/vertical_split_diff_test.go | 3 +- go/vt/wrangler/cleaner.go | 153 +++++++---------------- 5 files changed, 77 insertions(+), 112 deletions(-) diff --git a/go/vt/worker/split_diff_test.go b/go/vt/worker/split_diff_test.go index 3e4d12b845..59914c2827 100644 --- a/go/vt/worker/split_diff_test.go +++ b/go/vt/worker/split_diff_test.go @@ -13,7 +13,6 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/vttest/fakesqldb" @@ -284,7 +283,7 @@ func testSplitDiff(t *testing.T, v3 bool) { // We need to use FakeTabletManagerClient because we don't // have a good way to fake the binlog player yet, which is // necessary for synchronizing replication. 
- wr := wrangler.New(logutil.NewConsoleLogger(), ts, faketmclient.NewFakeTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, newFakeTMCTopo(ts)) if err := runCommand(t, wi, wr, args); err != nil { t.Fatal(err) } diff --git a/go/vt/worker/topo_utils.go b/go/vt/worker/topo_utils.go index 59ad55c6c4..d44424e5d4 100644 --- a/go/vt/worker/topo_utils.go +++ b/go/vt/worker/topo_utils.go @@ -124,7 +124,7 @@ func FindWorkerTablet(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrang // Record a clean-up action to take the tablet back to rdonly. // We will alter this one later on and let the tablet go back to // 'spare' if we have stopped replication for too long on it. - wrangler.RecordChangeSlaveTypeAction(cleaner, tabletAlias, topodatapb.TabletType_RDONLY) + wrangler.RecordChangeSlaveTypeAction(cleaner, tabletAlias, topodatapb.TabletType_WORKER, topodatapb.TabletType_RDONLY) return tabletAlias, nil } diff --git a/go/vt/worker/utils_test.go b/go/vt/worker/utils_test.go index 5a1a84100e..e2f908a727 100644 --- a/go/vt/worker/utils_test.go +++ b/go/vt/worker/utils_test.go @@ -5,11 +5,17 @@ import ( "strconv" "testing" + "golang.org/x/net/context" + "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/dbconnpool" + "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" + "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/wrangler" querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) // This file contains common test helper. 
@@ -72,3 +78,25 @@ func sourceRdonlyFactory(t *testing.T, dbAndTableName string, min, max int) func }) return f.getFactory() } + +// This FakeTabletManagerClient extension will implement ChangeType using the topo server +type FakeTMCTopo struct { + tmclient.TabletManagerClient + server topo.Server +} + +func newFakeTMCTopo(ts topo.Server) tmclient.TabletManagerClient { + return &FakeTMCTopo{ + TabletManagerClient: faketmclient.NewFakeTabletManagerClient(), + server: ts, + } +} + +// ChangeType is part of the tmclient.TabletManagerClient interface. +func (client *FakeTMCTopo) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType) error { + _, err := client.server.UpdateTabletFields(ctx, tablet.Alias, func(t *topodatapb.Tablet) error { + t.Type = dbType + return nil + }) + return err +} diff --git a/go/vt/worker/vertical_split_diff_test.go b/go/vt/worker/vertical_split_diff_test.go index d1aa8c427a..f679d76e4e 100644 --- a/go/vt/worker/vertical_split_diff_test.go +++ b/go/vt/worker/vertical_split_diff_test.go @@ -13,7 +13,6 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/vttest/fakesqldb" @@ -176,7 +175,7 @@ func TestVerticalSplitDiff(t *testing.T) { // We need to use FakeTabletManagerClient because we don't // have a good way to fake the binlog player yet, which is // necessary for synchronizing replication. 
- wr := wrangler.New(logutil.NewConsoleLogger(), ts, faketmclient.NewFakeTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, newFakeTMCTopo(ts)) if err := runCommand(t, wi, wr, args); err != nil { t.Fatal(err) } diff --git a/go/vt/wrangler/cleaner.go b/go/vt/wrangler/cleaner.go index 19ac405238..446a9cac2f 100644 --- a/go/vt/wrangler/cleaner.go +++ b/go/vt/wrangler/cleaner.go @@ -21,7 +21,7 @@ import ( // action cleanup steps, and execute them at the end in reverse // order, with various guarantees. type Cleaner struct { - // following members protected by lock + // mu protects the following members mu sync.Mutex actions []cleanerActionReference } @@ -30,16 +30,14 @@ type Cleaner struct { type cleanerActionReference struct { name string target string - action CleanerAction + action CleanerFunction } -// CleanerAction is the interface that clean-up actions need to implement -type CleanerAction interface { - CleanUp(context.Context, *Wrangler) error -} +// CleanerFunction is the interface that clean-up actions need to implement +type CleanerFunction func(context.Context, *Wrangler) error // Record will add a cleaning action to the list -func (cleaner *Cleaner) Record(name, target string, action CleanerAction) { +func (cleaner *Cleaner) Record(name, target string, action CleanerFunction) { cleaner.mu.Lock() cleaner.actions = append(cleaner.actions, cleanerActionReference{ name: name, @@ -76,10 +74,10 @@ func (cleaner *Cleaner) CleanUp(wr *Wrangler) error { actionMap[actionReference.target] = helper } if helper.err != nil { - wr.Logger().Warningf("previous action failed on target %v, no running %v", actionReference.target, actionReference.name) + wr.Logger().Warningf("previous action failed on target %v, not running %v", actionReference.target, actionReference.name) continue } - err := actionReference.action.CleanUp(ctx, wr) + err := actionReference.action(ctx, wr) if err != nil { helper.err = err rec.RecordError(err) @@ -93,19 +91,6 @@ func 
(cleaner *Cleaner) CleanUp(wr *Wrangler) error { return rec.Error() } -// GetActionByName returns the first action in the list with the given -// name and target -func (cleaner *Cleaner) GetActionByName(name, target string) (CleanerAction, error) { - cleaner.mu.Lock() - defer cleaner.mu.Unlock() - for _, action := range cleaner.actions { - if action.name == name && action.target == target { - return action.action, nil - } - } - return nil, topo.ErrNoNode -} - // RemoveActionByName removes an action from the cleaner list func (cleaner *Cleaner) RemoveActionByName(name, target string) error { cleaner.mu.Lock() @@ -127,133 +112,87 @@ func (cleaner *Cleaner) RemoveActionByName(name, target string) error { } // -// ChangeSlaveTypeAction CleanerAction +// ChangeSlaveTypeAction CleanerFunction // -// ChangeSlaveTypeAction will change a server type to another type -type ChangeSlaveTypeAction struct { - TabletAlias *topodatapb.TabletAlias - TabletType topodatapb.TabletType -} - // ChangeSlaveTypeActionName is the name of the action to change a slave type // (can be used to find such an action by name) const ChangeSlaveTypeActionName = "ChangeSlaveTypeAction" // RecordChangeSlaveTypeAction records a new ChangeSlaveTypeAction // into the specified Cleaner -func RecordChangeSlaveTypeAction(cleaner *Cleaner, tabletAlias *topodatapb.TabletAlias, tabletType topodatapb.TabletType) { - cleaner.Record(ChangeSlaveTypeActionName, topoproto.TabletAliasString(tabletAlias), &ChangeSlaveTypeAction{ - TabletAlias: tabletAlias, - TabletType: tabletType, +func RecordChangeSlaveTypeAction(cleaner *Cleaner, tabletAlias *topodatapb.TabletAlias, from topodatapb.TabletType, to topodatapb.TabletType) { + cleaner.Record(ChangeSlaveTypeActionName, topoproto.TabletAliasString(tabletAlias), func(ctx context.Context, wr *Wrangler) error { + ti, err := wr.ts.GetTablet(ctx, tabletAlias) + if err != nil { + return err + } + if from != topodatapb.TabletType_UNKNOWN { + if ti.Type != from { + return 
fmt.Errorf("tablet %v is not of the right type (got %v expected %v), not changing it to %v", topoproto.TabletAliasString(tabletAlias), ti.Type, from, to) + } + } + if !topo.IsTrivialTypeChange(ti.Type, to) { + return fmt.Errorf("tablet %v type change %v -> %v is not an allowed transition for ChangeSlaveType", topoproto.TabletAliasString(tabletAlias), ti.Type, to) + } + + // ask the tablet to make the change + return wr.tmc.ChangeType(ctx, ti.Tablet, to) }) } -// FindChangeSlaveTypeActionByTarget finds the first action for the target -func FindChangeSlaveTypeActionByTarget(cleaner *Cleaner, tabletAlias *topodatapb.TabletAlias) (*ChangeSlaveTypeAction, error) { - action, err := cleaner.GetActionByName(ChangeSlaveTypeActionName, topoproto.TabletAliasString(tabletAlias)) - if err != nil { - return nil, err - } - result, ok := action.(*ChangeSlaveTypeAction) - if !ok { - return nil, fmt.Errorf("Action with wrong type: %v", action) - } - return result, nil -} - -// CleanUp is part of CleanerAction interface. -func (csta ChangeSlaveTypeAction) CleanUp(ctx context.Context, wr *Wrangler) error { - return wr.ChangeSlaveType(ctx, csta.TabletAlias, csta.TabletType) -} - // -// TabletTagAction CleanerAction +// TabletTagAction CleanerFunction // -// TabletTagAction will add / remove a tag to a tablet. If Value is -// empty, will remove the tag. 
-type TabletTagAction struct { - TabletAlias *topodatapb.TabletAlias - Name string - Value string -} - // TabletTagActionName is the name of the Tag action const TabletTagActionName = "TabletTagAction" -// RecordTabletTagAction records a new TabletTagAction +// RecordTabletTagAction records a new action to set / remove a tag // into the specified Cleaner func RecordTabletTagAction(cleaner *Cleaner, tabletAlias *topodatapb.TabletAlias, name, value string) { - cleaner.Record(TabletTagActionName, topoproto.TabletAliasString(tabletAlias), &TabletTagAction{ - TabletAlias: tabletAlias, - Name: name, - Value: value, + cleaner.Record(TabletTagActionName, topoproto.TabletAliasString(tabletAlias), func(ctx context.Context, wr *Wrangler) error { + _, err := wr.TopoServer().UpdateTabletFields(ctx, tabletAlias, func(tablet *topodatapb.Tablet) error { + if tablet.Tags == nil { + tablet.Tags = make(map[string]string) + } + if value != "" { + tablet.Tags[name] = value + } else { + delete(tablet.Tags, name) + } + return nil + }) + return err }) } -// CleanUp is part of CleanerAction interface. 
-func (tta TabletTagAction) CleanUp(ctx context.Context, wr *Wrangler) error { - _, err := wr.TopoServer().UpdateTabletFields(ctx, tta.TabletAlias, func(tablet *topodatapb.Tablet) error { - if tablet.Tags == nil { - tablet.Tags = make(map[string]string) - } - if tta.Value != "" { - tablet.Tags[tta.Name] = tta.Value - } else { - delete(tablet.Tags, tta.Name) - } - return nil - }) - return err -} - // // StartSlaveAction CleanerAction // -// StartSlaveAction will restart binlog replication on a server -type StartSlaveAction struct { - Tablet *topodatapb.Tablet -} - // StartSlaveActionName is the name of the slave start action const StartSlaveActionName = "StartSlaveAction" -// RecordStartSlaveAction records a new StartSlaveAction +// RecordStartSlaveAction records a new action to restart binlog replication on a server // into the specified Cleaner func RecordStartSlaveAction(cleaner *Cleaner, tablet *topodatapb.Tablet) { - cleaner.Record(StartSlaveActionName, topoproto.TabletAliasString(tablet.Alias), &StartSlaveAction{ - Tablet: tablet, + cleaner.Record(StartSlaveActionName, topoproto.TabletAliasString(tablet.Alias), func(ctx context.Context, wr *Wrangler) error { + return wr.TabletManagerClient().StartSlave(ctx, tablet) }) } -// CleanUp is part of CleanerAction interface. 
-func (sba StartSlaveAction) CleanUp(ctx context.Context, wr *Wrangler) error { - return wr.TabletManagerClient().StartSlave(ctx, sba.Tablet) -} - // // StartBlpAction CleanerAction // -// StartBlpAction will restart binlog replication on a server -type StartBlpAction struct { - Tablet *topodatapb.Tablet -} - // StartBlpActionName is the name of the action to start binlog player const StartBlpActionName = "StartBlpAction" -// RecordStartBlpAction records a new StartBlpAction +// RecordStartBlpAction records an action to restart binlog replication on a server // into the specified Cleaner func RecordStartBlpAction(cleaner *Cleaner, tablet *topodatapb.Tablet) { - cleaner.Record(StartBlpActionName, topoproto.TabletAliasString(tablet.Alias), &StartBlpAction{ - Tablet: tablet, + cleaner.Record(StartBlpActionName, topoproto.TabletAliasString(tablet.Alias), func(ctx context.Context, wr *Wrangler) error { + return wr.TabletManagerClient().StartBlp(ctx, tablet) }) } - -// CleanUp is part of CleanerAction interface. -func (sba StartBlpAction) CleanUp(ctx context.Context, wr *Wrangler) error { - return wr.TabletManagerClient().StartBlp(ctx, sba.Tablet) -} From 8229a5ee3acec978156b08614482a91b8bee805b Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 17 May 2016 13:55:59 -0700 Subject: [PATCH 26/27] Fixing comment. --- go/vt/worker/utils_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/go/vt/worker/utils_test.go b/go/vt/worker/utils_test.go index e2f908a727..6271da41b2 100644 --- a/go/vt/worker/utils_test.go +++ b/go/vt/worker/utils_test.go @@ -79,21 +79,22 @@ func sourceRdonlyFactory(t *testing.T, dbAndTableName string, min, max int) func return f.getFactory() } -// This FakeTabletManagerClient extension will implement ChangeType using the topo server -type FakeTMCTopo struct { +// fakeTMCTopo is a FakeTabletManagerClient extension that implements ChangeType +// using the provided topo server. 
+type fakeTMCTopo struct { tmclient.TabletManagerClient server topo.Server } func newFakeTMCTopo(ts topo.Server) tmclient.TabletManagerClient { - return &FakeTMCTopo{ + return &fakeTMCTopo{ TabletManagerClient: faketmclient.NewFakeTabletManagerClient(), server: ts, } } // ChangeType is part of the tmclient.TabletManagerClient interface. -func (client *FakeTMCTopo) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType) error { +func (client *fakeTMCTopo) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType) error { _, err := client.server.UpdateTabletFields(ctx, tablet.Alias, func(t *topodatapb.Tablet) error { t.Type = dbType return nil From d4d7a08248d1b91a094eb84636dacffc7afac448 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Tue, 17 May 2016 16:52:06 -0700 Subject: [PATCH 27/27] Update Concepts doc. --- doc/Concepts.md | 198 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 140 insertions(+), 58 deletions(-) diff --git a/doc/Concepts.md b/doc/Concepts.md index 73a6eb05ea..33b8c0e5fa 100644 --- a/doc/Concepts.md +++ b/doc/Concepts.md @@ -2,7 +2,7 @@ This document defines common Vitess concepts and terminology. ## Keyspace -A *keyspace* is a logical database. In its simplest form, it maps directly +A *keyspace* is a logical database. In the unsharded case, it maps directly to a MySQL database name, but it can also map to multiple MySQL databases. Reading data from a keyspace is like reading from a MySQL database. However, @@ -13,8 +13,9 @@ structured as if it were reading from a single MySQL database. When a database is [sharded](http://en.wikipedia.org/wiki/Shard_(database_architecture)), -a keyspace maps to multiple MySQL databases. In that case, a read operation -fetches the necessary data from one of the shards. +a keyspace maps to multiple MySQL databases. 
In that case, a single query sent +to Vitess will be routed to one or more shards, depending on where the requested +data resides. ## Keyspace ID @@ -37,98 +38,179 @@ for you, for example by using a `hash` vindex. ## Shard -A *shard* is a division within a keyspace. A shard typically contains one MySQL master and many MySQL slaves. +A *shard* is a division within a keyspace. A shard typically contains one MySQL +master and many MySQL slaves. -Each MySQL instance within a shard has the same data or should have the same data, excepting some replication lag. The slaves can serve read-only traffic (with eventual consistency guarantees), execute long-running data analysis tools, or perform administrative tasks (backups, restore, diffs, etc.). +Each MySQL instance within a shard has the same data (excepting some replication +lag). The slaves can serve read-only traffic (with eventual consistency guarantees), +execute long-running data analysis tools, or perform administrative tasks +(backup, restore, diff, etc.). -A keyspace that does not use sharding effectively has one shard. Vitess names the shard 0 by convention. When sharded a keyspace has N shards with non-overlapping data. Usually, N is a power of 2. +A keyspace that does not use sharding effectively has one shard. +Vitess names the shard `0` by convention. When sharded, a keyspace has `N` +shards with non-overlapping data. -Vitess supports [dynamic resharding](http://vitess.io/user-guide/sharding.html#resharding), in which one shard is split into multiple shards for instance. During dynamic resharding, the data in the source shard is split into the destination shards. Then the source shard is deleted. +### Resharding + +Vitess supports [dynamic resharding](http://vitess.io/user-guide/sharding.html#resharding), +in which the number of shards is changed on a live cluster. This can be either +splitting one or more shards into smaller pieces, or merging neighboring shards +into bigger pieces. 
+ +During dynamic resharding, the data in the source shards is copied into the +destination shards, allowed to catch up on replication, and then compared +against the original to ensure data integrity. Then the live serving +infrastructure is shifted to the destination shards, and the source shards are +deleted. ## Tablet -A *tablet* is a single server that runs: +A *tablet* is a combination of a `mysqld` process and a corresponding `vttablet` +process, usually running on the same machine. -* a MySQL instance -* a vttablet instance -* (optionally) a local row cache instance -* any other database-specific process necessary for operational purposes +Each tablet is assigned a *tablet type*, which specifies what role it currently +performs. -A tablet has a type. Common types are listed below: +### Tablet Types -* **type** - * master - The read-write database that is the MySQL master - * replica - A MySQL slave that serves read-only traffic with guaranteed low replication latency - * rdonly - A MySQL slave that serves read-only traffic for backend processing jobs, such as MapReduce-type jobs. This type of table does not have guaranteed replication latency. - * spare - A MySQL slave that is not currently in use. - -There are several other tablet types that each serve a specific purpose, including experimental, schema, backup, restore, worker. - -Only master, replica, and rdonly tablets are included in the [serving graph](#serving-graph). +* **master** - A *replica* tablet that happens to currently be the MySQL master + for its shard. +* **replica** - A MySQL slave that is eligible to be promoted to *master*. + Conventionally, these are reserved for serving live, user-facing + requests (like from the website's frontend). +* **rdonly** - A MySQL slave that cannot be promoted to *master*. + Conventionally, these are used for background processing jobs, + such as taking backups, dumping data to other systems, heavy + analytical queries, MapReduce, and resharding. 
+* **backup** - A tablet that has stopped replication at a consistent snapshot, + so it can upload a new backup for its shard. After it finishes, + it will resume replication and return to its previous type. +* **restore** - A tablet that has started up with no data, and is in the process + of restoring itself from the latest backup. After it finishes, + it will begin replicating at the GTID position of the backup, + and become either *replica* or *rdonly*. +* **worker** - A *rdonly* tablet that has been reserved by a Vitess background + process (such as resharding). While it is a *worker* type, the + tablet will not be available to serve queries from Vitess clients. + After the background job finishes, the tablet will resume + replication (if necessary) and go back to being *rdonly*.
TODO: Add a pointer to the complete list of tablet types, and explain how to update a tablet's type.
-## Shard graph +## Keyspace Graph -The *shard graph* maps keyspace IDs to the shards for that keyspace. The shard graph ensures that any given keyspace ID maps to exactly one shard. +The *keyspace graph* allows Vitess to decide which set of shards to use for a +given keyspace, cell, and tablet type. -Vitess uses range-based sharding. This basically means that the shard graph specifies a per-keyspace list of non-intersecting ranges that cover all possible values of a keyspace ID. As such, Vitess uses a simple, in-memory lookup to identify the appropriate shard for SQL queries. +### Partitions -In general, database sharding is most effective when the assigned [keyspace IDs](#keyspace-id) are evenly distributed among shards. With this in mind, a best practice is for keyspace IDs to use hashed key values rather than sequentially incrementing key values. This helps to ensure that assigned keyspace IDs are distributed more randomly among shards. +During horizontal resharding (splitting or merging shards), there can be shards +with overlapping key ranges. For example, the source shard of a split may serve +`c0-d0` while its destination shards serve `c0-c8` and `c8-d0` respectively. -For example, an application that uses an incrementing UserID as its primary key for user records should use a hashed version of that ID as a keyspace ID. All data related to a particular user would share that keyspace ID and, thus, would be on the same shard. +Since these shards need to exist simultaneously during the migration, +the keyspace graph maintains a list (called a *partitioning* or just a *partition*) +of shards whose ranges cover all possible keyspace ID values, while being +non-overlapping and contiguous. Shards can be moved in and out of this list to +determine whether they are active. -## Replication graph +The keyspace graph stores a separate partitioning for each `(cell, tablet type)` pair. 
+This allows migrations to proceed in phases: first migrate *rdonly* and +*replica* requests, one cell at a time, and finally migrate *master* requests. + +### Served From + +During vertical resharding (moving tables out from one keyspace to form a new +keyspace), there can be multiple keyspaces that contain the same table. + +Since these multiple copies of the table need to exist simultaneously during +the migration, the keyspace graph supports keyspace redirects, called +`ServedFrom` records. That enables a migration flow like this: + +1. Create `new_keyspace` and set its `ServedFrom` to point to `old_keyspace`. +1. Update the app to look for the tables to be moved in `new_keyspace`. + Vitess will automatically redirect these requests to `old_keyspace`. +1. Perform a vertical split clone to copy data to the new keyspace and start + filtered replication. +1. Remove the `ServedFrom` redirect to begin actually serving from `new_keyspace`. +1. Drop the now unused copies of the tables from `old_keyspace`. + +There can be a different `ServedFrom` record for each `(cell, tablet type)` pair. +This allows migrations to proceed in phases: first migrate *rdonly* and +*replica* requests, one cell at a time, and finally migrate *master* requests. + +## Replication Graph The *replication graph* identifies the relationships between master databases and their respective replicas. During a master failover, the replication graph enables Vitess to point all existing replicas to a newly designated master database so that replication can continue. -## Serving graph - -The *serving graph* represents the list of servers that are available -to serve queries. Vitess derives the serving graph from the -[shard](#shard-graph) and [replication](#replication-graph) graphs. - -[VTGate](/overview/#vtgate) (or another smart client) queries the -serving graph to determine which servers it can send queries to. 
- ## Topology Service -The *[Topology Service](https://github.com/youtube/vitess/blob/master/doc/TopologyService.md)* is a set of backend processes running on different servers. Those servers store topology data and provide a locking service. +The *[Topology Service](https://github.com/youtube/vitess/blob/master/doc/TopologyService.md)* +is a set of backend processes running on different servers. +Those servers store topology data and provide a distributed locking service. -The Vitess team does not design or maintain topology servers. The implementation in the Vitess source code tree uses ZooKeeper (Apache) as the locking service. On [Kubernetes](/getting-started/), Vitess uses etcd (CoreOS) as the locking service. +Vitess uses a plug-in system to support various backends for storing topology +data, which are assumed to provide a distributed, consistent key-value store. +By default, our [local example](http://vitess.io/getting-started/local-instance.html) +uses the ZooKeeper plugin, and the [Kubernetes example](http://vitess.io/getting-started/) +uses etcd. The topology service exists for several reasons: -* It stores rules that determine where data is located. -* It ensures that write operations execute successfully. -* It enables Vitess to transparently handle a data center (cell) being unavailable. -* It enables a data center to be taken offline and rebuilt as a unit. +* It enables tablets to coordinate among themselves as a cluster. +* It enables Vitess to discover tablets, so it knows where to route queries. +* It stores Vitess configuration provided by the database administrator that is + needed by many different servers in the cluster, and that must persist between + server restarts. -A Vitess implementation has one global instance of the topology service and one local instance of the topology service in each data center, or cell. Vitess clients are designed to only need access to the local serving graph. 
As such, clients only need the local instance of the topology service to be constantly available. +A Vitess cluster has one global topology service, and a local topology service +in each cell. Since *cluster* is an overloaded term, and one Vitess cluster is +distinguished from another by the fact that each has its own global topology +service, we refer to each Vitess cluster as a **toposphere**. -* **Global instance**
- The global instance stores global data that does not change frequently. Specifically, it contains data about keyspaces and shards as well as the master alias of the replication graph.

- The global instance is used for some operations, including reparenting and resharding. By design, the global topology server is not used a lot. +### Global Topology -* **Local instance**
- Each local instance contains information about information specific to the cell where it is located. Specifically, it contains data about tablets in the cell, the serving graph for that cell, and the master-slave map for MySQL instances in that cell.

- The local topology server must be available for Vitess to serve data. +The global topology stores Vitess-wide data that does not change frequently. +Specifically, it contains data about keyspaces and shards as well as the +master tablet alias for each shard. -
- To ensure reliability, the topology service has multiple server processes running on different servers. Those servers elect a master and perform chorum writes. In ZooKeeper, for a write to succeed, more than half of the servers must acknowledge it. Thus, a typical ZooKeeper configuration consists of either three or five servers, where two (out of three) or three (out of five) servers must agree for a write operation to succeed. -The instance is the set of servers providing topology services. So, in a Vitess implementation using ZooKeeper, the global and local instances likely consist of three or five servers apiece. - To be reliable, the global instance needs to have server processes spread across all regions and cells. Read-only replicas of the global instance can be maintained in each data center (cell). -
+The global topology is used for some operations, including reparenting and +resharding. By design, the global topology server is not used a lot. + +In order to survive any single cell going down, the global topology service +should have nodes in multiple cells, with enough to maintain quorum in the +event of a cell failure. + +### Local Topology + +Each local topology contains information related to its own cell. +Specifically, it contains data about tablets in the cell, the keyspace graph +for that cell, and the replication graph for that cell. + +The local topology service must be available for Vitess to discover tablets +and adjust routing as tablets come and go. However, no calls to the topology +service are made in the critical path of serving a query at steady state. +That means queries are still served during temporary unavailability of topology. ## Cell (Data Center) -A *cell* is a group of servers and network infrastructure collocated in an area. It is typically either a full data center or a subset of a data center. Vitess gracefully handles cell-level failures, such as when a cell is cut off the network. +A *cell* is a group of servers and network infrastructure collocated in an area, +and isolated from failures in other cells. It is typically either a full data +center or a subset of a data center, sometimes called a *zone* or *availability zone*. +Vitess gracefully handles cell-level failures, such as when a cell is cut off from the network. -Each cell in a Vitess implementation has a [local topology server](#topology-service), which is hosted in that cell. The topology server contains most of the information about the Vitess tablets in its cell. +Each cell in a Vitess implementation has a [local topology service](#topology-service), +which is hosted in that cell. The topology service contains most of the +information about the Vitess tablets in its cell. 
+This enables a cell to be taken down and rebuilt as a unit. -Vitess limits cross-cell traffic for both data and metadata. While it is useful to also have the ability to route client traffic to individual cells, Vitess does not provide that feature. +Vitess limits cross-cell traffic for both data and metadata. +While it may be useful to also have the ability to route read traffic to +individual cells, Vitess currently serves reads only from the local cell. +Writes will go cross-cell when necessary, to wherever the master for that shard +resides.
HealthCheck EndPoints CacheHealthCheck Tablet Cache
Cell Keyspace Shard TabletTypeEndPointsStatsTabletStats