worker: Rename flag "worker_retry_delay" to "worker_healthcheck_retry_delay".

This way we are consistent and all healthcheck-related flags are properly grouped.

vttablet: Split up flag "binlog_player_retry_delay" into the existing flag and a new flag "binlog_player_healthcheck_retry_delay". The two flags now serve two different purposes: retrying a failed binlog connection and retrying a failed healthcheck, respectively. As in the worker code, all healthcheck flags are properly grouped now.
Michael Berlin 2016-01-19 15:43:43 -08:00
Parent: 733178a705
Commit: 88be3099fe
3 changed files with 7 additions and 4 deletions
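Before the diffs, a minimal sketch (not part of the commit) of how the two binlog-player flags now map onto their separate purposes. The flag names, defaults, and descriptions are taken from the diff below; the `main` function is only illustrative.

```go
// Sketch only: flag definitions as they look after this commit.
package main

import (
	"flag"
	"fmt"
	"time"
)

var (
	// Used only when retrying a failed binlog connection.
	retryDelay = flag.Duration("binlog_player_retry_delay", 5*time.Second, "delay before retrying a failed binlog connection")
	// Used only when retrying a failed healthcheck; named so it groups
	// with the other binlog_player_healthcheck_* flags.
	healthcheckRetryDelay = flag.Duration("binlog_player_healthcheck_retry_delay", 5*time.Second, "delay before retrying a failed healthcheck")
)

func main() {
	flag.Parse()
	fmt.Println("binlog connection retry delay:", *retryDelay)
	fmt.Println("healthcheck retry delay:", *healthcheckRetryDelay)
}
```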

View file

@@ -36,8 +36,10 @@ import (
 )
 var (
-	retryDelay                 = flag.Duration("binlog_player_retry_delay", 5*time.Second, "delay before retrying a failed healthcheck or a failed binlog connection")
+	retryDelay                 = flag.Duration("binlog_player_retry_delay", 5*time.Second, "delay before retrying a failed binlog connection")
 	healthCheckTopologyRefresh = flag.Duration("binlog_player_healthcheck_topology_refresh", 30*time.Second, "refresh interval for re-reading the topology when filtered replication is running")
+	healthcheckRetryDelay      = flag.Duration("binlog_player_healthcheck_retry_delay", 5*time.Second, "delay before retrying a failed healthcheck")
 	healthCheckTimeout         = flag.Duration("binlog_player_healthcheck_timeout", time.Minute, "the health check timeout period")
 )
@@ -103,7 +105,7 @@ func newBinlogPlayerController(ts topo.Server, vtClientFactory func() binlogplay
 		dbName:            dbName,
 		sourceShard:       sourceShard,
 		binlogPlayerStats: binlogplayer.NewStats(),
-		healthCheck:       discovery.NewHealthCheck(*binlogplayer.BinlogPlayerConnTimeout, *retryDelay, *healthCheckTimeout),
+		healthCheck:       discovery.NewHealthCheck(*binlogplayer.BinlogPlayerConnTimeout, *healthcheckRetryDelay, *healthCheckTimeout),
 	}
 	blc.shardReplicationWatcher = discovery.NewShardReplicationWatcher(ts, blc.healthCheck, cell, sourceShard.Keyspace, sourceShard.Shard, *healthCheckTopologyRefresh, 5)
 	return blc

View file

@@ -31,7 +31,7 @@ var (
 	WaitForHealthyEndPointsTimeout = flag.Duration("wait_for_healthy_rdonly_endpoints_timeout", 60*time.Second, "maximum time to wait if less than --min_healthy_rdonly_endpoints are available")
 	healthCheckTopologyRefresh     = flag.Duration("worker_healthcheck_topology_refresh", 30*time.Second, "refresh interval for re-reading the topology when filtered replication is running")
-	retryDelay                     = flag.Duration("worker_retry_delay", 5*time.Second, "delay before retrying a failed healthcheck or a failed binlog connection")
+	healthcheckRetryDelay          = flag.Duration("worker_healthcheck_retry_delay", 5*time.Second, "delay before retrying a failed healthcheck")
 	healthCheckTimeout             = flag.Duration("worker_healthcheck_timeout", time.Minute, "the health check timeout period")
 )
@@ -45,7 +45,7 @@ func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell,
 	// create a discovery healthcheck, wait for it to have one rdonly
 	// endpoint at this point
-	healthCheck := discovery.NewHealthCheck(*remoteActionsTimeout, *retryDelay, *healthCheckTimeout)
+	healthCheck := discovery.NewHealthCheck(*remoteActionsTimeout, *healthcheckRetryDelay, *healthCheckTimeout)
 	watcher := discovery.NewShardReplicationWatcher(wr.TopoServer(), healthCheck, cell, keyspace, shard, *healthCheckTopologyRefresh, 5 /*topoReadConcurrency*/)
 	defer watcher.Stop()
 	defer healthCheck.Close()
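Since flag renames silently break callers that still pass the old name, a quick way to check the registration (a hypothetical sanity check, not part of this commit) is to look the names up in Go's flag registry:

```go
// Hypothetical check: the renamed flag is registered under its new name and
// the old name is gone. The definition mirrors the one added in the diff above.
package main

import (
	"flag"
	"fmt"
	"time"
)

var _ = flag.Duration("worker_healthcheck_retry_delay", 5*time.Second, "delay before retrying a failed healthcheck")

func main() {
	if f := flag.Lookup("worker_healthcheck_retry_delay"); f != nil {
		fmt.Println("new flag registered, default:", f.DefValue) // prints "5s"
	}
	if flag.Lookup("worker_retry_delay") == nil {
		fmt.Println("old flag name no longer registered")
	}
}
```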

View file

@@ -419,6 +419,7 @@ class Tablet(object):
         protocols_flavor().tablet_manager_protocol()])
     args.extend(['-tablet_protocol', protocols_flavor().tabletconn_protocol()])
     args.extend(['-binlog_player_healthcheck_topology_refresh', '1s'])
+    args.extend(['-binlog_player_healthcheck_retry_delay', '1s'])
     args.extend(['-binlog_player_retry_delay', '1s'])
     args.extend(['-pid_file', os.path.join(self.tablet_dir, 'vttablet.pid')])
     if self.use_mysqlctld: