From 3dec9dbd4a9f36eeb6eddf975b2a5bbca637a550 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Fri, 6 Aug 2021 20:37:02 +0200 Subject: [PATCH 1/5] Replace urls in repo pointing to master branch to main Signed-off-by: Rohit Nayak --- GOVERNANCE.md | 6 ++--- GUIDING_PRINCIPLES.md | 2 +- README.md | 4 ++-- SECURITY.md | 6 ++--- doc/DockerBuild.md | 10 ++++---- doc/LifeOfAQuery.md | 10 ++++---- ...icatoinLagBasedThrottlingOfTransactions.md | 4 ++-- doc/TwoPhaseCommitDesign.md | 4 ++-- doc/V3HighLevelDesign.md | 4 ++-- doc/VTGateSubqueries.md | 2 +- doc/VTGateV3Features.md | 2 +- doc/VindexAsTable.md | 2 +- doc/Vision.md | 2 +- doc/internal/ReleaseInstructions.md | 2 +- doc/releasenotes/8_0_0_release_notes.md | 2 +- doc/releasenotes/9_0_0_release_notes.md | 2 +- docker/README.md | 23 ++++--------------- docker/bootstrap/README.md | 2 +- go/vt/vitessdriver/doc.go | 8 +++---- helm/vitess/README.md | 2 +- java/README.md | 2 +- .../src/main/java/io/vitess/client/Proto.java | 2 +- .../main/java/io/vitess/client/RpcClient.java | 8 +++---- java/pom.xml | 4 ++-- 24 files changed, 50 insertions(+), 65 deletions(-) diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 1ddf1df91c..e1a743e8cb 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -59,7 +59,7 @@ New committers can be nominated by any existing committer. Once they have been n Nominees may decline their appointment as a committer. However, this is unusual, as the project does not expect any specific time or resource commitment from its community members. The intention behind the role of committer is to allow people to contribute to the project more easily, not to tie them in to the project in any formal way. -It is important to recognise that commitership is a privilege, not a right. That privilege must be earned and once earned it can be removed by the PMC for conduct inconsistent with the [Guiding Principles](https://github.com/vitessio/vitess/blob/master/GUIDING_PRINCIPLES.md) or if they drop below a level of commitment and engagement required to be a Committer, as determined by the PMC. The PMC also reserves the right to remove a person for any other reason inconsistent with the goals of Vitess. +It is important to recognise that commitership is a privilege, not a right. That privilege must be earned and once earned it can be removed by the PMC for conduct inconsistent with the [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md) or if they drop below a level of commitment and engagement required to be a Committer, as determined by the PMC. The PMC also reserves the right to remove a person for any other reason inconsistent with the goals of Vitess. A committer who shows an above-average level of contribution to the project, particularly with respect to its strategic direction and long-term health, may be nominated to become a member of the PMC. This role is described below. @@ -83,7 +83,7 @@ Membership of the PMC is by invitation from the existing PMC members. A nominati The number of PMC members should be limited to 7. This number is chosen to ensure that sufficient points of view are represented, while preserving the efficiency of the decision making process. -The PMC is responsible for maintaining the [Guiding Principles](https://github.com/vitessio/vitess/blob/master/GUIDING_PRINCIPLES.md) and the code of conduct. It is also responsible for ensuring that those rules and principles are followed. 
+The PMC is responsible for maintaining the [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md) and the code of conduct. It is also responsible for ensuring that those rules and principles are followed. ## PMC Chair @@ -106,7 +106,7 @@ The Slack channel list is the most appropriate place for a contributor to ask fo Decisions about the future of the project are made by the PMC. New proposals and ideas can be brought to the PMC’s attention through the Slack channel or by filing an issue. If necessary, the PMC will seek input from others to come to the final decision. -The PMC’s decision is itself governed by the project’s [Guiding Principles](https://github.com/vitessio/vitess/blob/master/GUIDING_PRINCIPLES.md), which shall be used to reach consensus. If a consensus cannot be reached, a simple majority voting process will be used to reach resolution. In case of a tie, the PMC chair has the casting vote. +The PMC’s decision is itself governed by the project’s [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md), which shall be used to reach consensus. If a consensus cannot be reached, a simple majority voting process will be used to reach resolution. In case of a tie, the PMC chair has the casting vote. # Credits The contents of this document are based on http://oss-watch.ac.uk/resources/meritocraticgovernancemodel by Ross Gardler and Gabriel Hanganu. diff --git a/GUIDING_PRINCIPLES.md b/GUIDING_PRINCIPLES.md index d8bad4dbec..7c89196f70 100644 --- a/GUIDING_PRINCIPLES.md +++ b/GUIDING_PRINCIPLES.md @@ -24,4 +24,4 @@ Vitess is driven by high technical standards, and these must be maintained. It i * Diversity * Inclusiveness * Openness -* Adherence to the [Code of Conduct](https://github.com/vitessio/vitess/blob/master/CODE_OF_CONDUCT.md) +* Adherence to the [Code of Conduct](https://github.com/vitessio/vitess/blob/main/CODE_OF_CONDUCT.md) diff --git a/README.md b/README.md index e5c8159922..ed2d8d8040 100644 --- a/README.md +++ b/README.md @@ -22,10 +22,10 @@ since 2011, and has grown to encompass tens of thousands of MySQL nodes. For more about Vitess, please visit [vitess.io](https://vitess.io). Vitess has a growing community. You can view the list of adopters -[here](https://github.com/vitessio/vitess/blob/master/ADOPTERS.md). +[here](https://github.com/vitessio/vitess/blob/main/ADOPTERS.md). ## Reporting a Problem, Issue, or Bug -To report a problem, the best way to get attention is to create a GitHub [issue](.https://github.com/vitessio/vitess/issues ) using proper severity level based on this [guide](https://github.com/vitessio/vitess/blob/master/SEVERITY.md). +To report a problem, the best way to get attention is to create a GitHub [issue](.https://github.com/vitessio/vitess/issues ) using proper severity level based on this [guide](https://github.com/vitessio/vitess/blob/main/SEVERITY.md). For topics that are better discussed live, please join the [Vitess Slack](https://vitess.io/slack) workspace. You may post any questions on the #general channel or join some of the special-interest channels. diff --git a/SECURITY.md b/SECURITY.md index 1838013030..8579d2b927 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -33,11 +33,11 @@ score >= 4; see below). If the fix relies on another upstream project's disclosu will adjust the process as well. We will work with the upstream project to fit their timeline and best protect our users. 
-#### Policy for master-only vulnerabilities +#### Policy for main-only vulnerabilities -If a security vulnerability affects master, but not a currently supported branch, then the following process will apply: +If a security vulnerability affects main, but not a currently supported branch, then the following process will apply: -* The fix will land in master. +* The fix will land in main. * A courtesy notice will be posted in #developers on Vitess Slack. #### Policy for unsupported releases diff --git a/doc/DockerBuild.md b/doc/DockerBuild.md index afb9643065..b8ce05819d 100644 --- a/doc/DockerBuild.md +++ b/doc/DockerBuild.md @@ -1,10 +1,10 @@ -By default, the [Helm Charts](https://github.com/vitessio/vitess/tree/master/helm) +By default, the [Helm Charts](https://github.com/vitessio/vitess/tree/main/helm) point to the `vitess/lite` image on [Docker Hub](https://hub.docker.com/u/vitess/). We created the `lite` image as a stripped down version of our main image `base` such that Kubernetes pods can start faster. The `lite` image does not change very often and is updated manually by the Vitess team with every release. In contrast, the `base` image is updated automatically after every push to the GitHub master branch. -For more information on the different images we provide, please read the [`docker/README.md`](https://github.com/vitessio/vitess/tree/master/docker) file. +For more information on the different images we provide, please read the [`docker/README.md`](https://github.com/vitessio/vitess/tree/main/docker) file. If your goal is run the latest Vitess code, the simplest solution is to use the bigger `base` image instead of `lite`. @@ -22,9 +22,9 @@ Then you can run our build script for the `lite` image which extracts the Vitess 1. Go to your `src/vitess.io/vitess` directory. -1. Usually, you won't need to [build your own bootstrap image](https://github.com/vitessio/vitess/blob/master/docker/bootstrap/README.md) - unless you edit [bootstrap.sh](https://github.com/vitessio/vitess/blob/master/bootstrap.sh) - or [vendor.json](https://github.com/vitessio/vitess/blob/master/vendor/vendor.json), +1. Usually, you won't need to [build your own bootstrap image](https://github.com/vitessio/vitess/blob/main/docker/bootstrap/README.md) + unless you edit [bootstrap.sh](https://github.com/vitessio/vitess/blob/main/bootstrap.sh) + or [vendor.json](https://github.com/vitessio/vitess/blob/main/vendor/vendor.json), for example to add new dependencies. If you do need it then build the bootstrap image, otherwise pull the image using one of the following commands depending on the MySQL flavor you want: diff --git a/doc/LifeOfAQuery.md b/doc/LifeOfAQuery.md index d01e0df8da..b40ef21f26 100644 --- a/doc/LifeOfAQuery.md +++ b/doc/LifeOfAQuery.md @@ -11,7 +11,7 @@ Life of A Query A query means a request for information from database and it involves four components in the case of Vitess, including the client application, VtGate, VtTablet and MySQL instance. This doc explains the interaction which happens between and within components. -![](https://raw.githubusercontent.com/vitessio/vitess/master/doc/life_of_a_query.png) +![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query.png) At a very high level, as the graph shows, first the client sends a query to VtGate. VtGate then resolves the query and routes it to the right VtTablets. For each VtTablet that receives the query, it does necessary validations and passes the query to the underlying MySQL instance. 
After gathering results from MySQL, VtTablet sends the response back to VtGate. Once VtGate receives responses from all VtTablets, it sends the combined result to the client. In the presence of VtTablet errors, VtGate will retry the query if errors are recoverable and it only fails the query if either errors are unrecoverable or the maximum number of retries has been reached. @@ -19,13 +19,13 @@ At a very high level, as the graph shows, first the client sends a query to VtGa A client application first sends an rpc with an embedded sql query to VtGate. VtGate's rpc server unmarshals this rpc request, calls the appropriate VtGate method and return its result back to client. -![](https://raw.githubusercontent.com/vitessio/vitess/master/doc/life_of_a_query_client_to_vtgate.png) +![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query_client_to_vtgate.png) VtGate keeps an in-memory table that stores all available rpc methods for each service, e.g. VtGate uses "VTGate" as its service name and most of its methods defined in [go/vt/vtgate/vtgate.go](../go/vt/vtgate/vtgate.go) are used to serve rpc request. ## From VtGate to VtTablet -![](https://raw.githubusercontent.com/vitessio/vitess/master/doc/life_of_a_query_vtgate_to_vttablet.png) +![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query_vtgate_to_vttablet.png) After receiving an rpc call from the client and one of its Execute* method being invoked, VtGate needs to figure out which shards should receive the query and send it to each of them. In addition, VtGate talks to the topo server to get necessary information to create a VtTablet connection for each shard. At this point, VtGate is able to send the query to the right VtTablets in parallel. VtGate also does retry if timeout happens or some VtTablets return recoverable errors. @@ -35,13 +35,13 @@ A ShardConn object represents a load balanced connection to a group of VtTablets ## From VtTablet to MySQL -![](https://raw.githubusercontent.com/vitessio/vitess/master/doc/life_of_a_query_vttablet_to_mysql.png) +![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query_vttablet_to_mysql.png) Once VtTablet received an rpc call from VtGate, it does a few checks before passing the query to MySQL. First, it validates the current VtTablet state including the session id, then generates a query plan and applies predefined query rules and does ACL checks. It also checks whether the query hits the row cache and returns the result immediately if so. In addition, VtTablet consolidates duplicate queries from executing simultaneously and shares results between them. At this point, VtTablet has no way but pass the query down to MySQL layer and wait for the result. 
## Putting it all together -![](https://raw.githubusercontent.com/vitessio/vitess/master/doc/life_of_a_query_all.png) +![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query_all.png) ## TopoServer diff --git a/doc/ReplicatoinLagBasedThrottlingOfTransactions.md b/doc/ReplicatoinLagBasedThrottlingOfTransactions.md index 3fecbb7087..a0dcc88952 100644 --- a/doc/ReplicatoinLagBasedThrottlingOfTransactions.md +++ b/doc/ReplicatoinLagBasedThrottlingOfTransactions.md @@ -21,11 +21,11 @@ A boolean flag controlling whether the replication-lag-based throttling is enabl * *tx-throttler-config* -A text-format representation of the [throttlerdata.Configuration](https://github.com/vitessio/vitess/blob/master/proto/throttlerdata.proto) protocol buffer +A text-format representation of the [throttlerdata.Configuration](https://github.com/vitessio/vitess/blob/main/proto/throttlerdata.proto) protocol buffer that contains configuration options for the throttler. The most important fields in that message are *target_replication_lag_sec* and *max_replication_lag_sec* that specify the desired limits on the replication lag. See the comments in the protocol definition file for more details. -If this is not specified a [default](https://github.com/vitessio/vitess/tree/master/go/vt/vttablet/tabletserver/tabletenv/config.go) configuration will be used. +If this is not specified a [default](https://github.com/vitessio/vitess/tree/main/go/vt/vttablet/tabletserver/tabletenv/config.go) configuration will be used. * *tx-throttler-healthcheck-cells* diff --git a/doc/TwoPhaseCommitDesign.md b/doc/TwoPhaseCommitDesign.md index 81eef23370..a04cdf41a0 100644 --- a/doc/TwoPhaseCommitDesign.md +++ b/doc/TwoPhaseCommitDesign.md @@ -106,7 +106,7 @@ For #1 and #2, the Rollback workflow is initiated. For #3, the commit is resumed The following diagram illustrates the life-cycle of a Vitess transaction. -![](https://raw.githubusercontent.com/vitessio/vitess/master/doc/TxLifecycle.png) +![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/TxLifecycle.png) A transaction generally starts off as a single DB transaction. It becomes a distributed transaction as soon as more than one VTTablet is affected. If the app issues a rollback, then all participants are simply rolled back. If a BEC is issued, then all transactions are individually committed. These actions are the same irrespective of single or distributed transactions. @@ -132,7 +132,7 @@ In order to make 2PC work, the following pieces of functionality have to be buil The diagram below show how the various components interact. -![](https://raw.githubusercontent.com/vitessio/vitess/master/doc/TxInteractions.png) +![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/TxInteractions.png) The detailed design explains all the functionalities and interactions. diff --git a/doc/V3HighLevelDesign.md b/doc/V3HighLevelDesign.md index 44045610a4..4ed0959df0 100644 --- a/doc/V3HighLevelDesign.md +++ b/doc/V3HighLevelDesign.md @@ -6,7 +6,7 @@ The goal of this document is to describe the guiding principles that will be use ### Prerequisites -Before reading this doc you must be familiar with [vindexes](https://github.com/vitessio/vitess/blob/master/doc/V3VindexDesign.md), which is used as foundation for the arguments presented here. +Before reading this doc you must be familiar with [vindexes](https://github.com/vitessio/vitess/blob/main/doc/V3VindexDesign.md), which is used as foundation for the arguments presented here. 
# Background @@ -1194,7 +1194,7 @@ The overall strategy is as follows: In order to align ourselves with our priorities, we’ll start off with a limited set of primitives, and then we can expand from there. -VTGate already has `Route` and `RouteMerge` as primitives. To this list, let’s add `Join` and `LeftJoin`. Using these primitives, we should be able to cover priorities 1-3 (mentioned in the [Prioritization](https://github.com/vitessio/vitess/blob/master/doc/V3HighLevelDesign.md#prioritization) section). So, any constructs that will require VTGate to do additional work will not be supported. Here’s a recap of what each primitive must do: +VTGate already has `Route` and `RouteMerge` as primitives. To this list, let’s add `Join` and `LeftJoin`. Using these primitives, we should be able to cover priorities 1-3 (mentioned in the [Prioritization](https://github.com/vitessio/vitess/blob/main/doc/V3HighLevelDesign.md#prioritization) section). So, any constructs that will require VTGate to do additional work will not be supported. Here’s a recap of what each primitive must do: * `Route`: Sends a query to a single shard or unsharded keyspace. * `RouteMerge`: Sends a (mostly) identical query to multiple shards and returns the combined results in no particular order. diff --git a/doc/VTGateSubqueries.md b/doc/VTGateSubqueries.md index 95eedecba0..1bc3fc70f9 100644 --- a/doc/VTGateSubqueries.md +++ b/doc/VTGateSubqueries.md @@ -2,7 +2,7 @@ # Introduction -This document builds on top of [The V3 high level design](https://github.com/vitessio/vitess/blob/master/doc/V3HighLevelDesign.md). It discusses implementation of subquery support in greater detail. +This document builds on top of [The V3 high level design](https://github.com/vitessio/vitess/blob/main/doc/V3HighLevelDesign.md). It discusses implementation of subquery support in greater detail. diff --git a/doc/VTGateV3Features.md b/doc/VTGateV3Features.md index 60ee600cc1..44dd9e16db 100644 --- a/doc/VTGateV3Features.md +++ b/doc/VTGateV3Features.md @@ -34,7 +34,7 @@ there are some additional benefits: underneath without changing much of the app. The -[V3 design](https://github.com/vitessio/vitess/blob/master/doc/V3VindexDesign.md) +[V3 design](https://github.com/vitessio/vitess/blob/main/doc/V3VindexDesign.md) is quite elaborate. If necessary, it will allow you to plug in custom indexes and sharding schemes. However, it comes equipped with some pre-cooked recipes that satisfy the immediate needs of the real-world: diff --git a/doc/VindexAsTable.md b/doc/VindexAsTable.md index 642f74c482..61357d1e95 100644 --- a/doc/VindexAsTable.md +++ b/doc/VindexAsTable.md @@ -10,7 +10,7 @@ One can think of a vindex as a table that looks like this: create my_vdx(id int, keyspace_id varbinary(255)) // id can be of any type. ``` -Looking at the vindex interface defined [here](https://github.com/vitessio/vitess/blob/master/go/vt/vtgate/vindexes/vindex.go), we can come up with SQL syntax that represents them: +Looking at the vindex interface defined [here](https://github.com/vitessio/vitess/blob/main/go/vt/vtgate/vindexes/vindex.go), we can come up with SQL syntax that represents them: * Map: `select id, keyspace_id from my_vdx where id = :id`. * Create: `insert into my_vdx values(:id, :keyspace_id)`. * Delete: `delete from my_vdx where id = :id and keyspace_id :keyspace_id`. diff --git a/doc/Vision.md b/doc/Vision.md index 7df33c1d22..4b918fb77f 100644 --- a/doc/Vision.md +++ b/doc/Vision.md @@ -70,4 +70,4 @@ data is stored only once, and fetched only if needed. 
The following diagram illustrates where vitess fits in the spectrum of storage solutions: -![Spectrum](https://raw.github.com/vitessio/vitess/master/doc/VitessSpectrum.png) +![Spectrum](https://raw.github.com/vitessio/vitess/main/doc/VitessSpectrum.png) diff --git a/doc/internal/ReleaseInstructions.md b/doc/internal/ReleaseInstructions.md index 51dd7e3d51..01322102b6 100644 --- a/doc/internal/ReleaseInstructions.md +++ b/doc/internal/ReleaseInstructions.md @@ -13,7 +13,7 @@ backward-incompatible way -- for example, when removing deprecated interfaces. Our public API includes (but is not limited to): -* The VTGate [RPC interfaces](https://github.com/vitessio/vitess/tree/master/proto). +* The VTGate [RPC interfaces](https://github.com/vitessio/vitess/tree/main/proto). * The interfaces exposed by the VTGate client library in each language. Care must also be taken when changing the format of any data stored by a live diff --git a/doc/releasenotes/8_0_0_release_notes.md b/doc/releasenotes/8_0_0_release_notes.md index 26d50aab3d..cebfe4d9e6 100644 --- a/doc/releasenotes/8_0_0_release_notes.md +++ b/doc/releasenotes/8_0_0_release_notes.md @@ -107,7 +107,7 @@ If a scatter query is attempting to collect and process too many rows in memory ### Set Statement Support -Set statement support is added in Vitess. There are [some system variables](https://github.com/vitessio/vitess/blob/master/go/vt/sysvars/sysvars.go#L147,L190) which are disabled by default and can be enabled using flag `-enable_system_settings` on VTGate.These system variables are set on the backing MySQL instance, and will force the connection to be dedicated instead of part of the connection pool. +Set statement support is added in Vitess. There are [some system variables](https://github.com/vitessio/vitess/blob/main/go/vt/sysvars/sysvars.go#L147,L190) which are disabled by default and can be enabled using flag `-enable_system_settings` on VTGate.These system variables are set on the backing MySQL instance, and will force the connection to be dedicated instead of part of the connection pool. * Disabled passthrough system variables by default. #6859 * Allow switching workload between OLAP and OLTP #4086 #6691 diff --git a/doc/releasenotes/9_0_0_release_notes.md b/doc/releasenotes/9_0_0_release_notes.md index 6dc076df1c..94e5767cd1 100644 --- a/doc/releasenotes/9_0_0_release_notes.md +++ b/doc/releasenotes/9_0_0_release_notes.md @@ -100,7 +100,7 @@ Vitess 9.0 is not compatible with the previous release of the Vitess Kubernetes ### Set Statement Support -Set statement support has been added in Vitess. There are [some system variables](https://github.com/vitessio/vitess/blob/master/go/vt/sysvars/sysvars.go#L147,L190) which are disabled by default and can be enabled using flag `-enable_system_settings` on VTGate. These system variables are set on the mysql server. Because they change the mysql session, using them leads to the Vitess connection no longer using the connection pool and forcing dedicated connections. +Set statement support has been added in Vitess. There are [some system variables](https://github.com/vitessio/vitess/blob/main/go/vt/sysvars/sysvars.go#L147,L190) which are disabled by default and can be enabled using flag `-enable_system_settings` on VTGate. These system variables are set on the mysql server. Because they change the mysql session, using them leads to the Vitess connection no longer using the connection pool and forcing dedicated connections. 
### VReplication

diff --git a/docker/README.md b/docker/README.md
index 0741e7a2f1..f7b9ff482e 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -13,25 +13,25 @@ The structure of this directory and our Dockerfile files is guided by the following requirements:
 
 * The configuration of each Vitess image is in the directory `docker/<image>/`.
 * Configurations for other images e.g. our internal tool Keytar (see below), can be in a different location.
-* Images with more complex build steps have a `build.sh` script e.g. see [lite/build.sh](https://github.com/vitessio/vitess/blob/master/docker/lite/build.sh).
-* Tags are used to provide (stable) versions e.g. see tag `v2.0` for the image [vitess/lite](https://hub.docker.com/r/vitess/lite/tags).
-* Where applicable, we provide a `latest` tag to reference the latest build of an image.
-
-## Images
-
-Our list of images can be grouped into:
-
-* published Vitess code
-* dependencies for our Kubernetes tutorial
-* internally used tools
-
-### Vitess
-
-| Image | How (When) Updated | Description |
-| --- | --- | --- |
-| **bootstrap** | manual (after incompatible changes are made to [bootstrap.sh](https://github.com/vitessio/vitess/blob/master/bootstrap.sh) or [vendor/vendor.json](https://github.com/vitessio/vitess/blob/master/vendor/vendor.json) | Basis for all Vitess images. It is a snapshot of the checked out repository after running `./bootstrap.sh`. Used to cache dependencies. Avoids lengthy recompilation of dependencies if they did not change. Our internal test runner [`test.go`](https://github.com/vitessio/vitess/blob/master/test.go) uses it to test the code against different MySQL versions. |
-| **base** | automatic (after every GitHub push to the master branch) | Contains all Vitess server binaries. Snapshot after running `make build`. |
-| **root** | automatic (after every GitHub push to the master branch) | Same as **base** but with the default user set to "root". Required for Kubernetes. |
+* Images with more complex build steps have a `build.sh` script e.g. see [lite/build.sh](https://github.com/vitessio/vitess/blob/main/docker/lite/build.sh).
+* Tags are used to provide (stable) versions e.g. see tag `v2.0` for the image [vitess/lite](https://hub.docker.com/r/vitess/lite/tags).
+* Where applicable, we provide a `latest` tag to reference the latest build of an image.
+
+## Images
+
+Our list of images can be grouped into:
+
+* published Vitess code
+* dependencies for our Kubernetes tutorial
+* internally used tools
+
+### Vitess
+
+| Image | How (When) Updated | Description |
+| --- | --- | --- |
+| **bootstrap** | manual (after incompatible changes are made to [bootstrap.sh](https://github.com/vitessio/vitess/blob/main/bootstrap.sh) or [vendor/vendor.json](https://github.com/vitessio/vitess/blob/main/vendor/vendor.json) | Basis for all Vitess images. It is a snapshot of the checked out repository after running `./bootstrap.sh`. Used to cache dependencies. Avoids lengthy recompilation of dependencies if they did not change. Our internal test runner [`test.go`](https://github.com/vitessio/vitess/blob/main/test.go) uses it to test the code against different MySQL versions. |
+| **base** | automatic (after every GitHub push to the main branch) | Contains all Vitess server binaries. Snapshot after running `make build`. |
+| **root** | automatic (after every GitHub push to the main branch) | Same as **base** but with the default user set to "root". Required for Kubernetes. |
 | **lite** | manual (updated with every Vitess release) | Stripped down version of **base** e.g. source code and build dependencies are removed. Default image in our Kubernetes templates for minimized startup time. |
 
 All these Vitess images include a specific MySQL/MariaDB version ("flavor").
diff --git a/docker/bootstrap/README.md b/docker/bootstrap/README.md index 3d19d0b750..202bb12c16 100644 --- a/docker/bootstrap/README.md +++ b/docker/bootstrap/README.md @@ -15,7 +15,7 @@ The `vitess/bootstrap` image comes in different flavors: **NOTE: Unlike the base image that builds Vitess itself, this bootstrap image will NOT be rebuilt automatically on every push to the Vitess master branch.** -To build a new bootstrap image, use the [build.sh](https://github.com/vitessio/vitess/blob/master/docker/bootstrap/build.sh) +To build a new bootstrap image, use the [build.sh](https://github.com/vitessio/vitess/blob/main/docker/bootstrap/build.sh) script. First build the `common` image, then any flavors you want. For example: diff --git a/go/vt/vitessdriver/doc.go b/go/vt/vitessdriver/doc.go index 74f2d59221..b3ddffdc3f 100644 --- a/go/vt/vitessdriver/doc.go +++ b/go/vt/vitessdriver/doc.go @@ -38,7 +38,7 @@ Using this SQL driver is as simple as: // Use "db" via the Golang sql interface. } -For a full example, please see: https://github.com/vitessio/vitess/blob/master/test/client.go +For a full example, please see: https://github.com/vitessio/vitess/blob/main/test/client.go The full example is based on our tutorial for running Vitess locally: https://vitess.io/docs/get-started/local/ @@ -61,21 +61,21 @@ The driver uses the V3 API which doesn't require you to specify routing information. You just send the query as if Vitess was a regular database. VTGate analyzes the query and uses additional metadata called VSchema to perform the necessary routing. See the vtgate v3 Features doc for an overview: -https://github.com/vitessio/vitess/blob/master/doc/VTGateV3Features.md +https://github.com/vitessio/vitess/blob/main/doc/VTGateV3Features.md As of 12/2015, the VSchema creation is not documented yet as we are in the process of simplifying the VSchema definition and the overall process for creating one. If you want to create your own VSchema, we recommend to have a look at the VSchema from the vtgate v3 demo: -https://github.com/vitessio/vitess/blob/master/examples/demo/schema +https://github.com/vitessio/vitess/blob/main/examples/demo/schema (The demo itself is interactive and can be run by executing "./run.py" in the "examples/demo/" directory.) The vtgate v3 design doc, which we will also update and simplify in the future, contains more details on the VSchema: -https://github.com/vitessio/vitess/blob/master/doc/V3VindexDesign.md +https://github.com/vitessio/vitess/blob/main/doc/V3VindexDesign.md Isolation levels diff --git a/helm/vitess/README.md b/helm/vitess/README.md index ce116d8fe0..93d53975ef 100644 --- a/helm/vitess/README.md +++ b/helm/vitess/README.md @@ -391,7 +391,7 @@ metadata: data: extra.cnf: |- early-plugin-load=keyring_vault=keyring_vault.so - # this includes default rpl plugins, see https://github.com/vitessio/vitess/blob/master/config/mycnf/master_mysql57.cnf for details + # this includes default rpl plugins, see https://github.com/vitessio/vitess/blob/main/config/mycnf/master_mysql57.cnf for details plugin-load=rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so;keyring_udf=keyring_udf.so keyring_vault_config=/vt/usersecrets/vttablet-vault/vault.conf # load keyring configuration from secret innodb_encrypt_tables=ON # encrypt all tables by default diff --git a/java/README.md b/java/README.md index 73a72d27e3..df2934204f 100644 --- a/java/README.md +++ b/java/README.md @@ -5,7 +5,7 @@ This subdirectory contains all Vitess Java code. 
It is split in the following subdirectories (Maven modules):
 
 * **client:** Our Java client library.
-  * See [VTGateConn.java](https://github.com/vitessio/vitess/blob/master/java/client/src/main/java/io/vitess/client/VTGateConn.java) and [VTGateBlockingConn.java](https://github.com/vitessio/vitess/blob/master/java/client/src/main/java/io/vitess/client/VTGateBlockingConn.java) for the API.
+  * See [VTGateConn.java](https://github.com/vitessio/vitess/blob/main/java/client/src/main/java/io/vitess/client/VTGateConn.java) and [VTGateBlockingConn.java](https://github.com/vitessio/vitess/blob/main/java/client/src/main/java/io/vitess/client/VTGateBlockingConn.java) for the API.
   * Note: The library is agnostic of the underlying RPC system and only defines an interface for that.
   * In open-source, the library must always be used together with the code in `grpc-client`.
 * **grpc-client:** Implements the client's RPC interface for gRPC.
diff --git a/java/client/src/main/java/io/vitess/client/Proto.java b/java/client/src/main/java/io/vitess/client/Proto.java
index f72cdde8c2..1564a80a06 100644
--- a/java/client/src/main/java/io/vitess/client/Proto.java
+++ b/java/client/src/main/java/io/vitess/client/Proto.java
@@ -66,7 +66,7 @@ public class Proto {
    *
    * <p>
    * Errors returned by Vitess are documented in the
-   * <a href="https://github.com/vitessio/vitess/blob/master/proto/vtrpc.proto">vtrpc proto</a>.
+   * <a href="https://github.com/vitessio/vitess/blob/main/proto/vtrpc.proto">vtrpc proto</a>.
    */
   public static void checkError(RPCError error) throws SQLException {
     if (error != null) {
diff --git a/java/client/src/main/java/io/vitess/client/RpcClient.java b/java/client/src/main/java/io/vitess/client/RpcClient.java
index 45cbf0c2ce..85a1574efb 100644
--- a/java/client/src/main/java/io/vitess/client/RpcClient.java
+++ b/java/client/src/main/java/io/vitess/client/RpcClient.java
@@ -38,7 +38,7 @@ public interface RpcClient extends Closeable {
    * Sends a single query using the VTGate V3 API.
    *
    * <p>See the
-   * <a href="https://github.com/vitessio/vitess/blob/master/proto/vtgateservice.proto">proto</a>
+   * <a href="https://github.com/vitessio/vitess/blob/main/proto/vtgateservice.proto">proto</a>
    * definition for canonical documentation on this VTGate API.
    */
   ListenableFuture<ExecuteResponse> execute(Context ctx, ExecuteRequest request)
@@ -48,7 +48,7 @@ public interface RpcClient extends Closeable {
    * Sends a list of queries using the VTGate V3 API.
    *
    * <p>See the
-   * <a href="https://github.com/vitessio/vitess/blob/master/proto/vtgateservice.proto">proto</a>
+   * <a href="https://github.com/vitessio/vitess/blob/main/proto/vtgateservice.proto">proto</a>
    * definition for canonical documentation on this VTGate API.
    */
   ListenableFuture<ExecuteBatchResponse> executeBatch(Context ctx,
@@ -64,7 +64,7 @@ public interface RpcClient extends Closeable {
    * received from the server.
    *
    * <p>See the
-   * <a href="https://github.com/vitessio/vitess/blob/master/proto/vtgateservice.proto">proto</a>
+   * <a href="https://github.com/vitessio/vitess/blob/main/proto/vtgateservice.proto">proto</a>
    * definition for canonical documentation on this VTGate API.
    */
   StreamIterator<QueryResult> streamExecute(Context ctx, StreamExecuteRequest request)
@@ -76,7 +76,7 @@ public interface RpcClient extends Closeable {
    * Stream begins at the specified VGTID.
    *
   * <p>See the
-   * <a href="https://github.com/vitessio/vitess/blob/master/proto/vtgateservice.proto">proto</a>
+   * <a href="https://github.com/vitessio/vitess/blob/main/proto/vtgateservice.proto">proto</a>
    * definition for canonical documentation on this VTGate API.
    */
   StreamIterator<VStreamResponse> getVStream(
diff --git a/java/pom.xml b/java/pom.xml
index 45a545cb1b..29133369fc 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -44,14 +44,14 @@
   <licenses>
     <license>
       <name>Apache Version 2.0</name>
-      <url>https://github.com/vitessio/vitess/blob/master/LICENSE</url>
+      <url>https://github.com/vitessio/vitess/blob/main/LICENSE</url>
       <distribution>manual</distribution>
     </license>
   </licenses>
   <scm>
     <connection>scm:git:git@github.com:vitessio/vitess.git</connection>
     <developerConnection>scm:git:git@github.com:vitessio/vitess.git</developerConnection>
-    <url>https://github.com/vitessio/vitess/tree/master</url>
+    <url>https://github.com/vitessio/vitess/tree/main</url>
   </scm>
   <issueManagement>
     <system>GitHub</system>

From 6cd299fc43b2d05c6d438c04692f758af3de0a22 Mon Sep 17 00:00:00 2001
From: Rohit Nayak
Date: Fri, 6 Aug 2021 21:14:38 +0200
Subject: [PATCH 2/5] Remove refs in vstreamer and split clone tests

Signed-off-by: Rohit Nayak
---
 .../tabletserver/vstreamer/uvstreamer_test.go |  12 +-
 .../tabletserver/vstreamer/vstreamer_test.go  |  28 ++---
 go/vt/worker/split_clone_flaky_test.go        | 116 +++++++++---------
 3 files changed, 78 insertions(+), 78 deletions(-)

diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_test.go
index 801745cf6e..963010f281 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_test.go
@@ -204,7 +204,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) {
 	log.Info("Inserting row for fast forward to find, locking t2")
 	conn.ExecuteFetch("lock tables t2 write", 1, false)
 	insertRow(t, "t1", 1, numInitialRows+2)
-	log.Infof("Position after second insert into t1: %s", masterPosition(t))
+	log.Infof("Position after second insert into t1: %s", primaryPosition(t))
 	conn.ExecuteFetch("unlock tables", 1, false)
 	log.Info("Inserted row for fast forward to find, unlocked tables")
 
@@ -219,7 +219,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) {
 	conn.ExecuteFetch("lock tables t3 write", 1, false)
 	insertRow(t, "t1", 1, numInitialRows+3)
 	insertRow(t, "t2", 2, numInitialRows+2)
-	log.Infof("Position after third insert into t1: %s", masterPosition(t))
+	log.Infof("Position after third insert into t1: %s", primaryPosition(t))
 	conn.ExecuteFetch("unlock tables", 1, false)
 	log.Info("Inserted rows for fast forward to find, unlocked tables")
 
@@ -335,7 +335,7 @@ func insertMultipleRows(t *testing.T, table string, idx int, numRows int) {
 
 func initTables(t *testing.T, tables []string) {
 	var idx int
-	positions["start"] = masterPosition(t)
+	positions["start"] = primaryPosition(t)
 	for i, table := range tables {
 		idx = i + 1
 		execStatement(t, fmt.Sprintf(createTableQuery, table, idx, idx, idx))
@@ -344,7 +344,7 @@ func initTables(t *testing.T, tables []string) {
 		tableName := table
 		idx = i + 1
 		insertMultipleRows(t, table, idx, numInitialRows)
-		positions[fmt.Sprintf("%sBulkInsert", table)] = masterPosition(t)
+		positions[fmt.Sprintf("%sBulkInsert", table)] = primaryPosition(t)
 
 		callbacks[fmt.Sprintf("LASTPK.*%s.*%d", table, numInitialRows)] = func() {
 			ctx := context.Background()
@@ -364,11 +364,11 @@ func initTables(t *testing.T, tables []string) {
 				"commit",
 			}
 			env.Mysqld.ExecuteSuperQueryList(ctx, queries)
-			log.Infof("Position after first insert into t1 and t2: %s", masterPosition(t))
+			log.Infof("Position after first insert into t1 and t2: %s", primaryPosition(t))
 			}
 		}
 	}
-	positions["afterInitialInsert"] = masterPosition(t)
+	positions["afterInitialInsert"] = primaryPosition(t)
 }
 
 func initialize(t *testing.T) {
diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go
b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go index eca6122dad..3bf3a37f0f 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go @@ -325,7 +325,7 @@ func TestMissingTables(t *testing.T) { "drop table t1", "drop table _shortlived", }) - startPos := masterPosition(t) + startPos := primaryPosition(t) execStatements(t, []string{ "insert into shortlived values (1,1), (2,2)", "alter table shortlived rename to _shortlived", @@ -378,9 +378,9 @@ func TestVStreamCopySimpleFlow(t *testing.T) { "create table t1(id11 int, id12 int, primary key(id11))", "create table t2(id21 int, id22 int, primary key(id21))", }) - log.Infof("Pos before bulk insert: %s", masterPosition(t)) + log.Infof("Pos before bulk insert: %s", primaryPosition(t)) insertLotsOfData(t, 10) - log.Infof("Pos after bulk insert: %s", masterPosition(t)) + log.Infof("Pos after bulk insert: %s", primaryPosition(t)) defer execStatements(t, []string{ "drop table t1", "drop table t2", @@ -454,7 +454,7 @@ func TestVStreamCopySimpleFlow(t *testing.T) { } runCases(t, filter, testcases, "vscopy", tablePKs) - log.Infof("Pos at end of test: %s", masterPosition(t)) + log.Infof("Pos at end of test: %s", primaryPosition(t)) } func TestVStreamCopyWithDifferentFilters(t *testing.T) { @@ -849,9 +849,9 @@ func TestOther(t *testing.T) { }} for _, stmt := range testcases { - startPosition := masterPosition(t) + startPosition := primaryPosition(t) execStatement(t, stmt) - endPosition := masterPosition(t) + endPosition := primaryPosition(t) if startPosition == endPosition { t.Logf("statement %s did not affect binlog", stmt) continue @@ -1177,7 +1177,7 @@ func TestDDLAddColumn(t *testing.T) { }) // Record position before the next few statements. - pos := masterPosition(t) + pos := primaryPosition(t) execStatements(t, []string{ "begin", "insert into ddl_test1 values(1, 'aaa')", @@ -1250,7 +1250,7 @@ func TestDDLDropColumn(t *testing.T) { defer execStatement(t, "drop table ddl_test2") // Record position before the next few statements. - pos := masterPosition(t) + pos := primaryPosition(t) execStatements(t, []string{ "insert into ddl_test2 values(1, 'aaa', 'ccc')", // Adding columns is allowed. @@ -1414,7 +1414,7 @@ func TestBestEffortNameInFieldEvent(t *testing.T) { execStatements(t, []string{ "create table vitess_test(id int, val varbinary(128), primary key(id))", }) - position := masterPosition(t) + position := primaryPosition(t) execStatements(t, []string{ "insert into vitess_test values(1, 'abc')", "rename table vitess_test to vitess_test_new", @@ -1468,7 +1468,7 @@ func TestInternalTables(t *testing.T) { "create table _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431(id int, val varbinary(128), primary key(id))", "create table _product_old(id int, val varbinary(128), primary key(id))", }) - position := masterPosition(t) + position := primaryPosition(t) execStatements(t, []string{ "insert into vitess_test values(1, 'abc')", "insert into _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho values(1, 'abc')", @@ -1764,7 +1764,7 @@ func TestMinimalMode(t *testing.T) { engine.se.Reload(context.Background()) // Record position before the next few statements. 
- pos := masterPosition(t) + pos := primaryPosition(t) execStatements(t, []string{ "set @@session.binlog_row_image='minimal'", "update t1 set val1='bbb' where id=1", @@ -1856,7 +1856,7 @@ func TestNoFutureGTID(t *testing.T) { }) engine.se.Reload(context.Background()) - pos := masterPosition(t) + pos := primaryPosition(t) t.Logf("current position: %v", pos) // Both mysql and mariadb have '-' in their gtids. // Invent a GTID in the future. @@ -2086,7 +2086,7 @@ func expectLog(ctx context.Context, t *testing.T, input interface{}, ch <-chan [ func startStream(ctx context.Context, t *testing.T, filter *binlogdatapb.Filter, position string, tablePKs []*binlogdatapb.TableLastPK) (*sync.WaitGroup, <-chan []*binlogdatapb.VEvent) { switch position { case "": - position = masterPosition(t) + position = primaryPosition(t) case "vscopy": position = "" } @@ -2141,7 +2141,7 @@ func execStatements(t *testing.T, queries []string) { } } -func masterPosition(t *testing.T) string { +func primaryPosition(t *testing.T) string { t.Helper() // We use the engine's cp because there is one test that overrides // the flavor to FilePos. If so, we have to obtain the position diff --git a/go/vt/worker/split_clone_flaky_test.go b/go/vt/worker/split_clone_flaky_test.go index 346d510ed3..c3021e196e 100644 --- a/go/vt/worker/split_clone_flaky_test.go +++ b/go/vt/worker/split_clone_flaky_test.go @@ -78,10 +78,10 @@ type splitCloneTestCase struct { sourceRdonlyQs []*testQueryService // Destination tablets. - leftMasterFakeDb *fakesqldb.DB - leftMasterQs *testQueryService - rightMasterFakeDb *fakesqldb.DB - rightMasterQs *testQueryService + leftPrimaryFakeDb *fakesqldb.DB + leftPrimaryQs *testQueryService + rightPrimaryFakeDb *fakesqldb.DB + rightPrimaryQs *testQueryService // leftReplica is used by the reparent test. leftReplica *testlib.FakeTablet @@ -153,9 +153,9 @@ func (tc *splitCloneTestCase) setUpWithConcurrency(v3 bool, concurrency, writeQu // Create the fake databases. sourceRdonlyFakeDB := sourceRdonlyFakeDB(tc.t, "vt_ks", "table1", splitCloneTestMin, splitCloneTestMax) - tc.leftMasterFakeDb = fakesqldb.New(tc.t).SetName("leftMaster").OrderMatters() + tc.leftPrimaryFakeDb = fakesqldb.New(tc.t).SetName("leftPrimary").OrderMatters() tc.leftReplicaFakeDb = fakesqldb.New(tc.t).SetName("leftReplica").OrderMatters() - tc.rightMasterFakeDb = fakesqldb.New(tc.t).SetName("rightMaster").OrderMatters() + tc.rightPrimaryFakeDb = fakesqldb.New(tc.t).SetName("rightPrimary").OrderMatters() sourceMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil, testlib.TabletKeyspaceShard(tc.t, "ks", "-80")) @@ -164,8 +164,8 @@ func (tc *splitCloneTestCase) setUpWithConcurrency(v3 bool, concurrency, writeQu sourceRdonly2 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 2, topodatapb.TabletType_RDONLY, sourceRdonlyFakeDB, testlib.TabletKeyspaceShard(tc.t, "ks", "-80")) - leftMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 10, - topodatapb.TabletType_PRIMARY, tc.leftMasterFakeDb, testlib.TabletKeyspaceShard(tc.t, "ks", "-40")) + leftPrimary := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 10, + topodatapb.TabletType_PRIMARY, tc.leftPrimaryFakeDb, testlib.TabletKeyspaceShard(tc.t, "ks", "-40")) // leftReplica is used by the reparent test. 
leftReplica := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 11, topodatapb.TabletType_REPLICA, tc.leftReplicaFakeDb, testlib.TabletKeyspaceShard(tc.t, "ks", "-40")) @@ -175,15 +175,15 @@ func (tc *splitCloneTestCase) setUpWithConcurrency(v3 bool, concurrency, writeQu leftRdonly2 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 13, topodatapb.TabletType_RDONLY, nil, testlib.TabletKeyspaceShard(tc.t, "ks", "-40")) - rightMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 20, - topodatapb.TabletType_PRIMARY, tc.rightMasterFakeDb, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80")) + rightPrimary := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 20, + topodatapb.TabletType_PRIMARY, tc.rightPrimaryFakeDb, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80")) rightRdonly1 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80")) rightRdonly2 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 23, topodatapb.TabletType_RDONLY, nil, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80")) tc.tablets = []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, - leftMaster, tc.leftReplica, leftRdonly1, leftRdonly2, rightMaster, rightRdonly1, rightRdonly2} + leftPrimary, tc.leftReplica, leftRdonly1, leftRdonly2, rightPrimary, rightRdonly1, rightRdonly2} // add the topo and schema data we'll need if err := tc.ts.CreateShard(ctx, "ks", "80-"); err != nil { @@ -251,22 +251,22 @@ func (tc *splitCloneTestCase) setUpWithConcurrency(v3 bool, concurrency, writeQu insertsPerThread := math.Ceil(float64(rowsPerThread) / float64(writeQueryMaxRows)) insertsTotal := int(insertsPerThread) * concurrency for i := 1; i <= insertsTotal; i++ { - tc.leftMasterFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil) + tc.leftPrimaryFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil) // leftReplica is unused by default. - tc.rightMasterFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil) + tc.rightPrimaryFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil) } // Fake stream health responses because vtworker needs them to find the master. 
- shqs := fakes.NewStreamHealthQueryService(leftMaster.Target()) + shqs := fakes.NewStreamHealthQueryService(leftPrimary.Target()) shqs.AddDefaultHealthResponse() - tc.leftMasterQs = newTestQueryService(tc.t, leftMaster.Target(), shqs, 0, 2, topoproto.TabletAliasString(leftMaster.Tablet.Alias), false /* omitKeyspaceID */) + tc.leftPrimaryQs = newTestQueryService(tc.t, leftPrimary.Target(), shqs, 0, 2, topoproto.TabletAliasString(leftPrimary.Tablet.Alias), false /* omitKeyspaceID */) tc.leftReplicaQs = fakes.NewStreamHealthQueryService(leftReplica.Target()) - shqs = fakes.NewStreamHealthQueryService(rightMaster.Target()) + shqs = fakes.NewStreamHealthQueryService(rightPrimary.Target()) shqs.AddDefaultHealthResponse() - tc.rightMasterQs = newTestQueryService(tc.t, rightMaster.Target(), shqs, 1, 2, topoproto.TabletAliasString(rightMaster.Tablet.Alias), false /* omitKeyspaceID */) - grpcqueryservice.Register(leftMaster.RPCServer, tc.leftMasterQs) + tc.rightPrimaryQs = newTestQueryService(tc.t, rightPrimary.Target(), shqs, 1, 2, topoproto.TabletAliasString(rightPrimary.Tablet.Alias), false /* omitKeyspaceID */) + grpcqueryservice.Register(leftPrimary.RPCServer, tc.leftPrimaryQs) grpcqueryservice.Register(leftReplica.RPCServer, tc.leftReplicaQs) - grpcqueryservice.Register(rightMaster.RPCServer, tc.rightMasterQs) + grpcqueryservice.Register(rightPrimary.RPCServer, tc.rightPrimaryQs) tc.defaultWorkerArgs = []string{ "SplitClone", @@ -300,9 +300,9 @@ func (tc *splitCloneTestCase) tearDown() { ft.RPCServer = nil ft.FakeMysqlDaemon = nil } - tc.leftMasterFakeDb.VerifyAllExecutedOrFail() + tc.leftPrimaryFakeDb.VerifyAllExecutedOrFail() tc.leftReplicaFakeDb.VerifyAllExecutedOrFail() - tc.rightMasterFakeDb.VerifyAllExecutedOrFail() + tc.rightPrimaryFakeDb.VerifyAllExecutedOrFail() } // testQueryService is a local QueryService implementation to support the tests. @@ -644,8 +644,8 @@ func TestSplitCloneV2_Offline_FailOverStreamingQuery_NotAllowed(t *testing.T) { // vtworker fails due to the read error and may write less than all but the // last errored error. We cannot reliably expect any number of written rows. - defer tc.leftMasterFakeDb.DeleteAllEntries() - defer tc.rightMasterFakeDb.DeleteAllEntries() + defer tc.leftPrimaryFakeDb.DeleteAllEntries() + defer tc.rightPrimaryFakeDb.DeleteAllEntries() // Run the vtworker command. args := []string{"SplitClone", "--min_healthy_rdonly_tablets", "1"} @@ -679,8 +679,8 @@ func TestSplitCloneV2_Online_FailOverStreamingQuery(t *testing.T) { defer tc.tearDown() // In the online phase we won't enable filtered replication. Don't expect it. - tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(4) - tc.rightMasterFakeDb.DeleteAllEntriesAfterIndex(4) + tc.leftPrimaryFakeDb.DeleteAllEntriesAfterIndex(4) + tc.rightPrimaryFakeDb.DeleteAllEntriesAfterIndex(4) // Ensure that this test uses only the first tablet initially. tc.sourceRdonlyQs[1].AddHealthResponseWithReplicationLag(3600) @@ -740,10 +740,10 @@ func TestSplitCloneV2_Online_TabletsUnavailableDuringRestart(t *testing.T) { defer tc.tearDown() // In the online phase we won't enable filtered replication. Don't expect it. - tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(4) - tc.rightMasterFakeDb.DeleteAllEntriesAfterIndex(4) + tc.leftPrimaryFakeDb.DeleteAllEntriesAfterIndex(4) + tc.rightPrimaryFakeDb.DeleteAllEntriesAfterIndex(4) // The last row will never make it. Don't expect it. 
- tc.rightMasterFakeDb.DeleteAllEntriesAfterIndex(3) + tc.rightPrimaryFakeDb.DeleteAllEntriesAfterIndex(3) // Ensure that this test uses only the first tablet initially. tc.sourceRdonlyQs[1].AddHealthResponseWithNotServing() @@ -793,8 +793,8 @@ func TestSplitCloneV2_Online(t *testing.T) { defer tc.tearDown() // In the online phase we won't enable filtered replication. Don't expect it. - tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(29) - tc.rightMasterFakeDb.DeleteAllEntriesAfterIndex(29) + tc.leftPrimaryFakeDb.DeleteAllEntriesAfterIndex(29) + tc.rightPrimaryFakeDb.DeleteAllEntriesAfterIndex(29) // Run the vtworker command. args := make([]string, len(tc.defaultWorkerArgs)) @@ -825,11 +825,11 @@ func TestSplitCloneV2_Online_Offline(t *testing.T) { // When the online clone inserted the last rows, modify the destination test // query service such that it will return them as well. - tc.leftMasterFakeDb.GetEntry(29).AfterFunc = func() { - tc.leftMasterQs.addGeneratedRows(100, 200) + tc.leftPrimaryFakeDb.GetEntry(29).AfterFunc = func() { + tc.leftPrimaryQs.addGeneratedRows(100, 200) } - tc.rightMasterFakeDb.GetEntry(29).AfterFunc = func() { - tc.rightMasterQs.addGeneratedRows(100, 200) + tc.rightPrimaryFakeDb.GetEntry(29).AfterFunc = func() { + tc.rightPrimaryQs.addGeneratedRows(100, 200) } // Run the vtworker command. @@ -880,28 +880,28 @@ func TestSplitCloneV2_Offline_Reconciliation(t *testing.T) { // The destination has rows 100-190 with the source in common. // Rows 191-200 are extraneous on the destination. - tc.leftMasterQs.addGeneratedRows(100, 200) - tc.rightMasterQs.addGeneratedRows(100, 200) + tc.leftPrimaryQs.addGeneratedRows(100, 200) + tc.rightPrimaryQs.addGeneratedRows(100, 200) // But some data is outdated data and must be updated. - tc.leftMasterQs.modifyFirstRows(2) - tc.rightMasterQs.modifyFirstRows(2) + tc.leftPrimaryQs.modifyFirstRows(2) + tc.rightPrimaryQs.modifyFirstRows(2) // The destination tablets should see inserts, updates and deletes. // Clear the entries added by setUp() because the reconciliation will // produce different statements in this test case. - tc.leftMasterFakeDb.DeleteAllEntries() - tc.rightMasterFakeDb.DeleteAllEntries() + tc.leftPrimaryFakeDb.DeleteAllEntries() + tc.rightPrimaryFakeDb.DeleteAllEntries() // Update statements. (One query will update one row.) - tc.leftMasterFakeDb.AddExpectedQuery("UPDATE `vt_ks`.`table1` SET `msg`='Text for 100',`keyspace_id`=2305843009213693952 WHERE `id`=100", nil) - tc.leftMasterFakeDb.AddExpectedQuery("UPDATE `vt_ks`.`table1` SET `msg`='Text for 102',`keyspace_id`=2305843009213693952 WHERE `id`=102", nil) - tc.rightMasterFakeDb.AddExpectedQuery("UPDATE `vt_ks`.`table1` SET `msg`='Text for 101',`keyspace_id`=6917529027641081856 WHERE `id`=101", nil) - tc.rightMasterFakeDb.AddExpectedQuery("UPDATE `vt_ks`.`table1` SET `msg`='Text for 103',`keyspace_id`=6917529027641081856 WHERE `id`=103", nil) + tc.leftPrimaryFakeDb.AddExpectedQuery("UPDATE `vt_ks`.`table1` SET `msg`='Text for 100',`keyspace_id`=2305843009213693952 WHERE `id`=100", nil) + tc.leftPrimaryFakeDb.AddExpectedQuery("UPDATE `vt_ks`.`table1` SET `msg`='Text for 102',`keyspace_id`=2305843009213693952 WHERE `id`=102", nil) + tc.rightPrimaryFakeDb.AddExpectedQuery("UPDATE `vt_ks`.`table1` SET `msg`='Text for 101',`keyspace_id`=6917529027641081856 WHERE `id`=101", nil) + tc.rightPrimaryFakeDb.AddExpectedQuery("UPDATE `vt_ks`.`table1` SET `msg`='Text for 103',`keyspace_id`=6917529027641081856 WHERE `id`=103", nil) // Insert statements. (All are combined in one.) 
- tc.leftMasterFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (96,'Text for 96',2305843009213693952),(98,'Text for 98',2305843009213693952)", nil) - tc.rightMasterFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (97,'Text for 97',6917529027641081856),(99,'Text for 99',6917529027641081856)", nil) + tc.leftPrimaryFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (96,'Text for 96',2305843009213693952),(98,'Text for 98',2305843009213693952)", nil) + tc.rightPrimaryFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (97,'Text for 97',6917529027641081856),(99,'Text for 99',6917529027641081856)", nil) // Delete statements. (All are combined in one.) - tc.leftMasterFakeDb.AddExpectedQuery("DELETE FROM `vt_ks`.`table1` WHERE (`id`=190) OR (`id`=192) OR (`id`=194) OR (`id`=196) OR (`id`=198)", nil) - tc.rightMasterFakeDb.AddExpectedQuery("DELETE FROM `vt_ks`.`table1` WHERE (`id`=191) OR (`id`=193) OR (`id`=195) OR (`id`=197) OR (`id`=199)", nil) + tc.leftPrimaryFakeDb.AddExpectedQuery("DELETE FROM `vt_ks`.`table1` WHERE (`id`=190) OR (`id`=192) OR (`id`=194) OR (`id`=196) OR (`id`=198)", nil) + tc.rightPrimaryFakeDb.AddExpectedQuery("DELETE FROM `vt_ks`.`table1` WHERE (`id`=191) OR (`id`=193) OR (`id`=195) OR (`id`=197) OR (`id`=199)", nil) // Run the vtworker command. if err := runCommand(t, tc.wi, tc.wi.wr, tc.defaultWorkerArgs); err != nil { @@ -978,8 +978,8 @@ func TestSplitCloneV2_RetryDueToReadonly(t *testing.T) { *executeFetchRetryTime = 1 * time.Millisecond // Provoke a retry to test the error handling. - tc.leftMasterFakeDb.AddExpectedQueryAtIndex(0, "INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", errReadOnly) - tc.rightMasterFakeDb.AddExpectedQueryAtIndex(0, "INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", errReadOnly) + tc.leftPrimaryFakeDb.AddExpectedQueryAtIndex(0, "INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", errReadOnly) + tc.rightPrimaryFakeDb.AddExpectedQueryAtIndex(0, "INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", errReadOnly) // Run the vtworker command. if err := runCommand(t, tc.wi, tc.wi.wr, tc.defaultWorkerArgs); err != nil { @@ -1017,22 +1017,22 @@ func TestSplitCloneV2_NoMasterAvailable(t *testing.T) { tc.leftReplicaFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil) // During the 29th write, let the MASTER disappear. - tc.leftMasterFakeDb.GetEntry(28).AfterFunc = func() { + tc.leftPrimaryFakeDb.GetEntry(28).AfterFunc = func() { t.Logf("setting MASTER tablet to REPLICA") - tc.leftMasterQs.UpdateType(topodatapb.TabletType_REPLICA) - tc.leftMasterQs.AddDefaultHealthResponse() + tc.leftPrimaryQs.UpdateType(topodatapb.TabletType_REPLICA) + tc.leftPrimaryQs.AddDefaultHealthResponse() } // If the HealthCheck didn't pick up the change yet, the 30th write would // succeed. To prevent this from happening, replace it with an error. 
- tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28) - tc.leftMasterFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", errReadOnly) - tc.leftMasterFakeDb.EnableInfinite() - // vtworker may not retry on leftMaster again if HealthCheck picks up the + tc.leftPrimaryFakeDb.DeleteAllEntriesAfterIndex(28) + tc.leftPrimaryFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", errReadOnly) + tc.leftPrimaryFakeDb.EnableInfinite() + // vtworker may not retry on leftPrimary again if HealthCheck picks up the // change very fast. In that case, the error was never encountered. // Delete it or verifyAllExecutedOrFail() will fail because it was not // processed. - defer tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28) + defer tc.leftPrimaryFakeDb.DeleteAllEntriesAfterIndex(28) // Wait for a retry due to NoMasterAvailable to happen, expect the 30th write // on leftReplica and change leftReplica from REPLICA to MASTER. From 5817942e71904974f9d41fbbc8aab522360f2188 Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Sat, 7 Aug 2021 15:43:58 +0200 Subject: [PATCH 3/5] Change master to primary in comments where applicable Signed-off-by: Rohit Nayak --- examples/compose/client.go | 4 +- examples/compose/vtcompose/vtcompose.go | 4 +- go/cmd/vtbackup/vtbackup.go | 2 +- go/cmd/vtctldclient/cli/shards.go | 6 +- go/mysql/flavor.go | 2 +- go/test/endtoend/cluster/cluster_process.go | 2 +- go/test/endtoend/keyspace/keyspace_test.go | 6 +- go/test/endtoend/mysqlctl/mysqlctl_test.go | 2 +- go/test/endtoend/mysqlctld/mysqlctld_test.go | 2 +- go/test/endtoend/reparent/reparent_test.go | 2 +- .../endtoend/tabletmanager/commands_test.go | 2 +- .../tabletmanager/lock_unlock_test.go | 8 +- .../reservedconn/reconnect2/main_test.go | 4 +- .../automation/horizontal_resharding_task.go | 2 +- go/vt/automation/vertical_split_task.go | 2 +- go/vt/automation/vertical_split_task_test.go | 2 +- .../wait_for_filtered_replication_task.go | 2 +- go/vt/binlog/binlog_streamer.go | 2 +- go/vt/discovery/healthcheck.go | 16 ++-- go/vt/discovery/healthcheck_test.go | 18 ++-- go/vt/discovery/legacy_healthcheck.go | 8 +- go/vt/discovery/legacy_tablet_stats_cache.go | 14 +-- .../legacy_tablet_stats_cache_test.go | 10 +- go/vt/discovery/tablet_health_check.go | 6 +- go/vt/discovery/tablet_picker_test.go | 2 +- go/vt/mysqlctl/backupengine.go | 2 +- go/vt/mysqlctl/cmd.go | 4 +- go/vt/mysqlctl/query.go | 2 +- go/vt/mysqlctl/reparent.go | 8 +- go/vt/mysqlctl/replication.go | 16 ++-- go/vt/mysqlctl/replication_test.go | 2 +- go/vt/mysqlctl/tmutils/permissions.go | 2 +- go/vt/orchestrator/app/cli.go | 2 +- go/vt/orchestrator/config/config.go | 44 ++++----- go/vt/orchestrator/http/api.go | 28 +++--- go/vt/orchestrator/inst/analysis.go | 2 +- go/vt/orchestrator/inst/analysis_dao.go | 8 +- .../inst/candidate_database_instance_dao.go | 4 +- go/vt/orchestrator/inst/cluster.go | 6 +- go/vt/orchestrator/inst/cluster_alias_dao.go | 2 +- go/vt/orchestrator/inst/downtime_dao.go | 6 +- go/vt/orchestrator/inst/durability.go | 2 +- go/vt/orchestrator/inst/instance.go | 16 ++-- go/vt/orchestrator/inst/instance_dao.go | 62 ++++++------ go/vt/orchestrator/inst/instance_topology.go | 96 +++++++++---------- .../inst/instance_topology_dao.go | 8 +- go/vt/orchestrator/inst/tablet_dao.go | 6 +- go/vt/orchestrator/logic/orchestrator.go | 4 +- go/vt/orchestrator/logic/tablet_discovery.go | 4 +- go/vt/orchestrator/logic/topology_recovery.go | 78 +++++++-------- 
.../tabletmanagerdata/tabletmanagerdata.pb.go | 4 +- .../tabletmanagerservice_grpc.pb.go | 36 +++---- go/vt/proto/vtctldata/vtctldata.pb.go | 2 +- go/vt/proto/vttest/vttest.pb.go | 2 +- go/vt/schemamanager/schemamanager_test.go | 2 +- go/vt/schemamanager/tablet_executor.go | 4 +- go/vt/schemamanager/tablet_executor_test.go | 2 +- go/vt/throttler/demo/throttler_demo.go | 16 ++-- go/vt/throttler/max_replication_lag_module.go | 2 +- go/vt/topo/conn.go | 16 ++-- go/vt/topo/consultopo/election.go | 4 +- go/vt/topo/etcd2topo/election.go | 4 +- go/vt/topo/etcd2topo/error.go | 2 +- go/vt/topo/etcd2topo/lock.go | 2 +- go/vt/topo/k8stopo/election.go | 4 +- go/vt/topo/k8stopo/lock.go | 2 +- go/vt/topo/keyspace.go | 4 +- go/vt/topo/keyspace_test.go | 2 +- go/vt/topo/locks.go | 2 +- go/vt/topo/memorytopo/election.go | 2 +- go/vt/topo/memorytopo/memorytopo.go | 2 +- go/vt/topo/shard.go | 10 +- go/vt/topo/tablet.go | 8 +- go/vt/topo/test/election.go | 14 +-- go/vt/topo/topotests/srv_keyspace_test.go | 2 +- go/vt/topo/zk2topo/election.go | 12 +-- go/vt/topotools/rebuild_keyspace.go | 2 +- go/vt/topotools/tablet.go | 14 +-- go/vt/topotools/utils.go | 4 +- go/vt/vtcombo/tablet_map.go | 6 +- go/vt/vtctl/grpcvtctldserver/server.go | 26 ++--- go/vt/vtctl/grpcvtctldserver/server_test.go | 2 +- go/vt/vtctl/grpcvtctldserver/testutil/util.go | 8 +- go/vt/vtctl/grpcvtctldserver/topo.go | 2 +- .../reparentutil/emergency_reparenter.go | 6 +- .../reparentutil/emergency_reparenter_test.go | 2 +- .../vtctl/reparentutil/planned_reparenter.go | 4 +- .../reparentutil/planned_reparenter_test.go | 4 +- go/vt/vtctl/reparentutil/util.go | 8 +- go/vt/vtctld/tablet_stats_cache.go | 2 +- go/vt/vtctld/tablet_stats_cache_test.go | 2 +- go/vt/vtctld/workflow.go | 2 +- go/vt/vtgate/buffer/buffer.go | 14 +-- go/vt/vtgate/buffer/buffer_test.go | 14 +-- go/vt/vtgate/buffer/shard_buffer.go | 22 ++--- go/vt/vtgate/discoverygateway.go | 6 +- go/vt/vtgate/discoverygateway_test.go | 2 +- go/vt/vtgate/executor.go | 2 +- go/vt/vtgate/planbuilder/show.go | 2 +- go/vt/vtgate/scatter_conn.go | 2 +- go/vt/vtgate/tabletgateway.go | 4 +- go/vt/vtgate/vcursor_impl.go | 2 +- go/vt/vtgr/controller/diagnose.go | 6 +- go/vt/vtgr/controller/refresh.go | 2 +- go/vt/vttablet/endtoend/config_test.go | 2 +- go/vt/vttablet/endtoend/framework/client.go | 2 +- go/vt/vttablet/endtoend/sequence_test.go | 2 +- go/vt/vttablet/onlineddl/executor.go | 2 +- .../fakes/stream_health_query_service.go | 2 +- go/vt/vttablet/tabletmanager/replmanager.go | 2 +- go/vt/vttablet/tabletmanager/restore.go | 36 +++---- go/vt/vttablet/tabletmanager/rpc_actions.go | 2 +- go/vt/vttablet/tabletmanager/rpc_backup.go | 4 +- .../vttablet/tabletmanager/rpc_replication.go | 70 +++++++------- go/vt/vttablet/tabletmanager/shard_sync.go | 36 +++---- .../vttablet/tabletmanager/shard_sync_test.go | 2 +- go/vt/vttablet/tabletmanager/tm_init.go | 16 ++-- go/vt/vttablet/tabletmanager/tm_state.go | 2 +- .../tabletmanager/vreplication/vcopier.go | 2 +- .../vttablet/tabletserver/health_streamer.go | 2 +- .../tabletserver/health_streamer_test.go | 4 +- .../tabletserver/repltracker/reader.go | 2 +- .../tabletserver/repltracker/repltracker.go | 4 +- .../tabletserver/repltracker/writer.go | 2 +- go/vt/vttablet/tabletserver/schema/engine.go | 6 +- go/vt/vttablet/tabletserver/state_manager.go | 4 +- .../tabletserver/state_manager_test.go | 2 +- go/vt/vttablet/tabletserver/tx_engine.go | 2 +- .../tabletserver/txthrottler/tx_throttler.go | 4 +- .../tabletserver/vstreamer/vstreamer.go | 2 +- 
go/vt/vttablet/tmclient/rpc_client_api.go | 26 ++--- go/vt/worker/executor.go | 10 +- go/vt/worker/legacy_split_clone.go | 6 +- go/vt/worker/legacy_split_clone_test.go | 12 +-- go/vt/worker/multi_split_diff.go | 14 +-- go/vt/worker/split_clone.go | 16 ++-- go/vt/worker/split_clone_flaky_test.go | 14 +-- go/vt/worker/split_diff.go | 20 ++-- go/vt/worker/vertical_split_clone_test.go | 2 +- go/vt/worker/vertical_split_diff.go | 20 ++-- go/vt/workflow/manager.go | 4 +- go/vt/workflow/node.go | 2 +- go/vt/wrangler/keyspace.go | 38 ++++---- go/vt/wrangler/materializer.go | 4 +- go/vt/wrangler/permissions.go | 12 +-- go/vt/wrangler/reparent.go | 18 ++-- go/vt/wrangler/schema.go | 16 ++-- go/vt/wrangler/shard.go | 2 +- go/vt/wrangler/tablet.go | 30 +++--- go/vt/wrangler/tablet_test.go | 12 +-- go/vt/wrangler/testlib/backup_test.go | 2 +- go/vt/wrangler/traffic_switcher_env_test.go | 2 +- go/vt/wrangler/validator.go | 2 +- go/vt/wrangler/vdiff_env_test.go | 2 +- go/vt/wrangler/version.go | 8 +- go/vt/wrangler/vexec.go | 16 ++-- test/client.go | 4 +- 157 files changed, 704 insertions(+), 704 deletions(-) diff --git a/examples/compose/client.go b/examples/compose/client.go index 3d97a563f0..c5ead7602a 100644 --- a/examples/compose/client.go +++ b/examples/compose/client.go @@ -72,7 +72,7 @@ func main() { } } - // Read it back from the master. + // Read it back from the primary. fmt.Println("Reading from master...") rows, err := db.Query("SELECT page, time_created_ns, message FROM messages") if err != nil { @@ -94,7 +94,7 @@ func main() { } // Read from a replica. - // Note that this may be behind master due to replication lag. + // Note that this may be behind primary due to replication lag. fmt.Println("Reading from replica...") dbr, err := vitessdriver.Open(*server, "@replica") diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go index 0472de0cbb..9bd228090b 100644 --- a/examples/compose/vtcompose/vtcompose.go +++ b/examples/compose/vtcompose/vtcompose.go @@ -517,7 +517,7 @@ func applyShardPatches( } func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo, opts vtOptions) string { - aliases := []int{tabAlias + 1} // master alias, e.g. 201 + aliases := []int{tabAlias + 1} // primary alias, e.g. 201 for i := 0; i < keyspaceData.replicaTablets; i++ { aliases = append(aliases, tabAlias+2+i) // replica aliases, e.g. 202, 203, ... } @@ -546,7 +546,7 @@ func generateExternalmaster( opts vtOptions, ) string { - aliases := []int{tabAlias + 1} // master alias, e.g. 201 + aliases := []int{tabAlias + 1} // primary alias, e.g. 201 for i := 0; i < keyspaceData.replicaTablets; i++ { aliases = append(aliases, tabAlias+2+i) // replica aliases, e.g. 202, 203, ... } diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go index f7b86d97f7..4fb160b33b 100644 --- a/go/cmd/vtbackup/vtbackup.go +++ b/go/cmd/vtbackup/vtbackup.go @@ -319,7 +319,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back } // Get the current primary replication position, and wait until we catch up - // to that point. We do this instead of looking at Seconds_Behind_Master + // to that point. We do this instead of looking at ReplicationLag // because that value can // sometimes lie and tell you there's 0 lag when actually replication is // stopped. 
Also, if replication is making progress but is too slow to ever diff --git a/go/cmd/vtctldclient/cli/shards.go b/go/cmd/vtctldclient/cli/shards.go index f45b8324c0..93d7529d9a 100644 --- a/go/cmd/vtctldclient/cli/shards.go +++ b/go/cmd/vtctldclient/cli/shards.go @@ -71,8 +71,8 @@ func (rts rTablets) Less(i, j int) bool { return false } - // the type proto has MASTER first, so sort by that. Will show - // the MASTER first, then each replica type sorted by + // the type proto has PRIMARY first, so sort by that. Will show + // the PRIMARY first, then each replica type sorted by // replication position. if l.Tablet.Type < r.Tablet.Type { return true @@ -101,7 +101,7 @@ func (rts rTablets) Less(i, j int) bool { // // The sorting order is: // 1. Tablets that do not have a replication Status. -// 2. Any tablets of type MASTER. +// 2. Any tablets of type PRIMARY. // 3. Remaining tablets sorted by comparing replication positions. func SortedReplicatingTablets(tabletMap map[string]*topodatapb.Tablet, replicationStatuses map[string]*replicationdatapb.Status) []*ReplicatingTablet { rtablets := make([]*ReplicatingTablet, 0, len(tabletMap)) diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index 0e99b381a8..2902308d84 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -92,7 +92,7 @@ type flavor interface { setReplicationPositionCommands(pos Position) []string // changeReplicationSourceArg returns the specific parameter to add to - // a "change master" command. + // a "change primary" command. changeReplicationSourceArg() string // status returns the result of the appropriate status command, diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index cd61c061b1..9b8f19b392 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -149,7 +149,7 @@ func (shard *Shard) Rdonly() *Vttablet { } // Replica get the last but one tablet which is replica -// Mostly we have either 3 tablet setup [master, replica, rdonly] +// Mostly we have either 3 tablet setup [primary, replica, rdonly] func (shard *Shard) Replica() *Vttablet { for idx, tablet := range shard.Vttablets { if tablet.Type == "replica" && idx > 0 { diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 64c56aa95a..e21e6ed309 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -325,7 +325,7 @@ func TestShardCountForAllKeyspaces(t *testing.T) { func testShardCountForKeyspace(t *testing.T, keyspace string, count int) { srvKeyspace := getSrvKeyspace(t, cell, keyspace) - // for each served type MASTER REPLICA RDONLY, the shard ref count should match + // for each served type PRIMARY REPLICA RDONLY, the shard ref count should match for _, partition := range srvKeyspace.Partitions { if servedTypes[partition.ServedType] { assert.Equal(t, len(partition.ShardReferences), count) @@ -342,7 +342,7 @@ func TestShardNameForAllKeyspaces(t *testing.T) { func testShardNameForKeyspace(t *testing.T, keyspace string, shardNames []string) { srvKeyspace := getSrvKeyspace(t, cell, keyspace) - // for each served type MASTER REPLICA RDONLY, the shard ref count should match + // for each served type PRIMARY REPLICA RDONLY, the shard ref count should match for _, partition := range srvKeyspace.Partitions { if servedTypes[partition.ServedType] { for _, shardRef := range partition.ShardReferences { @@ -357,7 +357,7 @@ func TestKeyspaceToShardName(t *testing.T) { var 
id []byte srvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName) - // for each served type MASTER REPLICA RDONLY, the shard ref count should match + // for each served type PRIMARY REPLICA RDONLY, the shard ref count should match for _, partition := range srvKeyspace.Partitions { if partition.ServedType == topodata.TabletType_PRIMARY { for _, shardRef := range partition.ShardReferences { diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go b/go/test/endtoend/mysqlctl/mysqlctl_test.go index 4148857f39..dcbc3c601e 100644 --- a/go/test/endtoend/mysqlctl/mysqlctl_test.go +++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go @@ -94,7 +94,7 @@ func initCluster(shardNames []string, totalTabletsRequired int) { MySQLPort: clusterInstance.GetAndReservePort(), Alias: fmt.Sprintf("%s-%010d", clusterInstance.Cell, tabletUID), } - if i == 0 { // Make the first one as master + if i == 0 { // Make the first one as primary tablet.Type = "master" } // Start Mysqlctl process diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go index bdb969fd85..981174ee68 100644 --- a/go/test/endtoend/mysqlctld/mysqlctld_test.go +++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go @@ -94,7 +94,7 @@ func initCluster(shardNames []string, totalTabletsRequired int) error { MySQLPort: clusterInstance.GetAndReservePort(), Alias: fmt.Sprintf("%s-%010d", clusterInstance.Cell, tabletUID), } - if i == 0 { // Make the first one as master + if i == 0 { // Make the first one as primary tablet.Type = "master" } // Start Mysqlctld process diff --git a/go/test/endtoend/reparent/reparent_test.go b/go/test/endtoend/reparent/reparent_test.go index 92f723c881..2ef026f356 100644 --- a/go/test/endtoend/reparent/reparent_test.go +++ b/go/test/endtoend/reparent/reparent_test.go @@ -43,7 +43,7 @@ func TestMasterToSpareStateChangeImpossible(t *testing.T) { setupReparentCluster(t) defer teardownCluster() - // We cannot change a master to spare + // We cannot change a primary to spare out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", tab1.Alias, "spare") require.Error(t, err, out) require.Contains(t, out, "type change PRIMARY -> SPARE is not an allowed transition for ChangeTabletType") diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index a63dfc9431..55c7b9adfb 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -203,7 +203,7 @@ func runHookAndAssert(t *testing.T, params []string, expectedStatus string, expe } func TestShardReplicationFix(t *testing.T) { - // make sure the replica is in the replication graph, 2 nodes: 1 master, 1 replica + // make sure the replica is in the replication graph, 2 nodes: 1 primary, 1 replica defer cluster.PanicHandler(t) result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") diff --git a/go/test/endtoend/tabletmanager/lock_unlock_test.go b/go/test/endtoend/tabletmanager/lock_unlock_test.go index 6166d0e0fd..12a0dbe127 100644 --- a/go/test/endtoend/tabletmanager/lock_unlock_test.go +++ b/go/test/endtoend/tabletmanager/lock_unlock_test.go @@ -44,7 +44,7 @@ func TestLockAndUnlock(t *testing.T) { require.Nil(t, err) defer replicaConn.Close() - // first make sure that our writes to the master make it to the replica + // first make sure that our writes to the primary make it to the replica exec(t, conn, 
"delete from t1") exec(t, conn, "insert into t1(id, value) values(1,'a'), (2,'b')") checkDataOnReplica(t, replicaConn, `[[VARCHAR("a")] [VARCHAR("b")]]`) @@ -52,7 +52,7 @@ func TestLockAndUnlock(t *testing.T) { // now lock the replica err = tmcLockTables(ctx, replicaTablet.GrpcPort) require.Nil(t, err) - // make sure that writing to the master does not show up on the replica while locked + // make sure that writing to the primary does not show up on the replica while locked exec(t, conn, "insert into t1(id, value) values(3,'c')") checkDataOnReplica(t, replicaConn, `[[VARCHAR("a")] [VARCHAR("b")]]`) @@ -139,7 +139,7 @@ func TestLockAndTimeout(t *testing.T) { require.Nil(t, err) defer replicaConn.Close() - // first make sure that our writes to the master make it to the replica + // first make sure that our writes to the primary make it to the replica exec(t, masterConn, "insert into t1(id, value) values(1,'a')") checkDataOnReplica(t, replicaConn, `[[VARCHAR("a")]]`) @@ -147,7 +147,7 @@ func TestLockAndTimeout(t *testing.T) { err = tmcLockTables(ctx, replicaTablet.GrpcPort) require.Nil(t, err) - // make sure that writing to the master does not show up on the replica while locked + // make sure that writing to the primary does not show up on the replica while locked exec(t, masterConn, "insert into t1(id, value) values(2,'b')") checkDataOnReplica(t, replicaConn, `[[VARCHAR("a")]]`) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index 627f203647..25ae5ea6c2 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -109,14 +109,14 @@ func TestTabletChange(t *testing.T) { checkedExec(t, conn, "use @master") checkedExec(t, conn, "set sql_mode = ''") - // this will create reserved connection on master on -80 and 80- shards. + // this will create reserved connection on primary on -80 and 80- shards. checkedExec(t, conn, "select * from test") // Change Master err = clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) require.NoError(t, err) - // this should pass as there is new master tablet and is serving. + // this should pass as there is a new primary tablet and is serving. 
_, err = exec(t, conn, "select * from test") assert.NoError(t, err) } diff --git a/go/vt/automation/horizontal_resharding_task.go b/go/vt/automation/horizontal_resharding_task.go index 233c9a3944..9df5e7c18e 100644 --- a/go/vt/automation/horizontal_resharding_task.go +++ b/go/vt/automation/horizontal_resharding_task.go @@ -97,7 +97,7 @@ func (t *HorizontalReshardingTask) Run(parameters map[string]string) ([]*automat newTasks = append(newTasks, splitDiffTask) } - for _, servedType := range []string{"rdonly", "replica", "master"} { + for _, servedType := range []string{"rdonly", "replica", "primary"} { migrateServedTypesTasks := NewTaskContainer() for _, sourceShard := range sourceShards { AddTask(migrateServedTypesTasks, "MigrateServedTypesTask", map[string]string{ diff --git a/go/vt/automation/vertical_split_task.go b/go/vt/automation/vertical_split_task.go index a490b5a25b..0f0b3faab4 100644 --- a/go/vt/automation/vertical_split_task.go +++ b/go/vt/automation/vertical_split_task.go @@ -97,7 +97,7 @@ func (t *VerticalSplitTask) Run(parameters map[string]string) ([]*automationpb.T newTasks = append(newTasks, vSplitDiffTask) } - for _, servedType := range []string{"rdonly", "replica", "master"} { + for _, servedType := range []string{"rdonly", "replica", "primary"} { migrateServedTypesTasks := NewTaskContainer() for _, shard := range shards { AddTask(migrateServedTypesTasks, "MigrateServedFromTask", map[string]string{ diff --git a/go/vt/automation/vertical_split_task_test.go b/go/vt/automation/vertical_split_task_test.go index 86356056e9..2873ae87f1 100644 --- a/go/vt/automation/vertical_split_task_test.go +++ b/go/vt/automation/vertical_split_task_test.go @@ -50,7 +50,7 @@ func TestVerticalSplitTask(t *testing.T) { vtworker.RegisterResult([]string{"VerticalSplitDiff", "--min_healthy_rdonly_tablets=1", "destination_keyspace/0"}, "", nil) vtctld.RegisterResult([]string{"MigrateServedFrom", "destination_keyspace/0", "rdonly"}, "", nil) vtctld.RegisterResult([]string{"MigrateServedFrom", "destination_keyspace/0", "replica"}, "", nil) - vtctld.RegisterResult([]string{"MigrateServedFrom", "destination_keyspace/0", "master"}, + vtctld.RegisterResult([]string{"MigrateServedFrom", "destination_keyspace/0", "primary"}, "ALL_DONE", nil) diff --git a/go/vt/automation/wait_for_filtered_replication_task.go b/go/vt/automation/wait_for_filtered_replication_task.go index cca47ad2da..c5c6f91157 100644 --- a/go/vt/automation/wait_for_filtered_replication_task.go +++ b/go/vt/automation/wait_for_filtered_replication_task.go @@ -23,7 +23,7 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" ) -// WaitForFilteredReplicationTask runs vtctl WaitForFilteredReplication to block until the destination master +// WaitForFilteredReplicationTask runs vtctl WaitForFilteredReplication to block until the destination primary // (i.e. the receiving side of the filtered replication) has caught up to max_delay with the source shard. type WaitForFilteredReplicationTask struct { } diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index 986b1bfdf3..51f9d84022 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -322,7 +322,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog // tells us the size of the event header. 
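The ordering contract described in this comment, a fake ROTATE_EVENT naming the current binlog file, then a FORMAT_DESCRIPTION_EVENT, and only then regular events, can be sketched as follows. The event type here is a simplified stand-in, not the real mysql.BinlogEvent interface:

package main

import "fmt"

// binlogEvent is just enough shape to show the ordering logic; the real
// events carry raw bytes and are decoded using the format description.
type binlogEvent struct {
	kind string // "ROTATE", "FORMAT_DESCRIPTION", "QUERY", ...
	data string
}

func parseEvents(events []binlogEvent) error {
	haveFormat := false
	for _, ev := range events {
		if !haveFormat {
			switch ev.kind {
			case "ROTATE":
				// The primary sends a fake ROTATE_EVENT first to name
				// the current binlog file; skip it.
				continue
			case "FORMAT_DESCRIPTION":
				// Only after this event do we know header sizes and can
				// safely parse everything else.
				haveFormat = true
				continue
			default:
				return fmt.Errorf("got %s before FORMAT_DESCRIPTION_EVENT", ev.kind)
			}
		}
		fmt.Println("processing", ev.kind, ev.data)
	}
	return nil
}

func main() {
	stream := []binlogEvent{
		{kind: "ROTATE", data: "binlog.000042"},
		{kind: "FORMAT_DESCRIPTION"},
		{kind: "QUERY", data: "insert into t1 values (1)"},
	}
	if err := parseEvents(stream); err != nil {
		fmt.Println(err)
	}
}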
if format.IsZero() { // The only thing that should come before the FORMAT_DESCRIPTION_EVENT - // is a fake ROTATE_EVENT, which the master sends to tell us the name + // is a fake ROTATE_EVENT, which the primary sends to tell us the name // of the current log file. if ev.IsRotate() { continue diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index f8542c9577..092ce88c32 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -73,7 +73,7 @@ var ( //TODO(deepthi): change these vars back to unexported when discoveryGateway is removed - // AllowedTabletTypes is the list of allowed tablet types. e.g. {MASTER, REPLICA} + // AllowedTabletTypes is the list of allowed tablet types. e.g. {PRIMARY, REPLICA} AllowedTabletTypes []topodata.TabletType // TabletFilters are the keyspace|shard or keyrange filters to apply to the full set of tablets TabletFilters flagutil.StringListValue @@ -192,12 +192,12 @@ type HealthCheck interface { // GetHealthyTabletStats returns only the healthy tablets. // The returned array is owned by the caller. // For TabletType_PRIMARY, this will only return at most one entry, - // the most recent tablet of type master. + // the most recent tablet of type primary. // This returns a copy of the data so that callers can access without // synchronization GetHealthyTabletStats(target *query.Target) []*TabletHealth - // Subscribe adds a listener. Used by vtgate buffer to learn about master changes. + // Subscribe adds a listener. Used by vtgate buffer to learn about primary changes. Subscribe() chan *TabletHealth // Unsubscribe removes a listener. @@ -261,7 +261,7 @@ type HealthCheckImpl struct { // localCell. // The localCell for this healthcheck // callback. -// A function to call when there is a master change. Used to notify vtgate's buffer to stop buffering. +// A function to call when there is a primary change. Used to notify vtgate's buffer to stop buffering. func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Duration, topoServer *topo.Server, localCell, cellsToWatch string) *HealthCheckImpl { log.Infof("loading tablets for cells: %v", cellsToWatch) @@ -467,7 +467,7 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Targ if !trivialUpdate { // We re-sort the healthy tablet list whenever we get a health update for tablets we can route to. - // Tablets from other cells for non-master targets should not trigger a re-sort; + // Tablets from other cells for non-primary targets should not trigger a re-sort; // they should also be excluded from healthy list. if th.Target.TabletType != topodata.TabletType_PRIMARY && hc.isIncluded(th.Target.TabletType, th.Tablet.Alias) { hc.recomputeHealthy(targetKey) @@ -501,7 +501,7 @@ func (hc *HealthCheckImpl) recomputeHealthy(key keyspaceShardTabletType) { hc.healthy[key] = FilterStatsByReplicationLag(allArray) } -// Subscribe adds a listener. Used by vtgate buffer to learn about master changes. +// Subscribe adds a listener. Used by vtgate buffer to learn about primary changes. func (hc *HealthCheckImpl) Subscribe() chan *TabletHealth { hc.subMu.Lock() defer hc.subMu.Unlock() @@ -590,7 +590,7 @@ func (hc *HealthCheckImpl) Close() error { // GetHealthyTabletStats returns only the healthy tablets. // The returned array is owned by the caller. // For TabletType_PRIMARY, this will only return at most one entry, -// the most recent tablet of type master. +// the most recent tablet of type primary. 
// This returns a copy of the data so that callers can access without // synchronization func (hc *HealthCheckImpl) GetHealthyTabletStats(target *query.Target) []*TabletHealth { @@ -606,7 +606,7 @@ func (hc *HealthCheckImpl) GetHealthyTabletStats(target *query.Target) []*Tablet // getTabletStats returns all tablets for the given target. // The returned array is owned by the caller. // For TabletType_PRIMARY, this will only return at most one entry, -// the most recent tablet of type master. +// the most recent tablet of type primary. func (hc *HealthCheckImpl) getTabletStats(target *query.Target) []*TabletHealth { var result []*TabletHealth hc.mu.Lock() diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index bb03c6e1bc..c8ec16646b 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -397,7 +397,7 @@ func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { // Stream error from tablet 1 fc1.errCh <- fmt.Errorf("some stream error") <-resultChan - // tablet 2 should still be the master + // tablet 2 should still be the primary a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}) mustMatch(t, health, a, "unexpected result") } @@ -835,7 +835,7 @@ func TestGetHealthyTablets(t *testing.T) { a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}) assert.Equal(t, 1, len(a), "Wrong number of results") - // second tablet turns into a master + // second tablet turns into a primary shr2 = &querypb.StreamHealthResponse{ TabletAlias: tablet2.Alias, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, @@ -859,11 +859,11 @@ func TestGetHealthyTablets(t *testing.T) { Stats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, PrimaryTermStartTime: 10, }} - // check we have a master now + // check we have a primary now a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}) mustMatch(t, want2, a, "unexpected result") - // reparent: old replica goes into master + // reparent: old replica goes into primary shr = &querypb.StreamHealthResponse{ TabletAlias: tablet.Alias, Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, @@ -881,13 +881,13 @@ func TestGetHealthyTablets(t *testing.T) { PrimaryTermStartTime: 20, }} - // check we lost all replicas, and master is new one + // check we lost all replicas, and primary is new one a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}) assert.Empty(t, a, "Wrong number of results") a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}) mustMatch(t, want, a, "unexpected result") - // old master sending an old ping should be ignored + // old primary sending an old ping should be ignored input2 <- shr2 <-resultChan a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}) @@ -899,7 +899,7 @@ func TestMasterInOtherCell(t *testing.T) { hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") defer hc.Close() - // add a tablet as master in different cell + // add a tablet as primary in different cell tablet := createTestTablet(1, "cell2", "host1") tablet.Type = topodatapb.TabletType_PRIMARY 
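The "at most one primary" rule these tests keep probing reduces to a comparison on the primary term start time: among serving primaries, the most recently promoted one wins, regardless of cell, and a stale ping from a demoted primary never displaces it. A minimal sketch with simplified types (the real TabletHealth struct carries much more):

package main

import "fmt"

// tabletHealth is a simplified stand-in for a health-check entry.
type tabletHealth struct {
	alias                string
	serving              bool
	primaryTermStartTime int64
}

// currentPrimary keeps only the serving primary with the most recent
// term start time, so an old update from a demoted primary is ignored.
func currentPrimary(candidates []tabletHealth) (tabletHealth, bool) {
	var best tabletHealth
	found := false
	for _, th := range candidates {
		if !th.serving {
			continue
		}
		if !found || th.primaryTermStartTime > best.primaryTermStartTime {
			best = th
			found = true
		}
	}
	return best, found
}

func main() {
	demoted := tabletHealth{alias: "cell1-0000000100", serving: true, primaryTermStartTime: 10}
	promoted := tabletHealth{alias: "cell2-0000000200", serving: true, primaryTermStartTime: 20}
	p, _ := currentPrimary([]tabletHealth{demoted, promoted})
	// Prints cell2-0000000200: the newer primary wins even from another cell.
	fmt.Println("primary:", p.alias)
}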
input := make(chan *querypb.StreamHealthResponse) @@ -939,13 +939,13 @@ func TestMasterInOtherCell(t *testing.T) { case err := <-fc.cbErrCh: require.Fail(t, "Unexpected error: %v", err) case got := <-resultChan: - // check that we DO receive health check update for MASTER in other cell + // check that we DO receive health check update for PRIMARY in other cell mustMatch(t, want, got, "Wrong TabletHealth data") case <-ticker.C: require.Fail(t, "Timed out waiting for HealthCheck update") } - // check that MASTER tablet from other cell IS in healthy tablet list + // check that PRIMARY tablet from other cell IS in healthy tablet list a := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}) require.Len(t, a, 1, "") mustMatch(t, want, a[0], "Expecting healthy master") diff --git a/go/vt/discovery/legacy_healthcheck.go b/go/vt/discovery/legacy_healthcheck.go index dbba5cdd9e..60b113ca3d 100644 --- a/go/vt/discovery/legacy_healthcheck.go +++ b/go/vt/discovery/legacy_healthcheck.go @@ -145,9 +145,9 @@ type LegacyTabletStats struct { // Serving describes if the tablet can be serving traffic. Serving bool // TabletExternallyReparentedTimestamp is the last timestamp - // that this tablet was either elected the master, or received + // that this tablet was either elected the primary, or received // a TabletExternallyReparented event. It is set to 0 if the - // tablet doesn't think it's a master. + // tablet doesn't think it's a primary. TabletExternallyReparentedTimestamp int64 // Stats is the current health status, as received by the // StreamHealth RPC (replication lag, ...). @@ -293,7 +293,7 @@ type LegacyHealthCheck interface { RegisterStats() // SetListener sets the listener for healthcheck // updates. sendDownEvents is used when a tablet changes type - // (from replica to master for instance). If the listener + // (from replica to primary for instance). If the listener // wants two events (Up=false on old type, Up=True on new // type), sendDownEvents should be set. Otherwise, the // healthcheck will only send one event (Up=true on new type). @@ -498,7 +498,7 @@ func (hc *LegacyHealthCheckImpl) updateHealth(ts *LegacyTabletStats, conn querys hc.listener.StatsUpdate(&oldts) } - // Track how often a tablet gets promoted to master. It is used for + // Track how often a tablet gets promoted to primary. It is used for // comparing against the variables in go/vtgate/buffer/variables.go. if oldts.Target.TabletType != topodatapb.TabletType_PRIMARY && ts.Target.TabletType == topodatapb.TabletType_PRIMARY { hcMasterPromotedCounters.Add([]string{ts.Target.Keyspace, ts.Target.Shard}, 1) diff --git a/go/vt/discovery/legacy_tablet_stats_cache.go b/go/vt/discovery/legacy_tablet_stats_cache.go index ac76e5bc96..6869ccb0a7 100644 --- a/go/vt/discovery/legacy_tablet_stats_cache.go +++ b/go/vt/discovery/legacy_tablet_stats_cache.go @@ -30,10 +30,10 @@ import ( // LegacyTabletStatsCache is a LegacyHealthCheckStatsListener that keeps both the // current list of available LegacyTabletStats, and a serving list: -// - for master tablets, only the current master is kept. -// - for non-master tablets, we filter the list using FilterLegacyStatsByReplicationLag. +// - for primary tablets, only the current primary is kept. +// - for non-primary tablets, we filter the list using FilterLegacyStatsByReplicationLag. // It keeps entries for all tablets in the cell(s) it's configured to serve for, -// and for the master independently of which cell it's in. 
+// and for the primary independently of which cell it's in. // Note the healthy tablet computation is done when we receive a tablet // update only, not at serving time. // Also note the cache may not have the last entry received by the tablet. @@ -41,7 +41,7 @@ import ( // keep its new update. type LegacyTabletStatsCache struct { // cell is the cell we are keeping all tablets for. - // Note we keep track of all master tablets in all cells. + // Note we keep track of all primary tablets in all cells. cell string // ts is the topo server in use. ts *topo.Server @@ -68,7 +68,7 @@ type legacyTabletStatsCacheEntry struct { func (e *legacyTabletStatsCacheEntry) updateHealthyMapForMaster(ts *LegacyTabletStats) { if ts.Up { - // We have an Up master. + // We have an Up primary. if len(e.healthy) == 0 { // We have a new Up server, just remember it. e.healthy = append(e.healthy, ts) @@ -92,7 +92,7 @@ func (e *legacyTabletStatsCacheEntry) updateHealthyMapForMaster(ts *LegacyTablet return } - // We have a Down master, remove it only if it's exactly the same. + // We have a Down primary, remove it only if it's exactly the same. if len(e.healthy) != 0 { if ts.Key == e.healthy[0].Key { // Same guy, remove it. @@ -285,7 +285,7 @@ func (tc *LegacyTabletStatsCache) GetTabletStats(keyspace, shard string, tabletT // GetHealthyTabletStats returns only the healthy targets. // The returned array is owned by the caller. // For TabletType_PRIMARY, this will only return at most one entry, -// the most recent tablet of type master. +// the most recent tablet of type primary. func (tc *LegacyTabletStatsCache) GetHealthyTabletStats(keyspace, shard string, tabletType topodatapb.TabletType) []LegacyTabletStats { e := tc.getEntry(keyspace, shard, tabletType) if e == nil { diff --git a/go/vt/discovery/legacy_tablet_stats_cache_test.go b/go/vt/discovery/legacy_tablet_stats_cache_test.go index 4b96e37a83..c88b6c0bd8 100644 --- a/go/vt/discovery/legacy_tablet_stats_cache_test.go +++ b/go/vt/discovery/legacy_tablet_stats_cache_test.go @@ -190,7 +190,7 @@ func TestLegacyTabletStatsCache(t *testing.T) { t.Errorf("unexpected result: %v", a) } - // second tablet turns into a master, we receive down + up + // second tablet turns into a primary, we receive down + up ts2.Serving = true ts2.Up = false tsc.StatsUpdate(ts2) @@ -205,13 +205,13 @@ func TestLegacyTabletStatsCache(t *testing.T) { t.Errorf("unexpected result: %v", a) } - // check we have a master now + // check we have a primary now a = tsc.GetTabletStats("k", "s", topodatapb.TabletType_PRIMARY) if len(a) != 1 || !ts2.DeepEqual(&a[0]) { t.Errorf("unexpected result: %v", a) } - // reparent: old replica goes into master + // reparent: old replica goes into primary ts1.Up = false tsc.StatsUpdate(ts1) ts1.Up = true @@ -219,7 +219,7 @@ func TestLegacyTabletStatsCache(t *testing.T) { ts1.TabletExternallyReparentedTimestamp = 20 tsc.StatsUpdate(ts1) - // check we lost all replicas, and master is new one + // check we lost all replicas, and primary is new one a = tsc.GetTabletStats("k", "s", topodatapb.TabletType_REPLICA) if len(a) != 0 { t.Errorf("unexpected result: %v", a) @@ -229,7 +229,7 @@ func TestLegacyTabletStatsCache(t *testing.T) { t.Errorf("unexpected result: %v", a) } - // old master sending an old ping should be ignored + // old primary sending an old ping should be ignored tsc.StatsUpdate(ts2) a = tsc.GetHealthyTabletStats("k", "s", topodatapb.TabletType_PRIMARY) if len(a) != 1 || !ts1.DeepEqual(&a[0]) { diff --git a/go/vt/discovery/tablet_health_check.go 
b/go/vt/discovery/tablet_health_check.go index be2fefbd4f..a090025302 100644 --- a/go/vt/discovery/tablet_health_check.go +++ b/go/vt/discovery/tablet_health_check.go @@ -59,9 +59,9 @@ type tabletHealthCheck struct { // Serving describes if the tablet can be serving traffic. Serving bool // PrimaryTermStartTime is the last time at which - // this tablet was either elected the master, or received + // this tablet was either elected the primary, or received // a TabletExternallyReparented event. It is set to 0 if the - // tablet doesn't think it's a master. + // tablet doesn't think it's a primary. PrimaryTermStartTime int64 // Stats is the current health status, as received by the // StreamHealth RPC (replication lag, ...). @@ -199,7 +199,7 @@ func (thc *tabletHealthCheck) processResponse(hc *HealthCheckImpl, shr *query.St } thc.setServingState(serving, reason) - // notify downstream for master change + // notify downstream of a primary change hc.updateHealth(thc.SimpleCopy(), prevTarget, trivialUpdate, thc.Serving) return nil } diff --git a/go/vt/discovery/tablet_picker_test.go b/go/vt/discovery/tablet_picker_test.go index ee3e27ea9f..ec92d72224 100644 --- a/go/vt/discovery/tablet_picker_test.go +++ b/go/vt/discovery/tablet_picker_test.go @@ -170,7 +170,7 @@ func TestPickRespectsTabletType(t *testing.T) { tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica,rdonly") require.NoError(t, err) - // In 20 attempts, master tablet must be never picked + // In 20 attempts, the primary tablet must never be picked for i := 0; i < 20; i++ { tablet, err := tp.PickForStreaming(context.Background()) require.NoError(t, err) diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index 932fe2c159..b858854902 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -57,7 +57,7 @@ type BackupParams struct { Concurrency int // Extra env variables for pre-backup and post-backup transform hooks HookExtraEnv map[string]string - // TopoServer, Keyspace and Shard are used to discover master tablet + // TopoServer, Keyspace and Shard are used to discover the primary tablet TopoServer *topo.Server // Keyspace and Shard are used to infer the directory where backups should be stored Keyspace string diff --git a/go/vt/mysqlctl/cmd.go b/go/vt/mysqlctl/cmd.go index f54815cf2c..e1524da379 100644 --- a/go/vt/mysqlctl/cmd.go +++ b/go/vt/mysqlctl/cmd.go @@ -35,8 +35,8 @@ func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int32) // because reusing server-ids is not safe. // // For example, if a tablet comes back with an empty data dir, it will restore - // from backup and then connect to the master. But if this tablet has the same - // server-id as before, and if this tablet was recently a master, then it can + // from backup and then connect to the primary. But if this tablet has the same - // server-id as before, and if this tablet was recently a primary, then it can // lose data by skipping binlog events due to replicate-same-server-id=FALSE, // which is the default setting.
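The randomization that the RandomizeMysqlServerID call below performs might look roughly like this sketch. This is an assumption for illustration; the real helper lives in the mycnf code and may differ in range and in how the value is persisted:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomServerID picks a fresh server-id for a restored tablet: never 0,
// which disables replication, and within MySQL's valid range.
func randomServerID() uint32 {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return uint32(rng.Int31n(1<<31-2)) + 1 // 1 .. 2^31-1
}

func main() {
	// With replicate-same-server-id=FALSE (the default), a replica drops
	// binlog events carrying its own server-id. A restored tablet that
	// kept its old id could therefore silently skip events it wrote as a
	// former primary; a fresh random id avoids that failure mode.
	fmt.Printf("server-id = %d\n", randomServerID())
}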
if err := mycnf.RandomizeMysqlServerID(); err != nil { diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go index c081c0aa4e..fa6c842a37 100644 --- a/go/vt/mysqlctl/query.go +++ b/go/vt/mysqlctl/query.go @@ -219,7 +219,7 @@ const ( func redactPassword(input string) string { i := strings.Index(input, masterPasswordStart) - // We have master password in the query, try to redact it + // We have primary password in the query, try to redact it if i != -1 { j := strings.Index(input[i+len(masterPasswordStart):], masterPasswordEnd) if j == -1 { diff --git a/go/vt/mysqlctl/reparent.go b/go/vt/mysqlctl/reparent.go index 8a93bf8413..726a6003d0 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -92,7 +92,7 @@ func (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS } } -// Promote will promote this server to be the new master. +// Promote will promote this server to be the new primary. func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (mysql.Position, error) { ctx := context.TODO() conn, err := getPoolReconnect(ctx, mysqld.dbaPool) @@ -104,10 +104,10 @@ func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (mysql.Position, e // Since we handle replication, just stop it. cmds := []string{ conn.StopReplicationCommand(), - "RESET SLAVE ALL", // "ALL" makes it forget master host:port. - // When using semi-sync and GTID, a replica first connects to the new master with a given GTID set, + "RESET SLAVE ALL", // "ALL" makes it forget primary host:port. + // When using semi-sync and GTID, a replica first connects to the new primary with a given GTID set, // it can take a long time to scan the current binlog file to find the corresponding position. - // This can cause commits that occur soon after the master is promoted to take a long time waiting + // This can cause commits that occur soon after the primary is promoted to take a long time waiting // for a semi-sync ACK, since replication is not fully set up. // More details in: https://github.com/vitessio/vitess/issues/4161 "FLUSH BINARY LOGS", diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 0d0d4b7ecb..45b878fee7 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -38,7 +38,7 @@ import ( ) // WaitForReplicationStart waits until the deadline for replication to start. -// This validates the current master is correct and can be connected to. +// This validates the current primary is correct and can be connected to. func WaitForReplicationStart(mysqld MysqlDaemon, replicaStartDeadline int) error { var rowMap map[string]string for replicaWait := 0; replicaWait < replicaStartDeadline; replicaWait++ { @@ -215,7 +215,7 @@ func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Positio waitCommandName := "WaitUntilPositionCommand" var query string if targetPos.MatchesFlavor(mysql.FilePosFlavorID) { - // If we are the master, WaitUntilFilePositionCommand will fail. + // If we are the primary, WaitUntilFilePositionCommand will fail. // But position is most likely reached. So, check the position // first. mpos, err := conn.PrimaryFilePosition() @@ -233,7 +233,7 @@ func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Positio } waitCommandName = "WaitUntilFilePositionCommand" } else { - // If we are the master, WaitUntilPositionCommand will fail. + // If we are the primary, WaitUntilPositionCommand will fail. // But position is most likely reached. So, check the position // first. 
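The "check the position first, then wait" shape used here can be sketched independently of the real mysql.Position type. In this toy version, GTID-set containment is faked with substring matching purely to show the control flow; real code uses Position.AtLeast and a server-side wait such as WAIT_FOR_EXECUTED_GTID_SET:

package main

import (
	"fmt"
	"strings"
)

// atLeast is a toy containment check: the target is a comma-separated list
// of GTID ranges and "contains" is plain substring matching.
func atLeast(current, target string) bool {
	for _, gtid := range strings.Split(target, ",") {
		if !strings.Contains(current, strings.TrimSpace(gtid)) {
			return false
		}
	}
	return true
}

// waitSourcePos shows the pattern: on a primary the server-side wait
// function would error out, but if the position is already reached there
// is nothing to wait for, so check before waiting.
func waitSourcePos(currentPos, targetPos string, serverSideWait func() error) error {
	if atLeast(currentPos, targetPos) {
		return nil // already caught up; skip the wait entirely
	}
	return serverSideWait()
}

func main() {
	err := waitSourcePos(
		"3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
		"3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
		func() error { return fmt.Errorf("wait failed: this server is a primary") },
	)
	fmt.Println("err:", err) // nil: the early position check short-circuited
}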
mpos, err := conn.PrimaryPosition() @@ -280,7 +280,7 @@ func (mysqld *Mysqld) ReplicationStatus() (mysql.ReplicationStatus, error) { return conn.ShowReplicationStatus() } -// PrimaryStatus returns the master replication statuses +// PrimaryStatus returns the primary replication status func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { @@ -291,7 +291,7 @@ func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, e return conn.ShowPrimaryStatus() } -// PrimaryPosition returns the master replication position. +// PrimaryPosition returns the primary replication position. func (mysqld *Mysqld) PrimaryPosition() (mysql.Position, error) { conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) if err != nil { @@ -316,7 +316,7 @@ func (mysqld *Mysqld) SetReplicationPosition(ctx context.Context, pos mysql.Posi return mysqld.executeSuperQueryListConn(ctx, conn, cmds) } -// SetReplicationSource makes the provided host / port the master. It optionally +// SetReplicationSource makes the provided host / port the primary. It optionally // stops replication before, and starts it after. func (mysqld *Mysqld) SetReplicationSource(ctx context.Context, masterHost string, masterPort int, replicationStopBefore bool, replicationStartAfter bool) error { params, err := mysqld.dbcfgs.ReplConnector().MysqlParams() @@ -457,7 +457,7 @@ func (mysqld *Mysqld) DisableBinlogPlayback() error { } // SetSemiSyncEnabled enables or disables semi-sync replication for -// master and/or replica mode. +// primary and/or replica mode. func (mysqld *Mysqld) SetSemiSyncEnabled(master, replica bool) error { log.Infof("Setting semi-sync mode: master=%v, replica=%v", master, replica) @@ -479,7 +479,7 @@ func (mysqld *Mysqld) SetSemiSyncEnabled(master, replica bool) error { return nil } -// SemiSyncEnabled returns whether semi-sync is enabled for master or replica. +// SemiSyncEnabled returns whether semi-sync is enabled for primary or replica. // If the semi-sync plugin is not loaded, we assume semi-sync is disabled. func (mysqld *Mysqld) SemiSyncEnabled() (master, replica bool) { vars, err := mysqld.fetchVariables(context.TODO(), "rpl_semi_sync_%_enabled") diff --git a/go/vt/mysqlctl/replication_test.go b/go/vt/mysqlctl/replication_test.go index 65ae9be093..1502ad4773 100644 --- a/go/vt/mysqlctl/replication_test.go +++ b/go/vt/mysqlctl/replication_test.go @@ -70,7 +70,7 @@ func TestRedactPassword(t *testing.T) { testRedacted(t, `START xxx USER = 'vt_repl', PASSWORD = 'AAA`, `START xxx USER = 'vt_repl', PASSWORD = 'AAA`) - // both master password and password + // both primary password and password testRedacted(t, `START xxx MASTER_PASSWORD = 'AAA', PASSWORD = 'BBB' diff --git a/go/vt/mysqlctl/tmutils/permissions.go b/go/vt/mysqlctl/tmutils/permissions.go index 0f340a0d6a..2770491f8c 100644 --- a/go/vt/mysqlctl/tmutils/permissions.go +++ b/go/vt/mysqlctl/tmutils/permissions.go @@ -71,7 +71,7 @@ func NewUserPermission(fields []*querypb.Field, values []sqltypes.Value) *tablet up.PasswordChecksum = crc64.Checksum(values[i].ToBytes(), hashTable) case "password_last_changed": // we skip this one, as the value may be - // different on master and replicas. + // different on primary and replicas.
default: up.Privileges[field.Name] = values[i].ToString() } } diff --git a/go/vt/orchestrator/app/cli.go b/go/vt/orchestrator/app/cli.go index 49d5fd2819..94eb8dab21 100644 --- a/go/vt/orchestrator/app/cli.go +++ b/go/vt/orchestrator/app/cli.go @@ -316,7 +316,7 @@ func Cli(command string, strict bool, instance string, destination string, owner case registerCliCommand("repoint", "Classic file:pos relocation", `Make the given instance replicate from another instance without changing the binglog coordinates. Use with care`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - // destinationKey can be null, in which case the instance repoints to its existing master + // destinationKey can be null, in which case the instance repoints to its existing primary instance, err := inst.Repoint(instanceKey, destinationKey, inst.GTIDHintNeutral) if err != nil { log.Fatale(err) } diff --git a/go/vt/orchestrator/config/config.go b/go/vt/orchestrator/config/config.go index f547379e8d..41516e7930 100644 --- a/go/vt/orchestrator/config/config.go +++ b/go/vt/orchestrator/config/config.go @@ -113,7 +113,7 @@ type Configuration struct { DefaultInstancePort int // In case port was not specified on command line SlaveLagQuery string // Synonym to ReplicationLagQuery ReplicationLagQuery string // custom query to check on replica lg (e.g. heartbeat table). Must return a single row with a single numeric column, which is the lag. - ReplicationCredentialsQuery string // custom query to get replication credentials. Must return a single row, with two text columns: 1st is username, 2nd is password. This is optional, and can be used by orchestrator to configure replication after master takeover or setup of co-masters. You need to ensure the orchestrator user has the privileges to run this query + ReplicationCredentialsQuery string // custom query to get replication credentials. Must return a single row, with two text columns: 1st is username, 2nd is password. This is optional, and can be used by orchestrator to configure replication after primary takeover or setup of co-primaries. You need to ensure the orchestrator user has the privileges to run this query DiscoverByShowSlaveHosts bool // Attempt SHOW SLAVE HOSTS before PROCESSLIST UseSuperReadOnly bool // Should orchestrator super_read_only any time it sets read_only InstancePollSeconds uint // Number of seconds between instance reads @@ -137,7 +137,7 @@ type Configuration struct { ProblemIgnoreHostnameFilters []string // Will minimize problem visualization for hostnames matching given regexp filters VerifyReplicationFilters bool // Include replication filters check before approving topology refactoring ReasonableMaintenanceReplicationLagSeconds int // Above this value move-up and move-below are blocked - CandidateInstanceExpireMinutes uint // Minutes after which a suggestion to use an instance as a candidate replica (to be preferably promoted on master failover) is expired. + CandidateInstanceExpireMinutes uint // Minutes after which a suggestion to use an instance as a candidate replica (to be preferably promoted on primary failover) is expired. AuditLogFile string // Name of log file for audit operations. Disabled when empty.
AuditToSyslog bool // If true, audit messages are written to syslog AuditToBackendDB bool // If true, audit messages are written to the backend DB's `audit` table (default: true) @@ -156,8 +156,8 @@ type Configuration struct { AccessTokenUseExpirySeconds uint // Time by which an issued token must be used AccessTokenExpiryMinutes uint // Time after which HTTP access token expires ClusterNameToAlias map[string]string // map between regex matching cluster name to a human friendly alias - DetectClusterAliasQuery string // Optional query (executed on topology instance) that returns the alias of a cluster. Query will only be executed on cluster master (though until the topology's master is resovled it may execute on other/all replicas). If provided, must return one row, one column - DetectClusterDomainQuery string // Optional query (executed on topology instance) that returns the VIP/CNAME/Alias/whatever domain name for the master of this cluster. Query will only be executed on cluster master (though until the topology's master is resovled it may execute on other/all replicas). If provided, must return one row, one column + DetectClusterAliasQuery string // Optional query (executed on topology instance) that returns the alias of a cluster. Query will only be executed on cluster primary (though until the topology's primary is resolved it may execute on other/all replicas). If provided, must return one row, one column + DetectClusterDomainQuery string // Optional query (executed on topology instance) that returns the VIP/CNAME/Alias/whatever domain name for the primary of this cluster. Query will only be executed on cluster primary (though until the topology's primary is resolved it may execute on other/all replicas). If provided, must return one row, one column DetectInstanceAliasQuery string // Optional query (executed on topology instance) that returns the alias of an instance. If provided, must return one row, one column DetectPromotionRuleQuery string // Optional query (executed on topology instance) that returns the promotion rule of an instance. If provided, must return one row, one column. DataCenterPattern string // Regexp pattern with one group, extracting the datacenter name from the hostname @@ -166,7 +166,7 @@ type Configuration struct { DetectDataCenterQuery string // Optional query (executed on topology instance) that returns the data center of an instance. If provided, must return one row, one column. Overrides DataCenterPattern and useful for installments where DC cannot be inferred by hostname DetectRegionQuery string // Optional query (executed on topology instance) that returns the region of an instance. If provided, must return one row, one column. Overrides RegionPattern and useful for installments where Region cannot be inferred by hostname DetectPhysicalEnvironmentQuery string // Optional query (executed on topology instance) that returns the physical environment of an instance. If provided, must return one row, one column. Overrides PhysicalEnvironmentPattern and useful for installments where env cannot be inferred by hostname - DetectSemiSyncEnforcedQuery string // Optional query (executed on topology instance) to determine whether semi-sync is fully enforced for master writes (async fallback is not allowed under any circumstance). If provided, must return one row, one column, value 0 or 1.
+ DetectSemiSyncEnforcedQuery string // Optional query (executed on topology instance) to determine whether semi-sync is fully enforced for primary writes (async fallback is not allowed under any circumstance). If provided, must return one row, one column, value 0 or 1. SupportFuzzyPoolHostnames bool // Should "submit-pool-instances" command be able to pass list of fuzzy instances (fuzzy means non-fqdn, but unique enough to recognize). Defaults 'true', implies more queries on backend db InstancePoolExpiryMinutes uint // Time after which entries in database_instance_pool are expired (resubmit via `submit-pool-instances`) PromotionIgnoreHostnameFilters []string // Orchestrator will not promote replicas with hostname matching pattern (via -c recovery; for example, avoid promoting dev-dedicated machines) @@ -198,7 +198,7 @@ type Configuration struct { RecoveryPeriodBlockMinutes int // (supported for backwards compatibility but please use newer `RecoveryPeriodBlockSeconds` instead) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping RecoveryIgnoreHostnameFilters []string // Recovery analysis will completely ignore hosts matching given patterns - RecoverMasterClusterFilters []string // Only do master recovery on clusters matching these regexp patterns (of course the ".*" pattern matches everything) + RecoverMasterClusterFilters []string // Only do primary recovery on clusters matching these regexp patterns (of course the ".*" pattern matches everything) RecoverIntermediateMasterClusterFilters []string // Only do IM recovery on clusters matching these regexp patterns (of course the ".*" pattern matches everything) ProcessesShellCommand string // Shell that executes command scripts OnFailureDetectionProcesses []string // Processes to execute when detecting a failover scenario (before making a decision whether to failover or not). May and should use some of these placeholders: {failureType}, {instanceType}, {isMaster}, {isCoMaster}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterAlias}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {autoMasterRecovery}, {autoIntermediateMasterRecovery} @@ -206,35 +206,35 @@ type Configuration struct { PreFailoverProcesses []string // Processes to execute before doing a failover (aborting operation should any once of them exits with non-zero code; order of execution undefined). May and should use some of these placeholders: {failureType}, {instanceType}, {isMaster}, {isCoMaster}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterAlias}, {failureClusterDomain}, {failedPort}, {countReplicas}, {replicaHosts}, {isDowntimed} PostFailoverProcesses []string // Processes to execute after doing a failover (order of execution undefined). 
May and should use some of these placeholders: {failureType}, {instanceType}, {isMaster}, {isCoMaster}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterAlias}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {isSuccessful}, {lostReplicas}, {countLostReplicas} PostUnsuccessfulFailoverProcesses []string // Processes to execute after a not-completely-successful failover (order of execution undefined). May and should use some of these placeholders: {failureType}, {instanceType}, {isMaster}, {isCoMaster}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterAlias}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {isSuccessful}, {lostReplicas}, {countLostReplicas} - PostMasterFailoverProcesses []string // Processes to execute after doing a master failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses - PostIntermediateMasterFailoverProcesses []string // Processes to execute after doing a master failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses - PostGracefulTakeoverProcesses []string // Processes to execute after runnign a graceful master takeover. Uses same placeholders as PostFailoverProcesses + PostMasterFailoverProcesses []string // Processes to execute after doing a primary failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses + PostIntermediateMasterFailoverProcesses []string // Processes to execute after doing an intermediate primary failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses + PostGracefulTakeoverProcesses []string // Processes to execute after running a graceful primary takeover. Uses same placeholders as PostFailoverProcesses PostTakeMasterProcesses []string // Processes to execute after a successful Take-Master event has taken place - CoMasterRecoveryMustPromoteOtherCoMaster bool // When 'false', anything can get promoted (and candidates are prefered over others). When 'true', orchestrator will promote the other co-master or else fail + CoMasterRecoveryMustPromoteOtherCoMaster bool // When 'false', anything can get promoted (and candidates are preferred over others). When 'true', orchestrator will promote the other co-primary or else fail DetachLostSlavesAfterMasterFailover bool // synonym to DetachLostReplicasAfterMasterFailover - DetachLostReplicasAfterMasterFailover bool // Should replicas that are not to be lost in master recovery (i.e. were more up-to-date than promoted replica) be forcibly detached - ApplyMySQLPromotionAfterMasterFailover bool // Should orchestrator take upon itself to apply MySQL master promotion: set read_only=0, detach replication, etc. - PreventCrossDataCenterMasterFailover bool // When true (default: false), cross-DC master failover are not allowed, orchestrator will do all it can to only fail over within same DC, or else not fail over at all. - PreventCrossRegionMasterFailover bool // When true (default: false), cross-region master failover are not allowed, orchestrator will do all it can to only fail over within same region, or else not fail over at all. - MasterFailoverLostInstancesDowntimeMinutes uint // Number of minutes to downtime any server that was lost after a master failover (including failed master & lost replicas).
0 to disable + DetachLostReplicasAfterMasterFailover bool // Should replicas that are not to be lost in primary recovery (i.e. were more up-to-date than promoted replica) be forcibly detached + ApplyMySQLPromotionAfterMasterFailover bool // Should orchestrator take upon itself to apply MySQL primary promotion: set read_only=0, detach replication, etc. + PreventCrossDataCenterMasterFailover bool // When true (default: false), cross-DC primary failovers are not allowed, orchestrator will do all it can to only fail over within same DC, or else not fail over at all. + PreventCrossRegionMasterFailover bool // When true (default: false), cross-region primary failovers are not allowed, orchestrator will do all it can to only fail over within same region, or else not fail over at all. + MasterFailoverLostInstancesDowntimeMinutes uint // Number of minutes to downtime any server that was lost after a primary failover (including failed primary & lost replicas). 0 to disable MasterFailoverDetachSlaveMasterHost bool // synonym to MasterFailoverDetachReplicaMasterHost - MasterFailoverDetachReplicaMasterHost bool // Should orchestrator issue a detach-replica-master-host on newly promoted master (this makes sure the new master will not attempt to replicate old master if that comes back to life). Defaults 'false'. Meaningless if ApplyMySQLPromotionAfterMasterFailover is 'true'. - FailMasterPromotionOnLagMinutes uint // when > 0, fail a master promotion if the candidate replica is lagging >= configured number of minutes. - FailMasterPromotionIfSQLThreadNotUpToDate bool // when true, and a master failover takes place, if candidate master has not consumed all relay logs, promotion is aborted with error - DelayMasterPromotionIfSQLThreadNotUpToDate bool // when true, and a master failover takes place, if candidate master has not consumed all relay logs, delay promotion until the sql thread has caught up + MasterFailoverDetachReplicaMasterHost bool // Should orchestrator issue a detach-replica-master-host on newly promoted primary (this makes sure the new primary will not attempt to replicate old primary if that comes back to life). Defaults 'false'. Meaningless if ApplyMySQLPromotionAfterMasterFailover is 'true'. + FailMasterPromotionOnLagMinutes uint // when > 0, fail a primary promotion if the candidate replica is lagging >= configured number of minutes. + FailMasterPromotionIfSQLThreadNotUpToDate bool // when true, and a primary failover takes place, if candidate primary has not consumed all relay logs, promotion is aborted with error + DelayMasterPromotionIfSQLThreadNotUpToDate bool // when true, and a primary failover takes place, if candidate primary has not consumed all relay logs, delay promotion until the sql thread has caught up PostponeSlaveRecoveryOnLagMinutes uint // Synonym to PostponeReplicaRecoveryOnLagMinutes - PostponeReplicaRecoveryOnLagMinutes uint // On crash recovery, replicas that are lagging more than given minutes are only resurrected late in the recovery process, after master/IM has been elected and processes executed. Value of 0 disables this feature + PostponeReplicaRecoveryOnLagMinutes uint // On crash recovery, replicas that are lagging more than given minutes are only resurrected late in the recovery process, after primary/IM has been elected and processes executed.
Value of 0 disables this feature OSCIgnoreHostnameFilters []string // OSC replicas recommendation will ignore replica hostnames matching given patterns URLPrefix string // URL prefix to run orchestrator on non-root web path, e.g. /orchestrator to put it behind nginx. DiscoveryIgnoreReplicaHostnameFilters []string // Regexp filters to apply to prevent auto-discovering new replicas. Usage: unreachable servers due to firewalls, applications which trigger binlog dumps - DiscoveryIgnoreMasterHostnameFilters []string // Regexp filters to apply to prevent auto-discovering a master. Usage: pointing your master temporarily to replicate seom data from external host + DiscoveryIgnoreMasterHostnameFilters []string // Regexp filters to apply to prevent auto-discovering a primary. Usage: pointing your primary temporarily to replicate some data from external host DiscoveryIgnoreHostnameFilters []string // Regexp filters to apply to prevent discovering instances of any kind ConsulAddress string // Address where Consul HTTP api is found. Example: 127.0.0.1:8500 ConsulScheme string // Scheme (http or https) for Consul ConsulAclToken string // ACL token used to write to Consul KV ConsulCrossDataCenterDistribution bool // should orchestrator automatically auto-deduce all consul DCs and write KVs in all DCs ZkAddress string // UNSUPPERTED YET. Address where (single or multiple) ZooKeeper servers are found, in `srv1[:port1][,srv2[:port2]...]` format. Default port is 2181. Example: srv-a,srv-b:12181,srv-c - KVClusterMasterPrefix string // Prefix to use for clusters' masters entries in KV stores (internal, consul, ZK), default: "mysql/master" + KVClusterMasterPrefix string // Prefix to use for clusters' primaries' entries in KV stores (internal, consul, ZK), default: "mysql/master" WebMessage string // If provided, will be shown on all web pages below the title bar MaxConcurrentReplicaOperations int // Maximum number of concurrent operations on replicas InstanceDBExecContextTimeoutSeconds int // Timeout on context used while calling ExecContext on instance database diff --git a/go/vt/orchestrator/http/api.go b/go/vt/orchestrator/http/api.go index a7bb28d552..33c421c705 100644 --- a/go/vt/orchestrator/http/api.go +++ b/go/vt/orchestrator/http/api.go @@ -530,7 +530,7 @@ func (this *HttpAPI) MoveUpReplicas(params martini.Params, r render.Render, req Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Moved up %d replicas of %+v below %+v; %d errors: %+v", len(replicas), instanceKey, newMaster.Key, len(errs), errs), Details: replicas}) } -// Repoint positiones a replica under another (or same) master with exact same coordinates. +// Repoint positions a replica under another (or same) primary with exact same coordinates.
// Useful for binlog servers func (this *HttpAPI) Repoint(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { @@ -578,7 +578,7 @@ func (this *HttpAPI) RepointReplicas(params martini.Params, r render.Render, req Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Repointed %d replicas of %+v", len(replicas), instanceKey), Details: replicas}) } -// MakeCoMaster attempts to make an instance co-master with its own master +// MakeCoMaster attempts to make an instance co-primary with its own primary func (this *HttpAPI) MakeCoMaster(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) @@ -599,7 +599,7 @@ func (this *HttpAPI) MakeCoMaster(params martini.Params, r render.Render, req *h Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Instance made co-master: %+v", instance.Key), Details: instance}) } -// ResetReplication makes a replica forget about its master, effectively breaking the replication +// ResetReplication makes a replica forget about its primary, effectively breaking the replication func (this *HttpAPI) ResetReplication(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) @@ -620,7 +620,7 @@ func (this *HttpAPI) ResetReplication(params martini.Params, r render.Render, re Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Replica reset on %+v", instance.Key), Details: instance}) } -// DetachReplicaMasterHost detaches a replica from its master by setting an invalid +// DetachReplicaMasterHost detaches a replica from its primary by setting an invalid // (yet revertible) host name func (this *HttpAPI) DetachReplicaMasterHost(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { @@ -643,7 +643,7 @@ func (this *HttpAPI) DetachReplicaMasterHost(params martini.Params, r render.Ren } // ReattachReplicaMasterHost reverts a detachReplicaMasterHost command -// by resoting the original master hostname in CHANGE MASTER TO +// by restoring the original primary hostname in CHANGE MASTER TO func (this *HttpAPI) ReattachReplicaMasterHost(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) @@ -743,7 +743,7 @@ func (this *HttpAPI) ErrantGTIDResetMaster(params martini.Params, r render.Rende Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Removed errant GTID on %+v and issued a RESET MASTER", instance.Key), Details: instance}) } -// ErrantGTIDInjectEmpty removes errant transactions by injecting and empty transaction on the cluster's master +// ErrantGTIDInjectEmpty removes errant transactions by injecting an empty transaction on the cluster's primary func (this *HttpAPI) ErrantGTIDInjectEmpty(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) @@ -1637,7 +1637,7 @@ func (this *HttpAPI) UntagAll(params martini.Params, r render.Render, req *http.
Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("%s removed from %+v instances", tag.TagName, len(*untagged)), Details: untagged.GetInstanceKeys()}) } -// Write a cluster's master (or all clusters masters) to kv stores. +// SubmitMastersToKvStores writes a cluster's primary (or all clusters' primaries) to kv stores. // This should generally only happen once in a lifetime of a cluster. Otherwise KV // stores are updated via failovers. func (this *HttpAPI) SubmitMastersToKvStores(params martini.Params, r render.Render, req *http.Request) { @@ -1654,7 +1654,7 @@ func (this *HttpAPI) SubmitMastersToKvStores(params martini.Params, r render.Ren Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Submitted %d masters", submittedCount), Details: kvPairs}) } -// Clusters provides list of known masters +// Masters provides a list of known primaries func (this *HttpAPI) Masters(params martini.Params, r render.Render, req *http.Request) { instances, err := inst.ReadWriteableClustersMasters() @@ -1666,7 +1666,7 @@ func (this *HttpAPI) Masters(params martini.Params, r render.Render, req *http.R r.JSON(http.StatusOK, instances) } -// ClusterMaster returns the writable master of a given cluster +// ClusterMaster returns the writable primary of a given cluster func (this *HttpAPI) ClusterMaster(params martini.Params, r render.Render, req *http.Request) { clusterName, err := figureClusterName(getClusterHint(params)) if err != nil { @@ -2304,7 +2304,7 @@ func (this *HttpAPI) Recover(params martini.Params, r render.Render, req *http.R Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Recovery executed on %+v", instanceKey), Details: *promotedInstanceKey}) } -// GracefulMasterTakeover gracefully fails over a master onto its single replica. +// GracefulMasterTakeover gracefully fails over a primary onto its single replica.
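For orientation, here is a minimal client-side sketch of driving a graceful takeover through this HTTP API and decoding the APIResponse envelope that Respond() serializes. The endpoint path, port and cluster alias are assumptions for illustration, not something this patch defines:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // APIResponse approximates the {Code, Message, Details} envelope used by api.go.
    type APIResponse struct {
        Code    string          `json:"Code"`
        Message string          `json:"Message"`
        Details json.RawMessage `json:"Details"`
    }

    func main() {
        // Hypothetical URL; host, port and cluster alias depend on the deployment.
        resp, err := http.Get("http://localhost:3000/api/graceful-master-takeover/mycluster")
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        var r APIResponse
        if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
            fmt.Println("decode failed:", err)
            return
        }
        fmt.Printf("%s: %s\n", r.Code, r.Message)
    }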
func (this *HttpAPI) gracefulMasterTakeover(params martini.Params, r render.Render, req *http.Request, user auth.User, auto bool) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) @@ -2329,19 +2329,19 @@ func (this *HttpAPI) gracefulMasterTakeover(params martini.Params, r render.Rend Respond(r, &APIResponse{Code: OK, Message: "graceful-master-takeover: successor promoted", Details: topologyRecovery}) } -// GracefulMasterTakeover gracefully fails over a master, either: +// GracefulMasterTakeover gracefully fails over a primary, either: // - onto its single replica, or // - onto a replica indicated by the user func (this *HttpAPI) GracefulMasterTakeover(params martini.Params, r render.Render, req *http.Request, user auth.User) { this.gracefulMasterTakeover(params, r, req, user, false) } -// GracefulMasterTakeoverAuto gracefully fails over a master onto a replica of orchestrator's choosing +// GracefulMasterTakeoverAuto gracefully fails over a primary onto a replica of orchestrator's choosing func (this *HttpAPI) GracefulMasterTakeoverAuto(params martini.Params, r render.Render, req *http.Request, user auth.User) { this.gracefulMasterTakeover(params, r, req, user, true) } -// ForceMasterFailover fails over a master (even if there's no particular problem with the master) +// ForceMasterFailover fails over a primary (even if there's no particular problem with the primary) func (this *HttpAPI) ForceMasterFailover(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) @@ -2364,7 +2364,7 @@ func (this *HttpAPI) ForceMasterFailover(params martini.Params, r render.Render, } } -// ForceMasterTakeover fails over a master (even if there's no particular problem with the master) +// ForceMasterTakeover fails over a primary (even if there's no particular problem with the primary) func (this *HttpAPI) ForceMasterTakeover(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) diff --git a/go/vt/orchestrator/inst/analysis.go b/go/vt/orchestrator/inst/analysis.go index 6debabe9c5..fc1b556b46 100644 --- a/go/vt/orchestrator/inst/analysis.go +++ b/go/vt/orchestrator/inst/analysis.go @@ -226,7 +226,7 @@ func (this *ReplicationAnalysis) AnalysisString() string { return strings.Join(result, ", ") } -// Get a string description of the analyzed instance type (master? co-master? intermediate-master?) +// Get a string description of the analyzed instance type (primary? co-primary? intermediate-primary?) 
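The instance-type question in the comment above can be pictured with a rough stand-in sketch. The boolean fields below are simplified assumptions; the real decision is made from ReplicationAnalysis state and is more involved:

    package main

    import "fmt"

    // analysis is a toy stand-in for the fields GetAnalysisInstanceType consults.
    type analysis struct {
        isCoPrimary bool // member of a primary<->primary pair
        isReplica   bool // replicates from another server
        hasReplicas bool // has replicas of its own
    }

    func instanceType(a analysis) string {
        switch {
        case a.isCoPrimary:
            return "co-primary"
        case a.isReplica && a.hasReplicas:
            return "intermediate-primary" // relays changes to replicas of its own
        case a.isReplica:
            return "replica"
        default:
            return "primary"
        }
    }

    func main() {
        fmt.Println(instanceType(analysis{isReplica: true, hasReplicas: true})) // intermediate-primary
    }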
func (this *ReplicationAnalysis) GetAnalysisInstanceType() AnalysisInstanceType { if this.IsCoMaster { return AnalysisInstanceTypeCoMaster diff --git a/go/vt/orchestrator/inst/analysis_dao.go b/go/vt/orchestrator/inst/analysis_dao.go index 50190833ff..cc4809c164 100644 --- a/go/vt/orchestrator/inst/analysis_dao.go +++ b/go/vt/orchestrator/inst/analysis_dao.go @@ -60,7 +60,7 @@ type clusterAnalysis struct { masterKey *InstanceKey } -// GetReplicationAnalysis will check for replication problems (dead master; unreachable master; etc) +// GetReplicationAnalysis will check for replication problems (dead primary; unreachable primary; etc) func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) ([]ReplicationAnalysis, error) { result := []ReplicationAnalysis{} @@ -550,7 +550,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.Description = "Master cannot be reached by orchestrator but it has replicating replicas; possibly a network/host issue" // } else if a.IsMaster && !a.LastCheckValid && a.LastCheckPartialSuccess && a.CountReplicasFailingToConnectToMaster > 0 && a.CountValidReplicas > 0 && a.CountValidReplicatingReplicas > 0 { - // there's partial success, but also at least one replica is failing to connect to master + // there's partial success, but also at least one replica is failing to connect to primary a.Analysis = UnreachableMaster a.Description = "Master cannot be reached by orchestrator but it has replicating replicas; possibly a network/host issue" // @@ -624,9 +624,9 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) // } else if !a.IsMaster && a.LastCheckValid && a.CountReplicas > 1 && a.CountValidReplicatingReplicas == 0 && a.CountReplicasFailingToConnectToMaster > 0 && a.CountReplicasFailingToConnectToMaster == a.CountValidReplicas { - // All replicas are either failing to connect to master (and at least one of these have to exist) + // All replicas are either failing to connect to primary (and at least one of these have to exist) // or completely dead. - // Must have at least two replicas to reach such conclusion -- do note that the intermediate master is still + // Must have at least two replicas to reach such conclusion -- do note that the intermediate primary is still // reachable to orchestrator, so we base our conclusion on replicas only at this point. a.Analysis = AllIntermediateMasterReplicasFailingToConnectOrDead a.Description = "Intermediate master is reachable but all of its replicas are failing to connect" diff --git a/go/vt/orchestrator/inst/candidate_database_instance_dao.go b/go/vt/orchestrator/inst/candidate_database_instance_dao.go index acbfc701f1..30a2220eb9 100644 --- a/go/vt/orchestrator/inst/candidate_database_instance_dao.go +++ b/go/vt/orchestrator/inst/candidate_database_instance_dao.go @@ -24,7 +24,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/db" ) -// RegisterCandidateInstance markes a given instance as suggested for successoring a master in the event of failover. +// RegisterCandidateInstance marks a given instance as suggested for succeeding a primary in the event of failover. func RegisterCandidateInstance(candidate *CandidateDatabaseInstance) error { if candidate.LastSuggestedString == "" { candidate = candidate.WithCurrentTime() @@ -50,7 +50,7 @@ func RegisterCandidateInstance(candidate *CandidateDatabaseInstance) error { return ExecDBWriteFunc(writeFunc) } -// ExpireCandidateInstances removes stale master candidate suggestions.
+// ExpireCandidateInstances removes stale primary candidate suggestions. func ExpireCandidateInstances() error { writeFunc := func() error { _, err := db.ExecOrchestrator(` diff --git a/go/vt/orchestrator/inst/cluster.go b/go/vt/orchestrator/inst/cluster.go index d5bb4ca94b..e1a69b5b95 100644 --- a/go/vt/orchestrator/inst/cluster.go +++ b/go/vt/orchestrator/inst/cluster.go @@ -39,8 +39,8 @@ func getClusterMasterKVPair(clusterAlias string, masterKey *InstanceKey) *kv.KVP return kv.NewKVPair(GetClusterMasterKVKey(clusterAlias), masterKey.StringCode()) } -// GetClusterMasterKVPairs returns all KV pairs associated with a master. This includes the -// full identity of the master as well as a breakdown by hostname, port, ipv4, ipv6 +// GetClusterMasterKVPairs returns all KV pairs associated with a primary. This includes the +// full identity of the primary as well as a breakdown by hostname, port, ipv4, ipv6 func GetClusterMasterKVPairs(clusterAlias string, masterKey *InstanceKey) (kvPairs [](*kv.KVPair)) { masterKVPair := getClusterMasterKVPair(clusterAlias, masterKey) if masterKVPair == nil { @@ -81,7 +81,7 @@ func mappedClusterNameToAlias(clusterName string) string { type ClusterInfo struct { ClusterName string ClusterAlias string // Human friendly alias - ClusterDomain string // CNAME/VIP/A-record/whatever of the master of this cluster + ClusterDomain string // CNAME/VIP/A-record/whatever of the primary of this cluster CountInstances uint HeuristicLag int64 HasAutomatedMasterRecovery bool diff --git a/go/vt/orchestrator/inst/cluster_alias_dao.go b/go/vt/orchestrator/inst/cluster_alias_dao.go index fa60033690..2b6014e325 100644 --- a/go/vt/orchestrator/inst/cluster_alias_dao.go +++ b/go/vt/orchestrator/inst/cluster_alias_dao.go @@ -195,7 +195,7 @@ func ReplaceAliasClusterName(oldClusterName string, newClusterName string) (err return err } -// ReadUnambiguousSuggestedClusterAliases reads potential master hostname:port who have suggested cluster aliases, +// ReadUnambiguousSuggestedClusterAliases reads potential primary hostname:port who have suggested cluster aliases, // where no one else shares said suggested cluster alias. Such hostname:port are likely true owners // of the alias. 
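Returning to GetClusterMasterKVPairs above: the "full identity plus per-attribute breakdown" can be sketched as below. The exact key names orchestrator writes are not visible in this patch, so treat the layout as an assumption; only the "mysql/master" default prefix comes from the Configuration comment earlier:

    package main

    import "fmt"

    type kvPair struct{ Key, Value string }

    // clusterPrimaryKVPairs builds one "full identity" pair plus a hypothetical
    // per-attribute breakdown under the configured prefix.
    func clusterPrimaryKVPairs(prefix, alias, host string, port int, ipv4, ipv6 string) []kvPair {
        base := prefix + "/" + alias
        return []kvPair{
            {base, fmt.Sprintf("%s:%d", host, port)}, // full identity of the primary
            {base + "/hostname", host},
            {base + "/port", fmt.Sprint(port)},
            {base + "/ipv4", ipv4},
            {base + "/ipv6", ipv6},
        }
    }

    func main() {
        for _, p := range clusterPrimaryKVPairs("mysql/master", "mycluster", "db1.example.com", 3306, "10.0.0.1", "") {
            fmt.Printf("%s = %s\n", p.Key, p.Value)
        }
    }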
func ReadUnambiguousSuggestedClusterAliases() (result map[string]InstanceKey, err error) { diff --git a/go/vt/orchestrator/inst/downtime_dao.go b/go/vt/orchestrator/inst/downtime_dao.go index 20cd4c79b6..ecc68977d2 100644 --- a/go/vt/orchestrator/inst/downtime_dao.go +++ b/go/vt/orchestrator/inst/downtime_dao.go @@ -147,7 +147,7 @@ func expireLostInRecoveryDowntime() error { for _, instance := range instances { // We _may_ expire this downtime, but only after a minute // This is a graceful period, during which other servers can claim ownership of the alias, - // or can update their own cluster name to match a new master's name + // or can update their own cluster name to match a new primary's name if instance.ElapsedDowntime < time.Minute { continue } @@ -159,10 +159,10 @@ func expireLostInRecoveryDowntime() error { // back, alive, replicating in some topology endDowntime = true } else if instance.ReplicationDepth == 0 { - // instance makes the appearance of a master + // instance makes the appearance of a primary if unambiguousKey, ok := unambiguousAliases[instance.SuggestedClusterAlias]; ok { if unambiguousKey.Equals(&instance.Key) { - // This instance seems to be a master, which is valid, and has a suggested alias, + // This instance seems to be a primary, which is valid, and has a suggested alias, // and is the _only_ one to have this suggested alias (i.e. no one took its place) endDowntime = true } diff --git a/go/vt/orchestrator/inst/durability.go b/go/vt/orchestrator/inst/durability.go index a6a61afa2a..0c4e0fd96e 100644 --- a/go/vt/orchestrator/inst/durability.go +++ b/go/vt/orchestrator/inst/durability.go @@ -65,7 +65,7 @@ func PromotionRule(tablet *topodatapb.Tablet) CandidatePromotionRule { return curDurabilityPolicy.promotionRule(tablet) } -// MasterSemiSync returns the master semi-sync setting for the instance. +// MasterSemiSync returns the primary semi-sync setting for the instance. // 0 means none. Non-zero specifies the number of required ackers. 
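The "number of required ackers" that MasterSemiSync returns maps naturally onto MySQL's classic semi-sync plugin variables. A sketch of how a caller might turn that count into SQL, assuming the rpl_semi_sync_master_* variables; the mapping is an illustration, not code from this patch:

    package main

    import "fmt"

    // semiSyncStatements returns statements one could run on the primary for a
    // given acker count; 0 disables semi-sync, per the comment above.
    func semiSyncStatements(ackers int) []string {
        if ackers == 0 {
            return []string{"SET GLOBAL rpl_semi_sync_master_enabled = 0"}
        }
        return []string{
            "SET GLOBAL rpl_semi_sync_master_enabled = 1",
            fmt.Sprintf("SET GLOBAL rpl_semi_sync_master_wait_for_slave_count = %d", ackers),
        }
    }

    func main() {
        for _, stmt := range semiSyncStatements(1) {
            fmt.Println(stmt)
        }
    }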
func MasterSemiSync(instanceKey InstanceKey) int { return curDurabilityPolicy.masterSemiSync(instanceKey) diff --git a/go/vt/orchestrator/inst/instance.go b/go/vt/orchestrator/inst/instance.go index 495f450cb9..fc4c1cd951 100644 --- a/go/vt/orchestrator/inst/instance.go +++ b/go/vt/orchestrator/inst/instance.go @@ -93,7 +93,7 @@ type Instance struct { IsCoMaster bool HasReplicationCredentials bool ReplicationCredentialsAvailable bool - SemiSyncAvailable bool // when both semi sync plugins (master & replica) are loaded + SemiSyncAvailable bool // when both semi sync plugins (primary & replica) are loaded SemiSyncEnforced bool SemiSyncMasterEnabled bool SemiSyncReplicaEnabled bool @@ -314,20 +314,20 @@ func (this *Instance) IsReplica() bool { return this.MasterKey.Hostname != "" && this.MasterKey.Hostname != "_" && this.MasterKey.Port != 0 && (this.ReadBinlogCoordinates.LogFile != "" || this.UsingGTID()) } -// IsMaster makes simple heuristics to decide whether this instance is a master (not replicating from any other server), +// IsMaster uses simple heuristics to decide whether this instance is a primary (not replicating from any other server), // either via traditional async/semisync replication or group replication func (this *Instance) IsMaster() bool { - // If traditional replication is configured, it is for sure not a master + // If traditional replication is configured, it is for sure not a primary if this.IsReplica() { return false } // If traditional replication is not configured, and it is also not part of a replication group, this host is - // a master + // a primary if !this.IsReplicationGroupMember() { return true } // If traditional replication is not configured, and this host is part of a group, it is only considered a - // master if it has the role of group Primary. Otherwise it is not a master. + // primary if it has the role of group Primary. Otherwise it is not a primary. if this.ReplicationGroupMemberRole == GroupReplicationMemberRolePrimary { return true } @@ -407,12 +407,12 @@ func (this *Instance) GetNextBinaryLog(binlogCoordinates BinlogCoordinates) (Bin return binlogCoordinates.NextFileCoordinates() } -// IsReplicaOf returns true if this instance claims to replicate from given master +// IsReplicaOf returns true if this instance claims to replicate from given primary func (this *Instance) IsReplicaOf(master *Instance) bool { return this.MasterKey.Equals(&master.Key) } -// IsReplicaOf returns true if this i supposed master of given replica +// IsMasterOf returns true if this instance is the supposed primary of given replica func (this *Instance) IsMasterOf(replica *Instance) bool { return replica.IsReplicaOf(this) } @@ -440,7 +440,7 @@ func (this *Instance) CanReplicateFrom(other *Instance) (bool, error) { if !other.LogReplicationUpdatesEnabled { return false, fmt.Errorf("instance does not have log_slave_updates enabled: %+v", other.Key) } - // OK for a master to not have log_slave_updates + // OK for a primary to not have log_slave_updates // Not OK for a replica, for it has to relay the logs.
} if this.IsSmallerMajorVersion(other) && !this.IsBinlogServer() { diff --git a/go/vt/orchestrator/inst/instance_dao.go b/go/vt/orchestrator/inst/instance_dao.go index 6220ced15a..9c3a8010eb 100644 --- a/go/vt/orchestrator/inst/instance_dao.go +++ b/go/vt/orchestrator/inst/instance_dao.go @@ -687,7 +687,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.SuggestedClusterAlias = fmt.Sprintf("%v:%v", tablet.Keyspace, tablet.Shard) if instance.ReplicationDepth == 0 && config.Config.DetectClusterDomainQuery != "" { - // Only need to do on masters + // Only need to do on primary tablets domainName := "" if err := db.QueryRow(config.Config.DetectClusterDomainQuery).Scan(&domainName); err != nil { domainName = "" @@ -719,7 +719,7 @@ Cleanup: if instanceFound { if instance.IsCoMaster { - // Take co-master into account, and avoid infinite loop + // Take co-primary into account, and avoid infinite loop instance.AncestryUUID = fmt.Sprintf("%s,%s", instance.MasterUUID, instance.ServerUUID) } else { instance.AncestryUUID = fmt.Sprintf("%s,%s", instance.AncestryUUID, instance.ServerUUID) @@ -729,18 +729,18 @@ Cleanup: instance.AncestryUUID = fmt.Sprintf("%s,%s", instance.AncestryUUID, instance.ReplicationGroupName) instance.AncestryUUID = strings.Trim(instance.AncestryUUID, ",") if instance.ExecutedGtidSet != "" && instance.masterExecutedGtidSet != "" { - // Compare master & replica GTID sets, but ignore the sets that present the master's UUID. - // This is because orchestrator may pool master and replica at an inconvenient timing, - // such that the replica may _seems_ to have more entries than the master, when in fact - // it's just that the master's probing is stale. + // Compare primary & replica GTID sets, but ignore the sets that present the primary's UUID. + // This is because orchestrator may poll primary and replica at an inconvenient timing, + // such that the replica may _seem_ to have more entries than the primary, when in fact + // it's just that the primary's probing is stale. redactedExecutedGtidSet, _ := NewOracleGtidSet(instance.ExecutedGtidSet) for _, uuid := range strings.Split(instance.AncestryUUID, ",") { if uuid != instance.ServerUUID { redactedExecutedGtidSet.RemoveUUID(uuid) } if instance.IsCoMaster && uuid == instance.ServerUUID { - // If this is a co-master, then this server is likely to show its own generated GTIDs as errant, - // because its co-master has not applied them yet + // If this is a co-primary, then this server is likely to show its own generated GTIDs as errant, + // because its co-primary has not applied them yet redactedExecutedGtidSet.RemoveUUID(uuid) } } @@ -807,7 +807,7 @@ func ReadReplicationGroupPrimary(instance *Instance) (err error) { return err } -// ReadInstanceClusterAttributes will return the cluster name for a given instance by looking at its master +// ReadInstanceClusterAttributes will return the cluster name for a given instance by looking at its primary // and getting it from there. // It is a non-recursive function and so-called-recursion is performed upon periodic reading of // instances. @@ -819,7 +819,7 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { var masterOrGroupPrimaryExecutedGtidSet string masterOrGroupPrimaryDataFound := false - // Read the cluster_name of the _master_ or _group_primary_ of our instance, derive it from there. + // Read the cluster_name of the _primary_ or _group_primary_ of our instance, derive it from there.
query := ` select cluster_name, @@ -833,8 +833,8 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { where hostname=? and port=? ` // For instances that are part of a replication group, if the host is not the group's primary, we use the - // information from the group primary. If it is the group primary, we use the information of its master - // (if it has any). If it is not a group member, we use the information from the host's master. + // information from the group primary. If it is the group primary, we use the information of its primary + // (if it has any). If it is not a group member, we use the information from the host's primary. if instance.IsReplicationGroupSecondary() { masterOrGroupPrimaryInstanceKey = instance.ReplicationGroupPrimaryInstanceKey } else { @@ -863,17 +863,17 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { } clusterNameByInstanceKey := instance.Key.StringCode() if clusterName == "" { - // Nothing from master; we set it to be named after the instance itself + // Nothing from primary; we set it to be named after the instance itself clusterName = clusterNameByInstanceKey } isCoMaster := false if masterOrGroupPrimaryInstanceKey.Equals(&instance.Key) { - // co-master calls for special case, in fear of the infinite loop + // co-primary calls for special case, in fear of the infinite loop isCoMaster = true clusterNameByCoMasterKey := instance.MasterKey.StringCode() if clusterName != clusterNameByInstanceKey && clusterName != clusterNameByCoMasterKey { - // Can be caused by a co-master topology failover + // Can be caused by a co-primary topology failover log.Errorf("ReadInstanceClusterAttributes: in co-master topology %s is not in (%s, %s). Forcing it to become one of them", clusterName, clusterNameByInstanceKey, clusterNameByCoMasterKey) clusterName = math.TernaryString(instance.Key.SmallerThan(&instance.MasterKey), clusterNameByInstanceKey, clusterNameByCoMasterKey) } @@ -1153,9 +1153,9 @@ func ReadClusterInstances(clusterName string) ([](*Instance), error) { return readInstancesByCondition(condition, sqlutils.Args(clusterName), "") } -// ReadClusterWriteableMaster returns the/a writeable master of this cluster -// Typically, the cluster name indicates the master of the cluster. However, in circular -// master-master replication one master can assume the name of the cluster, and it is +// ReadClusterWriteableMaster returns the/a writeable primary of this cluster +// Typically, the cluster name indicates the primary of the cluster. However, in circular +// primary-primary replication one primary can assume the name of the cluster, and it is // not guaranteed that it is the writeable one. func ReadClusterWriteableMaster(clusterName string) ([](*Instance), error) { condition := ` @@ -1166,9 +1166,9 @@ func ReadClusterWriteableMaster(clusterName string) ([](*Instance), error) { return readInstancesByCondition(condition, sqlutils.Args(clusterName), "replication_depth asc") } -// ReadClusterMaster returns the master of this cluster. -// - if the cluster has co-masters, the/a writable one is returned -// - if the cluster has a single master, that master is retuened whether it is read-only or writable. +// ReadClusterMaster returns the primary of this cluster. +// - if the cluster has co-primaries, the/a writable one is returned +// - if the cluster has a single primary, that primary is returned whether it is read-only or writable. func ReadClusterMaster(clusterName string) ([](*Instance), error) { condition := ` cluster_name = ? 
@@ -1177,7 +1177,7 @@ func ReadClusterMaster(clusterName string) ([](*Instance), error) { return readInstancesByCondition(condition, sqlutils.Args(clusterName), "read_only asc, replication_depth asc") } -// ReadWriteableClustersMasters returns writeable masters of all clusters, but only one +// ReadWriteableClustersMasters returns writeable primaries of all clusters, but only one // per cluster, in similar logic to ReadClusterWriteableMaster func ReadWriteableClustersMasters() (instances [](*Instance), err error) { condition := ` @@ -1204,7 +1204,7 @@ func ReadClusterAliasInstances(clusterAlias string) ([](*Instance), error) { return readInstancesByCondition(condition, sqlutils.Args(clusterAlias), "") } -// ReadReplicaInstances reads replicas of a given master +// ReadReplicaInstances reads replicas of a given primary func ReadReplicaInstances(masterKey *InstanceKey) ([](*Instance), error) { condition := ` master_host = ? @@ -1233,7 +1233,7 @@ func ReadReplicaInstancesIncludingBinlogServerSubReplicas(masterKey *InstanceKey return replicas, err } -// ReadBinlogServerReplicaInstances reads direct replicas of a given master that are binlog servers +// ReadBinlogServerReplicaInstances reads direct replicas of a given primary that are binlog servers func ReadBinlogServerReplicaInstances(masterKey *InstanceKey) ([](*Instance), error) { condition := ` master_host = ? @@ -1452,7 +1452,7 @@ func filterOSCInstances(instances [](*Instance)) [](*Instance) { } // GetClusterOSCReplicas returns a heuristic list of replicas which are fit as controll replicas for an OSC operation. -// These would be intermediate masters +// These would be intermediate primaries func GetClusterOSCReplicas(clusterName string) ([](*Instance), error) { var intermediateMasters [](*Instance) result := [](*Instance){} @@ -1665,8 +1665,8 @@ func updateInstanceClusterName(instance *Instance) error { return ExecDBWriteFunc(writeFunc) } -// ReplaceClusterName replaces all occurances of oldClusterName with newClusterName -// It is called after a master failover +// ReplaceClusterName replaces all occurrences of oldClusterName with newClusterName +// It is called after a primary failover func ReplaceClusterName(oldClusterName string, newClusterName string) error { if oldClusterName == "" { return log.Errorf("replaceClusterName: skipping empty oldClusterName") @@ -1723,7 +1723,7 @@ func ReviewUnseenInstances() error { return err } -// readUnseenMasterKeys will read list of masters that have never been seen, and yet whose replicas +// readUnseenMasterKeys will read list of primaries that have never been seen, and yet whose replicas // seem to be replicating. func readUnseenMasterKeys() ([]InstanceKey, error) { res := []InstanceKey{} @@ -1775,8 +1775,8 @@ func InjectSeed(instanceKey *InstanceKey) error { return err } -// InjectUnseenMasters will review masters of instances that are known to be replicating, yet which are not listed -// in database_instance. Since their replicas are listed as replicating, we can assume that such masters actually do +// InjectUnseenMasters will review primaries of instances that are known to be replicating, yet which are not listed +// in database_instance. Since their replicas are listed as replicating, we can assume that such primaries actually do // exist: we shall therefore inject them with minimal details into the database_instance table. 
func InjectUnseenMasters() error { @@ -2045,7 +2045,7 @@ func ReadClustersInfo(clusterName string) ([]ClusterInfo, error) { return clusters, err } -// Get a listing of KVPair for clusters masters, for all clusters or for a specific cluster. +// Get a listing of KVPair for clusters primaries, for all clusters or for a specific cluster. func GetMastersKVPairs(clusterName string) (kvPairs [](*kv.KVPair), err error) { clusterAliasMap := make(map[string]string) diff --git a/go/vt/orchestrator/inst/instance_topology.go b/go/vt/orchestrator/inst/instance_topology.go index fdaad91368..307dd158d3 100644 --- a/go/vt/orchestrator/inst/instance_topology.go +++ b/go/vt/orchestrator/inst/instance_topology.go @@ -129,10 +129,10 @@ func ASCIITopology(clusterName string, historyTimestampPattern string, tabulated // Get entries: var entries []string if masterInstance != nil { - // Single master + // Single primary entries = getASCIITopologyEntry(0, masterInstance, replicationMap, historyTimestampPattern == "", fillerCharacter, tabulated, printTags) } else { - // Co-masters? For visualization we put each in its own branch while ignoring its other co-masters. + // Co-primaries? For visualization we put each in its own branch while ignoring its other co-primaries. for _, instance := range instances { if instance.IsCoMaster { entries = append(entries, getASCIITopologyEntry(1, instance, replicationMap, historyTimestampPattern == "", fillerCharacter, tabulated, printTags)...) @@ -179,13 +179,13 @@ func shouldPostponeRelocatingReplica(replica *Instance, postponedFunctionsContai } // GetInstanceMaster synchronously reaches into the replication topology -// and retrieves master's data +// and retrieves primary's data func GetInstanceMaster(instance *Instance) (*Instance, error) { master, err := ReadTopologyInstance(&instance.MasterKey) return master, err } -// InstancesAreSiblings checks whether both instances are replicating from same master +// InstancesAreSiblings checks whether both instances are replicating from same primary func InstancesAreSiblings(instance0, instance1 *Instance) bool { if !instance0.IsReplica() { return false @@ -200,7 +200,7 @@ func InstancesAreSiblings(instance0, instance1 *Instance) bool { return instance0.MasterKey.Equals(&instance1.MasterKey) } -// InstanceIsMasterOf checks whether an instance is the master of another +// InstanceIsMasterOf checks whether an instance is the primary of another func InstanceIsMasterOf(allegedMaster, allegedReplica *Instance) bool { if !allegedReplica.IsReplica() { return false @@ -214,7 +214,7 @@ func InstanceIsMasterOf(allegedMaster, allegedReplica *Instance) bool { // MoveUp will attempt moving instance indicated by instanceKey up the topology hierarchy. // It will perform all safety and sanity checks and will tamper with this instance's replication -// as well as its master. +// as well as its primary. func MoveUp(instanceKey *InstanceKey) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { @@ -278,7 +278,7 @@ func MoveUp(instanceKey *InstanceKey) (*Instance, error) { } } - // We can skip hostname unresolve; we just copy+paste whatever our master thinks of its master. + // We can skip hostname unresolve; we just copy+paste whatever our primary thinks of its primary. 
_, err = ChangeMasterTo(instanceKey, &master.MasterKey, &master.ExecBinlogCoordinates, true, GTIDHintDeny) if err != nil { goto Cleanup @@ -446,7 +446,7 @@ func MoveBelow(instanceKey, siblingKey *InstanceKey) (*Instance, error) { } if sibling.IsBinlogServer() { - // Binlog server has same coordinates as master + // Binlog server has same coordinates as primary // Easy solution! return Repoint(instanceKey, &sibling.Key, GTIDHintDeny) } @@ -686,7 +686,7 @@ func moveReplicasViaGTID(replicas [](*Instance), other *Instance, postponedFunct return movedReplicas, unmovedReplicas, err, errs } -// MoveReplicasGTID will (attempt to) move all replicas of given master below given instance. +// MoveReplicasGTID will (attempt to) move all replicas of given primary below given instance. func MoveReplicasGTID(masterKey *InstanceKey, belowKey *InstanceKey, pattern string) (movedReplicas [](*Instance), unmovedReplicas [](*Instance), err error, errs []error) { belowInstance, err := ReadTopologyInstance(belowKey) if err != nil { @@ -712,8 +712,8 @@ func MoveReplicasGTID(masterKey *InstanceKey, belowKey *InstanceKey, pattern str return movedReplicas, unmovedReplicas, err, errs } -// Repoint connects a replica to a master using its exact same executing coordinates. -// The given masterKey can be null, in which case the existing master is used. +// Repoint connects a replica to a primary using its exact same executing coordinates. +// The given masterKey can be null, in which case the existing primary is used. // Two use cases: // - masterKey is nil: use case is corrupted relay logs on replica // - masterKey is not nil: using Binlog servers (coordinates remain the same) @@ -733,9 +733,9 @@ func Repoint(instanceKey *InstanceKey, masterKey *InstanceKey, gtidHint Operatio if masterKey == nil { masterKey = &instance.MasterKey } - // With repoint we *prefer* the master to be alive, but we don't strictly require it. - // The use case for the master being alive is with hostname-resolve or hostname-unresolve: asking the replica - // to reconnect to its same master while changing the MASTER_HOST in CHANGE MASTER TO due to DNS changes etc. + // With repoint we *prefer* the primary to be alive, but we don't strictly require it. + // The use case for the primary being alive is with hostname-resolve or hostname-unresolve: asking the replica + // to reconnect to its same primary while changing the MASTER_HOST in CHANGE MASTER TO due to DNS changes etc. master, err := ReadTopologyInstance(masterKey) masterIsAccessible := (err == nil) if !masterIsAccessible { @@ -770,7 +770,7 @@ func Repoint(instanceKey *InstanceKey, masterKey *InstanceKey, gtidHint Operatio goto Cleanup } - // See above, we are relaxed about the master being accessible/inaccessible. + // See above, we are relaxed about the primary being accessible/inaccessible. // If accessible, we wish to do hostname-unresolve. If inaccessible, we can skip the test and not fail the // ChangeMasterTo operation. This is why we pass "!masterIsAccessible" below. if instance.ExecBinlogCoordinates.IsEmpty() { @@ -793,7 +793,7 @@ Cleanup: } -// RepointTo repoints list of replicas onto another master. +// RepointTo repoints list of replicas onto another primary. 
// Binlog Server is the major use case func RepointTo(replicas [](*Instance), belowKey *InstanceKey) ([](*Instance), error, []error) { res := [](*Instance){} @@ -846,7 +846,7 @@ func RepointTo(replicas [](*Instance), belowKey *InstanceKey) ([](*Instance), er return res, nil, errs } -// RepointReplicasTo repoints replicas of a given instance (possibly filtered) onto another master. +// RepointReplicasTo repoints replicas of a given instance (possibly filtered) onto another primary. // Binlog Server is the major use case func RepointReplicasTo(instanceKey *InstanceKey, pattern string, belowKey *InstanceKey) ([](*Instance), error, []error) { res := [](*Instance){} @@ -863,20 +863,20 @@ func RepointReplicasTo(instanceKey *InstanceKey, pattern string, belowKey *Insta return res, nil, errs } if belowKey == nil { - // Default to existing master. All replicas are of the same master, hence just pick one. + // Default to existing primary. All replicas are of the same primary, hence just pick one. belowKey = &replicas[0].MasterKey } log.Infof("Will repoint replicas of %+v to %+v", *instanceKey, *belowKey) return RepointTo(replicas, belowKey) } -// RepointReplicas repoints all replicas of a given instance onto its existing master. +// RepointReplicas repoints all replicas of a given instance onto its existing primary. func RepointReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), error, []error) { return RepointReplicasTo(instanceKey, pattern, nil) } -// MakeCoMaster will attempt to make an instance co-master with its master, by making its master a replica of its own. -// This only works out if the master is not replicating; the master does not have a known master (it may have an unknown master). +// MakeCoMaster will attempt to make an instance co-primary with its primary, by making its primary a replica of the instance itself. +// This only works out if the primary is not replicating; the primary does not have a known primary (it may have an unknown primary). func MakeCoMaster(instanceKey *InstanceKey) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { @@ -905,16 +905,16 @@ func MakeCoMaster(instanceKey *InstanceKey) (*Instance, error) { return instance, fmt.Errorf("instance %+v is not read-only; first make it read-only before making it co-master", instance.Key) } if master.IsCoMaster { - // We allow breaking of an existing co-master replication. Here's the breakdown: + // We allow breaking of an existing co-primary replication. Here's the breakdown: // Ideally, this would not eb allowed, and we would first require the user to RESET SLAVE on 'master' - // prior to making it participate as co-master with our 'instance'. + // prior to making it participate as co-primary with our 'instance'. // However there's the problem that upon RESET SLAVE we lose the replication's user/password info. // Thus, we come up with the following rule: - // If S replicates from M1, and M1<->M2 are co masters, we allow S to become co-master of M1 (S<->M1) if: + // If S replicates from M1, and M1<->M2 are co-primaries, we allow S to become co-primary of M1 (S<->M1) if: // - M1 is writeable // - M2 is read-only or is unreachable/invalid // - S is read-only - // And so we will be replacing one read-only co-master with another. + // And so we will be replacing one read-only co-primary with another.
otherCoMaster, found, _ := ReadInstance(&master.MasterKey) if found && otherCoMaster.IsLastCheckValid && !otherCoMaster.ReadOnly { return instance, fmt.Errorf("master %+v is already co-master with %+v, and %+v is alive, and not read-only; cowardly refusing to demote it. Please set it as read-only beforehand", master.Key, otherCoMaster.Key, otherCoMaster.Key) @@ -942,10 +942,10 @@ func MakeCoMaster(instanceKey *InstanceKey) (*Instance, error) { defer EndMaintenance(maintenanceToken) } - // the coMaster used to be merely a replica. Just point master into *some* position + // the coMaster used to be merely a replica. Just point primary into *some* position // within coMaster... if master.IsReplica() { - // this is the case of a co-master. For masters, the StopReplication operation throws an error, and + // this is the case of a co-primary. For primaries, the StopReplication operation throws an error, and // there's really no point in doing it. master, err = StopReplication(&master.Key) if err != nil { @@ -1021,7 +1021,7 @@ Cleanup: return instance, err } -// DetachReplicaMasterHost detaches a replica from its master by corrupting the Master_Host (in such way that is reversible) +// DetachReplicaMasterHost detaches a replica from its primary by corrupting the Master_Host (in such a way that it is reversible) func DetachReplicaMasterHost(instanceKey *InstanceKey) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { @@ -1065,7 +1065,7 @@ Cleanup: return instance, err } -// ReattachReplicaMasterHost reattaches a replica back onto its master by undoing a DetachReplicaMasterHost operation +// ReattachReplicaMasterHost reattaches a replica back onto its primary by undoing a DetachReplicaMasterHost operation func ReattachReplicaMasterHost(instanceKey *InstanceKey) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { @@ -1098,7 +1098,7 @@ func ReattachReplicaMasterHost(instanceKey *InstanceKey) (*Instance, error) { if err != nil { goto Cleanup } - // Just in case this instance used to be a master: + // Just in case this instance used to be a primary: ReplaceAliasClusterName(instanceKey.StringCode(), reattachedMasterKey.StringCode()) Cleanup: @@ -1322,7 +1322,7 @@ Cleanup: return instance, err } -// ErrantGTIDInjectEmpty will inject an empty transaction on the master of an instance's cluster in order to get rid +// ErrantGTIDInjectEmpty will inject an empty transaction on the primary of an instance's cluster in order to get rid // of an errant transaction observed on the instance. func ErrantGTIDInjectEmpty(instanceKey *InstanceKey) (instance *Instance, clusterMaster *Instance, countInjectedTransactions int64, err error) { instance, err = ReadTopologyInstance(instanceKey) @@ -1418,10 +1418,10 @@ func TakeMasterHook(successor *Instance, demoted *Instance) { } -// TakeMaster will move an instance up the chain and cause its master to become its replica. +// TakeMaster will move an instance up the chain and cause its primary to become its replica.
+// It's almost a role change, just that other replicas of either 'instance' or its primary are currently unaffected // (they continue replicate without change) -// Note that the master must itself be a replica; however the grandparent does not necessarily have to be reachable +// Note that the primary must itself be a replica; however the grandparent does not necessarily have to be reachable // and can in fact be dead. func TakeMaster(instanceKey *InstanceKey, allowTakingCoMaster bool) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) @@ -1461,14 +1461,14 @@ func TakeMaster(instanceKey *InstanceKey, allowTakingCoMaster bool) (*Instance, } // instance and masterInstance are equal - // We skip name unresolve. It is OK if the master's master is dead, unreachable, does not resolve properly. - // We just copy+paste info from the master. + // We skip name unresolve. It is OK if the primary's primary is dead, unreachable, does not resolve properly. + // We just copy+paste info from the primary. // In particular, this is commonly calledin DeadMaster recovery instance, err = ChangeMasterTo(&instance.Key, &masterInstance.MasterKey, &masterInstance.ExecBinlogCoordinates, true, GTIDHintNeutral) if err != nil { goto Cleanup } - // instance is now sibling of master + // instance is now sibling of primary masterInstance, err = ChangeMasterTo(&masterInstance.Key, &instance.Key, &instance.SelfBinlogCoordinates, false, GTIDHintNeutral) if err != nil { goto Cleanup @@ -1508,7 +1508,7 @@ func sortInstances(instances [](*Instance)) { sortInstancesDataCenterHint(instances, "") } -// getReplicasForSorting returns a list of replicas of a given master potentially for candidate choosing +// getReplicasForSorting returns a list of replicas of a given primary potentially for candidate choosing func getReplicasForSorting(masterKey *InstanceKey, includeBinlogServerSubReplicas bool) (replicas [](*Instance), err error) { if includeBinlogServerSubReplicas { replicas, err = ReadReplicaInstancesIncludingBinlogServerSubReplicas(masterKey) @@ -1522,10 +1522,10 @@ func sortedReplicas(replicas [](*Instance), stopReplicationMethod StopReplicatio return sortedReplicasDataCenterHint(replicas, stopReplicationMethod, "") } -// sortedReplicas returns the list of replicas of some master, sorted by exec coordinates +// sortedReplicasDataCenterHint returns the list of replicas of some primary, sorted by exec coordinates // (most up-to-date replica first). // This function assumes given `replicas` argument is indeed a list of instances all replicating -// from the same master (the result of `getReplicasForSorting()` is appropriate) +// from the same primary (the result of `getReplicasForSorting()` is appropriate) func sortedReplicasDataCenterHint(replicas [](*Instance), stopReplicationMethod StopReplicationMethod, dataCenterHint string) [](*Instance) { if len(replicas) <= 1 { return replicas @@ -1541,7 +1541,7 @@ func sortedReplicasDataCenterHint(replicas [](*Instance), stopReplicationMethod return replicas } -// GetSortedReplicas reads list of replicas of a given master, and returns them sorted by exec coordinates +// GetSortedReplicas reads list of replicas of a given primary, and returns them sorted by exec coordinates // (most up-to-date replica first).
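"Sorted by exec coordinates (most up-to-date replica first)" boils down to comparing executed binlog file names, then positions. A condensed sketch with stand-in fields; the real comparison lives on Instance/BinlogCoordinates:

    package main

    import (
        "fmt"
        "sort"
    )

    type replica struct {
        host    string
        logFile string // executed binlog file; same-basename files order lexicographically
        logPos  int64  // position within that file
    }

    func sortMostUpToDateFirst(replicas []replica) {
        sort.Slice(replicas, func(i, j int) bool {
            if replicas[i].logFile != replicas[j].logFile {
                return replicas[i].logFile > replicas[j].logFile
            }
            return replicas[i].logPos > replicas[j].logPos
        })
    }

    func main() {
        rs := []replica{
            {"r1", "bin.000012", 400},
            {"r2", "bin.000013", 120},
            {"r3", "bin.000012", 900},
        }
        sortMostUpToDateFirst(rs)
        fmt.Println(rs) // r2 first: it has executed furthest
    }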
func GetSortedReplicas(masterKey *InstanceKey, stopReplicationMethod StopReplicationMethod) (replicas [](*Instance), err error) { if replicas, err = getReplicasForSorting(masterKey, false); err != nil { @@ -1583,7 +1583,7 @@ func isGenerallyValidAsCandidateReplica(replica *Instance) bool { } // isValidAsCandidateMasterInBinlogServerTopology let's us know whether a given replica is generally -// valid to promote to be master. +// valid to promote to be primary. func isValidAsCandidateMasterInBinlogServerTopology(replica *Instance) bool { if !replica.IsLastCheckValid { // something wrong with this replica right now. We shouldn't hope to be able to promote it @@ -1674,7 +1674,7 @@ func chooseCandidateReplica(replicas [](*Instance)) (candidateReplica *Instance, } } if candidateReplica == nil { - // Unable to find a candidate that will master others. + // Unable to find a candidate that can act as a primary of the others. // Instead, pick a (single) replica which is not banned. for _, replica := range replicas { replica := replica @@ -1710,7 +1710,7 @@ func chooseCandidateReplica(replicas [](*Instance)) (candidateReplica *Instance, return candidateReplica, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err } -// GetCandidateReplica chooses the best replica to promote given a (possibly dead) master +// GetCandidateReplica chooses the best replica to promote given a (possibly dead) primary func GetCandidateReplica(masterKey *InstanceKey, forRematchPurposes bool) (*Instance, [](*Instance), [](*Instance), [](*Instance), [](*Instance), error) { var candidateReplica *Instance aheadReplicas := [](*Instance){} @@ -1751,7 +1751,7 @@ func GetCandidateReplica(masterKey *InstanceKey, forRematchPurposes bool) (*Inst return candidateReplica, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, nil } -// GetCandidateReplicaOfBinlogServerTopology chooses the best replica to promote given a (possibly dead) master +// GetCandidateReplicaOfBinlogServerTopology chooses the best replica to promote given a (possibly dead) primary func GetCandidateReplicaOfBinlogServerTopology(masterKey *InstanceKey) (candidateReplica *Instance, err error) { replicas, err := getReplicasForSorting(masterKey, true) if err != nil { @@ -1963,7 +1963,7 @@ func relocateBelowInternal(instance, other *Instance) (*Instance, error) { return Repoint(&instance.Key, &other.Key, GTIDHintDeny) } - // Relocate to its master, then repoint to the binlog server + // Relocate to its primary, then repoint to the binlog server otherMaster, found, err := ReadInstance(&other.MasterKey) if err != nil { return instance, err @@ -1983,7 +1983,7 @@ func relocateBelowInternal(instance, other *Instance) (*Instance, error) { } if instance.IsBinlogServer() { // Can only move within the binlog-server family tree - // And these have been covered just now: move up from a master binlog server, move below a binling binlog server. + // And these have been covered just now: move up from a primary binlog server, move below a sibling binlog server.
// sure, the family can be more complex, but we keep these operations atomic return nil, log.Errorf("Relocating binlog server %+v below %+v turns to be too complex; please do it manually", instance.Key, other.Key) } @@ -2001,7 +2001,7 @@ func relocateBelowInternal(instance, other *Instance) (*Instance, error) { } // See if we need to MoveUp if instanceMaster != nil && instanceMaster.MasterKey.Equals(&other.Key) { - // Moving to grandparent--handles co-mastering writable case + // Moving to grandparent--handles co-primary writable case return MoveUp(&instance.Key) } if instanceMaster != nil && instanceMaster.IsBinlogServer() { diff --git a/go/vt/orchestrator/inst/instance_topology_dao.go b/go/vt/orchestrator/inst/instance_topology_dao.go index b5c69eb594..03a1880b18 100644 --- a/go/vt/orchestrator/inst/instance_topology_dao.go +++ b/go/vt/orchestrator/inst/instance_topology_dao.go @@ -566,7 +566,7 @@ func workaroundBug83713(instanceKey *InstanceKey) { } } -// ChangeMasterTo changes the given instance's master according to given input. +// ChangeMasterTo changes the given instance's primary according to given input. // TODO(sougou): deprecate ReplicationCredentialsQuery, and all other credential discovery. func ChangeMasterTo(instanceKey *InstanceKey, masterKey *InstanceKey, masterBinlogCoordinates *BinlogCoordinates, skipUnresolve bool, gtidHint OperationGTIDHint) (*Instance, error) { user, password := config.Config.MySQLReplicaUser, config.Config.MySQLReplicaPassword @@ -617,7 +617,7 @@ func ChangeMasterTo(instanceKey *InstanceKey, masterKey *InstanceKey, masterBinl // Is MariaDB; not using GTID, turn into GTID mariadbGTIDHint := "slave_pos" if !instance.ReplicationThreadsExist() { - // This instance is currently a master. As per https://mariadb.com/kb/en/change-master-to/#master_use_gtid + // This instance is currently a primary. As per https://mariadb.com/kb/en/change-master-to/#master_use_gtid // we should be using current_pos. // See also: // - https://github.com/openark/orchestrator/issues/1146 @@ -684,9 +684,9 @@ func ChangeMasterTo(instanceKey *InstanceKey, masterKey *InstanceKey, masterBinl return instance, err } -// SkipToNextBinaryLog changes master position to beginning of next binlog +// SkipToNextBinaryLog changes primary position to beginning of next binlog // USE WITH CARE! -// Use case is binlog servers where the master was gone & replaced by another. +// Use case is binlog servers where the primary was gone & replaced by another. func SkipToNextBinaryLog(instanceKey *InstanceKey) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { diff --git a/go/vt/orchestrator/inst/tablet_dao.go b/go/vt/orchestrator/inst/tablet_dao.go index 8eb84bf4cd..d674dc5e28 100644 --- a/go/vt/orchestrator/inst/tablet_dao.go +++ b/go/vt/orchestrator/inst/tablet_dao.go @@ -40,8 +40,8 @@ var TopoServ *topo.Server // ErrTabletAliasNil is a fixed error message. var ErrTabletAliasNil = errors.New("tablet alias is nil") -// SwitchMaster makes the new tablet the master and proactively performs -// the necessary propagation to the old master. The propagation is best +// SwitchMaster makes the new tablet the primary and proactively performs +// the necessary propagation to the old primary. The propagation is best // effort. If it fails, the tablet's shard sync will eventually converge. // The proactive propagation allows a competing Orchestrator from discovering // the successful action of a previous one, which reduces churn. 
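The MariaDB branch of ChangeMasterTo above chooses between slave_pos and current_pos. Distilled, the decision is just the following, with a plain parameter standing in for instance.ReplicationThreadsExist():

    package main

    import "fmt"

    // mariadbGTIDHint: a server with no replication threads is currently a
    // primary and must use current_pos (per the MariaDB KB link above);
    // otherwise slave_pos is the safe choice.
    func mariadbGTIDHint(replicationThreadsExist bool) string {
        if !replicationThreadsExist {
            return "current_pos"
        }
        return "slave_pos"
    }

    func main() {
        fmt.Printf("CHANGE MASTER TO MASTER_USE_GTID = %s\n", mariadbGTIDHint(false))
    }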
@@ -87,7 +87,7 @@ func SwitchMaster(newMasterKey, oldMasterKey InstanceKey) error { return nil } -// ChangeTabletType designates the tablet that owns an instance as the master. +// ChangeTabletType designates the tablet that owns an instance as the primary. func ChangeTabletType(instanceKey InstanceKey, tabletType topodatapb.TabletType) (*topodatapb.Tablet, error) { if instanceKey.Hostname == "" { return nil, errors.New("can't set tablet to master: instance is unspecified") diff --git a/go/vt/orchestrator/logic/orchestrator.go b/go/vt/orchestrator/logic/orchestrator.go index 28c4d2062d..176f015cab 100644 --- a/go/vt/orchestrator/logic/orchestrator.go +++ b/go/vt/orchestrator/logic/orchestrator.go @@ -176,7 +176,7 @@ func handleDiscoveryRequests() { } // DiscoverInstance will attempt to discover (poll) an instance (unless -// it is already up to date) and will also ensure that its master and +// it is already up to date) and will also ensure that its primary and // replicas (if any) are also checked. func DiscoverInstance(instanceKey inst.InstanceKey) { if inst.InstanceIsForgotten(&instanceKey) { @@ -324,7 +324,7 @@ func onHealthTick() { } } -// Write a cluster's master (or all clusters masters) to kv stores. +// SubmitMastersToKvStores records a cluster's primary (or all clusters' primaries) to kv stores. // This should generally only happen once in a lifetime of a cluster. Otherwise KV // stores are updated via failovers. func SubmitMastersToKvStores(clusterName string, force bool) (kvPairs [](*kv.KVPair), submittedCount int, err error) { diff --git a/go/vt/orchestrator/logic/tablet_discovery.go b/go/vt/orchestrator/logic/tablet_discovery.go index 2a915952cc..70f57859a9 100644 --- a/go/vt/orchestrator/logic/tablet_discovery.go +++ b/go/vt/orchestrator/logic/tablet_discovery.go @@ -284,12 +284,12 @@ func TabletRefresh(instanceKey inst.InstanceKey) (*topodatapb.Tablet, error) { return ti.Tablet, nil } -// TabletDemoteMaster requests the master tablet to stop accepting transactions. +// TabletDemoteMaster requests the primary tablet to stop accepting transactions. func TabletDemoteMaster(instanceKey inst.InstanceKey) error { return tabletDemoteMaster(instanceKey, true) } -// TabletUndoDemoteMaster requests the master tablet to undo the demote. +// TabletUndoDemoteMaster requests the primary tablet to undo the demote. func TabletUndoDemoteMaster(instanceKey inst.InstanceKey) error { return tabletDemoteMaster(instanceKey, false) } diff --git a/go/vt/orchestrator/logic/topology_recovery.go b/go/vt/orchestrator/logic/topology_recovery.go index 825e1c4331..6dfb8d6861 100644 --- a/go/vt/orchestrator/logic/topology_recovery.go +++ b/go/vt/orchestrator/logic/topology_recovery.go @@ -423,7 +423,7 @@ func recoverDeadMasterInBinlogServerTopology(topologyRecovery *TopologyRecovery) if err != nil { return promotedReplica, log.Errore(err) } - // Reconnect binlog servers to promoted replica (now master): + // Reconnect binlog servers to promoted replica (now primary): promotedBinlogServer, err = inst.SkipToNextBinaryLog(&promotedBinlogServer.Key) if err != nil { return promotedReplica, log.Errore(err) @@ -434,9 +434,9 @@ func recoverDeadMasterInBinlogServerTopology(topologyRecovery *TopologyRecovery) } func() { - // Move binlog server replicas up to replicate from master. + // Move binlog server replicas up to replicate from primary. // This can only be done once a BLS has skipped to the next binlog - // We postpone this operation. The master is already promoted and we're happy.
+ // We postpone this operation. The primary is already promoted and we're happy. binlogServerReplicas, err := inst.ReadBinlogServerReplicaInstances(&promotedBinlogServer.Key) if err != nil { return @@ -452,8 +452,8 @@ func recoverDeadMasterInBinlogServerTopology(topologyRecovery *TopologyRecovery) if err != nil { return err } - // Make sure the BLS has the "next binlog" -- the one the master flushed & purged to. Otherwise the BLS - // will request a binlog the master does not have + // Make sure the BLS has the "next binlog" -- the one the primary flushed & purged to. Otherwise the BLS + // will request a binlog the primary does not have if binlogServerReplica.ExecBinlogCoordinates.SmallerThan(&promotedBinlogServer.ExecBinlogCoordinates) { binlogServerReplica, err = inst.StartReplicationUntilMasterCoordinates(&binlogServerReplica.Key, &promotedBinlogServer.ExecBinlogCoordinates) if err != nil { @@ -480,7 +480,7 @@ func GetMasterRecoveryType(analysisEntry *inst.ReplicationAnalysis) (masterRecov return masterRecoveryType } -// recoverDeadMaster recovers a dead master, complete logic inside +// recoverDeadMaster recovers a dead primary, complete logic inside func recoverDeadMaster(topologyRecovery *TopologyRecovery, candidateInstanceKey *inst.InstanceKey, skipProcesses bool) (recoveryAttempted bool, promotedReplica *inst.Instance, lostReplicas [](*inst.Instance), err error) { topologyRecovery.Type = MasterRecovery analysisEntry := &topologyRecovery.AnalysisEntry @@ -617,7 +617,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de // Maybe we actually promoted such a replica. Does that mean we should keep it? // Maybe we promoted a "neutral", and some "prefer" server is available. // Maybe we promoted a "prefer_not" - // Maybe we promoted a server in a different DC than the master + // Maybe we promoted a server in a different DC than the primary // There's many options. We may wish to replace the server we promoted with a better one. AuditTopologyRecovery(topologyRecovery, "checking if should replace promoted replica with a better candidate") if candidateInstanceKey == nil { @@ -651,11 +651,11 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de } } if candidateInstanceKey == nil { - // We cannot find a candidate in same DC and ENV as dead master + // We cannot find a candidate in same DC and ENV as dead primary AuditTopologyRecovery(topologyRecovery, "+ checking if promoted replica is an OK candidate") for _, candidateReplica := range candidateReplicas { if promotedReplica.Key.Equals(&candidateReplica.Key) { - // Seems like we promoted a candidate replica (though not in same DC and ENV as dead master) + // Seems like we promoted a candidate replica (though not in same DC and ENV as dead primary) if satisfied, reason := MasterFailoverGeographicConstraintSatisfied(&topologyRecovery.AnalysisEntry, candidateReplica); satisfied { // Good enough. No further action required. AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("promoted replica %+v is a good candidate", promotedReplica.Key)) @@ -709,7 +709,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de if candidateInstanceKey == nil { // Still nothing? Then we didn't find a replica marked as "candidate". 
OK, further down the stream we have: - // find neutral instance in same dv&env as dead master + // find neutral instance in same DC & env as dead primary AuditTopologyRecovery(topologyRecovery, "+ searching for a neutral server to replace promoted server, in same DC and env as dead master") for _, neutralReplica := range neutralReplicas { if canTakeOverPromotedServerAsMaster(neutralReplica, promotedReplica) && @@ -763,7 +763,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de return replacement, true, err } -// replacePromotedReplicaWithCandidate is called after a master (or co-master) +// replacePromotedReplicaWithCandidate is called after a primary (or co-primary) // died and was replaced by some promotedReplica. // But, is there an even better replica to promote? // if candidateInstanceKey is given, then it is forced to be promoted over the promotedReplica @@ -920,7 +920,7 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: applying read-only=0 on promoted master: success=%t", (err == nil))) } } - // Let's attempt, though we won't necessarily succeed, to set old master as read-only + // Let's attempt, though we won't necessarily succeed, to set old primary as read-only go func() { _, err := inst.SetReadOnly(&analysisEntry.AnalyzedInstanceKey, true) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: applying read-only=1 on demoted master: success=%t", (err == nil))) @@ -961,7 +961,7 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate attributes.SetGeneralAttribute(analysisEntry.ClusterDetails.ClusterDomain, promotedReplica.Key.StringCode()) if !skipProcesses { - // Execute post master-failover processes + // Execute post primary-failover processes executeProcesses(config.Config.PostMasterFailoverProcesses, "PostMasterFailoverProcesses", topologyRecovery, false) } } else { @@ -1048,7 +1048,7 @@ func canTakeOverPromotedServerAsMaster(wantToTakeOver *inst.Instance, toBeTakenO return true } -// GetCandidateSiblingOfIntermediateMaster chooses the best sibling of a dead intermediate master +// GetCandidateSiblingOfIntermediateMaster chooses the best sibling of a dead intermediate primary // to whom the IM's replicas can be moved.
func GetCandidateSiblingOfIntermediateMaster(topologyRecovery *TopologyRecovery, intermediateMasterInstance *inst.Instance) (*inst.Instance, error) { @@ -1107,7 +1107,7 @@ func GetCandidateSiblingOfIntermediateMaster(topologyRecovery *TopologyRecovery, return nil, log.Errorf("topology_recovery: cannot find candidate sibling of %+v", intermediateMasterInstance.Key) } -// RecoverDeadIntermediateMaster performs intermediate master recovery; complete logic inside +// RecoverDeadIntermediateMaster performs intermediate primary recovery; complete logic inside func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) (successorInstance *inst.Instance, err error) { topologyRecovery.Type = IntermediateMasterRecovery analysisEntry := &topologyRecovery.AnalysisEntry @@ -1152,7 +1152,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Relocated %d replicas under candidate sibling: %+v; %d errors: %+v", len(relocatedReplicas), candidateSibling.Key, len(errs), errs)) } } - // Plan A: find a replacement intermediate master in same Data Center + // Plan A: find a replacement intermediate primary in same Data Center if candidateSiblingOfIntermediateMaster != nil && candidateSiblingOfIntermediateMaster.DataCenter == intermediateMasterInstance.DataCenter { relocateReplicasToCandidateSibling() } @@ -1173,7 +1173,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce successorInstance = regroupPromotedReplica } } - // Plan C: try replacement intermediate master in other DC... + // Plan C: try replacement intermediate primary in other DC... if candidateSiblingOfIntermediateMaster != nil && candidateSiblingOfIntermediateMaster.DataCenter != intermediateMasterInstance.DataCenter { AuditTopologyRecovery(topologyRecovery, "- RecoverDeadIntermediateMaster: will next attempt relocating to another DC server") relocateReplicasToCandidateSibling() @@ -1242,7 +1242,7 @@ func checkAndRecoverDeadIntermediateMaster(analysisEntry inst.ReplicationAnalysi return true, topologyRecovery, err } -// RecoverDeadCoMaster recovers a dead co-master, complete logic inside +// RecoverDeadCoMaster recovers a dead co-primary, complete logic inside func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) (promotedReplica *inst.Instance, lostReplicas [](*inst.Instance), err error) { topologyRecovery.Type = CoMasterRecovery analysisEntry := &topologyRecovery.AnalysisEntry @@ -1317,9 +1317,9 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) topologyRecovery.ParticipatingInstanceKeys.AddKey(promotedReplica.Key) } - // OK, we may have someone promoted. Either this was the other co-master or another replica. - // Noting down that we DO NOT attempt to set a new co-master topology. We are good with remaining with a single master. - // I tried solving the "let's promote a replica and create a new co-master setup" but this turns so complex due to various factors. + // OK, we may have someone promoted. Either this was the other co-primary or another replica. + // Note that we DO NOT attempt to set a new co-primary topology. We are good with remaining with a single primary. + // I tried solving the "let's promote a replica and create a new co-primary setup" but this turns out to be too complex due to various factors. // I see this as risky and not worth the questionable benefit.
// Maybe future me is a smarter person and finds a simple solution. Unlikely. I'm getting dumber. // @@ -1330,7 +1330,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) // !! This is an evil 3-node circle that must be broken. // config.Config.ApplyMySQLPromotionAfterMasterFailover, if true, will cause it to break, because we would RESET SLAVE on S1 // but we want to make sure the circle is broken no matter what. - // So in the case we promoted not-the-other-co-master, we issue a detach-replica-master-host, which is a reversible operation + // So in the case we promoted not-the-other-co-primary, we issue a detach-replica-master-host, which is a reversible operation if promotedReplica != nil && !promotedReplica.Key.Equals(otherCoMasterKey) { _, err = inst.DetachReplicaMasterHost(&promotedReplica.Key) topologyRecovery.AddError(log.Errore(err)) @@ -1471,9 +1471,9 @@ func isInEmergencyOperationGracefulPeriod(instanceKey *inst.InstanceKey) bool { // emergentlyRestartReplicationOnTopologyInstanceReplicas forces a stop slave + start slave on // replicas of a given instance, in an attempt to cause them to re-evaluate their replication state. -// This can be useful in scenarios where the master has Too Many Connections, but long-time connected +// This can be useful in scenarios where the primary has Too Many Connections, but long-time connected // replicas are not seeing this; when they stop+start replication, they need to re-authenticate and -// that's where we hope they realize the master is bad. +// that's where we hope they realize the primary is bad. func emergentlyRestartReplicationOnTopologyInstanceReplicas(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) { if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil { // While each replica's RestartReplication() is throttled on its own, it's also wasteful to @@ -1520,7 +1520,7 @@ func getCheckAndRecoverFunction(analysisCode inst.AnalysisCode, analyzedInstance isActionableRecovery bool, ) { switch analysisCode { - // master + // primary case inst.DeadMaster, inst.DeadMasterAndSomeReplicas: if isInEmergencyOperationGracefulPeriod(analyzedInstanceKey) { return checkAndRecoverGenericProblem, false @@ -1543,7 +1543,7 @@ func getCheckAndRecoverFunction(analysisCode inst.AnalysisCode, analyzedInstance case inst.NotConnectedToMaster, inst.ConnectedToWrongMaster, inst.ReplicationStopped, inst.ReplicaIsWritable, inst.ReplicaSemiSyncMustBeSet, inst.ReplicaSemiSyncMustNotBeSet: return fixReplica, false - // intermediate master + // intermediate primary case inst.DeadIntermediateMaster: return checkAndRecoverDeadIntermediateMaster, true case inst.DeadIntermediateMasterAndSomeReplicas: @@ -1554,12 +1554,12 @@ func getCheckAndRecoverFunction(analysisCode inst.AnalysisCode, analyzedInstance return checkAndRecoverDeadIntermediateMaster, true case inst.DeadIntermediateMasterAndReplicas: return checkAndRecoverGenericProblem, false - // co-master + // co-primary case inst.DeadCoMaster: return checkAndRecoverDeadCoMaster, true case inst.DeadCoMasterAndSomeReplicas: return checkAndRecoverDeadCoMaster, true - // master, non actionable + // primary, non actionable case inst.DeadMasterAndReplicas: return checkAndRecoverGenericProblem, false case inst.UnreachableMaster: @@ -1769,7 +1769,7 @@ func ForceExecuteRecovery(analysisEntry inst.ReplicationAnalysis, candidateInsta return executeCheckAndRecoverFunction(analysisEntry, 
candidateInstanceKey, true, skipProcesses) } -// ForceMasterFailover *trusts* master of given cluster is dead and initiates a failover +// ForceMasterFailover *trusts* primary of given cluster is dead and initiates a failover func ForceMasterFailover(clusterName string) (topologyRecovery *TopologyRecovery, err error) { clusterMasters, err := inst.ReadClusterMaster(clusterName) if err != nil { @@ -1800,7 +1800,7 @@ func ForceMasterFailover(clusterName string) (topologyRecovery *TopologyRecovery return topologyRecovery, nil } -// ForceMasterTakeover *trusts* master of given cluster is dead and fails over to designated instance, +// ForceMasterTakeover *trusts* primary of given cluster is dead and fails over to designated instance, // which has to be its direct child. func ForceMasterTakeover(clusterName string, destination *inst.Instance) (topologyRecovery *TopologyRecovery, err error) { clusterMasters, err := inst.ReadClusterWriteableMaster(clusterName) @@ -1861,7 +1861,7 @@ func getGracefulMasterTakeoverDesignatedInstance(clusterMasterKey *inst.Instance return designatedInstance, nil } - // Verify designated instance is a direct replica of master + // Verify designated instance is a direct replica of primary for _, directReplica := range clusterMasterDirectReplicas { if directReplica.Key.Equals(designatedKey) { designatedInstance = directReplica @@ -1874,12 +1874,12 @@ func getGracefulMasterTakeoverDesignatedInstance(clusterMasterKey *inst.Instance return designatedInstance, nil } -// GracefulMasterTakeover will demote master of existing topology and promote its +// GracefulMasterTakeover will demote primary of existing topology and promote its // direct replica instead. // It expects that replica to have no siblings. -// This function is graceful in that it will first lock down the master, then wait +// This function is graceful in that it will first lock down the primary, then wait // for the designated replica to catch up with last position. -// It will point old master at the newly promoted master at the correct coordinates. +// It will point old primary at the newly promoted primary at the correct coordinates. func GracefulMasterTakeover(clusterName string, designatedKey *inst.InstanceKey, auto bool) (topologyRecovery *TopologyRecovery, promotedMasterCoordinates *inst.BinlogCoordinates, err error) { clusterMasters, err := inst.ReadClusterMaster(clusterName) if err != nil { @@ -1927,7 +1927,7 @@ func GracefulMasterTakeover(clusterName string, designatedKey *inst.InstanceKey, log.Infof("GracefulMasterTakeover: Will let %+v take over its siblings", designatedInstance.Key) relocatedReplicas, _, err, _ := inst.RelocateReplicas(&clusterMaster.Key, &designatedInstance.Key, "") if len(relocatedReplicas) != len(clusterMasterDirectReplicas)-1 { - // We are unable to make designated instance master of all its siblings + // We are unable to make designated instance primary of all its siblings relocatedReplicasKeyMap := inst.NewInstanceKeyMap() relocatedReplicasKeyMap.AddInstances(relocatedReplicas) // Let's see which replicas have not been relocated @@ -2004,7 +2004,7 @@ func GracefulMasterTakeover(clusterName string, designatedKey *inst.InstanceKey, return topologyRecovery, promotedMasterCoordinates, err } -// electNewMaster elects a new master while none were present before. +// electNewMaster elects a new primary while none were present before. 
// TODO(sougou): this should be merged with recoverDeadMaster func electNewMaster(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) @@ -2103,7 +2103,7 @@ func fixClusterAndMaster(analysisEntry inst.ReplicationAnalysis, candidateInstan } log.Infof("Analysis: %v, will fix incorrect mastership %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) - // Reset replication on current master. This will prevent the comaster code-path. + // Reset replication on current primary. This will prevent the co-primary code-path. // TODO(sougou): this should probably be done while holding a lock. _, err = inst.ResetReplicationOperation(&analysisEntry.AnalyzedInstanceKey) if err != nil { @@ -2124,7 +2124,7 @@ func fixClusterAndMaster(analysisEntry inst.ReplicationAnalysis, candidateInstan return recoveryAttempted, topologyRecovery, err } -// fixMaster sets the master as read-write. +// fixMaster sets the primary as read-write. func fixMaster(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) if topologyRecovery == nil { @@ -2156,7 +2156,7 @@ func fixMaster(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *ins return true, topologyRecovery, nil } -// fixReplica sets the replica as read-only and points it at the current master. +// fixReplica sets the replica as read-only and points it at the current primary. func fixReplica(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) if topologyRecovery == nil { diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index 58186ce40f..255d9dd408 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -3752,11 +3752,11 @@ type DemotePrimaryResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Position is deprecated, and is a string representation of a demoted masters executed position. + // Position is deprecated, and is a string representation of a demoted primary's executed position. // // Deprecated: Do not use. DeprecatedPosition string `protobuf:"bytes,1,opt,name=deprecated_position,json=deprecatedPosition,proto3" json:"deprecated_position,omitempty"` - // PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a master that has been demoted. + // PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a primary that has been demoted.
PrimaryStatus *replicationdata.PrimaryStatus `protobuf:"bytes,2,opt,name=primary_status,json=primaryStatus,proto3" json:"primary_status,omitempty"` } diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go index 63eefd4388..31a589ea9a 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go @@ -47,13 +47,13 @@ type TabletManagerClient interface { ExecuteFetchAsApp(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAppRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) // ReplicationStatus returns the current replication status. ReplicationStatus(ctx context.Context, in *tabletmanagerdata.ReplicationStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicationStatusResponse, error) - // MasterStatus returns the current master status. + // MasterStatus returns the current primary status. MasterStatus(ctx context.Context, in *tabletmanagerdata.PrimaryStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryStatusResponse, error) - // PrimaryStatus returns the current master status. + // PrimaryStatus returns the current primary status. PrimaryStatus(ctx context.Context, in *tabletmanagerdata.PrimaryStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryStatusResponse, error) - // MasterPosition returns the current master position + // MasterPosition returns the current primary position MasterPosition(ctx context.Context, in *tabletmanagerdata.PrimaryPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryPositionResponse, error) - // PrimaryPosition returns the current master position + // PrimaryPosition returns the current primary position PrimaryPosition(ctx context.Context, in *tabletmanagerdata.PrimaryPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryPositionResponse, error) // WaitForPosition waits for the position to be reached WaitForPosition(ctx context.Context, in *tabletmanagerdata.WaitForPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.WaitForPositionResponse, error) @@ -76,12 +76,12 @@ type TabletManagerClient interface { ResetReplication(ctx context.Context, in *tabletmanagerdata.ResetReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetReplicationResponse, error) // Deprecated, use InitPrimary instead InitMaster(ctx context.Context, in *tabletmanagerdata.InitPrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitPrimaryResponse, error) - // InitPrimary initializes the tablet as a master + // InitPrimary initializes the tablet as a primary InitPrimary(ctx context.Context, in *tabletmanagerdata.InitPrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitPrimaryResponse, error) // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal PopulateReparentJournal(ctx context.Context, in *tabletmanagerdata.PopulateReparentJournalRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PopulateReparentJournalResponse, error) - // InitReplica tells the tablet to reparent to the master unconditionally + // InitReplica tells the tablet to reparent to the primary unconditionally InitReplica(ctx context.Context, in *tabletmanagerdata.InitReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitReplicaResponse, error) // Deprecated, see DemotePrimary instead DemoteMaster(ctx context.Context, in 
*tabletmanagerdata.DemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DemotePrimaryResponse, error) @@ -91,18 +91,18 @@ type TabletManagerClient interface { UndoDemoteMaster(ctx context.Context, in *tabletmanagerdata.UndoDemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) // UndoDemotePrimary reverts all changes made by DemotePrimary UndoDemotePrimary(ctx context.Context, in *tabletmanagerdata.UndoDemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) - // ReplicaWasPromoted tells the remote tablet it is now the master + // ReplicaWasPromoted tells the remote tablet it is now the primary ReplicaWasPromoted(ctx context.Context, in *tabletmanagerdata.ReplicaWasPromotedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) // SetMaster tells the replica to reparent SetMaster(ctx context.Context, in *tabletmanagerdata.SetReplicationSourceRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReplicationSourceResponse, error) // SetReplicationSource tells the replica to reparent SetReplicationSource(ctx context.Context, in *tabletmanagerdata.SetReplicationSourceRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReplicationSourceResponse, error) - // ReplicaWasRestarted tells the remote tablet its master has changed + // ReplicaWasRestarted tells the remote tablet its primary has changed ReplicaWasRestarted(ctx context.Context, in *tabletmanagerdata.ReplicaWasRestartedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) // StopReplicationAndGetStatus stops MySQL replication, and returns the // replication status StopReplicationAndGetStatus(ctx context.Context, in *tabletmanagerdata.StopReplicationAndGetStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) - // PromoteReplica makes the replica the new master + // PromoteReplica makes the replica the new primary PromoteReplica(ctx context.Context, in *tabletmanagerdata.PromoteReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PromoteReplicaResponse, error) Backup(ctx context.Context, in *tabletmanagerdata.BackupRequest, opts ...grpc.CallOption) (TabletManager_BackupClient, error) // RestoreFromBackup deletes all local data and restores it from the latest backup. @@ -656,13 +656,13 @@ type TabletManagerServer interface { ExecuteFetchAsApp(context.Context, *tabletmanagerdata.ExecuteFetchAsAppRequest) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) // ReplicationStatus returns the current replication status. ReplicationStatus(context.Context, *tabletmanagerdata.ReplicationStatusRequest) (*tabletmanagerdata.ReplicationStatusResponse, error) - // MasterStatus returns the current master status. + // MasterStatus returns the current primary status. MasterStatus(context.Context, *tabletmanagerdata.PrimaryStatusRequest) (*tabletmanagerdata.PrimaryStatusResponse, error) - // PrimaryStatus returns the current master status. + // PrimaryStatus returns the current primary status. 
PrimaryStatus(context.Context, *tabletmanagerdata.PrimaryStatusRequest) (*tabletmanagerdata.PrimaryStatusResponse, error) - // MasterPosition returns the current master position + // MasterPosition returns the current primary position MasterPosition(context.Context, *tabletmanagerdata.PrimaryPositionRequest) (*tabletmanagerdata.PrimaryPositionResponse, error) - // PrimaryPosition returns the current master position + // PrimaryPosition returns the current primary position PrimaryPosition(context.Context, *tabletmanagerdata.PrimaryPositionRequest) (*tabletmanagerdata.PrimaryPositionResponse, error) // WaitForPosition waits for the position to be reached WaitForPosition(context.Context, *tabletmanagerdata.WaitForPositionRequest) (*tabletmanagerdata.WaitForPositionResponse, error) @@ -685,12 +685,12 @@ type TabletManagerServer interface { ResetReplication(context.Context, *tabletmanagerdata.ResetReplicationRequest) (*tabletmanagerdata.ResetReplicationResponse, error) // Deprecated, use InitPrimary instead InitMaster(context.Context, *tabletmanagerdata.InitPrimaryRequest) (*tabletmanagerdata.InitPrimaryResponse, error) - // InitPrimary initializes the tablet as a master + // InitPrimary initializes the tablet as a primary InitPrimary(context.Context, *tabletmanagerdata.InitPrimaryRequest) (*tabletmanagerdata.InitPrimaryResponse, error) // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal PopulateReparentJournal(context.Context, *tabletmanagerdata.PopulateReparentJournalRequest) (*tabletmanagerdata.PopulateReparentJournalResponse, error) - // InitReplica tells the tablet to reparent to the master unconditionally + // InitReplica tells the tablet to reparent to the primary unconditionally InitReplica(context.Context, *tabletmanagerdata.InitReplicaRequest) (*tabletmanagerdata.InitReplicaResponse, error) // Deprecated, see DemotePrimary instead DemoteMaster(context.Context, *tabletmanagerdata.DemotePrimaryRequest) (*tabletmanagerdata.DemotePrimaryResponse, error) @@ -700,18 +700,18 @@ type TabletManagerServer interface { UndoDemoteMaster(context.Context, *tabletmanagerdata.UndoDemotePrimaryRequest) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) // UndoDemotePrimary reverts all changes made by DemotePrimary UndoDemotePrimary(context.Context, *tabletmanagerdata.UndoDemotePrimaryRequest) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) - // ReplicaWasPromoted tells the remote tablet it is now the master + // ReplicaWasPromoted tells the remote tablet it is now the primary ReplicaWasPromoted(context.Context, *tabletmanagerdata.ReplicaWasPromotedRequest) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) // SetMaster tells the replica to reparent SetMaster(context.Context, *tabletmanagerdata.SetReplicationSourceRequest) (*tabletmanagerdata.SetReplicationSourceResponse, error) // SetReplicationSource tells the replica to reparent SetReplicationSource(context.Context, *tabletmanagerdata.SetReplicationSourceRequest) (*tabletmanagerdata.SetReplicationSourceResponse, error) - // ReplicaWasRestarted tells the remote tablet its master has changed + // ReplicaWasRestarted tells the remote tablet its primary has changed ReplicaWasRestarted(context.Context, *tabletmanagerdata.ReplicaWasRestartedRequest) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) // StopReplicationAndGetStatus stops MySQL replication, and returns the // replication status StopReplicationAndGetStatus(context.Context, *tabletmanagerdata.StopReplicationAndGetStatusRequest) 
(*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) - // PromoteReplica makes the replica the new master + // PromoteReplica makes the replica the new primary PromoteReplica(context.Context, *tabletmanagerdata.PromoteReplicaRequest) (*tabletmanagerdata.PromoteReplicaResponse, error) Backup(*tabletmanagerdata.BackupRequest, TabletManager_BackupServer) error // RestoreFromBackup deletes all local data and restores it from the latest backup. diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 0a6097250b..3d1754bcbb 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -1848,7 +1848,7 @@ type DeleteTabletsRequest struct { // TabletAliases is the list of tablets to delete. TabletAliases []*topodata.TabletAlias `protobuf:"bytes,1,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` - // AllowPrimary allows for the master/primary tablet of a shard to be deleted. + // AllowPrimary allows for the primary tablet of a shard to be deleted. // Use with caution. AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` } diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index 89541a7f73..fe2d20a1f9 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -139,7 +139,7 @@ type Keyspace struct { ShardingColumnType string `protobuf:"bytes,4,opt,name=sharding_column_type,json=shardingColumnType,proto3" json:"sharding_column_type,omitempty"` // redirects all traffic to another keyspace. If set, shards is ignored. ServedFrom string `protobuf:"bytes,5,opt,name=served_from,json=servedFrom,proto3" json:"served_from,omitempty"` - // number of replica tablets to instantiate. This includes the master tablet. + // number of replica tablets to instantiate. This includes the primary tablet. ReplicaCount int32 `protobuf:"varint,6,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // number of rdonly tablets to instantiate. RdonlyCount int32 `protobuf:"varint,7,opt,name=rdonly_count,json=rdonlyCount,proto3" json:"rdonly_count,omitempty"` diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 0dfda28381..81022d22ed 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -287,7 +287,7 @@ func (client *fakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, ta // newFakeTopo returns a topo with: // - a keyspace named 'test_keyspace'. // - 3 shards named '1', '2', '3'. -// - A master tablet for each shard. +// - A primary tablet for each shard. func newFakeTopo(t *testing.T) *topo.Server { ts := memorytopo.NewServer("test_cell") ctx := context.Background() diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 2e429d70e3..40dd768f1c 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -83,7 +83,7 @@ func (exec *TabletExecutor) SkipPreflight() { exec.skipPreflight = true } -// Open opens a connection to the master for every shard. +// Open opens a connection to the primary for every shard. 
func (exec *TabletExecutor) Open(ctx context.Context, keyspace string) error { if !exec.isClosed { return nil @@ -412,7 +412,7 @@ func (exec *TabletExecutor) executeOneTablet( return } // Get a replication position that's guaranteed to be after the schema change - // was applied on the master. + // was applied on the primary. pos, err := exec.wr.TabletManagerClient().MasterPosition(ctx, tablet) if err != nil { errChan <- ShardWithError{ diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index cfee0bed56..fdd1a28f87 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -68,7 +68,7 @@ func TestTabletExecutorOpenWithEmptyMasterAlias(t *testing.T) { Type: topodatapb.TabletType_REPLICA, } // This will create the Keyspace, Shard and Tablet record. - // Since this is a replica tablet, the Shard will have no master. + // Since this is a replica tablet, the Shard will have no primary. if err := wr.InitTablet(ctx, tablet, false /*allowMasterOverride*/, true /*createShardAndKeyspace*/, false /*allowUpdate*/); err != nil { t.Fatalf("InitTablet failed: %v", err) } diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go index 1b5c4f5658..de92853899 100644 --- a/go/vt/throttler/demo/throttler_demo.go +++ b/go/vt/throttler/demo/throttler_demo.go @@ -45,16 +45,16 @@ import ( // throttler adapts its throttling rate to the replication lag. // // The throttler is necessary because replicas apply transactions at a slower -// rate than masters and fall behind at high write throughput. +// rate than primaries and fall behind at high write throughput. // (Mostly they fall behind because MySQL replication is single threaded but -// the write throughput on the master does not have to.) +// the write throughput on the primary does not have to be.) // -// This demo simulates a client (writer), a master and a replica. -// The client writes to the master which in turn replicas everything to the +// This demo simulates a client (writer), a primary and a replica. +// The client writes to the primary, which in turn replicates everything to the // replica. // The replica measures its replication lag via the timestamp which is part of // each message. -// While the master has no rate limit, the replica is limited to +// While the primary has no rate limit, the replica is limited to // --rate (see below) transactions/second. The client runs the resharding // throttler which tries to throttle the client based on the observed // replication lag. @@ -67,7 +67,7 @@ var ( replicaDegrationDuration = flag.Duration("replica_degration_duration", 10*time.Second, "duration a simulated degration should take") ) -// master simulates an *unthrottled* MySQL master which replicates every +// master simulates an *unthrottled* MySQL primary which replicates every // received "execute" call to a known "replica". type master struct { replica *replica @@ -79,13 +79,13 @@ func (m *master) execute(msg time.Time) { } // replica simulates a *throttled* MySQL replica. -// If it cannot keep up with applying the master writes, it will report a +// If it cannot keep up with applying the primary's writes, it will report a // replication lag > 0 seconds. type replica struct { fakeTablet *testlib.FakeTablet qs *fakes.StreamHealthQueryService - // replicationStream is the incoming stream of messages from the master. + // replicationStream is the incoming stream of messages from the primary.
replicationStream chan time.Time // throttler is used to enforce the maximum rate at which replica applies diff --git a/go/vt/throttler/max_replication_lag_module.go b/go/vt/throttler/max_replication_lag_module.go index c73ece41dc..64c922f7b6 100644 --- a/go/vt/throttler/max_replication_lag_module.go +++ b/go/vt/throttler/max_replication_lag_module.go @@ -597,7 +597,7 @@ func (m *MaxReplicationLagModule) decreaseAndGuessRate(r *result, now time.Time, } // Find out the average rate (per second) at which we inserted data - // at the master during the observed timespan. + // at the primary during the observed timespan. from := lagRecordBefore.time to := lagRecordNow.time avgMasterRate := m.actualRatesHistory.average(from, to) diff --git a/go/vt/topo/conn.go b/go/vt/topo/conn.go index 9552a2535d..0f11ffa189 100644 --- a/go/vt/topo/conn.go +++ b/go/vt/topo/conn.go @@ -156,7 +156,7 @@ type Conn interface { // // Master election methods. This is meant to have a small - // number of processes elect a master within a group. The + // number of processes elect a primary within a group. The // backend storage for this can either be the global topo // server, or a resilient quorum of individual cells, to // reduce the load / dependency on the global topo server. @@ -198,7 +198,7 @@ type DirEntry struct { // Ephemeral is set if the directory / file only contains // data that was not set by the file API, like lock files - // or master-election related files. + // or primary-election related files. // Only filled in if full is true. Ephemeral bool } @@ -284,7 +284,7 @@ type WatchData struct { // case topo.ErrInterrupted: // return // default: -// log.Errorf("Got error while waiting for master, will retry in 5s: %v", err) +// log.Errorf("Got error while waiting for primary, will retry in 5s: %v", err) // time.Sleep(5 * time.Second) // } // } @@ -303,16 +303,16 @@ type WatchData struct { // }) type MasterParticipation interface { // WaitForMastership makes the current process a candidate - // for election, and waits until this process is the master. - // After we become the master, we may lose mastership. In that case, + // for election, and waits until this process is the primary. + // After we become the primary, we may lose primaryship. In that case, // the returned context will be canceled. If Stop was called, // WaitForMastership will return nil, ErrInterrupted. WaitForMastership() (context.Context, error) // Stop is called when we don't want to participate in the - // master election any more. Typically, that is when the + // primary election any more. Typically, that is when the // hosting process is terminating. We will relinquish - // mastership at that point, if we had it. Stop should + // primaryship at that point, if we had it. Stop should // not return until everything has been done. // The MasterParticipation object should be discarded // after Stop has been called. Any call to WaitForMastership @@ -321,7 +321,7 @@ type MasterParticipation interface { // nil, ErrInterrupted as soon as possible. Stop() - // GetCurrentMasterID returns the current master id. + // GetCurrentMasterID returns the current primary id. // This may not work after Stop has been called. 
GetCurrentMasterID(ctx context.Context) (string, error) } diff --git a/go/vt/topo/consultopo/election.go b/go/vt/topo/consultopo/election.go index 6ea86c74e4..19ca2f6d96 100644 --- a/go/vt/topo/consultopo/election.go +++ b/go/vt/topo/consultopo/election.go @@ -90,7 +90,7 @@ func (mp *consulMasterParticipation) WaitForMastership() (context.Context, error return nil, err } - // We have the lock, keep mastership until we lose it. + // We have the lock, keep primaryship until we lose it. lockCtx, lockCancel := context.WithCancel(context.Background()) go func() { select { @@ -103,7 +103,7 @@ func (mp *consulMasterParticipation) WaitForMastership() (context.Context, error case <-mp.stop: // Stop was called. We stop the context first, // so the running process is not thinking it - // is the master any more, then we unlock. + // is the primary any more, then we unlock. lockCancel() if err := l.Unlock(); err != nil { log.Errorf("master election(%v) Unlock failed: %v", mp.name, err) diff --git a/go/vt/topo/etcd2topo/election.go b/go/vt/topo/etcd2topo/election.go index 47a684242e..46be45afb4 100644 --- a/go/vt/topo/etcd2topo/election.go +++ b/go/vt/topo/etcd2topo/election.go @@ -86,7 +86,7 @@ func (mp *etcdMasterParticipation) WaitForMastership() (context.Context, error) close(mp.done) }() - // Try to get the mastership, by getting a lock. + // Try to get the primaryship, by getting a lock. var err error ld, err = mp.s.lock(lockCtx, electionPath, mp.id) if err != nil { @@ -118,7 +118,7 @@ func (mp *etcdMasterParticipation) GetCurrentMasterID(ctx context.Context) (stri return "", convertError(err, electionPath) } if len(resp.Kvs) == 0 { - // No key starts with this prefix, means nobody is the master. + // No key starts with this prefix, means nobody is the primary. return "", nil } return string(resp.Kvs[0].Value), nil diff --git a/go/vt/topo/etcd2topo/error.go b/go/vt/topo/etcd2topo/error.go index 206834b700..e784fecd9b 100644 --- a/go/vt/topo/etcd2topo/error.go +++ b/go/vt/topo/etcd2topo/error.go @@ -58,7 +58,7 @@ func convertError(err error, nodePath string) error { // seem to be using the codes.Unavailable // category. So changing all of them to ErrTimeout. // The other reasons for codes.Unavailable are when - // etcd master election is failing, so timeout + // etcd primary election is failing, so timeout // also sounds reasonable there. return topo.NewError(topo.Timeout, nodePath) } diff --git a/go/vt/topo/etcd2topo/lock.go b/go/vt/topo/etcd2topo/lock.go index 61439d590a..1bc1d437e6 100644 --- a/go/vt/topo/etcd2topo/lock.go +++ b/go/vt/topo/etcd2topo/lock.go @@ -136,7 +136,7 @@ func (s *Server) Lock(ctx context.Context, dirPath, contents string) (topo.LockD return s.lock(ctx, dirPath, contents) } -// lock is used by both Lock() and master election. +// lock is used by both Lock() and primary election. func (s *Server) lock(ctx context.Context, nodePath, contents string) (topo.LockDescriptor, error) { nodePath = path.Join(s.root, nodePath, locksPath) diff --git a/go/vt/topo/k8stopo/election.go b/go/vt/topo/k8stopo/election.go index 9edfe6077a..85ebbdf83c 100644 --- a/go/vt/topo/k8stopo/election.go +++ b/go/vt/topo/k8stopo/election.go @@ -90,7 +90,7 @@ func (mp *kubernetesMasterParticipation) WaitForMastership() (context.Context, e close(mp.done) }() - // Try to get the mastership, by getting a lock. + // Try to get the primaryship, by getting a lock. 
var err error ld, err = mp.s.lock(lockCtx, electionPath, mp.id, true) if err != nil { @@ -113,7 +113,7 @@ func (mp *kubernetesMasterParticipation) Stop() { func (mp *kubernetesMasterParticipation) GetCurrentMasterID(ctx context.Context) (string, error) { id, _, err := mp.s.Get(ctx, mp.getElectionPath()) if err != nil { - // NoNode means nobody is the master + // NoNode means nobody is the primary if topo.IsErrType(err, topo.NoNode) { return "", nil } diff --git a/go/vt/topo/k8stopo/lock.go b/go/vt/topo/k8stopo/lock.go index aab5fb4ed9..51c86423b0 100644 --- a/go/vt/topo/k8stopo/lock.go +++ b/go/vt/topo/k8stopo/lock.go @@ -40,7 +40,7 @@ func (s *Server) Lock(ctx context.Context, dirPath, contents string) (topo.LockD return s.lock(ctx, dirPath, contents, false) } -// lock is used by both Lock() and master election. +// lock is used by both Lock() and primary election. // it blocks until the lock is taken, interrupted, or times out func (s *Server) lock(ctx context.Context, nodePath, contents string, createMissing bool) (topo.LockDescriptor, error) { // Satisfy the topo.Conn interface diff --git a/go/vt/topo/keyspace.go b/go/vt/topo/keyspace.go index 326f15401b..74df8f81ee 100755 --- a/go/vt/topo/keyspace.go +++ b/go/vt/topo/keyspace.go @@ -61,7 +61,7 @@ func (ki *KeyspaceInfo) GetServedFrom(tabletType topodatapb.TabletType) *topodat // CheckServedFromMigration makes sure a requested migration is safe func (ki *KeyspaceInfo) CheckServedFromMigration(tabletType topodatapb.TabletType, cells []string, keyspace string, remove bool) error { - // master is a special case with a few extra checks + // primary is a special case with a few extra checks if tabletType == topodatapb.TabletType_PRIMARY { if !remove { return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot add master back to %v", ki.keyspace) @@ -242,7 +242,7 @@ func (ts *Server) FindAllShardsInKeyspace(ctx context.Context, keyspace string) return result, nil } -// GetServingShards returns all shards where the master is serving. +// GetServingShards returns all shards where the primary is serving. 
func (ts *Server) GetServingShards(ctx context.Context, keyspace string) ([]*ShardInfo, error) { shards, err := ts.GetShardNames(ctx, keyspace) if err != nil { diff --git a/go/vt/topo/keyspace_test.go b/go/vt/topo/keyspace_test.go index 0c539efb17..84cb4fef6d 100644 --- a/go/vt/topo/keyspace_test.go +++ b/go/vt/topo/keyspace_test.go @@ -116,7 +116,7 @@ func TestUpdateServedFromMap(t *testing.T) { t.Fatalf("migrate rdonly again should have failed: %v", err) } - // finally migrate the master + // finally migrate the primary if err := ki.UpdateServedFromMap(topodatapb.TabletType_PRIMARY, []string{"second"}, "source", true, allCells); err == nil || err.Error() != "cannot migrate only some cells for master removal in keyspace ks" { t.Fatalf("migrate master with cells should have failed: %v", err) } diff --git a/go/vt/topo/locks.go b/go/vt/topo/locks.go index e7aa487ad0..9c6d6dbd70 100644 --- a/go/vt/topo/locks.go +++ b/go/vt/topo/locks.go @@ -270,7 +270,7 @@ func (l *Lock) unlockKeyspace(ctx context.Context, ts *Server, keyspace string, // * PlannedReparentShard // * EmergencyReparentShard // * operations that we don't want to conflict with re-parenting: -// * DeleteTablet when it's the shard's current master +// * DeleteTablet when it's the shard's current primary // func (ts *Server) LockShard(ctx context.Context, keyspace, shard, action string) (context.Context, func(*error), error) { i, ok := ctx.Value(locksKey).(*locksInfo) diff --git a/go/vt/topo/memorytopo/election.go b/go/vt/topo/memorytopo/election.go index 025fa99a8f..ed4394210e 100644 --- a/go/vt/topo/memorytopo/election.go +++ b/go/vt/topo/memorytopo/election.go @@ -93,7 +93,7 @@ func (mp *cMasterParticipation) WaitForMastership() (context.Context, error) { close(mp.done) }() - // Try to get the mastership, by getting a lock. + // Try to get the primaryship, by getting a lock. var err error ld, err = mp.c.Lock(lockCtx, electionPath, mp.id) if err != nil { diff --git a/go/vt/topo/memorytopo/memorytopo.go b/go/vt/topo/memorytopo/memorytopo.go index 84ff8e2c10..155a1e6643 100644 --- a/go/vt/topo/memorytopo/memorytopo.go +++ b/go/vt/topo/memorytopo/memorytopo.go @@ -144,7 +144,7 @@ type node struct { // lockContents is the contents of the locks. // For regular locks, it has the contents that was passed in. - // For master election, it has the id of the election leader. + // For primary election, it has the id of the election leader. lockContents string } diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go index a4f703e7d5..5da9b50a27 100644 --- a/go/vt/topo/shard.go +++ b/go/vt/topo/shard.go @@ -47,7 +47,7 @@ import ( const ( blTablesAlreadyPresent = "one or more tables are already present in the blacklist" blTablesNotPresent = "cannot remove tables since one or more do not exist in the blacklist" - blNoCellsForMaster = "you cannot specify cells for a master's tablet control" + blNoCellsForMaster = "you cannot specify cells for a primary's tablet control" ) // Functions for dealing with shard representations in topology. @@ -177,17 +177,17 @@ func (si *ShardInfo) Version() Version { return si.version } -// HasMaster returns true if the Shard has an assigned Master. +// HasMaster returns true if the Shard has an assigned primary. func (si *ShardInfo) HasMaster() bool { return !topoproto.TabletAliasIsZero(si.Shard.PrimaryAlias) } -// GetPrimaryTermStartTime returns the shard's master term start time as a Time value. +// GetPrimaryTermStartTime returns the shard's primary term start time as a Time value. 
func (si *ShardInfo) GetPrimaryTermStartTime() time.Time { return logutil.ProtoToTime(si.Shard.PrimaryTermStartTime) } -// SetPrimaryTermStartTime sets the shard's master term start time as a Time value. +// SetPrimaryTermStartTime sets the shard's primary term start time as a Time value. func (si *ShardInfo) SetPrimaryTermStartTime(t time.Time) { si.Shard.PrimaryTermStartTime = logutil.TimeToProto(t) } @@ -296,7 +296,7 @@ func (ts *Server) CreateShard(ctx context.Context, keyspace, shard string) (err KeyRange: keyRange, } - // Set master as serving only if its keyrange doesn't overlap + // Set primary as serving only if its keyrange doesn't overlap // with other shards. This applies to unsharded keyspaces also value.IsPrimaryServing = true sis, err := ts.FindAllShardsInKeyspace(ctx, keyspace) diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index 544c563cb3..8a7406d88d 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -109,9 +109,9 @@ func IsRunningUpdateStream(tt topodatapb.TabletType) bool { return false } -// IsReplicaType returns if this type should be connected to a master db +// IsReplicaType returns if this type should be connected to a primary db // and actively replicating? -// MASTER is not obviously (only support one level replication graph) +// PRIMARY is obviously not (we only support a one-level replication graph) // BACKUP, RESTORE, DRAINED may or may not be, but we don't know for sure func IsReplicaType(tt topodatapb.TabletType) bool { switch tt { @@ -211,7 +211,7 @@ func (ti *TabletInfo) IsReplicaType() bool { return IsReplicaType(ti.Type) } -// GetPrimaryTermStartTime returns the tablet's master term start time as a Time value. +// GetPrimaryTermStartTime returns the tablet's primary term start time as a Time value. func (ti *TabletInfo) GetPrimaryTermStartTime() time.Time { return logutil.ProtoToTime(ti.Tablet.PrimaryTermStartTime) } @@ -476,7 +476,7 @@ func (ts *Server) GetTabletsByCell(ctx context.Context, cell string) ([]*topodat } // ParseServingTabletType parses the tablet type into the enum, and makes sure -// that the enum is of serving type (MASTER, REPLICA, RDONLY/BATCH). +// that the enum is of serving type (PRIMARY, REPLICA, RDONLY/BATCH). // // Note: This function more closely belongs in topoproto, but that would create // a circular import between packages topo and topoproto.
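The IsReplicaType comment above encodes a small classification over tablet types: REPLICA and RDONLY replicate from the primary, the primary itself does not (the replication graph is one level deep), and BACKUP/RESTORE/DRAINED are treated as unknown. A self-contained sketch of that classification, with an illustrative `tabletType` stand-in rather than the real topodatapb.TabletType enum:

```go
package main

import "fmt"

// tabletType mirrors a few of the topodata tablet types discussed above.
type tabletType string

const (
	primaryType tabletType = "PRIMARY"
	replicaType tabletType = "REPLICA"
	rdonlyType  tabletType = "RDONLY"
	drainedType tabletType = "DRAINED"
)

// isReplicaType reports whether a tablet of this type is expected to be
// connected to the primary and actively replicating. Types whose state is
// unknown (BACKUP, RESTORE, DRAINED) are conservatively treated as not
// replicating.
func isReplicaType(tt tabletType) bool {
	switch tt {
	case replicaType, rdonlyType:
		return true
	}
	return false
}

func main() {
	for _, tt := range []tabletType{primaryType, replicaType, rdonlyType, drainedType} {
		fmt.Printf("%s: %v\n", tt, isReplicaType(tt))
	}
}
```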
diff --git a/go/vt/topo/test/election.go b/go/vt/topo/test/election.go index 7a5e6fecc1..5365e15e05 100644 --- a/go/vt/topo/test/election.go +++ b/go/vt/topo/test/election.go @@ -61,10 +61,10 @@ func checkElection(t *testing.T, ts *topo.Server) { t.Fatalf("cannot create mp1: %v", err) } - // no master yet, check name + // no primary yet, check name waitForMasterID(t, mp1, "") - // wait for id1 to be the master + // wait for id1 to be the primary ctx1, err := mp1.WaitForMastership() if err != nil { t.Fatalf("mp1 cannot become master: %v", err) @@ -84,7 +84,7 @@ func checkElection(t *testing.T, ts *topo.Server) { } } - // get the current master name, better be id1 + // get the current primary name, better be id1 waitForMasterID(t, mp1, id1) // create a second MasterParticipation on same name @@ -94,7 +94,7 @@ func checkElection(t *testing.T, ts *topo.Server) { t.Fatalf("cannot create mp2: %v", err) } - // wait until mp2 gets to be the master in the background + // wait until mp2 gets to be the primary in the background mp2IsMaster := make(chan error) var mp2Context context.Context go func() { @@ -103,7 +103,7 @@ func checkElection(t *testing.T, ts *topo.Server) { mp2IsMaster <- err }() - // ask mp2 for master name, should get id1 + // ask mp2 for primary name, should get id1 waitForMasterID(t, mp2, id1) // stop mp1 @@ -118,13 +118,13 @@ func checkElection(t *testing.T, ts *topo.Server) { t.Fatalf("shutting down mp1 didn't close ctx1 in time") } - // now mp2 should be master + // now mp2 should be primary err = <-mp2IsMaster if err != nil { t.Fatalf("mp2 awoke with error: %v", err) } - // ask mp2 for master name, should get id2 + // ask mp2 for primary name, should get id2 waitForMasterID(t, mp2, id2) // stop mp2, we're done diff --git a/go/vt/topo/topotests/srv_keyspace_test.go b/go/vt/topo/topotests/srv_keyspace_test.go index 7e97b9e5e1..3ad426de19 100644 --- a/go/vt/topo/topotests/srv_keyspace_test.go +++ b/go/vt/topo/topotests/srv_keyspace_test.go @@ -1106,7 +1106,7 @@ func TestMasterMigrateServedType(t *testing.T) { t.Errorf("MigrateServedType() failure. Got %v, want: %v", string(got), string(want)) } - // migrating master type cleans up shard tablet controls records + // migrating primary type cleans up shard tablet controls records targetKs = &topodatapb.SrvKeyspace{ Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ diff --git a/go/vt/topo/zk2topo/election.go b/go/vt/topo/zk2topo/election.go index 4bcbb0f6f2..d33f934ab1 100644 --- a/go/vt/topo/zk2topo/election.go +++ b/go/vt/topo/zk2topo/election.go @@ -30,7 +30,7 @@ import ( "vitess.io/vitess/go/vt/topo" ) -// This file contains the master election code for zk2topo.Server. +// This file contains the primary election code for zk2topo.Server. // NewMasterParticipation is part of the topo.Server interface. // We use the full path: /election/ @@ -129,14 +129,14 @@ func (mp *zkMasterParticipation) WaitForMastership() (context.Context, error) { return ctx, nil } -// watchMastership is the background go routine we run while we are the master. +// watchMastership is the background go routine we run while we are the primary. // We will do two things: // - watch for changes to the proposal file. If anything happens there, // it most likely means we lost the ZK session, so we want to stop -// being the master. +// being the primary. // - wait for mp.stop. 
func (mp *zkMasterParticipation) watchMastership(ctx context.Context, conn *ZkConn, proposal string, cancel context.CancelFunc) { - // any interruption of this routine means we're not master any more. + // any interruption of this routine means we're not primary any more. defer cancel() // get to work watching our own proposal @@ -179,7 +179,7 @@ func (mp *zkMasterParticipation) GetCurrentMasterID(ctx context.Context) (string return "", convertError(err, zkPath) } if len(children) == 0 { - // no current master + // no current primary return "", nil } sort.Strings(children) @@ -188,7 +188,7 @@ func (mp *zkMasterParticipation) GetCurrentMasterID(ctx context.Context) (string data, _, err := mp.zs.conn.Get(ctx, childPath) if err != nil { if err == zk.ErrNoNode { - // master terminated in front of our own eyes, + // primary terminated in front of our own eyes, // try again continue } diff --git a/go/vt/topotools/rebuild_keyspace.go b/go/vt/topotools/rebuild_keyspace.go index a3cc540e68..c472f93ace 100644 --- a/go/vt/topotools/rebuild_keyspace.go +++ b/go/vt/topotools/rebuild_keyspace.go @@ -109,7 +109,7 @@ func RebuildKeyspaceLocked(ctx context.Context, log logutil.Logger, ts *topo.Ser // - check the ranges are compatible (no hole, covers everything) for cell, srvKeyspace := range srvKeyspaceMap { for _, si := range shards { - // We rebuild keyspace iff shard master is in a serving state. + // We rebuild keyspace iff shard primary is in a serving state. if !si.GetIsPrimaryServing() { continue } diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go index d724ff7646..0603490ca8 100644 --- a/go/vt/topotools/tablet.go +++ b/go/vt/topotools/tablet.go @@ -26,7 +26,7 @@ topotools is used by wrangler, so it ends up in all tools using wrangler (vtctl, vtctld, ...). It is also included by vttablet, so it contains: - most of the logic to create a shard / keyspace (tablet's init code) - some of the logic to perform a TabletExternallyReparented (RPC call - to master vttablet to let it know it's the master). + to primary vttablet to let it know it's the primary). */ package topotools @@ -66,7 +66,7 @@ func ConfigureTabletHook(hk *hook.Hook, tabletAlias *topodatapb.TabletAlias) { // If successful, the updated tablet record is returned. func ChangeType(ctx context.Context, ts *topo.Server, tabletAlias *topodatapb.TabletAlias, newType topodatapb.TabletType, PrimaryTermStartTime *vttime.Time) (*topodatapb.Tablet, error) { var result *topodatapb.Tablet - // Always clear out the master timestamp if not master. + // Always clear out the primary timestamp if not primary. if newType != topodatapb.TabletType_PRIMARY { PrimaryTermStartTime = nil } @@ -107,7 +107,7 @@ func CheckOwnership(oldTablet, newTablet *topodatapb.Tablet) error { // is a primary before we allow its tablet record to be deleted. The canonical // way to determine the only true primary in a shard is to list all the tablets // and find the one with the highest PrimaryTermStartTime among the ones that -// claim to be master. +// claim to be primary. // // We err on the side of caution here, i.e. we should never return false for // a true primary tablet, but it is okay to return true for a tablet that isn't @@ -115,7 +115,7 @@ func CheckOwnership(oldTablet, newTablet *topodatapb.Tablet) error { // the system is in transition (a reparenting event is in progress and parts of // the topo have not yet been updated). 
func IsPrimaryTablet(ctx context.Context, ts *topo.Server, ti *topo.TabletInfo) (bool, error) { - // Tablet record claims to be non-master, we believe it + // Tablet record claims to be non-primary, we believe it if ti.Type != topodatapb.TabletType_PRIMARY { return false, nil } @@ -127,14 +127,14 @@ func IsPrimaryTablet(ctx context.Context, ts *topo.Server, ti *topo.TabletInfo) return false, err } - // Tablet record claims to be master, and shard record matches + // Tablet record claims to be primary, and shard record matches if topoproto.TabletAliasEqual(si.PrimaryAlias, ti.Tablet.Alias) { return true, nil } - // Shard record has another tablet as master, so check PrimaryTermStartTime + // Shard record has another tablet as primary, so check PrimaryTermStartTime // If tablet record's PrimaryTermStartTime is later than the one in the shard - // record, then the tablet is master + // record, then the tablet is primary tabletMTST := ti.GetPrimaryTermStartTime() shardMTST := si.GetPrimaryTermStartTime() diff --git a/go/vt/topotools/utils.go b/go/vt/topotools/utils.go index c403115252..1f5f6a8175 100644 --- a/go/vt/topotools/utils.go +++ b/go/vt/topotools/utils.go @@ -115,11 +115,11 @@ func GetAllTabletsAcrossCells(ctx context.Context, ts *topo.Server) ([]*topo.Tab } // SortedTabletMap returns two maps: -// - The replicaMap contains all the non-master non-scrapped hosts. +// - The replicaMap contains all the non-primary non-scrapped hosts. // This can be used as a list of replicas to fix up for reparenting // - The masterMap contains all the tablets without parents // (scrapped or not). This can be used to special case -// the old master, and any tablet in a weird state, left over, ... +// the old primary, and any tablet in a weird state, left over, ... func SortedTabletMap(tabletMap map[string]*topo.TabletInfo) (map[string]*topo.TabletInfo, map[string]*topo.TabletInfo) { replicaMap := make(map[string]*topo.TabletInfo) masterMap := make(map[string]*topo.TabletInfo) diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 5abcc8f74f..8f34ef8251 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -73,7 +73,7 @@ type comboTablet struct { var tabletMap map[uint32]*comboTablet // CreateTablet creates an individual tablet, with its tm, and adds -// it to the map. If it's a master tablet, it also issues a TER. +// it to the map. If it's a primary tablet, it also issues a TER. 
 func CreateTablet(ctx context.Context, ts *topo.Server, cell string, uid uint32, keyspace, shard, dbname string, tabletType topodatapb.TabletType, mysqld mysqlctl.MysqlDaemon, dbcfgs *dbconfigs.DBConfigs) error {
 	alias := &topodatapb.TabletAlias{
 		Cell: cell,
@@ -296,7 +296,7 @@ func CreateKs(ctx context.Context, ts *topo.Server, tpb *vttestpb.VTTestTopology
 
 			replicas := int(kpb.ReplicaCount)
 			if replicas == 0 {
-				// 2 replicas in order to ensure the master cell has a master and a replica
+				// 2 replicas in order to ensure the primary cell has a primary and a replica
 				replicas = 2
 			}
 			rdonlys := int(kpb.RdonlyCount)
@@ -321,7 +321,7 @@ func CreateKs(ctx context.Context, ts *topo.Server, tpb *vttestpb.VTTestTopology
 				if cell == tpb.Cells[0] {
 					replicas--
 
-					// create the master
+					// create the primary
 					if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_PRIMARY, mysqld, dbcfgs.Clone()); err != nil {
 						return 0, err
 					}
diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go
index 18e1b7d9dd..bcad5df667 100644
--- a/go/vt/vtctl/grpcvtctldserver/server.go
+++ b/go/vt/vtctl/grpcvtctldserver/server.go
@@ -1020,7 +1020,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
 	span.Annotate("strict", req.Strict)
 
 	// It is possible that an old primary has not yet updated its type in the
-	// topo. In that case, report its type as UNKNOWN. It used to be MASTER but
+	// topo. In that case, report its type as UNKNOWN. It used to be PRIMARY but
 	// is no longer the serving primary.
 	adjustTypeForStalePrimary := func(ti *topo.TabletInfo, mtst time.Time) {
 		if ti.Type == topodatapb.TabletType_PRIMARY && ti.GetPrimaryTermStartTime().Before(mtst) {
@@ -1144,7 +1144,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
 		}
 	}
 
-	// Collect true master term start times, and optionally filter out any
+	// Collect true primary term start times, and optionally filter out any
 	// tablets by keyspace according to the request.
 	PrimaryTermStartTimes := map[string]time.Time{}
 	filteredTablets := make([]*topo.TabletInfo, 0, len(allTablets))
@@ -1168,7 +1168,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
 
 	adjustedTablets := make([]*topodatapb.Tablet, len(filteredTablets))
 
-	// collect the tablets with adjusted master term start times. they've
+	// collect the tablets with adjusted primary term start times. they've
 	// already been filtered by the above loop, so no keyspace filtering
 	// here.
 	for i, ti := range filteredTablets {
@@ -1303,7 +1303,7 @@ func (s *VtctldServer) InitShardPrimaryLocked(
 		return err
 	}
 
-	// Check the master elect is in tabletMap.
+	// Check the primary elect is in tabletMap.
 	masterElectTabletAliasStr := topoproto.TabletAliasString(req.PrimaryElectTabletAlias)
 	masterElectTabletInfo, ok := tabletMap[masterElectTabletAliasStr]
 	if !ok {
@@ -1311,7 +1311,7 @@ func (s *VtctldServer) InitShardPrimaryLocked(
 	}
 	ev.NewMaster = proto.Clone(masterElectTabletInfo.Tablet).(*topodatapb.Tablet)
 
-	// Check the master is the only master is the shard, or -force was used.
+	// Check the primary is the only primary in the shard, or -force was used. 
_, masterTabletMap := topotools.SortedTabletMap(tabletMap) if !topoproto.TabletAliasEqual(shardInfo.PrimaryAlias, req.PrimaryElectTabletAlias) { if !req.Force { @@ -1372,7 +1372,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( return fmt.Errorf("lost topology lock, aborting: %v", err) } - // Tell the new master to break its replicas, return its replication + // Tell the new primary to break its replicas, return its replication // position logger.Infof("initializing master on %v", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) event.DispatchUpdate(ev, "initializing master") @@ -1391,11 +1391,11 @@ func (s *VtctldServer) InitShardPrimaryLocked( replCtx, replCancel := context.WithTimeout(ctx, waitReplicasTimeout) defer replCancel() - // Now tell the new master to insert the reparent_journal row, - // and tell everybody else to become a replica of the new master, + // Now tell the new primary to insert the reparent_journal row, + // and tell everybody else to become a replica of the new primary, // and wait for the row in the reparent_journal table. // We start all these in parallel, to handle the semi-sync - // case: for the master to be able to commit its row in the + // case: for the primary to be able to commit its row in the // reparent_journal table, it needs connected replicas. event.DispatchUpdate(ev, "reparenting all tablets") now := time.Now().UnixNano() @@ -1424,11 +1424,11 @@ func (s *VtctldServer) InitShardPrimaryLocked( } } - // After the master is done, we can update the shard record + // After the primary is done, we can update the shard record // (note with semi-sync, it also means at least one replica is done). wgMaster.Wait() if masterErr != nil { - // The master failed, there is no way the + // The primary failed, there is no way the // replicas will work. So we cancel them all. logger.Warningf("master failed to PopulateReparentJournal, canceling replicas") replCancel() @@ -1454,7 +1454,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( return err } - // Create database if necessary on the master. replicas will get it too through + // Create database if necessary on the primary. replicas will get it too through // replication. Since the user called InitShardPrimary, they've told us to // assume that whatever data is on all the replicas is what they intended. // If the database doesn't exist, it means the user intends for these tablets @@ -1869,7 +1869,7 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct OldPrimary: shard.PrimaryAlias, } - // If the externally reparented (new primary) tablet is already MASTER in + // If the externally reparented (new primary) tablet is already PRIMARY in // the topo, this is a no-op. if tablet.Type == topodatapb.TabletType_PRIMARY { return resp, nil diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index a7ea5d30c6..e5b02240c2 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -6273,7 +6273,7 @@ func TestTabletExternallyReparented(t *testing.T) { }, shouldErr: false, // NOTE: this seems weird, right? Why is the old primary still a - // MASTER, and why is the new primary's term start 0,0? Well, our + // PRIMARY, and why is the new primary's term start 0,0? Well, our // test client implementation is a little incomplete. See // ./testutil/test_tmclient.go for reference. 
expectedTopo: []*topodatapb.Tablet{ diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/util.go b/go/vt/vtctl/grpcvtctldserver/testutil/util.go index a7dce66ef9..d79d35d9bd 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/util.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/util.go @@ -121,13 +121,13 @@ func AddKeyspaces(ctx context.Context, t *testing.T, ts *topo.Server, keyspaces // AddTablet. type AddTabletOptions struct { // AlsoSetShardMaster is an option to control additional setup to take when - // AddTablet receives a tablet of type MASTER. When set, AddTablet will also + // AddTablet receives a tablet of type PRIMARY. When set, AddTablet will also // update the shard record to make that tablet the primary, and fail the // test if the shard record has a serving primary already. AlsoSetShardMaster bool // ForceSetShardMaster, when combined with AlsoSetShardMaster, will ignore // any existing primary in the shard, making the current tablet the serving - // primary (given it is type MASTER), and log that it has done so. + // primary (given it is type PRIMARY), and log that it has done so. ForceSetShardMaster bool // SkipShardCreation, when set, makes AddTablet never attempt to create a // shard record in the topo under any circumstances. @@ -143,8 +143,8 @@ type AddTabletOptions struct { // from the topo server without error. // // If AddTablet receives a tablet record with a keyspace and shard set, and that -// tablet's type is MASTER, and opts.AlsoSetShardMaster is set, then AddTablet -// will update the shard record to make that tablet the shard master and set the +// tablet's type is PRIMARY, and opts.AlsoSetShardMaster is set, then AddTablet +// will update the shard record to make that tablet the shard primary and set the // shard to serving. If that shard record already has a serving primary, then // AddTablet will fail the test. func AddTablet(ctx context.Context, t *testing.T, ts *topo.Server, tablet *topodatapb.Tablet, opts *AddTabletOptions) { diff --git a/go/vt/vtctl/grpcvtctldserver/topo.go b/go/vt/vtctl/grpcvtctldserver/topo.go index d1114121b6..c78d9f467e 100644 --- a/go/vt/vtctl/grpcvtctldserver/topo.go +++ b/go/vt/vtctl/grpcvtctldserver/topo.go @@ -196,7 +196,7 @@ func deleteTablet(ctx context.Context, ts *topo.Server, alias *topodatapb.Tablet return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot delete tablet %v as it is a master, pass AllowPrimary = true", topoproto.TabletAliasString(alias)) } - // Update the Shard object if the master was scrapped. We do this before + // Update the Shard object if the primary was scrapped. We do this before // calling DeleteTablet so that the operation can be retried in case of // failure. if isPrimary { diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index 525dfc1f37..7d5cf37e09 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -361,13 +361,13 @@ func (erp *EmergencyReparenter) waitForAllRelayLogsToApply( // maps: (1) the StopReplicationStatus of any replicas that actually // stopped replication; and (2) the MasterStatus of anything that // returned ErrNotReplica, which is a tablet that is either the current - // primary or is stuck thinking it is a MASTER but is not in actuality. + // primary or is stuck thinking it is a PRIMARY but is not in actuality. 
// // If we have a tablet in the validCandidates map that does not appear // in the statusMap, then we have either (a) the current primary, which // is not replicating, so it is not applying relay logs; or (b) a tablet - // that is stuck thinking it is MASTER but is not in actuality. In that - // second case - (b) - we will most likely find that the stuck MASTER + // that is stuck thinking it is PRIMARY but is not in actuality. In that + // second case - (b) - we will most likely find that the stuck PRIMARY // does not have a winning position, and fail the ERS. If, on the other // hand, it does have a winning position, we are trusting the operator // to know what they are doing by emergency-reparenting onto that diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index 9b038d9747..cea43ab129 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -371,7 +371,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { StopStatus *replicationdatapb.StopReplicationStatus Error error }{ - "zone1-0000000100": { // This tablet claims MASTER, so is not running replication. + "zone1-0000000100": { // This tablet claims PRIMARY, so is not running replication. Error: mysql.ErrNotReplica, }, "zone1-0000000101": { diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go index 90604b57b6..7a95ba66df 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -448,7 +448,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( return "", vterrors.Wrap(err, "lost topology lock; aborting") } - // Promote the candidate primary to type:MASTER. + // Promote the candidate primary to type:PRIMARY. promoteCtx, promoteCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) defer promoteCancel() @@ -592,7 +592,7 @@ func (pr *PlannedReparenter) reparentTablets( // attempt, we can no longer assume that we know who the former // primary was. Instead, we rely on the former primary to remember // that it needs to start replication after transitioning from - // MASTER => REPLICA. + // PRIMARY => REPLICA. 
forceStartReplication := false if err := pr.tmc.SetMaster(replCtx, tablet, ev.NewMaster.Alias, reparentJournalTimestamp, "", forceStartReplication); err != nil { rec.RecordError(vterrors.Wrapf(err, "tablet %v failed to SetMaster(%v): %v", alias, primaryElectAliasStr, err)) diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_test.go index b494d0a529..a6918db33e 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter_test.go @@ -2233,7 +2233,7 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Seconds: 1000, Nanoseconds: 500, }, - Hostname: "primary1", // claims to be MASTER with same term as primary2 + Hostname: "primary1", // claims to be PRIMARY with same term as primary2 Keyspace: "testkeyspace", Shard: "-", }, @@ -2247,7 +2247,7 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Seconds: 1000, Nanoseconds: 500, }, - Hostname: "primary2", // claims to be MASTER with same term as primary1 + Hostname: "primary2", // claims to be PRIMARY with same term as primary1 Keyspace: "testkeyspace", Shard: "-", }, diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index 3253091824..007f8fe99d 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -94,13 +94,13 @@ func ChooseNewPrimary( } // FindCurrentPrimary returns the current primary tablet of a shard, if any. The -// current primary is whichever tablet of type MASTER (if any) has the most +// current primary is whichever tablet of type PRIMARY (if any) has the most // recent PrimaryTermStartTime, which is the same rule that vtgate uses to route -// master traffic. +// primary traffic. // // The return value is nil if the current primary cannot be definitively -// determined. This can happen either if no tablet claims to be type MASTER, or -// if multiple tablets claim to be type MASTER and happen to have the same +// determined. This can happen either if no tablet claims to be type PRIMARY, or +// if multiple tablets claim to be type PRIMARY and happen to have the same // PrimaryTermStartTime timestamp (a tie). // // The tabletMap must be a complete map (not a partial result) for the shard. diff --git a/go/vt/vtctld/tablet_stats_cache.go b/go/vt/vtctld/tablet_stats_cache.go index 7375f4aeda..9f2d4343c3 100644 --- a/go/vt/vtctld/tablet_stats_cache.go +++ b/go/vt/vtctld/tablet_stats_cache.go @@ -73,7 +73,7 @@ func (a byTabletUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTabletUID) Less(i, j int) bool { return a[i].Tablet.Alias.Uid < a[j].Tablet.Alias.Uid } // availableTabletTypes is an array of tabletTypes that are being considered to display on the heatmap. -// Note: this list must always be sorted by the order they should appear (i.e. MASTER first, then REPLICA, then RDONLY) +// Note: this list must always be sorted by the order they should appear (i.e. 
PRIMARY first, then REPLICA, then RDONLY) var availableTabletTypes = []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} // tabletStatsCache holds the most recent status update received for diff --git a/go/vt/vtctld/tablet_stats_cache_test.go b/go/vt/vtctld/tablet_stats_cache_test.go index 2c4c57e203..823e167ee7 100644 --- a/go/vt/vtctld/tablet_stats_cache_test.go +++ b/go/vt/vtctld/tablet_stats_cache_test.go @@ -302,7 +302,7 @@ func TestHeatmapData(t *testing.T) { t.Errorf("got: %v, want: %v", got4, want4) } - // Checking that the heatmap data is returned correctly for the following view: (keyspace="ks1", cell="cell2", type="MASTER"). + // Checking that the heatmap data is returned correctly for the following view: (keyspace="ks1", cell="cell2", type="PRIMARY"). got5, err := tabletStatsCache.heatmapData("ks1", "cell2", "MASTER", "lag") if err != nil { t.Errorf("could not get heatmap data: %v", err) diff --git a/go/vt/vtctld/workflow.go b/go/vt/vtctld/workflow.go index da815a69c0..5b6ae00b8d 100644 --- a/go/vt/vtctld/workflow.go +++ b/go/vt/vtctld/workflow.go @@ -113,7 +113,7 @@ func runWorkflowManagerElection(ts *topo.Server) { } // Set up a redirect host so when we are not the - // master, we can redirect traffic properly. + // primary, we can redirect traffic properly. vtctl.WorkflowManager.SetRedirectFunc(func() (string, error) { ctx := context.Background() return mp.GetCurrentMasterID(ctx) diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index ece9c4180f..7301a5757b 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package buffer provides a buffer for MASTER traffic during failovers. +// Package buffer provides a buffer for PRIMARY traffic during failovers. // -// Instead of returning an error to the application (when the vttablet master +// Instead of returning an error to the application (when the vttablet primary // becomes unavailable), the buffer will automatically retry buffered requests // after the end of the failover was detected. // @@ -62,9 +62,9 @@ const ( bufferDryRun ) -// Buffer is used to track ongoing MASTER tablet failovers and buffer -// requests while the MASTER tablet is unavailable. -// Once the new MASTER starts accepting requests, buffering stops and requests +// Buffer is used to track ongoing PRIMARY tablet failovers and buffer +// requests while the PRIMARY tablet is unavailable. +// Once the new PRIMARY starts accepting requests, buffering stops and requests // queued so far will be automatically retried. // // There should be exactly one instance of this buffer. For each failover, an @@ -213,7 +213,7 @@ func (b *Buffer) WaitForFailoverEnd(ctx context.Context, keyspace, shard string, return sb.waitForFailoverEnd(ctx, keyspace, shard, err) } -// ProcessMasterHealth notifies the buffer to record a new master +// ProcessMasterHealth notifies the buffer to record a new primary // and end any failover buffering that may be in progress func (b *Buffer) ProcessMasterHealth(th *discovery.TabletHealth) { if th.Target.TabletType != topodatapb.TabletType_PRIMARY { @@ -235,7 +235,7 @@ func (b *Buffer) ProcessMasterHealth(th *discovery.TabletHealth) { } // StatsUpdate keeps track of the "tablet_externally_reparented_timestamp" of -// each master. This way we can detect the end of a failover. +// each primary. 
This way we can detect the end of a failover. // It is part of the discovery.LegacyHealthCheckStatsListener interface. func (b *Buffer) StatsUpdate(ts *discovery.LegacyTabletStats) { if ts.Target.TabletType != topodatapb.TabletType_PRIMARY { diff --git a/go/vt/vtgate/buffer/buffer_test.go b/go/vt/vtgate/buffer/buffer_test.go index 3e9f2da3f0..49247d12ee 100644 --- a/go/vt/vtgate/buffer/buffer_test.go +++ b/go/vt/vtgate/buffer/buffer_test.go @@ -88,11 +88,11 @@ func TestBuffer(t *testing.T) { now := time.Now() b := newWithNow(func() time.Time { return now }) - // Simulate that the current master reports its ExternallyReparentedTimestamp. + // Simulate that the current primary reports its ExternallyReparentedTimestamp. // vtgate sees this at startup. Additional periodic updates will be sent out // after this. If the TabletExternallyReparented RPC is called regularly by // an external failover tool, the timestamp will be increased (even though - // the master did not change.) + // the primary did not change.) b.StatsUpdate(&discovery.LegacyTabletStats{ Tablet: oldMaster, Target: &querypb.Target{Keyspace: keyspace, Shard: shard, TabletType: topodatapb.TabletType_PRIMARY}, @@ -370,7 +370,7 @@ func TestLastReparentTooRecent_BufferingSkipped(t *testing.T) { now := time.Now() b := newWithNow(func() time.Time { return now }) - // Simulate that the old master notified us about its reparented timestamp + // Simulate that the old primary notified us about its reparented timestamp // very recently (time.Now()). // vtgate should see this immediately after the start. b.StatsUpdate(&discovery.LegacyTabletStats{ @@ -379,7 +379,7 @@ func TestLastReparentTooRecent_BufferingSkipped(t *testing.T) { TabletExternallyReparentedTimestamp: now.Unix(), }) - // Failover to new master. Its end is detected faster than the beginning. + // Failover to new primary. Its end is detected faster than the beginning. // Do not start buffering. now = now.Add(1 * time.Second) b.StatsUpdate(&discovery.LegacyTabletStats{ @@ -414,7 +414,7 @@ func TestLastReparentTooRecent_Buffering(t *testing.T) { now := time.Now() b := newWithNow(func() time.Time { return now }) - // Simulate that the old master notified us about its reparented timestamp + // Simulate that the old primary notified us about its reparented timestamp // very recently (time.Now()). // vtgate should see this immediately after the start. b.StatsUpdate(&discovery.LegacyTabletStats{ @@ -423,7 +423,7 @@ func TestLastReparentTooRecent_Buffering(t *testing.T) { TabletExternallyReparentedTimestamp: now.Unix(), }) - // Failover to new master. Do not issue any requests before or after i.e. + // Failover to new primary. Do not issue any requests before or after i.e. // there was 0 QPS traffic and no buffering was started. now = now.Add(1 * time.Second) b.StatsUpdate(&discovery.LegacyTabletStats{ @@ -541,7 +541,7 @@ func TestPassthroughIgnoredKeyspaceOrShard(t *testing.T) { } // TestRequestCanceled_ExplicitEnd stops the buffering because the we see the -// new master. +// new primary. 
func TestRequestCanceled_ExplicitEnd(t *testing.T) { testRequestCanceled(t, true) } diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index f811959856..3af0dee6f0 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -75,17 +75,17 @@ type shardBuffer struct { queue []*entry // externallyReparented is the maximum value of all seen // "StreamHealthResponse.TabletexternallyReparentedTimestamp" values across - // all MASTER tablets of this shard. + // all PRIMARY tablets of this shard. // In practice, it is a) the last time the shard was reparented or b) the last // time the TabletExternallyReparented RPC was called on the tablet to confirm - // that the tablet is the current MASTER. + // that the tablet is the current PRIMARY. // We assume the value is a Unix timestamp in seconds. externallyReparented int64 // lastStart is the last time we saw the start of a failover. lastStart time.Time // lastEnd is the last time we saw the end of a failover. lastEnd time.Time - // lastReparent is the last time we saw that the tablet alias of the MASTER + // lastReparent is the last time we saw that the tablet alias of the PRIMARY // changed i.e. we definitely reparented to a different tablet. lastReparent time.Time // currentMaster is tracked to determine when to update "lastReparent". @@ -175,7 +175,7 @@ func (sb *shardBuffer) waitForFailoverEnd(ctx context.Context, keyspace, shard s // a) Buffering was stopped recently. // This can happen when we stop buffering while MySQL is not ready yet - // (read-only mode is not cleared yet on the new master). + // (read-only mode is not cleared yet on the new primary). lastBufferingStopped := now.Sub(sb.lastEnd) if !sb.lastEnd.IsZero() && lastBufferingStopped < *minTimeBetweenFailovers { sb.mu.Unlock() @@ -193,11 +193,11 @@ func (sb *shardBuffer) waitForFailoverEnd(ctx context.Context, keyspace, shard s return nil, nil } - // b) The MASTER was reparented recently (but we did not buffer it.) + // b) The PRIMARY was reparented recently (but we did not buffer it.) // This can happen when we see the end of the reparent *before* the first // request failure caused by the reparent. This is possible if the QPS is // very low. If we do not skip buffering here, we would start buffering but - // not stop because we already observed the promotion of the new master. + // not stop because we already observed the promotion of the new primary. lastReparentAgo := now.Sub(sb.lastReparent) if !sb.lastReparent.IsZero() && lastReparentAgo < *minTimeBetweenFailovers { sb.mu.Unlock() @@ -345,7 +345,7 @@ func (sb *shardBuffer) bufferRequestLocked(ctx context.Context) (*entry, error) // If releaseSlot is true, the buffer semaphore will be decreased by 1 when // the request retried and finished. // If blockingWait is true, this call will block until the request retried and -// finished. This mode is used during the drain (to avoid flooding the master) +// finished. This mode is used during the drain (to avoid flooding the primary) // while the non-blocking mode is used when a) evicting a request (e.g. because // the buffer is full or it exceeded the buffering window) or b) when the // request was canceled from outside and we removed it. @@ -420,7 +420,7 @@ func (sb *shardBuffer) evictOldestEntry(e *entry) { // timeout thread as fast as possible. However, the slot of the evicted // request is only returned after it has finished i.e. the buffer may stay // full in the meantime. 
This is a design tradeoff to keep things simple and - // avoid additional pressure on the master tablet. + // avoid additional pressure on the primary tablet. sb.unblockAndWait(e, nil /* err */, true /* releaseSlot */, false /* blockingWait */) sb.queue = sb.queue[1:] statsKeyWithReason := append(sb.statsKey, evictedWindowExceeded) @@ -472,9 +472,9 @@ func (sb *shardBuffer) recordExternallyReparentedTimestamp(timestamp int64, alia // Fast path (read lock): Check if new timestamp is higher. sb.mu.RLock() if timestamp <= sb.externallyReparented { - // Do nothing. Equal values are reported if the MASTER has not changed. - // Smaller values can be reported during the failover by the old master - // after the new master already took over. + // Do nothing. Equal values are reported if the primary has not changed. + // Smaller values can be reported during the failover by the old primary + // after the new primary already took over. sb.mu.RUnlock() return } diff --git a/go/vt/vtgate/discoverygateway.go b/go/vt/vtgate/discoverygateway.go index bce40ec841..47b5088ae7 100644 --- a/go/vt/vtgate/discoverygateway.go +++ b/go/vt/vtgate/discoverygateway.go @@ -74,7 +74,7 @@ type DiscoveryGateway struct { // keyspace/shard/tablet_type. statusAggregators map[string]*TabletStatusAggregator - // buffer, if enabled, buffers requests during a detected MASTER failover. + // buffer, if enabled, buffers requests during a detected PRIMARY failover. buffer *buffer.Buffer } @@ -91,7 +91,7 @@ func createDiscoveryGateway(ctx context.Context, hc discovery.LegacyHealthCheck, // NewDiscoveryGateway creates a new DiscoveryGateway using the provided healthcheck and toposerver. // cell is the cell where the gateway is located a.k.a localCell. -// This gateway can route to MASTER in any cell provided by the cells_to_watch command line argument. +// This gateway can route to PRIMARY in any cell provided by the cells_to_watch command line argument. // Other tablet type requests (REPLICA/RDONLY) are only routed to tablets in the same cell. func NewDiscoveryGateway(ctx context.Context, hc discovery.LegacyHealthCheck, serv srvtopo.Server, cell string, retryCount int) *DiscoveryGateway { var topoServer *topo.Server @@ -263,7 +263,7 @@ func (dg *DiscoveryGateway) withRetry(ctx context.Context, target *querypb.Targe bufferedOnce := false for i := 0; i < dg.retryCount+1; i++ { - // Check if we should buffer MASTER queries which failed due to an ongoing + // Check if we should buffer PRIMARY queries which failed due to an ongoing // failover. // Note: We only buffer once and only "!inTransaction" queries i.e. // a) no transaction is necessary (e.g. 
critical reads) or diff --git a/go/vt/vtgate/discoverygateway_test.go b/go/vt/vtgate/discoverygateway_test.go index 39b4835b58..76ede3f341 100644 --- a/go/vt/vtgate/discoverygateway_test.go +++ b/go/vt/vtgate/discoverygateway_test.go @@ -122,7 +122,7 @@ func TestDiscoveryGatewayGetTablets(t *testing.T) { t.Errorf("want %+v, got %+v", ep1, tsl) } - // master should use the one with newer timestamp regardless of cell + // primary should use the one with newer timestamp regardless of cell hc.Reset() dg.tsc.ResetForTesting() hc.AddTestTablet("remote", "1.1.1.1", 1001, keyspace, shard, topodatapb.TabletType_PRIMARY, true, 5, nil) diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index f6c58d37b7..1203443e42 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -227,7 +227,7 @@ func (e *Executor) legacyExecute(ctx context.Context, safeSession *SafeSession, logStats.Keyspace = destKeyspace logStats.TabletType = destTabletType.String() - // Legacy gateway allows transactions only on MASTER + // Legacy gateway allows transactions only on PRIMARY if UsingLegacyGateway() && safeSession.InTransaction() && destTabletType != topodatapb.TabletType_PRIMARY { return 0, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "transaction is supported only for primary tablet type, current type: %v", destTabletType) } diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go index 87e8434620..a67efd09af 100644 --- a/go/vt/vtgate/planbuilder/show.go +++ b/go/vt/vtgate/planbuilder/show.go @@ -196,7 +196,7 @@ func buildDBPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Prim return engine.NewRowsPrimitive(rows, buildVarCharFields("Database")), nil } -// buildShowVMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. It invokes queries on _vt.schema_migrations on all MASTER tablets on keyspace's shards. +// buildShowVMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. It invokes queries on _vt.schema_migrations on all primary tablets on keyspace's shards. func buildShowVMigrationsPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { dest, ks, tabletType, err := vschema.TargetDestination(show.DbName.String()) if err != nil { diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index fd711d1edb..ddcb370a00 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -436,7 +436,7 @@ func (stc *ScatterConn) MessageStream(ctx context.Context, rss []*srvtopo.Resolv return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) // nil and EOF are equivalent. UNAVAILABLE can be returned by vttablet if it's demoted - // from master to replica. For any of these conditions, we have to retry. + // from primary to replica. For any of these conditions, we have to retry. if err != nil && err != io.EOF && vterrors.Code(err) != vtrpcpb.Code_UNAVAILABLE { cancel() return err diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index 3f8f04b034..2ca0532611 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ -69,7 +69,7 @@ type TabletGateway struct { // keyspace/shard/tablet_type. statusAggregators map[string]*TabletStatusAggregator - // buffer, if enabled, buffers requests during a detected MASTER failover. + // buffer, if enabled, buffers requests during a detected PRIMARY failover. 
 	buffer *buffer.Buffer
 }
 
@@ -208,7 +208,7 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target,
 	bufferedOnce := false
 	for i := 0; i < gw.retryCount+1; i++ {
 
-		// Check if we should buffer MASTER queries which failed due to an ongoing
+		// Check if we should buffer PRIMARY queries which failed due to an ongoing
 		// failover.
 		// Note: We only buffer once and only "!inTransaction" queries i.e.
 		// a) no transaction is necessary (e.g. critical reads) or
diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go
index 18c6aa348f..f1a20f8c9c 100644
--- a/go/vt/vtgate/vcursor_impl.go
+++ b/go/vt/vtgate/vcursor_impl.go
@@ -125,7 +125,7 @@ func newVCursorImpl(
 		return nil, err
 	}
 
-	// With DiscoveryGateway transactions are only allowed on master.
+	// With DiscoveryGateway transactions are only allowed on primary.
 	if UsingLegacyGateway() && safeSession.InTransaction() && tabletType != topodatapb.TabletType_PRIMARY {
 		return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "transaction is supported only for primary tablet type, current type: %v", tabletType)
 	}
diff --git a/go/vt/vtgr/controller/diagnose.go b/go/vt/vtgr/controller/diagnose.go
index 87deccff87..2ba42c1ec7 100644
--- a/go/vt/vtgr/controller/diagnose.go
+++ b/go/vt/vtgr/controller/diagnose.go
@@ -370,7 +370,7 @@ func (shard *GRShard) findShardPrimaryTablet() *grInstance {
 	for _, instance := range shard.instances {
 		if instance.tablet.Type == topodatapb.TabletType_PRIMARY {
 			foundPrimary = true
-			// It is possible that there are more than one master in topo server
+			// It is possible that there is more than one primary in the topo server
 			// we should compare timestamp to pick the latest one
 			if latestMasterTimestamp.Before(instance.masterTimeStamp) {
 				latestMasterTimestamp = instance.masterTimeStamp
@@ -408,8 +408,8 @@ func (shard *GRShard) disconnectedInstance() (*grInstance, error) {
 		shard.instances[i], shard.instances[j] = shard.instances[j], shard.instances[i]
 	})
 	for _, instance := range shard.instances {
-		// Skip master because VTGR always join group and then update tablet type
-		// which means if a tablet has type master then it should have a group already
+		// Skip primary because VTGR always joins the group and then updates the tablet type,
+		// which means if a tablet has type primary then it should have a group already
 		if instance.tablet.Type == topodatapb.TabletType_PRIMARY {
 			continue
 		}
diff --git a/go/vt/vtgr/controller/refresh.go b/go/vt/vtgr/controller/refresh.go
index 547a2b5c1f..8e44b8b3e4 100644
--- a/go/vt/vtgr/controller/refresh.go
+++ b/go/vt/vtgr/controller/refresh.go
@@ -182,7 +182,7 @@ func parseTabletInfos(tablets map[string]*topo.TabletInfo) []*grInstance {
 	var newReplicas []*grInstance
 	for alias, tabletInfo := range tablets {
 		tablet := tabletInfo.Tablet
-		// Only monitor master, replica and ronly tablet types
+		// Only monitor primary, replica and rdonly tablet types
 		switch tablet.Type {
 		case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY:
 			// mysql hostname and port might be empty here if tablet is not running
diff --git a/go/vt/vttablet/endtoend/config_test.go b/go/vt/vttablet/endtoend/config_test.go
index 7b1a892c28..e6d5a2454c 100644
--- a/go/vt/vttablet/endtoend/config_test.go
+++ b/go/vt/vttablet/endtoend/config_test.go
@@ -135,7 +135,7 @@ func TestConsolidatorReplicasOnly(t *testing.T) {
 	revert := changeVar(t, "Consolidator", tabletenv.NotOnMaster)
 	defer revert()
 
-	// master should not do query consolidation
+	// primary should not do query 
consolidation var wg2 sync.WaitGroup wg2.Add(2) go func() { diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go index a350483fe6..bd10224363 100644 --- a/go/vt/vttablet/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -162,7 +162,7 @@ func (client *QueryClient) ReadTransaction(dtid string) (*querypb.TransactionMet } // SetServingType is for testing transitions. -// It currently supports only master->replica and back. +// It currently supports only primary->replica and back. func (client *QueryClient) SetServingType(tabletType topodatapb.TabletType) error { err := client.server.SetServingType(tabletType, time.Time{}, true /* serving */, "" /* reason */) // Wait for TwoPC transition, if necessary diff --git a/go/vt/vttablet/endtoend/sequence_test.go b/go/vt/vttablet/endtoend/sequence_test.go index bfbb28bc28..0652e18ee9 100644 --- a/go/vt/vttablet/endtoend/sequence_test.go +++ b/go/vt/vttablet/endtoend/sequence_test.go @@ -125,7 +125,7 @@ func TestResetSequence(t *testing.T) { } utils.MustMatch(t, &want, qr) - // Reset mastership + // Reset primaryship err = client.SetServingType(topodatapb.TabletType_REPLICA) if err != nil { t.Fatal(err) diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index e8528b6efa..c6ffe6d28f 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -2344,7 +2344,7 @@ func (e *Executor) reviewStaleMigrations(ctx context.Context) error { } } if onlineDDL.TabletAlias != e.TabletAliasString() { - // This means another tablet started the migration, and the migration has failed due to the tablet failure (e.g. master failover) + // This means another tablet started the migration, and the migration has failed due to the tablet failure (e.g. primary failover) if err := e.updateTabletFailure(ctx, onlineDDL.UUID); err != nil { return err } diff --git a/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go b/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go index 4feaa73db0..d3426e96df 100644 --- a/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go +++ b/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go @@ -103,7 +103,7 @@ func (q *StreamHealthQueryService) AddHealthResponseWithQPS(qps float64) { } // AddHealthResponseWithReplicationLag adds a faked health response to the -// buffer channel. Only "seconds_behind_master" is different in this message. +// buffer channel. Only "replication_lag_seconds" is different in this message. func (q *StreamHealthQueryService) AddHealthResponseWithReplicationLag(replicationLag uint32) { q.healthResponses <- &querypb.StreamHealthResponse{ Target: proto.Clone(q.target).(*querypb.Target), diff --git a/go/vt/vttablet/tabletmanager/replmanager.go b/go/vt/vttablet/tabletmanager/replmanager.go index 7592d7d5df..1da98b415f 100644 --- a/go/vt/vttablet/tabletmanager/replmanager.go +++ b/go/vt/vttablet/tabletmanager/replmanager.go @@ -39,7 +39,7 @@ const ( ) // replManager runs a poller to ensure mysql is replicating from -// the master. If necessary, it invokes tm.repairReplication to get it +// the primary. If necessary, it invokes tm.repairReplication to get it // fixed. On state change, SetTabletType must be called before changing // the tabletserver state. This will ensure that replication is fixed // upfront, allowing tabletserver to start off healthy. 
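The replmanager.go hunk above describes a poll-and-repair loop: a poller watches whether mysql is replicating from the primary and, when it is not, invokes tm.repairReplication so that replication is fixed before the tabletserver changes state. The sketch below shows only the shape of such a loop; replPoller, isHealthy and repair are hypothetical names for illustration, not Vitess APIs:

package main

import (
	"context"
	"fmt"
	"time"
)

// replPoller periodically checks replication health and invokes a repair
// callback when replication is found broken, retrying on later ticks if
// the repair fails.
type replPoller struct {
	interval  time.Duration
	isHealthy func(context.Context) bool
	repair    func(context.Context) error
}

func (p *replPoller) run(ctx context.Context) {
	ticker := time.NewTicker(p.interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if p.isHealthy(ctx) {
				continue
			}
			if err := p.repair(ctx); err != nil {
				fmt.Println("repair failed, will retry:", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	broken := true
	p := &replPoller{
		interval:  time.Second,
		isHealthy: func(context.Context) bool { return !broken },
		repair: func(context.Context) error {
			broken = false // pretend we reconnected to the primary
			return nil
		},
	}
	p.run(ctx)
	fmt.Println("replication healthy:", !broken)
}

In the real manager the repair step is the tm.repairReplication method patched later in this file set; the sketch only demonstrates the ticker-driven structure the comment refers to.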
diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index 951a91894f..08bf60ee56 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -190,8 +190,8 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L metadataManager := &mysqlctl.MetadataManager{} return metadataManager.PopulateMetadataTables(params.Mysqld, params.LocalMetadata, params.DbName) } - // We should not become master after restore, because that would incorrectly - // start a new master term, and it's likely our data dir will be out of date. + // We should not become primary after restore, because that would incorrectly + // start a new primary term, and it's likely our data dir will be out of date. if originalType == topodatapb.TabletType_PRIMARY { originalType = tm.baseTabletType } @@ -235,7 +235,7 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L // Starting from here we won't be able to recover if we get stopped by a cancelled // context. Thus we use the background context to get through to the finish. if keyspaceInfo.KeyspaceType == topodatapb.KeyspaceType_NORMAL { - // Reconnect to master only for "NORMAL" keyspaces + // Reconnect to primary only for "NORMAL" keyspaces if err := tm.startReplication(context.Background(), pos, originalType); err != nil { return err } @@ -486,36 +486,36 @@ func (tm *TabletManager) catchupToGTID(ctx context.Context, afterGTIDPos string, func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Position, tabletType topodatapb.TabletType) error { cmds := []string{ "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget master host:port. + "RESET SLAVE ALL", // "ALL" makes it forget primary host:port. } if err := tm.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds); err != nil { return vterrors.Wrap(err, "failed to reset replication") } - // Set the position at which to resume from the master. + // Set the position at which to resume from the primary. if err := tm.MysqlDaemon.SetReplicationPosition(ctx, pos); err != nil { return vterrors.Wrap(err, "failed to set replication position") } - // Read the shard to find the current master, and its location. + // Read the shard to find the current primary, and its location. tablet := tm.Tablet() si, err := tm.TopoServer.GetShard(ctx, tablet.Keyspace, tablet.Shard) if err != nil { return vterrors.Wrap(err, "can't read shard") } if si.PrimaryAlias == nil { - // We've restored, but there's no master. This is fine, since we've + // We've restored, but there's no primary. This is fine, since we've // already set the position at which to resume when we're later reparented. // If we had instead considered this fatal, all tablets would crash-loop - // until a master appears, which would make it impossible to elect a master. + // until a primary appears, which would make it impossible to elect a primary. log.Warningf("Can't start replication after restore: shard %v/%v has no master.", tablet.Keyspace, tablet.Shard) return nil } if topoproto.TabletAliasEqual(si.PrimaryAlias, tablet.Alias) { - // We used to be the master before we got restarted in an empty data dir, - // and no other master has been elected in the meantime. + // We used to be the primary before we got restarted in an empty data dir, + // and no other primary has been elected in the meantime. // This shouldn't happen, so we'll let the operator decide which tablet - // should actually be promoted to master. 
+ // should actually be promoted to primary. log.Warningf("Can't start replication after restore: master record still points to this tablet.") return nil } @@ -524,12 +524,12 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio return vterrors.Wrapf(err, "Cannot read master tablet %v", si.PrimaryAlias) } - // If using semi-sync, we need to enable it before connecting to master. + // If using semi-sync, we need to enable it before connecting to primary. if err := tm.fixSemiSync(tabletType); err != nil { return err } - // Set master and start replication. + // Set primary and start replication. if err := tm.MysqlDaemon.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), false /* stopReplicationBefore */, !*mysqlctl.DisableActiveReparents /* startReplicationAfter */); err != nil { return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed") } @@ -539,10 +539,10 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio if *mysqlctl.DisableActiveReparents { return nil } - // wait for reliable seconds behind master + // wait for reliable replication_lag_seconds // we have pos where we want to resume from // if PrimaryPosition is the same, that means no writes - // have happened to master, so we are up-to-date + // have happened to primary, so we are up-to-date // otherwise, wait for replica's Position to change from // the initial pos before proceeding tmc := tmclient.NewTabletManagerClient() @@ -551,11 +551,11 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio defer remoteCancel() posStr, err := tmc.MasterPosition(remoteCtx, ti.Tablet) if err != nil { - // It is possible that though PrimaryAlias is set, the master tablet is unreachable + // It is possible that though PrimaryAlias is set, the primary tablet is unreachable // Log a warning and let tablet restore in that case // If we had instead considered this fatal, all tablets would crash-loop - // until a master appears, which would make it impossible to elect a master. - log.Warningf("Can't get master replication position after restore: %v", err) + // until a primary appears, which would make it impossible to elect a primary. + log.Warningf("Can't get primary replication position after restore: %v", err) return nil } masterPos, err := mysql.DecodePosition(posStr) diff --git a/go/vt/vttablet/tabletmanager/rpc_actions.go b/go/vt/vttablet/tabletmanager/rpc_actions.go index 54828fe45c..4c913a2f0f 100644 --- a/go/vt/vttablet/tabletmanager/rpc_actions.go +++ b/go/vt/vttablet/tabletmanager/rpc_actions.go @@ -34,7 +34,7 @@ import ( ) // DBAction is used to tell ChangeTabletType whether to call SetReadOnly on change to -// MASTER tablet type +// PRIMARY tablet type type DBAction int // Allowed values for DBAction diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index 3dc3ac80e0..be86f86e26 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -42,7 +42,7 @@ func (tm *TabletManager) Backup(ctx context.Context, concurrency int, logger log } // Check tablet type current process has. - // During a network partition it is possible that from the topology perspective this is no longer the master, + // During a network partition it is possible that from the topology perspective this is no longer the primary, // but the process didn't find out about this. 
 	// It is not safe to take backups from tablet in this state
 	currentTablet := tm.Tablet()
@@ -125,7 +125,7 @@ func (tm *TabletManager) Backup(ctx context.Context, concurrency int, logger log
 	// above call to Backup. Thus we use the background context to get through to the finish.
 
 	// Change our type back to the original value.
-	// Original type could be master so pass in a real value for PrimaryTermStartTime
+	// Original type could be primary so pass in a real value for PrimaryTermStartTime
 	if err := tm.changeTypeLocked(bgCtx, originalType, DBActionNone); err != nil {
 		// failure in changing the topology type is probably worse,
 		// so returning that (we logged the snapshot error anyway)
diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go
index 6a2be32cb0..73e7835870 100644
--- a/go/vt/vttablet/tabletmanager/rpc_replication.go
+++ b/go/vt/vttablet/tabletmanager/rpc_replication.go
@@ -51,12 +51,12 @@ func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdat
 	return mysql.ReplicationStatusToProto(status), nil
 }
 
-// MasterStatus returns the replication status fopr a master tablet.
+// MasterStatus returns the replication status for a primary tablet.
 func (tm *TabletManager) MasterStatus(ctx context.Context) (*replicationdatapb.PrimaryStatus, error) {
 	return tm.PrimaryStatus(ctx)
 }
 
-// PrimaryStatus returns the replication status fopr a master tablet.
+// PrimaryStatus returns the replication status for a primary tablet.
 func (tm *TabletManager) PrimaryStatus(ctx context.Context) (*replicationdatapb.PrimaryStatus, error) {
 	status, err := tm.MysqlDaemon.PrimaryStatus(ctx)
 	if err != nil {
@@ -65,7 +65,7 @@ func (tm *TabletManager) PrimaryStatus(ctx context.Context) (*replicationdatapb.
 	return mysql.PrimaryStatusToProto(status), nil
 }
 
-// MasterPosition returns the master position
+// MasterPosition returns the primary position
 func (tm *TabletManager) MasterPosition(ctx context.Context) (string, error) {
 	return tm.PrimaryPosition(ctx)
 }
@@ -265,7 +265,7 @@ func (tm *TabletManager) InitPrimary(ctx context.Context) (string, error) {
 		return "", err
 	}
 
-	// Enforce semi-sync after changing the tablet)type to MASTER. Otherwise, the
+	// Enforce semi-sync after changing the tablet type to PRIMARY. Otherwise, the
 	// primary will hang while trying to create the database.
 	if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY); err != nil {
 		return "", err
@@ -286,7 +286,7 @@ func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreate
 	return tm.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds)
 }
 
-// InitReplica sets replication master and position, and waits for the
+// InitReplica sets replication primary and position, and waits for the
 // reparent_journal table entry up to context timeout
 func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.TabletAlias, position string, timeCreatedNS int64) error {
 	if err := tm.lock(ctx); err != nil {
@@ -294,9 +294,9 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab
 	}
 	defer tm.unlock()
 
-	// If we were a master type, switch our type to replica. This
-	// is used on the old master when using InitShardPrimary with
-	// -force, and the new master is different from the old master.
+	// If we were a primary type, switch our type to replica. This
+	// is used on the old primary when using InitShardPrimary with
+	// -force, and the new primary is different from the old primary. 
if tm.Tablet().Type == topodatapb.TabletType_PRIMARY { if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_REPLICA, DBActionNone); err != nil { return err @@ -314,8 +314,8 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab tm.replManager.setReplicationStopped(false) - // If using semi-sync, we need to enable it before connecting to master. - // If we were a master type, we need to switch back to replica settings. + // If using semi-sync, we need to enable it before connecting to primary. + // If we were a primary type, we need to switch back to replica settings. // Otherwise we won't be able to commit anything. tt := tm.Tablet().Type if tt == topodatapb.TabletType_PRIMARY { @@ -336,7 +336,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab return tm.MysqlDaemon.WaitForReparentJournal(ctx, timeCreatedNS) } -// DemotePrimary prepares a MASTER tablet to give up leadership to another tablet. +// DemotePrimary prepares a PRIMARY tablet to give up leadership to another tablet. // // It attemps to idempotently ensure the following guarantees upon returning // successfully: @@ -347,7 +347,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab // // If necessary, it waits for all in-flight writes to complete or time out. // -// It should be safe to call this on a MASTER tablet that was already demoted, +// It should be safe to call this on a PRIMARY tablet that was already demoted, // or on a tablet that already transitioned to REPLICA. // // If a step fails in the middle, it will try to undo any changes it made. @@ -379,8 +379,8 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure return nil, err } - // If we are a master tablet and not yet read-only, stop accepting new - // queries and wait for in-flight queries to complete. If we are not master, + // If we are a primary tablet and not yet read-only, stop accepting new + // queries and wait for in-flight queries to complete. If we are not primary, // or if we are already read-only, there's no need to stop the queryservice // in order to ensure the guarantee we are being asked to provide, which is // that no writes are occurring. @@ -416,7 +416,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // Now that we know no writes are in-flight and no new writes can occur, // set MySQL to read-only mode. If we are already read-only because of a - // previous demotion, or because we are not master anyway, this should be + // previous demotion, or because we are not primary anyway, this should be // idempotent. if *setSuperReadOnly { // Setting super_read_only also sets read_only @@ -437,13 +437,13 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure } }() - // If using semi-sync, we need to disable master-side. + // If using semi-sync, we need to disable primary-side. 
if err := tm.fixSemiSync(topodatapb.TabletType_REPLICA); err != nil { return nil, err } defer func() { if finalErr != nil && revertPartialFailure && wasMaster { - // enable master-side semi-sync again + // enable primary-side semi-sync again if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY); err != nil { log.Warningf("fixSemiSync(MASTER) failed during revert: %v", err) } @@ -465,7 +465,7 @@ func (tm *TabletManager) UndoDemoteMaster(ctx context.Context) error { // UndoDemotePrimary reverts a previous call to DemotePrimary // it sets read-only to false, fixes semi-sync -// and returns its master position. +// and returns its primary position. func (tm *TabletManager) UndoDemotePrimary(ctx context.Context) error { if err := tm.lock(ctx); err != nil { return err @@ -501,12 +501,12 @@ func (tm *TabletManager) UndoDemotePrimary(ctx context.Context) error { return nil } -// ReplicaWasPromoted promotes a replica to master, no questions asked. +// ReplicaWasPromoted promotes a replica to primary, no questions asked. func (tm *TabletManager) ReplicaWasPromoted(ctx context.Context) error { return tm.ChangeType(ctx, topodatapb.TabletType_PRIMARY) } -// SetReplicationSource sets replication master, and waits for the +// SetReplicationSource sets replication primary, and waits for the // reparent_journal table entry up to context timeout func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { if err := tm.lock(ctx); err != nil { @@ -552,11 +552,11 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA }() }() - // Change our type to REPLICA if we used to be MASTER. - // Being sent SetReplicationSource means another MASTER has been successfully promoted, + // Change our type to REPLICA if we used to be PRIMARY. + // Being sent SetReplicationSource means another PRIMARY has been successfully promoted, // so we convert to REPLICA first, since we want to do it even if other // steps fail below. - // Note it is important to check for MASTER here so that we don't + // Note it is important to check for PRIMARY here so that we don't // unintentionally change the type of RDONLY tablets tablet := tm.Tablet() if tablet.Type == topodatapb.TabletType_PRIMARY { @@ -572,8 +572,8 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA if err == mysql.ErrNotReplica { // This is a special error that means we actually succeeded in reading // the status, but the status is empty because replication is not - // configured. We assume this means we used to be a master, so we always - // try to start replicating once we are told who the new master is. + // configured. We assume this means we used to be a primary, so we always + // try to start replicating once we are told who the new primary is. shouldbeReplicating = true // Since we continue in the case of this error, make sure 'status' is // in a known, empty state. @@ -591,7 +591,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA } // If using semi-sync, we need to enable it before connecting to primary. - // If we are currently MASTER, assume we are about to become REPLICA. + // If we are currently PRIMARY, assume we are about to become REPLICA. 
tabletType := tm.Tablet().Type if tabletType == topodatapb.TabletType_PRIMARY { tabletType = topodatapb.TabletType_REPLICA @@ -602,7 +602,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA // Update the primary/source address only if needed. // We don't want to interrupt replication for no reason. if parentAlias == nil { - // if there is no master in the shard, return an error so that we can retry + // if there is no primary in the shard, return an error so that we can retry return vterrors.New(vtrpc.Code_FAILED_PRECONDITION, "Shard masterAlias is nil") } parent, err := tm.TopoServer.GetTablet(ctx, parentAlias) @@ -660,7 +660,7 @@ func (tm *TabletManager) ReplicaWasRestarted(ctx context.Context, parent *topoda } defer tm.unlock() - // Only change type of former MASTER tablets. + // Only change type of former PRIMARY tablets. // Don't change type of RDONLY tablet := tm.Tablet() if tablet.Type != topodatapb.TabletType_PRIMARY { @@ -760,7 +760,7 @@ type StopReplicationAndGetStatusResponse struct { Status *replicationdatapb.StopReplicationStatus } -// PromoteReplica makes the current tablet the master +// PromoteReplica makes the current tablet the primary func (tm *TabletManager) PromoteReplica(ctx context.Context) (string, error) { if err := tm.lock(ctx); err != nil { return "", err @@ -799,14 +799,14 @@ func (tm *TabletManager) fixSemiSync(tabletType topodatapb.TabletType) error { return nil } - // Only enable if we're eligible for becoming master (REPLICA type). + // Only enable if we're eligible for becoming primary (REPLICA type). // Ineligible tablets (RDONLY) shouldn't ACK because we'll never promote them. if !isMasterEligible(tabletType) { return tm.MysqlDaemon.SetSemiSyncEnabled(false, false) } - // Always enable replica-side since it doesn't hurt to keep it on for a master. - // The master-side needs to be off for a replica, or else it will get stuck. + // Always enable replica-side since it doesn't hurt to keep it on for a primary. + // The primary-side needs to be off for a replica, or else it will get stuck. return tm.MysqlDaemon.SetSemiSyncEnabled(tabletType == topodatapb.TabletType_PRIMARY, true) } @@ -817,7 +817,7 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT } if tabletType == topodatapb.TabletType_PRIMARY { - // Master is special. It is always handled at the + // Primary is special. It is always handled at the // right time by the reparent operations, it doesn't // need to be fixed. return nil @@ -875,7 +875,7 @@ func (tm *TabletManager) handleRelayLogError(err error) error { } // repairReplication tries to connect this server to whoever is -// the current master of the shard, and start replicating. +// the current primary of the shard, and start replicating. func (tm *TabletManager) repairReplication(ctx context.Context) error { tablet := tm.Tablet() @@ -888,7 +888,7 @@ func (tm *TabletManager) repairReplication(ctx context.Context) error { } if topoproto.TabletAliasEqual(si.PrimaryAlias, tablet.Alias) { - // The shard record says we are master, but we disagree; we wouldn't + // The shard record says we are primary, but we disagree; we wouldn't // reach this point unless we were told to check replication. // Hopefully someone is working on fixing that, but in any case, // we should not try to reparent to ourselves. 
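The fixSemiSync hunks above encode a small decision table that is easy to lose across the renamed comments. A minimal sketch of that table in Go, assuming the real topodata proto; semiSyncFlags and the inlined eligibility check are illustrative stand-ins, not the tablet manager's actual API:

package main

import (
	"fmt"

	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)

// semiSyncFlags returns the (primary-side, replica-side) semi-sync settings
// implied by the comments above. Eligibility is simplified here to
// REPLICA-or-PRIMARY; the real code consults its own eligibility helper.
func semiSyncFlags(t topodatapb.TabletType) (primarySide, replicaSide bool) {
	if t != topodatapb.TabletType_PRIMARY && t != topodatapb.TabletType_REPLICA {
		// Ineligible types such as RDONLY must not ACK: they will never be promoted.
		return false, false
	}
	// Replica-side stays on (harmless on a primary); primary-side is on only
	// for PRIMARY, since a replica with it on gets stuck waiting for ACKs.
	return t == topodatapb.TabletType_PRIMARY, true
}

func main() {
	for _, t := range []topodatapb.TabletType{
		topodatapb.TabletType_PRIMARY,
		topodatapb.TabletType_REPLICA,
		topodatapb.TabletType_RDONLY,
	} {
		p, r := semiSyncFlags(t)
		fmt.Printf("%-7v primary-side=%-5v replica-side=%v\n", t, p, r)
	}
}

Printing one row per tablet type makes the asymmetry between the two sides easy to verify at a glance.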
diff --git a/go/vt/vttablet/tabletmanager/shard_sync.go b/go/vt/vttablet/tabletmanager/shard_sync.go index 9865e1f375..0e41e04b08 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync.go +++ b/go/vt/vttablet/tabletmanager/shard_sync.go @@ -57,7 +57,7 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st var retryChan <-chan time.Time // shardWatch is how we get notified when the shard record is updated. - // We only watch the shard record while we are master. + // We only watch the shard record while we are primary. shardWatch := &shardWatcher{} defer shardWatch.stop() @@ -96,14 +96,14 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st switch tablet.Type { case topodatapb.TabletType_PRIMARY: // This is a failsafe code because we've seen races that can cause - // master term start time to become zero. + // primary term start time to become zero. if tablet.PrimaryTermStartTime == nil { log.Errorf("PrimaryTermStartTime should not be nil: %v", tablet) // Start retry timer and go back to sleep. retryChan = time.After(*shardSyncRetryDelay) continue } - // If we think we're master, check if we need to update the shard record. + // If we think we're primary, check if we need to update the shard record. // Fetch the start time from the record we just got, because the tm's tablet can change. masterAlias, shouldDemote, err := syncShardMaster(ctx, tm.TopoServer, tablet, logutil.ProtoToTime(tablet.PrimaryTermStartTime)) if err != nil { @@ -113,8 +113,8 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st continue } if shouldDemote { - // Someone updated the PrimaryTermStartTime while we still think we're master. - // This means that we should abort our term, since someone else must have claimed mastership + // Someone updated the PrimaryTermStartTime while we still think we're primary. + // This means that we should abort our term, since someone else must have claimed primaryship // and wrote to the shard record if err := tm.abortMasterTerm(ctx, masterAlias); err != nil { log.Errorf("Failed to abort master term: %v", err) @@ -122,13 +122,13 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st retryChan = time.After(*shardSyncRetryDelay) continue } - // We're not master anymore, so stop watching the shard record. + // We're not primary anymore, so stop watching the shard record. shardWatch.stop() continue } - // As long as we're master, watch the shard record so we'll be - // notified if another master takes over. + // As long as we're primary, watch the shard record so we'll be + // notified if another primary takes over. if shardWatch.active() { // We already have an active watch. Nothing to do. continue @@ -140,20 +140,20 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st continue } default: - // If we're not master, stop watching the shard record, - // so only masters contribute to global topo watch load. + // If we're not primary, stop watching the shard record, + // so only primaries contribute to global topo watch load. shardWatch.stop() } } } -// syncShardMaster is called when we think we're master. +// syncShardMaster is called when we think we're primary. // It checks that the shard record agrees, and updates it if possible. // // If the returned error is nil, the returned masterAlias indicates the current -// master tablet according to the shard record. +// primary tablet according to the shard record. 
// -// If the shard record indicates a new master has taken over, this returns +// If the shard record indicates a new primary has taken over, this returns // success (we successfully synchronized), but the returned masterAlias will be // different from the input tablet.Alias. func syncShardMaster(ctx context.Context, ts *topo.Server, tablet *topodatapb.Tablet, PrimaryTermStartTime time.Time) (masterAlias *topodatapb.TabletAlias, shouldDemote bool, err error) { @@ -192,12 +192,12 @@ func syncShardMaster(ctx context.Context, ts *topo.Server, tablet *topodatapb.Ta return shardInfo.PrimaryAlias, shouldDemote, nil } -// abortMasterTerm is called when we unexpectedly lost mastership. +// abortMasterTerm is called when we unexpectedly lost primaryship. // // Under normal circumstances, we should be gracefully demoted before a new -// master appears. This function is only reached when that graceful demotion -// failed or was skipped, so we only found out we're no longer master after the -// new master started advertising itself. +// primary appears. This function is only reached when that graceful demotion +// failed or was skipped, so we only found out we're no longer primary after the +// new primary started advertising itself. // // If active reparents are enabled, we demote our own MySQL to a replica and // update our tablet type to REPLICA. @@ -221,7 +221,7 @@ func (tm *TabletManager) abortMasterTerm(ctx context.Context, masterAlias *topod // Do a full demotion to convert MySQL into a replica. // We do not revert on partial failure here because this code path only - // triggers after a new master has taken over, so we are past the point of + // triggers after a new primary has taken over, so we are past the point of // no return. Instead, we should leave partial results and retry the rest // later. log.Infof("Active reparents are enabled; converting MySQL to replica.") diff --git a/go/vt/vttablet/tabletmanager/shard_sync_test.go b/go/vt/vttablet/tabletmanager/shard_sync_test.go index c26f40fdca..6cbfcd716e 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync_test.go +++ b/go/vt/vttablet/tabletmanager/shard_sync_test.go @@ -50,7 +50,7 @@ func TestShardSync(t *testing.T) { tm := newTestTM(t, ts, 100, keyspace, shard) defer tm.Stop() - // update the master info in the shard record and set it to nil + // update the primary info in the shard record and set it to nil originalTime := time.Now() updateMasterInfoInShardRecord(ctx, t, tm, nil, originalTime) diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index c4c3db6ca7..aeadb7d3a2 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -144,7 +144,7 @@ type TabletManager struct { tabletAlias *topodatapb.TabletAlias // baseTabletType is the tablet type we revert back to - // when we transition back from something like MASTER. + // when we transition back from something like PRIMARY. baseTabletType topodatapb.TabletType // actionSema is there to run only one action at a time. @@ -480,10 +480,10 @@ func (tm *TabletManager) rebuildKeyspace(ctx context.Context, done chan<- struct func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInfo) error { if si.PrimaryAlias != nil && topoproto.TabletAliasEqual(si.PrimaryAlias, tm.tabletAlias) { - // We're marked as master in the shard record, which could mean the master + // We're marked as primary in the shard record, which could mean the primary // tablet process was just restarted. 
However, we need to check if a new - // master is in the process of taking over. In that case, it will let us - // know by forcibly updating the old master's tablet record. + // primary is in the process of taking over. In that case, it will let us + // know by forcibly updating the old primary's tablet record. oldTablet, err := tm.TopoServer.GetTablet(ctx, tm.tabletAlias) switch { case topo.IsErrType(err, topo.NoNode): @@ -492,15 +492,15 @@ func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInf log.Infof("Shard master alias matches, but there is no existing tablet record. Switching to master with 'Now' as time") tm.tmState.UpdateTablet(func(tablet *topodatapb.Tablet) { tablet.Type = topodatapb.TabletType_PRIMARY - // Update the master term start time (current value is 0) because we - // assume that we are actually the MASTER and in case of a tiebreak, + // Update the primary term start time (current value is 0) because we + // assume that we are actually the PRIMARY and in case of a tiebreak, // vtgate should prefer us. tablet.PrimaryTermStartTime = logutil.TimeToProto(time.Now()) }) case err == nil: if oldTablet.Type == topodatapb.TabletType_PRIMARY { log.Infof("Shard master alias matches, and existing tablet agrees. Switching to master with tablet's master term start time: %v", oldTablet.PrimaryTermStartTime) - // We're marked as master in the shard record, + // We're marked as primary in the shard record, // and our existing tablet record agrees. tm.tmState.UpdateTablet(func(tablet *topodatapb.Tablet) { tablet.Type = topodatapb.TabletType_PRIMARY @@ -523,7 +523,7 @@ func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInf // There's no existing tablet record, so there is nothing to do case err == nil: if oldTablet.Type == topodatapb.TabletType_PRIMARY { - // Our existing tablet type is master, but the shard record does not agree. + // Our existing tablet type is primary, but the shard record does not agree. // Only take over if our primary_term_start_time is after what is in the shard record oldPrimaryTermStartTime := oldTablet.GetPrimaryTermStartTime() currentShardTime := si.GetPrimaryTermStartTime() diff --git a/go/vt/vttablet/tabletmanager/tm_state.go b/go/vt/vttablet/tabletmanager/tm_state.go index ca0c0052f6..403fd35a71 100644 --- a/go/vt/vttablet/tabletmanager/tm_state.go +++ b/go/vt/vttablet/tabletmanager/tm_state.go @@ -185,7 +185,7 @@ func (ts *tmState) ChangeTabletType(ctx context.Context, tabletType topodatapb.T } if action == DBActionSetReadWrite { // We call SetReadOnly only after the topo has been updated to avoid - // situations where two tablets are master at the DB level but not at the vitess level + // situations where two tablets are primary at the DB level but not at the vitess level if err := ts.tm.MysqlDaemon.SetReadOnly(false); err != nil { return err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index 3e0ac3572e..661fe1cf29 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -140,7 +140,7 @@ func (vc *vcopier) copyNext(ctx context.Context, settings binlogplayer.VRSetting } // catchup replays events to the subset of the tables that have been copied -// until replication is caught up. In order to stop, the seconds behind master has +// until replication is caught up. In order to stop, the seconds behind primary has // to fall below replicationLagTolerance. 
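As a reading aid for the catchup contract renamed above: the loop's stopping condition is purely lag-based. A minimal sketch under stated assumptions; lagFn stands in for however seconds-behind-primary is measured, and nothing here is vcopier's actual code:

package example

import (
	"context"
	"fmt"
	"time"
)

// waitForCatchup polls a lag source until the reported lag drops below
// tolerance, or the context is cancelled. It mirrors only the stopping
// condition described in the catchup comment above.
func waitForCatchup(ctx context.Context, lagFn func() time.Duration, tolerance time.Duration) error {
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		if lagFn() <= tolerance {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("replication did not catch up: %w", ctx.Err())
		case <-tick.C:
			// Re-check the lag on the next iteration.
		}
	}
}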
func (vc *vcopier) catchup(ctx context.Context, copyState map[string]*sqltypes.Result) error { ctx, cancel := context.WithCancel(ctx) diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go index 3d578ecc55..64511782c5 100644 --- a/go/vt/vttablet/tabletserver/health_streamer.go +++ b/go/vt/vttablet/tabletserver/health_streamer.go @@ -313,7 +313,7 @@ func (hs *healthStreamer) SetUnhealthyThreshold(v time.Duration) { func (hs *healthStreamer) reload() error { hs.mu.Lock() defer hs.mu.Unlock() - // Schema Reload to happen only on master. + // Schema Reload to happen only on primary. if hs.state.Target.TabletType != topodatapb.TabletType_PRIMARY { return nil } diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go index 63088f3271..e1bb06bda4 100644 --- a/go/vt/vttablet/tabletserver/health_streamer_test.go +++ b/go/vt/vttablet/tabletserver/health_streamer_test.go @@ -100,7 +100,7 @@ func TestHealthStreamerBroadcast(t *testing.T) { } assert.Equal(t, want, shr) - // Test master and timestamp. + // Test primary and timestamp. now := time.Now() hs.ChangeState(topodatapb.TabletType_PRIMARY, now, 0, nil, true) shr = <-ch @@ -118,7 +118,7 @@ func TestHealthStreamerBroadcast(t *testing.T) { } assert.Equal(t, want, shr) - // Test non-serving, and 0 timestamp for non-master. + // Test non-serving, and 0 timestamp for non-primary. hs.ChangeState(topodatapb.TabletType_REPLICA, now, 1*time.Second, nil, false) shr = <-ch want = &querypb.StreamHealthResponse{ diff --git a/go/vt/vttablet/tabletserver/repltracker/reader.go b/go/vt/vttablet/tabletserver/repltracker/reader.go index 6204a804f3..be9bd09113 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader.go @@ -44,7 +44,7 @@ const ( // heartbeatReader reads the heartbeat table at a configured interval in order // to calculate replication lag. It is meant to be run on a replica, and paired -// with a heartbeatWriter on a master. +// with a heartbeatWriter on a primary. // Lag is calculated by comparing the most recent timestamp in the heartbeat // table against the current time at read time. type heartbeatReader struct { diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker.go b/go/vt/vttablet/tabletserver/repltracker/repltracker.go index 92167e2f81..7799a19a64 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker.go @@ -79,7 +79,7 @@ func (rt *ReplTracker) InitDBConfig(target *querypb.Target, mysqld mysqlctl.Mysq rt.poller.InitDBConfig(mysqld) } -// MakeMaster must be called if the tablet type becomes MASTER. +// MakeMaster must be called if the tablet type becomes PRIMARY. func (rt *ReplTracker) MakeMaster() { rt.mu.Lock() defer rt.mu.Unlock() @@ -95,7 +95,7 @@ func (rt *ReplTracker) MakeMaster() { } } -// MakeNonMaster must be called if the tablet type becomes non-MASTER. +// MakeNonMaster must be called if the tablet type becomes non-PRIMARY. 
func (rt *ReplTracker) MakeNonMaster() { rt.mu.Lock() defer rt.mu.Unlock() diff --git a/go/vt/vttablet/tabletserver/repltracker/writer.go b/go/vt/vttablet/tabletserver/repltracker/writer.go index 60df9f05cb..25ec3de889 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer.go @@ -54,7 +54,7 @@ var withDDL = withddl.New([]string{ fmt.Sprintf(sqlCreateHeartbeatTable, "_vt"), }) -// heartbeatWriter runs on master tablets and writes heartbeats to the _vt.heartbeat +// heartbeatWriter runs on primary tablets and writes heartbeats to the _vt.heartbeat // table at a regular interval, defined by heartbeat_interval. type heartbeatWriter struct { env tabletenv.Env diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index ed2188543a..7be476e8db 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -124,7 +124,7 @@ func (se *Engine) InitDBConfig(cp dbconfigs.Connector) { } // EnsureConnectionAndDB ensures that we can connect to mysql. -// If tablet type is master and there is no db, then the database is created. +// If tablet type is primary and there is no db, then the database is created. // This function can be called before opening the Engine. func (se *Engine) EnsureConnectionAndDB(tabletType topodatapb.TabletType) error { ctx := tabletenv.LocalContext() @@ -141,7 +141,7 @@ func (se *Engine) EnsureConnectionAndDB(tabletType topodatapb.TabletType) error return err } - // We are master and db is not found. Let's create it. + // We are primary and db is not found. Let's create it. // We use allprivs instead of DBA because we want db create to fail if we're read-only. conn, err = dbconnpool.NewDBConnection(ctx, se.env.Config().DB.AllPrivsConnector()) if err != nil { @@ -241,7 +241,7 @@ func (se *Engine) Close() { } // MakeNonMaster clears the sequence caches to make sure that -// they don't get accidentally reused after losing mastership. +// they don't get accidentally reused after losing primaryship. func (se *Engine) MakeNonMaster() { // This function is tested through endtoend test. se.mu.Lock() diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go index 6837a0bc8b..4e6f8436d1 100644 --- a/go/vt/vttablet/tabletserver/state_manager.go +++ b/go/vt/vttablet/tabletserver/state_manager.go @@ -440,7 +440,7 @@ func (sm *stateManager) unserveMaster() error { } func (sm *stateManager) serveNonMaster(wantTabletType topodatapb.TabletType) error { - // We are likely transitioning from master. We have to honor + // We are likely transitioning from primary. We have to honor // the shutdown grace period. cancel := sm.handleShutdownGracePeriod() defer cancel() @@ -584,7 +584,7 @@ func (sm *stateManager) stateStringLocked(tabletType topodatapb.TabletType, stat func (sm *stateManager) handleGracePeriod(tabletType topodatapb.TabletType) { if tabletType != topodatapb.TabletType_PRIMARY { - // We allow serving of previous type only for a master transition. + // We allow serving of previous type only for a primary transition. 
sm.alsoAllow = nil return } diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go index 05d1d7a7c0..bf1a136a45 100644 --- a/go/vt/vttablet/tabletserver/state_manager_test.go +++ b/go/vt/vttablet/tabletserver/state_manager_test.go @@ -426,7 +426,7 @@ func TestStateManagerShutdownGracePeriod(t *testing.T) { assert.False(t, kconn1.killed.Get()) assert.False(t, kconn2.killed.Get()) - // Transition to master with a short shutdown grace period should kill both conns. + // Transition to primary with a short shutdown grace period should kill both conns. err = sm.SetServingType(topodatapb.TabletType_PRIMARY, testNow, StateServing, "") require.NoError(t, err) sm.shutdownGracePeriod = 10 * time.Millisecond diff --git a/go/vt/vttablet/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go index b942d426a9..089f949e34 100644 --- a/go/vt/vttablet/tabletserver/tx_engine.go +++ b/go/vt/vttablet/tabletserver/tx_engine.go @@ -415,7 +415,7 @@ outer: // shutdownTransactions rolls back all open transactions // including the prepared ones. -// This is used for transitioning from a master to a non-master +// This is used for transitioning from a primary to a non-primary // serving type. func (te *TxEngine) shutdownTransactions() { te.rollbackPrepared() diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go index e3f880aeb1..217c3ac1f1 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go @@ -330,9 +330,9 @@ func (ts *txThrottlerState) deallocateResources() { // StatsUpdate is part of the LegacyHealthCheckStatsListener interface. func (ts *txThrottlerState) StatsUpdate(tabletStats *discovery.LegacyTabletStats) { - // Ignore MASTER and RDONLY stats. + // Ignore PRIMARY and RDONLY stats. // We currently do not monitor RDONLY tablets for replication lag. RDONLY tablets are not - // candidates for becoming master during failover, and it's acceptable to serve somewhat + // candidates for becoming primary during failover, and it's acceptable to serve somewhat // stale date from these. // TODO(erez): If this becomes necessary, we can add a configuration option that would // determine whether we consider RDONLY tablets here, as well. diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index cb2e1296b9..c41f251bcf 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -377,7 +377,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e // tells us the size of the event header. if vs.format.IsZero() { // The only thing that should come before the FORMAT_DESCRIPTION_EVENT - // is a fake ROTATE_EVENT, which the master sends to tell us the name + // is a fake ROTATE_EVENT, which the primary sends to tell us the name // of the current log file. if ev.IsRotate() { return nil, nil diff --git a/go/vt/vttablet/tmclient/rpc_client_api.go b/go/vt/vttablet/tmclient/rpc_client_api.go index 26d57a5cf8..7b228bc4ea 100644 --- a/go/vt/vttablet/tmclient/rpc_client_api.go +++ b/go/vt/vttablet/tmclient/rpc_client_api.go @@ -112,10 +112,10 @@ type TabletManagerClient interface { // Replication related methods // - // Deprecated MasterStatus returns the tablet's mysql master status. + // Deprecated MasterStatus returns the tablet's mysql primary status. 
MasterStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) - // PrimaryStatus returns the tablet's mysql master status. + // PrimaryStatus returns the tablet's mysql primary status. PrimaryStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) // ReplicationStatus returns the tablet's mysql replication status. @@ -137,10 +137,10 @@ type TabletManagerClient interface { // GetReplicas returns the addresses of the replicas GetReplicas(ctx context.Context, tablet *topodatapb.Tablet) ([]string, error) - // Deprecated MasterPosition returns the tablet's master position + // Deprecated MasterPosition returns the tablet's primary position MasterPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) - // PrimaryPosition returns the tablet's master position + // PrimaryPosition returns the tablet's primary position PrimaryPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) // WaitForPosition waits for the position to be reached @@ -162,7 +162,7 @@ type TabletManagerClient interface { // replication positions are reset. ResetReplication(ctx context.Context, tablet *topodatapb.Tablet) error - // Deprecated InitMaster tells a tablet to make itself the new master, + // Deprecated InitMaster tells a tablet to make itself the new primary, // and return the replication position the replicas should use to // reparent to it. InitMaster(ctx context.Context, tablet *topodatapb.Tablet) (string, error) @@ -172,21 +172,21 @@ type TabletManagerClient interface { // reparent to it. InitPrimary(ctx context.Context, tablet *topodatapb.Tablet) (string, error) - // PopulateReparentJournal asks the master to insert a row in + // PopulateReparentJournal asks the primary to insert a row in // its reparent_journal table. PopulateReparentJournal(ctx context.Context, tablet *topodatapb.Tablet, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, pos string) error // InitReplica tells a tablet to start replicating from the - // passed in master tablet alias, and wait for the row in the + // passed in primary tablet alias, and wait for the row in the // reparent_journal table. InitReplica(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64) error - // Deprecated DemoteMaster tells the soon-to-be-former master it's going to change, + // Deprecated DemoteMaster tells the soon-to-be-former primary it's going to change, // and it should go read-only and return its current position. 
DemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) // Deprecated UndoDemoteMaster reverts all changes made by DemoteMaster - // To be used if we are unable to promote the chosen new master + // To be used if we are unable to promote the chosen new primary UndoDemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) error // DemotePrimary tells the soon-to-be-former primary it's going to change, @@ -197,11 +197,11 @@ type TabletManagerClient interface { // To be used if we are unable to promote the chosen new primary UndoDemotePrimary(ctx context.Context, tablet *topodatapb.Tablet) error - // ReplicaWasPromoted tells the remote tablet it is now the master + // ReplicaWasPromoted tells the remote tablet it is now the primary ReplicaWasPromoted(ctx context.Context, tablet *topodatapb.Tablet) error // Deprecated SetMaster tells a tablet to start replicating from the - // passed in master tablet alias, and wait for the row in the + // passed in primary tablet alias, and wait for the row in the // reparent_journal table (if timeCreatedNS is non-zero). SetMaster(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error @@ -210,14 +210,14 @@ type TabletManagerClient interface { // reparent_journal table (if timeCreatedNS is non-zero). SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error - // ReplicaWasRestarted tells the replica tablet its master has changed + // ReplicaWasRestarted tells the replica tablet its primary has changed ReplicaWasRestarted(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias) error // StopReplicationAndGetStatus stops replication and returns the // current position. StopReplicationAndGetStatus(ctx context.Context, tablet *topodatapb.Tablet, stopReplicationMode replicationdatapb.StopReplicationMode) (*replicationdatapb.Status, *replicationdatapb.StopReplicationStatus, error) - // PromoteReplica makes the tablet the new master + // PromoteReplica makes the tablet the new primary PromoteReplica(ctx context.Context, tablet *topodatapb.Tablet) (string, error) // diff --git a/go/vt/worker/executor.go b/go/vt/worker/executor.go index 6d84726de0..5be3bedc53 100644 --- a/go/vt/worker/executor.go +++ b/go/vt/worker/executor.go @@ -64,7 +64,7 @@ func newExecutor(wr *wrangler.Wrangler, tsc *discovery.LegacyTabletStatsCache, t } // fetchLoop loops over the provided insertChannel and sends the commands to the -// current master. +// current primary. func (e *executor) fetchLoop(ctx context.Context, insertChannel chan string) error { for { select { @@ -112,8 +112,8 @@ func (e *executor) refreshState(ctx context.Context) error { // If will keep retrying the ExecuteFetch (for a finite but longer duration) if // it fails due to a timeout or a retriable application error. // -// executeFetchWithRetries will always get the current MASTER tablet from the -// LegacyTabletStatsCache instance. If no MASTER is available, it will keep retrying. +// executeFetchWithRetries will always get the current PRIMARY tablet from the +// LegacyTabletStatsCache instance. If no PRIMARY is available, it will keep retrying. 
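To make the retry contract above concrete: the worker re-resolves the current PRIMARY before every attempt and keeps retrying while none is available. A minimal sketch under stated assumptions; findPrimary and action are hypothetical stand-ins for the LegacyTabletStatsCache lookup and the ExecuteFetch call:

package example

import (
	"context"
	"time"
)

// runOnPrimaryWithRetries keeps re-resolving the primary and retrying the
// action until it succeeds or retryDuration elapses, mirroring the shape of
// the behavior documented above.
func runOnPrimaryWithRetries(
	ctx context.Context,
	retryDuration, retryDelay time.Duration,
	findPrimary func() (tablet string, ok bool),
	action func(ctx context.Context, tablet string) error,
) error {
	retryCtx, cancel := context.WithTimeout(ctx, retryDuration)
	defer cancel()
	for {
		if tablet, ok := findPrimary(); ok {
			if err := action(retryCtx, tablet); err == nil {
				return nil
			}
		}
		// Either no primary is available or the attempt failed: wait, then
		// retry against whatever primary the next lookup returns.
		select {
		case <-retryCtx.Done():
			return retryCtx.Err()
		case <-time.After(retryDelay):
		}
	}
}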
func (e *executor) fetchWithRetries(ctx context.Context, action func(ctx context.Context, tablet *topodatapb.Tablet) error) error { retryDuration := *retryDuration // We should keep retrying up until the retryCtx runs out. @@ -125,7 +125,7 @@ func (e *executor) fetchWithRetries(ctx context.Context, action func(ctx context var master *discovery.LegacyTabletStats var err error - // Get the current master from the LegacyTabletStatsCache. + // Get the current primary from the LegacyTabletStatsCache. masters := e.tsc.GetHealthyTabletStats(e.keyspace, e.shard, topodatapb.TabletType_PRIMARY) if len(masters) == 0 { e.wr.Logger().Warningf("ExecuteFetch failed for keyspace/shard %v/%v because no MASTER is available; will retry until there is MASTER again", e.keyspace, e.shard) @@ -185,7 +185,7 @@ func (e *executor) fetchWithRetries(ctx context.Context, action func(ctx context } return vterrors.Wrapf(err, "interrupted while trying to run a command on tablet %v", tabletString) case <-time.After(*executeFetchRetryTime): - // Retry 30s after the failure using the current master seen by the LegacyHealthCheck. + // Retry 30s after the failure using the current primary seen by the LegacyHealthCheck. } isRetry = true } diff --git a/go/vt/worker/legacy_split_clone.go b/go/vt/worker/legacy_split_clone.go index 7e839a651a..7a35c9dee1 100644 --- a/go/vt/worker/legacy_split_clone.go +++ b/go/vt/worker/legacy_split_clone.go @@ -74,7 +74,7 @@ type LegacySplitCloneWorker struct { // populated during WorkerStateFindTargets, read-only after that sourceAliases []*topodatapb.TabletAlias sourceTablets []*topodatapb.Tablet - // healthCheck tracks the health of all MASTER and REPLICA tablets. + // healthCheck tracks the health of all PRIMARY and REPLICA tablets. // It must be closed at the end of the command. healthCheck discovery.LegacyHealthCheck tsc *discovery.LegacyTabletStatsCache @@ -395,7 +395,7 @@ func (scw *LegacySplitCloneWorker) findTargets(ctx context.Context) error { scw.destinationShardWatchers = append(scw.destinationShardWatchers, watcher) } - // Make sure we find a master for each destination shard and log it. + // Make sure we find a primary for each destination shard and log it. scw.wr.Logger().Infof("Finding a MASTER tablet for each destination shard...") for _, si := range scw.destinationShards { waitCtx, waitCancel := context.WithTimeout(ctx, 10*time.Second) @@ -438,7 +438,7 @@ func (scw *LegacySplitCloneWorker) findTargets(ctx context.Context) error { } // copy phase: -// - copy the data from source tablets to destination masters (with replication on) +// - copy the data from source tablets to destination primaries (with replication on) // Assumes that the schema has already been created on each destination tablet // (probably from vtctl's CopySchemaShard) func (scw *LegacySplitCloneWorker) copy(ctx context.Context) error { diff --git a/go/vt/worker/legacy_split_clone_test.go b/go/vt/worker/legacy_split_clone_test.go index 063667b300..304da98e16 100644 --- a/go/vt/worker/legacy_split_clone_test.go +++ b/go/vt/worker/legacy_split_clone_test.go @@ -200,7 +200,7 @@ func (tc *legacySplitCloneTestCase) setUp(v3 bool) { tc.rightMasterFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil) } - // Fake stream health responses because vtworker needs them to find the master. + // Fake stream health responses because vtworker needs them to find the primary. 
tc.leftMasterQs = fakes.NewStreamHealthQueryService(leftMaster.Target()) tc.leftMasterQs.AddDefaultHealthResponse() tc.leftReplicaQs = fakes.NewStreamHealthQueryService(leftReplica.Target()) @@ -357,7 +357,7 @@ func TestLegacySplitCloneV2_Throttled(t *testing.T) { } // TestLegacySplitCloneV2_RetryDueToReadonly is identical to the regular test -// TestLegacySplitCloneV2 with the additional twist that the destination masters +// TestLegacySplitCloneV2 with the additional twist that the destination primaries // fail the first write because they are read-only and succeed after that. func TestLegacySplitCloneV2_RetryDueToReadonly(t *testing.T) { delay := discovery.GetTabletPickerRetryDelay() @@ -392,7 +392,7 @@ func TestLegacySplitCloneV2_RetryDueToReadonly(t *testing.T) { } // TestLegacySplitCloneV2_NoMasterAvailable tests that vtworker correctly retries -// even in a period where no MASTER tablet is available according to the +// even in a period where no PRIMARY tablet is available according to the // HealthCheck instance. func TestLegacySplitCloneV2_NoMasterAvailable(t *testing.T) { delay := discovery.GetTabletPickerRetryDelay() @@ -408,7 +408,7 @@ func TestLegacySplitCloneV2_NoMasterAvailable(t *testing.T) { // leftReplica will take over for the last, 30th, insert and the vreplication checkpoint. tc.leftReplicaFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil) - // During the 29th write, let the MASTER disappear. + // During the 29th write, let the PRIMARY disappear. tc.leftMasterFakeDb.GetEntry(28).AfterFunc = func() { tc.leftMasterQs.UpdateType(topodatapb.TabletType_REPLICA) tc.leftMasterQs.AddDefaultHealthResponse() @@ -426,7 +426,7 @@ func TestLegacySplitCloneV2_NoMasterAvailable(t *testing.T) { defer tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28) // Wait for a retry due to NoMasterAvailable to happen, expect the 30th write - // on leftReplica and change leftReplica from REPLICA to MASTER. + // on leftReplica and change leftReplica from REPLICA to PRIMARY. // // Reset the retry stats now. It also happens when the worker starts but that // is too late because this Go routine potentially reads it before the worker @@ -452,7 +452,7 @@ func TestLegacySplitCloneV2_NoMasterAvailable(t *testing.T) { } } - // Make leftReplica the new MASTER. + // Make leftReplica the new PRIMARY. tc.leftReplica.TM.ChangeType(ctx, topodatapb.TabletType_PRIMARY) tc.leftReplicaQs.UpdateType(topodatapb.TabletType_PRIMARY) tc.leftReplicaQs.AddDefaultHealthResponse() diff --git a/go/vt/worker/multi_split_diff.go b/go/vt/worker/multi_split_diff.go index 8c843195f9..5c6f5c6bdb 100644 --- a/go/vt/worker/multi_split_diff.go +++ b/go/vt/worker/multi_split_diff.go @@ -340,9 +340,9 @@ func (msdw *MultiSplitDiffWorker) findTargets(ctx context.Context) error { return nil } -// ask the master of the destination shard to pause filtered replication, +// ask the primary of the destination shard to pause filtered replication, // and return the source binlog positions -// (add a cleanup task to restart filtered replication on master) +// (add a cleanup task to restart filtered replication on primary) func (msdw *MultiSplitDiffWorker) stopVreplicationOnAll(ctx context.Context, tabletInfo []*topo.TabletInfo) ([]string, error) { destVreplicationPos := make([]string, len(msdw.destinationShards)) @@ -386,7 +386,7 @@ func (msdw *MultiSplitDiffWorker) getMasterTabletInfoForShard(ctx context.Contex } // stop the source tablet at a binlog position higher than the -// destination masters. 
Return the reached position
+// destination primaries. Return the reached position
 // (add a cleanup task to restart binlog replication on the source tablet, and
 // change the existing ChangeTabletType cleanup action to 'spare' type)
 func (msdw *MultiSplitDiffWorker) stopReplicationOnSourceTabletAt(ctx context.Context, destVreplicationPos []string) (string, error) {
@@ -428,7 +428,7 @@ func (msdw *MultiSplitDiffWorker) stopReplicationOnSourceTabletAt(ctx context.Co
 	return mysqlPos, nil
 }
 
-// ask the master of the destination shard to resume filtered replication
+// ask the primary of the destination shard to resume filtered replication
 // up to the specified source position, and return the destination position.
 func (msdw *MultiSplitDiffWorker) stopVreplicationAt(ctx context.Context, shardInfo *topo.ShardInfo, sourcePosition string, masterInfo *topo.TabletInfo) (string, error) {
 	msdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", shardInfo.PrimaryAlias, sourcePosition)
@@ -455,7 +455,7 @@ func (msdw *MultiSplitDiffWorker) stopVreplicationAt(ctx context.Context, shardI
 	return masterPos, nil
 }
 
-// wait until the destination tablet is equal or passed that master
+// wait until the destination tablet has reached or passed that primary
 // binlog position, and stop its replication.
 // (add a cleanup task to restart binlog replication on it, and change
 // the existing ChangeTabletType cleanup action to 'spare' type)
@@ -490,7 +490,7 @@ func (msdw *MultiSplitDiffWorker) stopReplicationAt(ctx context.Context, destina
 	return nil
 }
 
-// restart filtered replication on the destination master.
+// restart filtered replication on the destination primary.
 // (remove the cleanup task that does the same)
 func (msdw *MultiSplitDiffWorker) startVreplication(ctx context.Context, shardInfo *topo.ShardInfo, masterInfo *topo.TabletInfo) error {
 	msdw.wr.Logger().Infof("restarting filtered replication on master %v", shardInfo.PrimaryAlias)
@@ -596,7 +596,7 @@ func (msdw *MultiSplitDiffWorker) synchronizeSrcAndDestTxState(ctx context.Conte
 		msdw.scanners[i] = i2
 	}
 
-	// 4. Make sure all replicas have caught up with the master
+	// 4. Make sure all replicas have caught up with the primary
 	for i, shardInfo := range msdw.destinationShards {
 		masterInfo := masterInfos[i]
 		destinationAlias := msdw.destinationAliases[i]
diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go
index 7b9ab54760..b59ab652a0 100644
--- a/go/vt/worker/split_clone.go
+++ b/go/vt/worker/split_clone.go
@@ -94,7 +94,7 @@ type SplitCloneWorker struct {
 	destinationShards []*topo.ShardInfo
 	keyspaceSchema    *vindexes.KeyspaceSchema
 	// healthCheck is used for the destination shards to a) find out the current
-	// MASTER tablet, b) get the list of healthy RDONLY tablets and c) track the
+	// PRIMARY tablet, b) get the list of healthy RDONLY tablets and c) track the
 	// replication lag of all REPLICA tablets.
 	// It must be closed at the end of the command.
 	healthCheck discovery.LegacyHealthCheck
@@ -441,7 +441,7 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error {
 		return err
 	}
 
-	// Phase 2: Find destination master tablets.
+	// Phase 2: Find destination primary tablets.
if err := scw.findDestinationMasters(ctx); err != nil {
 		return vterrors.Wrap(err, "findDestinationMasters() failed")
 	}
@@ -477,8 +477,8 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error {
 		scw.wr.Logger().Infof("Offline clone will be run now.")
 		if scw.online {
 			// Wait until the inserts from the online clone were propagated
-			// from the destination master to the rdonly tablets.
-			// TODO(mberlin): Remove the sleep and get the destination master position
+			// from the destination primary to the rdonly tablets.
+			// TODO(mberlin): Remove the sleep and get the destination primary position
 			// instead and wait until all selected destination tablets have reached
 			// it.
 			time.Sleep(1 * time.Second)
@@ -836,11 +836,11 @@ func (scw *SplitCloneWorker) findTransactionalSources(ctx context.Context) error
 	return nil
 }
 
-// findDestinationMasters finds for each destination shard the current master.
+// findDestinationMasters finds the current primary of each destination shard.
 func (scw *SplitCloneWorker) findDestinationMasters(ctx context.Context) error {
 	scw.setState(WorkerStateFindTargets)
 
-	// Make sure we find a master for each destination shard and log it.
+	// Make sure we find a primary for each destination shard and log it.
 	scw.wr.Logger().Infof("Finding a MASTER tablet for each destination shard...")
 	for _, si := range scw.destinationShards {
 		waitCtx, waitCancel := context.WithTimeout(ctx, *waitForHealthyTabletsTimeout)
@@ -1141,7 +1141,7 @@ func (scw *SplitCloneWorker) startCloningData(ctx context.Context, state StatusW
 }
 
 // copy phase:
-// - copy the data from source tablets to destination masters (with replication on)
+// - copy the data from source tablets to destination primaries (with replication on)
 // Assumes that the schema has already been created on each destination tablet
 // (probably from vtctl's CopySchemaShard)
 func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState) error {
@@ -1353,7 +1353,7 @@ func (scw *SplitCloneWorker) createKeyResolver(td *tabletmanagerdatapb.TableDefi
 	return newV2Resolver(scw.destinationKeyspaceInfo, td)
 }
 
-// StatsUpdate receives replication lag updates for each destination master
+// StatsUpdate receives replication lag updates for each destination primary
 // and forwards them to the respective throttler instance.
 // It also forwards any update to the LegacyTabletStatsCache to keep it up to date.
 // It is part of the discovery.LegacyHealthCheckStatsListener interface.
diff --git a/go/vt/worker/split_clone_flaky_test.go b/go/vt/worker/split_clone_flaky_test.go
index c3021e196e..561d5ba674 100644
--- a/go/vt/worker/split_clone_flaky_test.go
+++ b/go/vt/worker/split_clone_flaky_test.go
@@ -256,7 +256,7 @@ func (tc *splitCloneTestCase) setUpWithConcurrency(v3 bool, concurrency, writeQu
 		tc.rightPrimaryFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil)
 	}
 
-	// Fake stream health responses because vtworker needs them to find the master.
+	// Fake stream health responses because vtworker needs them to find the primary.
shqs := fakes.NewStreamHealthQueryService(leftPrimary.Target()) shqs.AddDefaultHealthResponse() tc.leftPrimaryQs = newTestQueryService(tc.t, leftPrimary.Target(), shqs, 0, 2, topoproto.TabletAliasString(leftPrimary.Tablet.Alias), false /* omitKeyspaceID */) @@ -859,7 +859,7 @@ func TestSplitCloneV2_Offline_Reconciliation(t *testing.T) { tc := &splitCloneTestCase{t: t} // We reduce the parallelism to 1 to test the order of expected - // insert/update/delete statements on the destination master. + // insert/update/delete statements on the destination primary. tc.setUpWithConcurrency(false /* v3 */, 1, 10, splitCloneTestRowsCount) defer tc.tearDown() @@ -961,7 +961,7 @@ func TestSplitCloneV2_Throttled(t *testing.T) { } // TestSplitCloneV2_RetryDueToReadonly is identical to the regular test -// TestSplitCloneV2 with the additional twist that the destination masters +// TestSplitCloneV2 with the additional twist that the destination primaries // fail the first write because they are read-only and succeed after that. func TestSplitCloneV2_RetryDueToReadonly(t *testing.T) { delay := discovery.GetTabletPickerRetryDelay() @@ -997,7 +997,7 @@ func TestSplitCloneV2_RetryDueToReadonly(t *testing.T) { } // TestSplitCloneV2_NoMasterAvailable tests that vtworker correctly retries -// even in a period where no MASTER tablet is available according to the +// even in a period where no PRIMARY tablet is available according to the // HealthCheck instance. func TestSplitCloneV2_NoMasterAvailable(t *testing.T) { delay := discovery.GetTabletPickerRetryDelay() @@ -1016,7 +1016,7 @@ func TestSplitCloneV2_NoMasterAvailable(t *testing.T) { // leftReplica will take over for the last, 30th, insert and the vreplication checkpoint. tc.leftReplicaFakeDb.AddExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil) - // During the 29th write, let the MASTER disappear. + // During the 29th write, let the PRIMARY disappear. tc.leftPrimaryFakeDb.GetEntry(28).AfterFunc = func() { t.Logf("setting MASTER tablet to REPLICA") tc.leftPrimaryQs.UpdateType(topodatapb.TabletType_REPLICA) @@ -1035,7 +1035,7 @@ func TestSplitCloneV2_NoMasterAvailable(t *testing.T) { defer tc.leftPrimaryFakeDb.DeleteAllEntriesAfterIndex(28) // Wait for a retry due to NoMasterAvailable to happen, expect the 30th write - // on leftReplica and change leftReplica from REPLICA to MASTER. + // on leftReplica and change leftReplica from REPLICA to PRIMARY. // // Reset the stats now. It also happens when the worker starts but that's too // late because this Go routine looks at it and can run before the worker. @@ -1059,7 +1059,7 @@ func TestSplitCloneV2_NoMasterAvailable(t *testing.T) { } } - // Make leftReplica the new MASTER. + // Make leftReplica the new PRIMARY. 
tc.leftReplica.TM.ChangeType(ctx, topodatapb.TabletType_PRIMARY)
 	t.Logf("resetting tablet back to MASTER")
 	tc.leftReplicaQs.UpdateType(topodatapb.TabletType_PRIMARY)
diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go
index 0a92cfc9da..62b5d1254b 100644
--- a/go/vt/worker/split_diff.go
+++ b/go/vt/worker/split_diff.go
@@ -279,20 +279,20 @@ func (sdw *SplitDiffWorker) findTargets(ctx context.Context) error {
 }
 
 // synchronizeReplication phase:
-// 1 - ask the master of the destination shard to pause filtered replication,
+// 1 - ask the primary of the destination shard to pause filtered replication,
 // and return the source binlog positions
-// (add a cleanup task to restart filtered replication on master)
+// (add a cleanup task to restart filtered replication on primary)
 // 2 - stop the source tablet at a binlog position higher than the
-// destination master. Get that new list of positions.
+// destination primary. Get that new list of positions.
 // (add a cleanup task to restart binlog replication on the source tablet, and
 // change the existing ChangeTabletType cleanup action to 'spare' type)
-// 3 - ask the master of the destination shard to resume filtered replication
+// 3 - ask the primary of the destination shard to resume filtered replication
 // up to the new list of positions, and return its binlog position.
-// 4 - wait until the destination tablet is equal or passed that master
+// 4 - wait until the destination tablet has reached or passed that primary
 // binlog position, and stop its replication.
 // (add a cleanup task to restart binlog replication on it, and change
 // the existing ChangeTabletType cleanup action to 'spare' type)
-// 5 - restart filtered replication on the destination master.
+// 5 - restart filtered replication on the destination primary.
 // (remove the cleanup task that does the same)
 // At this point, the source and the destination tablet are stopped at the same
 // point.
@@ -307,7 +307,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error {
 		return vterrors.Wrapf(err, "synchronizeReplication: cannot get Tablet record for master %v", sdw.shardInfo.PrimaryAlias)
 	}
 
-	// 1 - stop the master binlog replication, get its current position
+	// 1 - stop the primary binlog replication, get its current position
 	sdw.wr.Logger().Infof("Stopping master binlog replication on %v", sdw.shardInfo.PrimaryAlias)
 	shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout)
 	defer cancel()
@@ -345,7 +345,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error {
 	// to StartReplication() + ChangeTabletType(spare)
 	wrangler.RecordStartReplicationAction(sdw.cleaner, sourceTablet.Tablet)
 
-	// 3 - ask the master of the destination shard to resume filtered
+	// 3 - ask the primary of the destination shard to resume filtered
 	// replication up to the new list of positions
 	sdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", sdw.shardInfo.PrimaryAlias, mysqlPos)
 	shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout)
@@ -363,7 +363,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error {
 	}
 
 	// 4 - wait until the destination tablet is equal or passed
-	// that master binlog position, and stop its replication.
+	// that primary binlog position, and stop its replication.
sdw.wr.Logger().Infof("Waiting for destination tablet %v to catch up to %v", sdw.destinationAlias, masterPos) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) defer cancel() @@ -378,7 +378,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { } wrangler.RecordStartReplicationAction(sdw.cleaner, destinationTablet.Tablet) - // 5 - restart filtered replication on destination master + // 5 - restart filtered replication on destination primary sdw.wr.Logger().Infof("Restarting filtered replication on master %v", sdw.shardInfo.PrimaryAlias) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) defer cancel() diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index 5e2592bf98..cbfc5a7fd6 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -152,7 +152,7 @@ func TestVerticalSplitClone(t *testing.T) { sourceRdonlyQs.addGeneratedRows(verticalSplitCloneTestMin, verticalSplitCloneTestMax) grpcqueryservice.Register(sourceRdonly.RPCServer, sourceRdonlyQs) - // Set up destination master which will be used as input for the diff during the clone. + // Set up destination primary which will be used as input for the diff during the clone. destMasterShqs := fakes.NewStreamHealthQueryService(destMaster.Target()) destMasterShqs.AddDefaultHealthResponse() destMasterQs := newTestQueryService(t, destMaster.Target(), destMasterShqs, 0, 1, topoproto.TabletAliasString(destMaster.Tablet.Alias), true /* omitKeyspaceID */) diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index 7138854d16..60ac2555b2 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -241,20 +241,20 @@ func (vsdw *VerticalSplitDiffWorker) findTargets(ctx context.Context) error { } // synchronizeReplication phase: -// 1 - ask the master of the destination shard to pause filtered replication, +// 1 - ask the primary of the destination shard to pause filtered replication, // and return the source binlog positions -// (add a cleanup task to restart filtered replication on master) +// (add a cleanup task to restart filtered replication on primary) // 2 - stop the source tablet at a binlog position higher than the -// destination master. Get that new position. +// destination primary. Get that new position. // (add a cleanup task to restart binlog replication on it, and change // the existing ChangeTabletType cleanup action to 'spare' type) -// 3 - ask the master of the destination shard to resume filtered replication +// 3 - ask the primary of the destination shard to resume filtered replication // up to the new list of positions, and return its binlog position. -// 4 - wait until the destination tablet is equal or passed that master +// 4 - wait until the destination tablet is equal or passed that primary // binlog position, and stop its replication. // (add a cleanup task to restart binlog replication on it, and change // the existing ChangeTabletType cleanup action to 'spare' type) -// 5 - restart filtered replication on destination master. +// 5 - restart filtered replication on destination primary. // (remove the cleanup task that does the same) // At this point, all source and destination tablets are stopped at the same point. 
@@ -270,7 +270,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) ss := vsdw.shardInfo.SourceShards[0] - // 1 - stop the master binlog replication, get its current position + // 1 - stop the primary binlog replication, get its current position vsdw.wr.Logger().Infof("Stopping master binlog replication on %v", topoproto.TabletAliasString(vsdw.shardInfo.PrimaryAlias)) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) defer cancel() @@ -306,7 +306,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // to StartReplication() + ChangeTabletType(spare) wrangler.RecordStartReplicationAction(vsdw.cleaner, sourceTablet.Tablet) - // 3 - ask the master of the destination shard to resume filtered + // 3 - ask the primary of the destination shard to resume filtered // replication up to the new list of positions vsdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", topoproto.TabletAliasString(vsdw.shardInfo.PrimaryAlias), mysqlPos) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) @@ -324,7 +324,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) } // 4 - wait until the destination tablet is equal or passed - // that master binlog position, and stop its replication. + // that primary binlog position, and stop its replication. vsdw.wr.Logger().Infof("Waiting for destination tablet %v to catch up to %v", topoproto.TabletAliasString(vsdw.destinationAlias), masterPos) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) defer cancel() @@ -340,7 +340,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) } wrangler.RecordStartReplicationAction(vsdw.cleaner, destinationTablet.Tablet) - // 5 - restart filtered replication on destination master + // 5 - restart filtered replication on destination primary vsdw.wr.Logger().Infof("Restarting filtered replication on master %v", topoproto.TabletAliasString(vsdw.shardInfo.PrimaryAlias)) shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) defer cancel() diff --git a/go/vt/workflow/manager.go b/go/vt/workflow/manager.go index 345b51b350..77bc6a6ac5 100644 --- a/go/vt/workflow/manager.go +++ b/go/vt/workflow/manager.go @@ -87,7 +87,7 @@ type Manager struct { // set if the manager is inside the Run() method. Outside of // the Run method, ctx will be nil. It means the Manager is // shut down, either at startup or shutdown, or is not the - // elected master. + // elected primary. ctx context.Context // started is used to signal that the manager is running i.e. Run() has been // successfully called and the manager can start workflows. @@ -515,7 +515,7 @@ func (m *Manager) isRunning() bool { // getAndWatchFullTree returns the initial tree and a channel to watch // the changes. -// If this manager is not the master, and we have a redirectFunc +// If this manager is not the primary, and we have a redirectFunc // defined, the initial bytes will be set, but the channel will be nil, // and the index is undefined. // So return can have one of three combinations: diff --git a/go/vt/workflow/node.go b/go/vt/workflow/node.go index a431f8983e..27b0949f90 100644 --- a/go/vt/workflow/node.go +++ b/go/vt/workflow/node.go @@ -163,7 +163,7 @@ type Action struct { // long-polling HTTP connection to the clients. type Update struct { // Redirect is set to the URL to go to if we are not the - // master. It is only set in the initial response, and if set + // primary. 
It is only set in the initial response, and if set // then no other field in this structure is set. Redirect string `json:"redirect,omitempty"` diff --git a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index 642d7c65d1..0beaf2c155 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -391,12 +391,12 @@ func (wr *Wrangler) cancelHorizontalResharding(ctx context.Context, keyspace, sh func (wr *Wrangler) MigrateServedTypes(ctx context.Context, keyspace, shard string, cells []string, servedType topodatapb.TabletType, reverse, skipReFreshState bool, filteredReplicationWaitTime time.Duration, reverseReplication bool) (err error) { // check input parameters if servedType == topodatapb.TabletType_PRIMARY { - // we cannot migrate a master back, since when master migration + // we cannot migrate a primary back, since when primary migration // is done, the source shards are dead if reverse { return fmt.Errorf("cannot migrate master back to %v/%v", keyspace, shard) } - // we cannot skip refresh state for a master + // we cannot skip refresh state for a primary if skipReFreshState { return fmt.Errorf("cannot skip refresh state for master migration on %v/%v", keyspace, shard) } @@ -450,7 +450,7 @@ func (wr *Wrangler) MigrateServedTypes(ctx context.Context, keyspace, shard stri // refresh // TODO(b/26388813): Integrate vtctl WaitForDrain here instead of just sleeping. // Anything that's not a replica will use the RDONLY sleep time. - // Master Migrate performs its own refresh but we will refresh all non master + // Master Migrate performs its own refresh but we will refresh all non primary // tablets after each migration waitForDrainSleep := *waitForDrainSleepRdonly if servedType == topodatapb.TabletType_REPLICA { @@ -586,7 +586,7 @@ func (wr *Wrangler) waitForFilteredReplication(ctx context.Context, sourcePositi return rec.Error() } -// refreshMasters will just RPC-ping all the masters with RefreshState +// refreshMasters will just RPC-ping all the primaries with RefreshState func (wr *Wrangler) refreshMasters(ctx context.Context, shards []*topo.ShardInfo) error { wg := sync.WaitGroup{} rec := concurrency.AllErrorRecorder{} @@ -695,7 +695,7 @@ func (wr *Wrangler) masterMigrateServedType(ctx context.Context, keyspace string }() // Phase 1 - // - check topology service can successfully refresh both source and target master + // - check topology service can successfully refresh both source and target primary // - switch the source shards to read-only by disabling query service // - gather all replication points // - wait for filtered replication to catch up @@ -733,7 +733,7 @@ func (wr *Wrangler) masterMigrateServedType(ctx context.Context, keyspace string return err } - // We've reached the point of no return. Freeze the tablet control records in the source masters. + // We've reached the point of no return. Freeze the tablet control records in the source primaries. if err := wr.updateFrozenFlag(ctx, sourceShards, true); err != nil { wr.cancelMasterMigrateServedTypes(ctx, keyspace, sourceShards) return err @@ -837,7 +837,7 @@ func (wr *Wrangler) cancelMasterMigrateServedTypes(ctx context.Context, keyspace } func (wr *Wrangler) setupReverseReplication(ctx context.Context, sourceShards, destinationShards []*topo.ShardInfo) error { - // Retrieve master positions of all destinations. + // Retrieve primary positions of all destinations. 
masterPositions := make([]string, len(destinationShards))
	for i, dest := range destinationShards {
		ti, err := wr.ts.GetTablet(ctx, dest.PrimaryAlias)
@@ -867,7 +867,7 @@ func (wr *Wrangler) setupReverseReplication(ctx context.Context, sourceShards, d
 		if kr == nil {
 			kr = &topodatapb.KeyRange{}
 		}
-		// Create replications streams first using the retrieved master positions.
+		// Create replication streams first using the retrieved primary positions.
 		uids := make([]uint32, len(destinationShards))
 		for j, dest := range destinationShards {
 			bls := &binlogdatapb.BinlogSource{
@@ -921,8 +921,8 @@ func (wr *Wrangler) updateShardRecords(ctx context.Context, keyspace string, sha
 	return topotools.UpdateShardRecords(ctx, wr.ts, wr.tmc, keyspace, shards, cells, servedType, isFrom, clearSourceShards, wr.Logger())
 }

-// updateFrozenFlag sets or unsets the Frozen flag for master migration. This is performed
-// for all master tablet control records.
+// updateFrozenFlag sets or unsets the Frozen flag for primary migration. This is performed
+// for all primary tablet control records.
 func (wr *Wrangler) updateFrozenFlag(ctx context.Context, shards []*topo.ShardInfo, value bool) (err error) {
 	for i, si := range shards {
 		updatedShard, err := wr.ts.UpdateShardFields(ctx, si.Keyspace(), si.ShardName(), func(si *topo.ShardInfo) error {
@@ -1119,7 +1119,7 @@ func (wr *Wrangler) cancelVerticalResharding(ctx context.Context, keyspace, shar
 	}); err != nil {
 		return err
 	}
-	// set destination master back to serving
+	// set destination primary back to serving
 	return wr.refreshMasters(ctx, []*topo.ShardInfo{destinationShard})
 }

@@ -1201,7 +1201,7 @@ func (wr *Wrangler) migrateServedFromLocked(ctx context.Context, ki *topo.Keyspa
 	}
 	tables := destinationShard.SourceShards[0].Tables

-	// read the source shard, we'll need its master, and we'll need to
+	// read the source shard, we'll need its primary, and we'll need to
 	// update the blacklisted tables.
 	var sourceShard *topo.ShardInfo
 	sourceShard, err = wr.ts.GetShard(ctx, destinationShard.SourceShards[0].Keyspace, destinationShard.SourceShards[0].Shard)
@@ -1254,15 +1254,15 @@ func (wr *Wrangler) replicaMigrateServedFrom(ctx context.Context, ki *topo.Keysp
 	return wr.RefreshTabletsByShard(ctx, sourceShard, cells)
 }

-// masterMigrateServedFrom handles the master migration. The ordering is
+// masterMigrateServedFrom handles the primary migration. The ordering is
 // a bit different than for rdonly / replica to guarantee a smooth transition.
//
 // The order is as follows:
-// - Add BlacklistedTables on the source shard map for master
-// - Refresh the source master, so it stops writing on the tables
-// - Get the source master position, wait until destination master reaches it
+// - Add BlacklistedTables on the source shard map for primary
+// - Refresh the source primary, so it stops writing on the tables
+// - Get the source primary position, wait until destination primary reaches it
 // - Clear SourceShard on the destination Shard
-// - Refresh the destination master, so its stops its filtered
+// - Refresh the destination primary, so it stops its filtered
 // replication and starts accepting writes
 func (wr *Wrangler) masterMigrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo, sourceShard *topo.ShardInfo, destinationShard *topo.ShardInfo, tables []string, ev *events.MigrateServedFrom, filteredReplicationWaitTime time.Duration) error {
 	// Read the data we need
@@ -1285,7 +1285,7 @@ func (wr *Wrangler) masterMigrateServedFrom(ctx context.Context, ki *topo.Keyspa
 		return err
 	}

-	// Now refresh the blacklisted table list on the source master
+	// Now refresh the blacklisted table list on the source primary
 	event.DispatchUpdate(ev, "refreshing source master so it updates its blacklisted tables")
 	if err := wr.tmc.RefreshState(ctx, sourceMasterTabletInfo.Tablet); err != nil {
 		return err
 	}
@@ -1330,7 +1330,7 @@ func (wr *Wrangler) masterMigrateServedFrom(ctx context.Context, ki *topo.Keyspa
 		return err
 	}

-	// Tell the new shards masters they can now be read-write.
+	// Tell the new shards' primaries they can now be read-write.
 	// Invoking a remote action will also make the tablet stop filtered
 	// replication.
 	event.DispatchUpdate(ev, "setting destination shard masters read-write")
diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go
index c31629b664..74ad1ba356 100644
--- a/go/vt/wrangler/materializer.go
+++ b/go/vt/wrangler/materializer.go
@@ -926,8 +926,8 @@ func (mz *materializer) deploySchema(ctx context.Context) error {
 			mu.Lock()
 			if len(sourceDDLs) == 0 {
 				//only get ddls for tables, once and lazily: if we need to copy the schema from source to target
-				//we copy schemas from masters on the source keyspace
-				//and we have found use cases where user just has a replica (no master) in the source keyspace
+				//we copy schemas from primaries on the source keyspace
+				//and we have found use cases where the user just has a replica (no primary) in the source keyspace
 				sourceDDLs, err = mz.getSourceTableDDLs(ctx)
 			}
 			mu.Unlock()
diff --git a/go/vt/wrangler/permissions.go b/go/vt/wrangler/permissions.go
index e8e10fc6a4..b3527b3616 100644
--- a/go/vt/wrangler/permissions.go
+++ b/go/vt/wrangler/permissions.go
@@ -64,7 +64,7 @@ func (wr *Wrangler) ValidatePermissionsShard(ctx context.Context, keyspace, shar
 		return err
 	}

-	// get permissions from the master, or error
+	// get permissions from the primary, or error
 	if !si.HasMaster() {
 		return fmt.Errorf("no master in shard %v/%v", keyspace, shard)
 	}
@@ -75,13 +75,13 @@ func (wr *Wrangler) ValidatePermissionsShard(ctx context.Context, keyspace, shar
 	}

 	// read all the aliases in the shard, that is all tablets that are
-	// replicating from the master
+	// replicating from the primary
 	aliases, err := wr.ts.FindAllTabletAliasesInShard(ctx, keyspace, shard)
 	if err != nil {
 		return err
 	}

-	// then diff all of them, except master
+	// then diff all of them, except primary
 	er := concurrency.AllErrorRecorder{}
 	wg := sync.WaitGroup{}
 	for _, alias := range aliases {
@@ -116,7 +116,7
@@ func (wr *Wrangler) ValidatePermissionsKeyspace(ctx context.Context, keyspace st return wr.ValidatePermissionsShard(ctx, keyspace, shards[0]) } - // find the reference permissions using the first shard's master + // find the reference permissions using the first shard's primary si, err := wr.ts.GetShard(ctx, keyspace, shards[0]) if err != nil { return err @@ -125,13 +125,13 @@ func (wr *Wrangler) ValidatePermissionsKeyspace(ctx context.Context, keyspace st return fmt.Errorf("no master in shard %v/%v", keyspace, shards[0]) } referenceAlias := si.PrimaryAlias - log.Infof("Gathering permissions for reference master %v", topoproto.TabletAliasString(referenceAlias)) + log.Infof("Gathering permissions for reference primary %v", topoproto.TabletAliasString(referenceAlias)) referencePermissions, err := wr.GetPermissions(ctx, si.PrimaryAlias) if err != nil { return err } - // then diff with all tablets but master 0 + // then diff with all tablets but primary 0 er := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} for _, shard := range shards { diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 7a4ba5840d..55e959fe10 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -96,7 +96,7 @@ func (wr *Wrangler) ShardReplicationStatuses(ctx context.Context, keyspace, shar } // ReparentTablet tells a tablet to reparent this tablet to the current -// master, based on the current replication position. If there is no +// primary, based on the current replication position. If there is no // match, it will fail. func (wr *Wrangler) ReparentTablet(ctx context.Context, tabletAlias *topodatapb.TabletAlias) error { _, err := wr.vtctld.ReparentTablet(ctx, &vtctldatapb.ReparentTabletRequest{ @@ -105,7 +105,7 @@ func (wr *Wrangler) ReparentTablet(ctx context.Context, tabletAlias *topodatapb. return err } -// InitShardPrimary will make the provided tablet the master for the shard. +// InitShardPrimary will make the provided tablet the primary for the shard. func (wr *Wrangler) InitShardPrimary(ctx context.Context, keyspace, shard string, masterElectTabletAlias *topodatapb.TabletAlias, force bool, waitReplicasTimeout time.Duration) (err error) { // lock the shard ctx, unlock, lockErr := wr.ts.LockShard(ctx, keyspace, shard, fmt.Sprintf("InitShardPrimary(%v)", topoproto.TabletAliasString(masterElectTabletAlias))) @@ -132,8 +132,8 @@ func (wr *Wrangler) InitShardPrimary(ctx context.Context, keyspace, shard string return err } -// PlannedReparentShard will make the provided tablet the master for the shard, -// when both the current and new master are reachable and in good shape. +// PlannedReparentShard will make the provided tablet the primary for the shard, +// when both the current and new primary are reachable and in good shape. func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard string, masterElectTabletAlias, avoidMasterAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration) (err error) { _, err = reparentutil.NewPlannedReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( ctx, @@ -149,8 +149,8 @@ func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard st return err } -// EmergencyReparentShard will make the provided tablet the master for -// the shard, when the old master is completely unreachable. +// EmergencyReparentShard will make the provided tablet the primary for +// the shard, when the old primary is completely unreachable. 
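Editor's note: the three reparent entry points above map onto three operational situations: initializing a brand-new shard, a graceful planned handover, and disaster recovery when the primary is gone. A hedged usage sketch follows, reusing the wrangler signatures visible in this diff; the keyspace and shard names, the timeout, and the import paths are illustrative assumptions.

```go
package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/sets"

	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
	"vitess.io/vitess/go/vt/wrangler"
)

// reparent promotes newPrimary in commerce/0. wr is assumed to be an already
// constructed *wrangler.Wrangler; the call signatures follow the diff above.
func reparent(ctx context.Context, wr *wrangler.Wrangler, newPrimary *topodatapb.TabletAlias, oldPrimaryHealthy bool) error {
	const keyspace, shard = "commerce", "0"
	waitReplicasTimeout := 30 * time.Second
	if oldPrimaryHealthy {
		// Graceful handover: both the current and the new primary are reachable.
		// A nil avoid-alias means any current primary may be demoted.
		return wr.PlannedReparentShard(ctx, keyspace, shard, newPrimary, nil, waitReplicasTimeout)
	}
	// The old primary is unreachable: force-promote, ignoring no tablets.
	return wr.EmergencyReparentShard(ctx, keyspace, shard, newPrimary, waitReplicasTimeout, sets.NewString())
}

func main() {} // placeholder so the sketch compiles standalone
```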
func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, masterElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.String) (err error) { _, err = reparentutil.NewEmergencyReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( ctx, @@ -166,9 +166,9 @@ func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard return err } -// TabletExternallyReparented changes the type of new master for this shard to MASTER +// TabletExternallyReparented changes the type of new primary for this shard to PRIMARY // and updates it's tablet record in the topo. Updating the shard record is handled -// by the new master tablet +// by the new primary tablet func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newMasterAlias *topodatapb.TabletAlias) error { tabletInfo, err := wr.ts.GetTablet(ctx, newMasterAlias) @@ -185,7 +185,7 @@ func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newMasterAli return err } - // We update the tablet only if it is not currently master + // We update the tablet only if it is not currently primary if tablet.Type != topodatapb.TabletType_PRIMARY { log.Infof("TabletExternallyReparented: executing tablet type change to MASTER") diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index 7ff6bb8e66..b3f4e5304b 100644 --- a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -87,7 +87,7 @@ func (wr *Wrangler) ReloadSchemaShard(ctx context.Context, keyspace, shard, repl var wg sync.WaitGroup for _, ti := range tablets { if !includeMaster && ti.Type == topodatapb.TabletType_PRIMARY { - // We don't need to reload on the master + // We don't need to reload on the primary // because we assume ExecuteFetchAsDba() // already did that. continue @@ -149,7 +149,7 @@ func (wr *Wrangler) ValidateSchemaShard(ctx context.Context, keyspace, shard str return fmt.Errorf("GetShard(%v, %v) failed: %v", keyspace, shard, err) } - // get schema from the master, or error + // get schema from the primary, or error if !si.HasMaster() { return fmt.Errorf("no master in shard %v/%v", keyspace, shard) } @@ -167,7 +167,7 @@ func (wr *Wrangler) ValidateSchemaShard(ctx context.Context, keyspace, shard str } // read all the aliases in the shard, that is all tablets that are - // replicating from the master + // replicating from the primary aliases, err := wr.ts.FindAllTabletAliasesInShard(ctx, keyspace, shard) if err != nil { return fmt.Errorf("FindAllTabletAliasesInShard(%v, %v) failed: %v", keyspace, shard, err) @@ -326,7 +326,7 @@ func (wr *Wrangler) PreflightSchema(ctx context.Context, tabletAlias *topodatapb } // CopySchemaShardFromShard copies the schema from a source shard to the specified destination shard. -// For both source and destination it picks the master tablet. See also CopySchemaShard. +// For both source and destination it picks the primary tablet. See also CopySchemaShard. func (wr *Wrangler) CopySchemaShardFromShard(ctx context.Context, tables, excludeTables []string, includeViews bool, sourceKeyspace, sourceShard, destKeyspace, destShard string, waitReplicasTimeout time.Duration, skipVerify bool) error { sourceShardInfo, err := wr.ts.GetShard(ctx, sourceKeyspace, sourceShard) if err != nil { @@ -340,7 +340,7 @@ func (wr *Wrangler) CopySchemaShardFromShard(ctx context.Context, tables, exclud } // CopySchemaShard copies the schema from a source tablet to the -// specified shard. The schema is applied directly on the master of +// specified shard. 
The schema is applied directly on the primary of // the destination shard, and is propagated to the replicas through // binlogs. func (wr *Wrangler) CopySchemaShard(ctx context.Context, sourceTabletAlias *topodatapb.TabletAlias, tables, excludeTables []string, includeViews bool, destKeyspace, destShard string, waitReplicasTimeout time.Duration, skipVerify bool) error { @@ -418,7 +418,7 @@ func (wr *Wrangler) CopySchemaShard(ctx context.Context, sourceTabletAlias *topo // copyShardMetadata copies contents of _vt.shard_metadata table from the source // tablet to the destination tablet. It's assumed that destination tablet is a -// master and binlogging is not turned off when INSERT statements are executed. +// primary and binlogging is not turned off when INSERT statements are executed. func (wr *Wrangler) copyShardMetadata(ctx context.Context, srcTabletAlias *topodatapb.TabletAlias, destTabletAlias *topodatapb.TabletAlias) error { sql := "SELECT 1 FROM information_schema.tables WHERE table_schema = '_vt' AND table_name = 'shard_metadata'" presenceResult, err := wr.ExecuteFetchAsDba(ctx, srcTabletAlias, sql, 1, false, false) @@ -477,7 +477,7 @@ func (wr *Wrangler) compareSchemas(ctx context.Context, sourceAlias, destAlias * // applySQLShard applies a given SQL change on a given tablet alias. It allows executing arbitrary // SQL statements, but doesn't return any results, so it's only useful for SQL statements // that would be run for their effects (e.g., CREATE). -// It works by applying the SQL statement on the shard's master tablet with replication turned on. +// It works by applying the SQL statement on the shard's primary tablet with replication turned on. // Thus it should be used only for changes that can be applied on a live instance without causing issues; // it shouldn't be used for anything that will require a pivot. // The SQL statement string is expected to have {{.DatabaseName}} in place of the actual db name. @@ -488,7 +488,7 @@ func (wr *Wrangler) applySQLShard(ctx context.Context, tabletInfo *topo.TabletIn } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - // Need to make sure that we enable binlog, since we're only applying the statement on masters. + // Need to make sure that we enable binlog, since we're only applying the statement on primaries. _, err = wr.tmc.ExecuteFetchAsDba(ctx, tabletInfo.Tablet, false, []byte(filledChange), 0, false, reloadSchema) return err } diff --git a/go/vt/wrangler/shard.go b/go/vt/wrangler/shard.go index f00a37439d..9399eea58e 100644 --- a/go/vt/wrangler/shard.go +++ b/go/vt/wrangler/shard.go @@ -248,7 +248,7 @@ func (wr *Wrangler) RemoveShardCell(ctx context.Context, keyspace, shard, cell s return fmt.Errorf("cell %v in not in shard info", cell) } - // check the master alias is not in the cell + // check the primary alias is not in the cell if shardInfo.PrimaryAlias != nil && shardInfo.PrimaryAlias.Cell == cell { return fmt.Errorf("master %v is in the cell '%v' we want to remove", topoproto.TabletAliasString(shardInfo.PrimaryAlias), cell) } diff --git a/go/vt/wrangler/tablet.go b/go/vt/wrangler/tablet.go index ead6c0b3ef..5af2771c38 100644 --- a/go/vt/wrangler/tablet.go +++ b/go/vt/wrangler/tablet.go @@ -41,8 +41,8 @@ import ( // appropriate parent. If createShardAndKeyspace is true and the // parent keyspace or shard don't exist, they will be created. If // allowUpdate is true, and a tablet with the same ID exists, just update it. 
-// If a tablet is created as master, and there is already a different -// master in the shard, allowMasterOverride must be set. +// If a tablet is created as primary, and there is already a different +// primary in the shard, allowMasterOverride must be set. func (wr *Wrangler) InitTablet(ctx context.Context, tablet *topodatapb.Tablet, allowMasterOverride, createShardAndKeyspace, allowUpdate bool) error { shard, kr, err := topo.ValidateShardName(tablet.Shard) if err != nil { @@ -76,8 +76,8 @@ func (wr *Wrangler) InitTablet(ctx context.Context, tablet *topodatapb.Tablet, a } if tablet.Type == topodatapb.TabletType_PRIMARY { - // we update primary_term_start_time even if the master hasn't changed - // because that means a new master term with the same master + // we update primary_term_start_time even if the primary hasn't changed + // because that means a new primary term with the same primary tablet.PrimaryTermStartTime = logutil.TimeToProto(time.Now()) } @@ -104,8 +104,8 @@ func (wr *Wrangler) InitTablet(ctx context.Context, tablet *topodatapb.Tablet, a } // DeleteTablet removes a tablet from a shard. -// - if allowMaster is set, we can Delete a master tablet (and clear -// its record from the Shard record if it was the master). +// - if allowMaster is set, we can Delete a primary tablet (and clear +// its record from the Shard record if it was the primary). func (wr *Wrangler) DeleteTablet(ctx context.Context, tabletAlias *topodatapb.TabletAlias, allowPrimary bool) (err error) { // load the tablet, see if we'll need to rebuild ti, err := wr.ts.GetTablet(ctx, tabletAlias) @@ -122,7 +122,7 @@ func (wr *Wrangler) DeleteTablet(ctx context.Context, tabletAlias *topodatapb.Ta return fmt.Errorf("cannot delete tablet %v as it is a master, use allow_master flag", topoproto.TabletAliasString(tabletAlias)) } - // update the Shard object if the master was scrapped. + // update the Shard object if the primary was scrapped. // we do this before calling DeleteTablet so that the operation can be retried in case of failure. if wasMaster { // We lock the shard to not conflict with reparent operations. @@ -132,7 +132,7 @@ func (wr *Wrangler) DeleteTablet(ctx context.Context, tabletAlias *topodatapb.Ta } defer unlock(&err) - // update the shard record's master + // update the shard record's primary if _, err := wr.ts.UpdateShardFields(ctx, ti.Keyspace, ti.Shard, func(si *topo.ShardInfo) error { if !topoproto.TabletAliasEqual(si.PrimaryAlias, tabletAlias) { wr.Logger().Warningf("Deleting master %v from shard %v/%v but master in Shard object was %v", topoproto.TabletAliasString(tabletAlias), ti.Keyspace, ti.Shard, topoproto.TabletAliasString(si.PrimaryAlias)) @@ -157,8 +157,8 @@ func (wr *Wrangler) DeleteTablet(ctx context.Context, tabletAlias *topodatapb.Ta // ChangeTabletType changes the type of tablet and recomputes all // necessary derived paths in the serving graph, if necessary. // -// Note we don't update the master record in the Shard here, as we -// can't ChangeType from and out of master anyway. +// Note we don't update the primary record in the Shard here, as we +// can't ChangeType from and out of primary anyway. func (wr *Wrangler) ChangeTabletType(ctx context.Context, tabletAlias *topodatapb.TabletAlias, tabletType topodatapb.TabletType) error { // Load tablet to find endpoint, and keyspace and shard assignment. 
ti, err := wr.ts.GetTablet(ctx, tabletAlias) @@ -223,13 +223,13 @@ func (wr *Wrangler) GenericVExec(ctx context.Context, tabletAlias *topodatapb.Ta } // isMasterTablet is a shortcut way to determine whether the current tablet -// is a master before we allow its tablet record to be deleted. The canonical -// way to determine the only true master in a shard is to list all the tablets +// is a primary before we allow its tablet record to be deleted. The canonical +// way to determine the only true primary in a shard is to list all the tablets // and find the one with the highest PrimaryTermStartTime among the ones that -// claim to be master. +// claim to be primary. // We err on the side of caution here, i.e. we should never return false for -// a true master tablet, but it is ok to return true for a tablet that isn't -// the true master. This can occur if someone issues a DeleteTablet while +// a true primary tablet, but it is ok to return true for a tablet that isn't +// the true primary. This can occur if someone issues a DeleteTablet while // the system is in transition (a reparenting event is in progress and parts of // the topo have not yet been updated). func (wr *Wrangler) isMasterTablet(ctx context.Context, ti *topo.TabletInfo) (bool, error) { diff --git a/go/vt/wrangler/tablet_test.go b/go/vt/wrangler/tablet_test.go index bf68042c1a..bd999109a2 100644 --- a/go/vt/wrangler/tablet_test.go +++ b/go/vt/wrangler/tablet_test.go @@ -60,7 +60,7 @@ func TestInitTabletShardConversion(t *testing.T) { } } -// TestDeleteTabletBasic tests delete of non-master tablet +// TestDeleteTabletBasic tests delete of non-primary tablet func TestDeleteTabletBasic(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(cell) @@ -87,7 +87,7 @@ func TestDeleteTabletBasic(t *testing.T) { } } -// TestDeleteTabletTrueMaster tests that you can delete a true master tablet +// TestDeleteTabletTrueMaster tests that you can delete a true primary tablet // only if allowMaster is set to true func TestDeleteTabletTrueMaster(t *testing.T) { cell := "cell1" @@ -111,7 +111,7 @@ func TestDeleteTabletTrueMaster(t *testing.T) { t.Fatalf("GetTablet failed: %v", err) } - // set PrimaryAlias and PrimaryTermStartTime on shard to match chosen master tablet + // set PrimaryAlias and PrimaryTermStartTime on shard to match chosen primary tablet if _, err := ts.UpdateShardFields(context.Background(), "test", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = tablet.Alias si.PrimaryTermStartTime = tablet.PrimaryTermStartTime @@ -131,7 +131,7 @@ func TestDeleteTabletTrueMaster(t *testing.T) { } } -// TestDeleteTabletFalseMaster tests that you can delete a false master tablet +// TestDeleteTabletFalseMaster tests that you can delete a false primary tablet // with allowMaster set to false func TestDeleteTabletFalseMaster(t *testing.T) { cell := "cell1" @@ -165,7 +165,7 @@ func TestDeleteTabletFalseMaster(t *testing.T) { t.Fatalf("InitTablet failed: %v", err) } - // set PrimaryAlias and PrimaryTermStartTime on shard to match chosen master tablet + // set PrimaryAlias and PrimaryTermStartTime on shard to match chosen primary tablet if _, err := ts.UpdateShardFields(context.Background(), "test", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = tablet2.Alias si.PrimaryTermStartTime = tablet2.PrimaryTermStartTime @@ -174,7 +174,7 @@ func TestDeleteTabletFalseMaster(t *testing.T) { t.Fatalf("UpdateShardFields failed: %v", err) } - // Should be able to delete old (false) master with allowMaster = false + // Should be able to 
delete old (false) primary with allowMaster = false
 	if err := wr.DeleteTablet(context.Background(), tablet1.Alias, false); err != nil {
 		t.Fatalf("DeleteTablet failed: %v", err)
 	}
diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go
index 41227c0549..3b30a78659 100644
--- a/go/vt/wrangler/testlib/backup_test.go
+++ b/go/vt/wrangler/testlib/backup_test.go
@@ -225,7 +225,7 @@ func TestBackupRestore(t *testing.T) {
 	// restore primary from backup
 	require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */), "RestoreData failed")

-	// tablet was created as MASTER, so it's baseTabletType is MASTER
+	// tablet was created as MASTER, so its baseTabletType is PRIMARY
 	assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type)
 	assert.False(t, primary.FakeMysqlDaemon.Replicating)
 	assert.True(t, primary.FakeMysqlDaemon.Running)
diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go
index 31ae2149b8..0be5fb589d 100644
--- a/go/vt/wrangler/traffic_switcher_env_test.go
+++ b/go/vt/wrangler/traffic_switcher_env_test.go
@@ -357,7 +357,7 @@ func (tme *testMigraterEnv) startTablets(t *testing.T) {
 	for _, master := range allMasters {
 		master.StartActionLoop(t, tme.wr)
 	}
-	// Wait for the shard record masters to be set.
+	// Wait for the shard record primaries to be set.
 	for _, master := range allMasters {
 		masterFound := false
 		for i := 0; i < 10; i++ {
diff --git a/go/vt/wrangler/validator.go b/go/vt/wrangler/validator.go
index 9d5a4a0e60..608329ffe7 100644
--- a/go/vt/wrangler/validator.go
+++ b/go/vt/wrangler/validator.go
@@ -228,7 +228,7 @@ func (wr *Wrangler) validateReplication(ctx context.Context, shardInfo *topo.Sha
 		replicaIPMap[normalizeIP(replicaAddr)] = true
 	}

-	// See if every entry in the replication graph is connected to the master.
+	// See if every entry in the replication graph is connected to the primary.
 	for _, tablet := range tabletMap {
 		if !tablet.IsReplicaType() {
 			continue
diff --git a/go/vt/wrangler/vdiff_env_test.go b/go/vt/wrangler/vdiff_env_test.go
index d6f5b31e53..763b46e576 100644
--- a/go/vt/wrangler/vdiff_env_test.go
+++ b/go/vt/wrangler/vdiff_env_test.go
@@ -45,7 +45,7 @@ const (
 	// vdiffSourceGtid should be the position reported by the source side VStreamResults.
 	// It's expected to be higher the vdiffStopPosition.
 	vdiffSourceGtid = "MariaDB/5-456-893"
-	// vdiffTargetMasterPosition is the master position of the target after
+	// vdiffTargetMasterPosition is the primary position of the target after
 	// vreplication has been synchronized.
vdiffTargetMasterPosition = "MariaDB/6-456-892" ) diff --git a/go/vt/wrangler/version.go b/go/vt/wrangler/version.go index e5919e632d..9a8667a932 100644 --- a/go/vt/wrangler/version.go +++ b/go/vt/wrangler/version.go @@ -107,7 +107,7 @@ func (wr *Wrangler) ValidateVersionShard(ctx context.Context, keyspace, shard st return err } - // get version from the master, or error + // get version from the primary, or error if !si.HasMaster() { return fmt.Errorf("no master in shard %v/%v", keyspace, shard) } @@ -118,7 +118,7 @@ func (wr *Wrangler) ValidateVersionShard(ctx context.Context, keyspace, shard st } // read all the aliases in the shard, that is all tablets that are - // replicating from the master + // replicating from the primary aliases, err := wr.ts.FindAllTabletAliasesInShard(ctx, keyspace, shard) if err != nil { return err @@ -160,7 +160,7 @@ func (wr *Wrangler) ValidateVersionKeyspace(ctx context.Context, keyspace string return wr.ValidateVersionShard(ctx, keyspace, shards[0]) } - // find the reference version using the first shard's master + // find the reference version using the first shard's primary si, err := wr.ts.GetShard(ctx, keyspace, shards[0]) if err != nil { return err @@ -175,7 +175,7 @@ func (wr *Wrangler) ValidateVersionKeyspace(ctx context.Context, keyspace string return err } - // then diff with all tablets but master 0 + // then diff with all tablets but primary 0 er := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} for _, shard := range shards { diff --git a/go/vt/wrangler/vexec.go b/go/vt/wrangler/vexec.go index 2957c9b802..a89ff49797 100644 --- a/go/vt/wrangler/vexec.go +++ b/go/vt/wrangler/vexec.go @@ -147,7 +147,7 @@ func (wr *Wrangler) VExecResult(ctx context.Context, workflow, keyspace, query s return qr, nil } -// VExec executes queries on a table on all masters in the target keyspace of the workflow +// VExec executes queries on a table on all primaries in the target keyspace of the workflow func (wr *Wrangler) VExec(ctx context.Context, workflow, keyspace, query string, dryRun bool) (map[*topo.TabletInfo]*sqltypes.Result, error) { results, err := wr.runVexec(ctx, workflow, keyspace, query, dryRun) retResults := make(map[*topo.TabletInfo]*sqltypes.Result) @@ -194,7 +194,7 @@ func (vx *vexec) outputDryRunInfo(ctx context.Context) error { return vx.planner.dryRun(ctx) } -// exec runs our planned query on backend shard masters. It collects query results from all +// exec runs our planned query on backend shard primaries. It collects query results from all // shards and returns an aggregate (UNION ALL -like) result. func (vx *vexec) exec() (map[*topo.TabletInfo]*querypb.QueryResult, error) { var wg sync.WaitGroup @@ -232,7 +232,7 @@ func (vx *vexec) parseQuery() (err error) { return nil } -// getMasters identifies master tablet for all shards relevant to our keyspace +// getMasters identifies primary tablet for all shards relevant to our keyspace func (vx *vexec) getMasters() error { var err error shards, err := vx.wr.ts.GetShardNames(vx.ctx, vx.keyspace) @@ -283,7 +283,7 @@ func (wr *Wrangler) convertQueryResultToSQLTypesResult(results map[*topo.TabletI return retResults } -// WorkflowAction can start/stop/delete or list streams in _vt.vreplication on all masters in the target keyspace of the workflow. +// WorkflowAction can start/stop/delete or list streams in _vt.vreplication on all primaries in the target keyspace of the workflow. 
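Editor's note: the VExec and WorkflowAction comments above all describe the same fan-out pattern: plan one query, run it concurrently on every primary in the target keyspace, and merge the per-shard results UNION ALL-style. The sketch below shows only that pattern; `runOnPrimary` is a hypothetical callback standing in for the real per-tablet query RPC.

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// fanOutOnPrimaries runs query once per shard primary and merges the results,
// keyed by tablet alias. runOnPrimary is a hypothetical stand-in for the
// per-tablet RPC used by vexec.
func fanOutOnPrimaries(ctx context.Context, primaries []string, query string,
	runOnPrimary func(context.Context, string, string) ([]string, error)) (map[string][]string, error) {
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		results  = make(map[string][]string, len(primaries))
		firstErr error
	)
	for _, alias := range primaries {
		wg.Add(1)
		go func(alias string) {
			defer wg.Done()
			rows, err := runOnPrimary(ctx, alias, query)
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				// The real code records every error (AllErrorRecorder); we keep one.
				if firstErr == nil {
					firstErr = fmt.Errorf("%s: %w", alias, err)
				}
				return
			}
			results[alias] = rows
		}(alias)
	}
	wg.Wait()
	return results, firstErr
}

func main() {}
```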
func (wr *Wrangler) WorkflowAction(ctx context.Context, workflow, keyspace, action string, dryRun bool) (map[*topo.TabletInfo]*sqltypes.Result, error) {
 	if action == "show" {
@@ -347,7 +347,7 @@ type ReplicationStatusResult struct {
 	// MaxVReplicationLag represents the maximum vreplication lag seen across all shards.
 	MaxVReplicationLag int64

-	// Statuses is a map of <shard>/<master tablet alias> : ShardReplicationStatus (for the given shard).
+	// Statuses is a map of <shard>/<primary tablet alias> : ShardReplicationStatus (for the given shard).
 	ShardStatuses map[string]*ShardReplicationStatus
 }

@@ -359,11 +359,11 @@ type ReplicationLocation struct {

 // ShardReplicationStatus holds relevant vreplication related info for the given shard.
 type ShardReplicationStatus struct {
-	// MasterReplicationStatuses represents all of the replication statuses for the master tablets in the given shard.
+	// MasterReplicationStatuses represents all of the replication statuses for the primary tablets in the given shard.
 	MasterReplicationStatuses []*ReplicationStatus
 	// TabletControls represents the tablet controls for the tablets in the shard.
 	TabletControls []*topodatapb.Shard_TabletControl
-	// MasterIsServing indicates whether the master tablet of the given shard is currently serving write traffic.
+	// MasterIsServing indicates whether the primary tablet of the given shard is currently serving write traffic.
 	MasterIsServing bool
 }

@@ -549,7 +549,7 @@ func (wr *Wrangler) ListAllWorkflows(ctx context.Context, keyspace string, activ
 	qr := sqltypes.Proto3ToResult(result)
 	for _, row := range qr.Rows {
 		for _, value := range row {
-			// Even though we query for distinct, we must de-dup because we query per master tablet.
+			// Even though we query for distinct, we must de-dup because we query per primary tablet.
 			workflowsSet.Insert(value.ToString())
 		}
 	}
diff --git a/test/client.go b/test/client.go
index 3d97a563f0..c5ead7602a 100644
--- a/test/client.go
+++ b/test/client.go
@@ -72,7 +72,7 @@ func main() {
 		}
 	}

-	// Read it back from the master.
+	// Read it back from the primary.
 	fmt.Println("Reading from master...")
 	rows, err := db.Query("SELECT page, time_created_ns, message FROM messages")
 	if err != nil {
@@ -94,7 +94,7 @@ func main() {
 	}

 	// Read from a replica.
-	// Note that this may be behind master due to replication lag.
+	// Note that this may be behind the primary due to replication lag.
 	fmt.Println("Reading from replica...")
 	dbr, err := vitessdriver.Open(*server, "@replica")

From d842e54150059b47e53a28d453f95f98d8ceee5c Mon Sep 17 00:00:00 2001
From: Rohit Nayak
Date: Mon, 9 Aug 2021 15:47:56 +0200
Subject: [PATCH 4/5] make proto. fix incorrect deletion

Signed-off-by: Rohit Nayak

---
 docker/README.md                              | 48 ++++++++++++++-----
 .../tabletmanagerdata/tabletmanagerdata.pb.go |  4 +-
 .../tabletmanagerservice_grpc.pb.go           | 36 +++++++-------
 go/vt/proto/vtctldata/vtctldata.pb.go         |  2 +-
 go/vt/proto/vttest/vttest.pb.go               |  2 +-
 go/vt/vtgate/planbuilder/show.go              |  2 +-
 6 files changed, 58 insertions(+), 36 deletions(-)

diff --git a/docker/README.md b/docker/README.md
index f7b9ff482e..22a9e4a8a2 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,11 +1,11 @@
 # Vitess Docker Images

-The Vitess Project publishes several Docker images in the [Docker Hub "vitess" repository](https://hub.docker.com/u/vitess/).
-This file describes the purpose of the different images.
+The Vitess Project publishes several Docker images in
+the [Docker Hub "vitess" repository](https://hub.docker.com/u/vitess/). This file describes the purpose of the different
+images.
-**TL;DR:** Use the [vitess/lite](https://hub.docker.com/r/vitess/lite/) image for running Vitess.
-Our Kubernetes Tutorial uses it as well.
-Instead of using the `latest` tag, you can pin it to a known stable version e.g. `v4.0`.
+**TL;DR:** Use the [vitess/lite](https://hub.docker.com/r/vitess/lite/) image for running Vitess. Our Kubernetes
+Tutorial uses it as well. Instead of using the `latest` tag, you can pin it to a known stable version e.g. `v4.0`.

 ## Principles

@@ -13,15 +13,37 @@ The structure of this directory and our Dockerfile files is guided by the follow

 * The configuration of each Vitess image is in the directory `docker/<image_name>/`.
 * Configurations for other images e.g. our internal tool Keytar (see below), can be in a different location.
-* Images with more complex build steps have a `build.sh` script e.g. see [lite/build.sh](https://github.com/vitessio/vitess/blob/main/docker/lite/build.sh).
-* Tags are used to provide (stable) versions e.g. see tag `v2.0` for the image [vitess/lite](https://hub.docker.com/r/vitess/lite/tags).Vhttps://github.com/vitessio/vitess/blob/main/test.go) uses it to test the code against different MySQL versions. |
-| **base** | automatic (after every GitHub push to the main branch) | Contains all Vitess server binaries. Snapshot after running `make build`. |
-| **root** | automatic (after every GitHub push to the main branch) | Same as **base** but with the default user set to "root". Required for Kubernetes. |
-| **lite** | manual (updated with every Vitess release) | Stripped down version of **base** e.g. source code and build dependencies are removed. Default image in our Kubernetes templates for minimized startup time. |
+* Images with more complex build steps have a `build.sh` script e.g.
+  see [lite/build.sh](https://github.com/vitessio/vitess/blob/main/docker/lite/build.sh).
+* Tags are used to provide (stable) versions e.g. see tag `v2.0` for the
+  image [vitess/lite](https://hub.docker.com/r/vitess/lite/tags).
+* Where applicable, we provide a `latest` tag to reference the latest build of an image.
+
+## Images
+
+Our list of images can be grouped into:
+
+* published Vitess code
+* dependencies for our Kubernetes tutorial
+* internally used tools
+
+### Vitess
+
+| Image | How (When) Updated | Description |
+| --- | --- | --- |
+| **bootstrap** | manual (after incompatible changes are made to [bootstrap.sh](https://github.com/vitessio/vitess/blob/main/bootstrap.sh) or [vendor/vendor.json](https://github.com/vitessio/vitess/blob/main/vendor/vendor.json)) | Basis for all Vitess images. It is a snapshot of the checked out repository after running `./bootstrap.sh`. Used to cache dependencies. Avoids lengthy recompilation of dependencies if they did not change. Our internal test runner [`test.go`](https://github.com/vitessio/vitess/blob/main/test.go) uses it to test the code against different MySQL versions. |
+| **base** | automatic (after every GitHub push to the main branch) | Contains all Vitess server binaries. Snapshot after running `make build`. |
+| **root** | automatic (after every GitHub push to the main branch) | Same as **base** but with the default user set to "root". Required for Kubernetes. |
+| **lite** | manual (updated with every Vitess release) | Stripped down version of **base** e.g. source code and build dependencies are removed. Default image in our Kubernetes templates for minimized startup time. |

 All these Vitess images include a specific MySQL/MariaDB version ("flavor").
- * We provide Dockerfile files for multiple flavors (`Dockerfile.<flavor>`).
- * On Docker Hub we publish only images with MySQL 5.7 to minimize maintenance overhead and avoid confusion.
+* We provide Dockerfile files for multiple flavors (`Dockerfile.<flavor>`).
+* On Docker Hub we publish only images with MySQL 5.7 to minimize maintenance overhead and avoid confusion.

-If you are looking for a stable version of Vitess, use the **lite** image with a fixed version. If you are looking for the latest Vitess code in binary form, use the "latest" tag of the **base** image.
+If you are looking for a stable version of Vitess, use the **lite** image with a fixed version. If you are looking for
+the latest Vitess code in binary form, use the "latest" tag of the **base** image.
\ No newline at end of file
diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
index 255d9dd408..58186ce40f 100644
--- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
+++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
@@ -3752,11 +3752,11 @@ type DemotePrimaryResponse struct {
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields

-	// Position is deprecated, and is a string representation of a demoted primaries executed position.
+	// Position is deprecated, and is a string representation of a demoted masters executed position.
 	//
 	// Deprecated: Do not use.
 	DeprecatedPosition string `protobuf:"bytes,1,opt,name=deprecated_position,json=deprecatedPosition,proto3" json:"deprecated_position,omitempty"`
-	// PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a primary that has been demoted.
+	// PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a master that has been demoted.
 	PrimaryStatus *replicationdata.PrimaryStatus `protobuf:"bytes,2,opt,name=primary_status,json=primaryStatus,proto3" json:"primary_status,omitempty"`
 }

diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go
index 31a589ea9a..63eefd4388 100644
--- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go
+++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go
@@ -47,13 +47,13 @@ type TabletManagerClient interface {
 	ExecuteFetchAsApp(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAppRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error)
 	// ReplicationStatus returns the current replication status.
 	ReplicationStatus(ctx context.Context, in *tabletmanagerdata.ReplicationStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicationStatusResponse, error)
-	// MasterStatus returns the current primary status.
+	// MasterStatus returns the current master status.
 	MasterStatus(ctx context.Context, in *tabletmanagerdata.PrimaryStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryStatusResponse, error)
-	// PrimaryStatus returns the current primary status.
+	// PrimaryStatus returns the current master status.
PrimaryStatus(ctx context.Context, in *tabletmanagerdata.PrimaryStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryStatusResponse, error) - // MasterPosition returns the current primary position + // MasterPosition returns the current master position MasterPosition(ctx context.Context, in *tabletmanagerdata.PrimaryPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryPositionResponse, error) - // PrimaryPosition returns the current primary position + // PrimaryPosition returns the current master position PrimaryPosition(ctx context.Context, in *tabletmanagerdata.PrimaryPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryPositionResponse, error) // WaitForPosition waits for the position to be reached WaitForPosition(ctx context.Context, in *tabletmanagerdata.WaitForPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.WaitForPositionResponse, error) @@ -76,12 +76,12 @@ type TabletManagerClient interface { ResetReplication(ctx context.Context, in *tabletmanagerdata.ResetReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetReplicationResponse, error) // Deprecated, use InitPrimary instead InitMaster(ctx context.Context, in *tabletmanagerdata.InitPrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitPrimaryResponse, error) - // InitPrimary initializes the tablet as a primary + // InitPrimary initializes the tablet as a master InitPrimary(ctx context.Context, in *tabletmanagerdata.InitPrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitPrimaryResponse, error) // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal PopulateReparentJournal(ctx context.Context, in *tabletmanagerdata.PopulateReparentJournalRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PopulateReparentJournalResponse, error) - // InitReplica tells the tablet to reparent to the primary unconditionally + // InitReplica tells the tablet to reparent to the master unconditionally InitReplica(ctx context.Context, in *tabletmanagerdata.InitReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitReplicaResponse, error) // Deprecated, see DemotePrimary instead DemoteMaster(ctx context.Context, in *tabletmanagerdata.DemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DemotePrimaryResponse, error) @@ -91,18 +91,18 @@ type TabletManagerClient interface { UndoDemoteMaster(ctx context.Context, in *tabletmanagerdata.UndoDemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) // UndoDemotePrimary reverts all changes made by DemotePrimary UndoDemotePrimary(ctx context.Context, in *tabletmanagerdata.UndoDemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) - // ReplicaWasPromoted tells the remote tablet it is now the primary + // ReplicaWasPromoted tells the remote tablet it is now the master ReplicaWasPromoted(ctx context.Context, in *tabletmanagerdata.ReplicaWasPromotedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) // SetMaster tells the replica to reparent SetMaster(ctx context.Context, in *tabletmanagerdata.SetReplicationSourceRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReplicationSourceResponse, error) // SetReplicationSource tells the replica to reparent SetReplicationSource(ctx context.Context, in *tabletmanagerdata.SetReplicationSourceRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReplicationSourceResponse, error) - // 
ReplicaWasRestarted tells the remote tablet its primary has changed + // ReplicaWasRestarted tells the remote tablet its master has changed ReplicaWasRestarted(ctx context.Context, in *tabletmanagerdata.ReplicaWasRestartedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) // StopReplicationAndGetStatus stops MySQL replication, and returns the // replication status StopReplicationAndGetStatus(ctx context.Context, in *tabletmanagerdata.StopReplicationAndGetStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) - // PromoteReplica makes the replica the new primary + // PromoteReplica makes the replica the new master PromoteReplica(ctx context.Context, in *tabletmanagerdata.PromoteReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PromoteReplicaResponse, error) Backup(ctx context.Context, in *tabletmanagerdata.BackupRequest, opts ...grpc.CallOption) (TabletManager_BackupClient, error) // RestoreFromBackup deletes all local data and restores it from the latest backup. @@ -656,13 +656,13 @@ type TabletManagerServer interface { ExecuteFetchAsApp(context.Context, *tabletmanagerdata.ExecuteFetchAsAppRequest) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) // ReplicationStatus returns the current replication status. ReplicationStatus(context.Context, *tabletmanagerdata.ReplicationStatusRequest) (*tabletmanagerdata.ReplicationStatusResponse, error) - // MasterStatus returns the current primary status. + // MasterStatus returns the current master status. MasterStatus(context.Context, *tabletmanagerdata.PrimaryStatusRequest) (*tabletmanagerdata.PrimaryStatusResponse, error) - // PrimaryStatus returns the current primary status. + // PrimaryStatus returns the current master status. 
PrimaryStatus(context.Context, *tabletmanagerdata.PrimaryStatusRequest) (*tabletmanagerdata.PrimaryStatusResponse, error) - // MasterPosition returns the current primary position + // MasterPosition returns the current master position MasterPosition(context.Context, *tabletmanagerdata.PrimaryPositionRequest) (*tabletmanagerdata.PrimaryPositionResponse, error) - // PrimaryPosition returns the current primary position + // PrimaryPosition returns the current master position PrimaryPosition(context.Context, *tabletmanagerdata.PrimaryPositionRequest) (*tabletmanagerdata.PrimaryPositionResponse, error) // WaitForPosition waits for the position to be reached WaitForPosition(context.Context, *tabletmanagerdata.WaitForPositionRequest) (*tabletmanagerdata.WaitForPositionResponse, error) @@ -685,12 +685,12 @@ type TabletManagerServer interface { ResetReplication(context.Context, *tabletmanagerdata.ResetReplicationRequest) (*tabletmanagerdata.ResetReplicationResponse, error) // Deprecated, use InitPrimary instead InitMaster(context.Context, *tabletmanagerdata.InitPrimaryRequest) (*tabletmanagerdata.InitPrimaryResponse, error) - // InitPrimary initializes the tablet as a primary + // InitPrimary initializes the tablet as a master InitPrimary(context.Context, *tabletmanagerdata.InitPrimaryRequest) (*tabletmanagerdata.InitPrimaryResponse, error) // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal PopulateReparentJournal(context.Context, *tabletmanagerdata.PopulateReparentJournalRequest) (*tabletmanagerdata.PopulateReparentJournalResponse, error) - // InitReplica tells the tablet to reparent to the primary unconditionally + // InitReplica tells the tablet to reparent to the master unconditionally InitReplica(context.Context, *tabletmanagerdata.InitReplicaRequest) (*tabletmanagerdata.InitReplicaResponse, error) // Deprecated, see DemotePrimary instead DemoteMaster(context.Context, *tabletmanagerdata.DemotePrimaryRequest) (*tabletmanagerdata.DemotePrimaryResponse, error) @@ -700,18 +700,18 @@ type TabletManagerServer interface { UndoDemoteMaster(context.Context, *tabletmanagerdata.UndoDemotePrimaryRequest) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) // UndoDemotePrimary reverts all changes made by DemotePrimary UndoDemotePrimary(context.Context, *tabletmanagerdata.UndoDemotePrimaryRequest) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) - // ReplicaWasPromoted tells the remote tablet it is now the primary + // ReplicaWasPromoted tells the remote tablet it is now the master ReplicaWasPromoted(context.Context, *tabletmanagerdata.ReplicaWasPromotedRequest) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) // SetMaster tells the replica to reparent SetMaster(context.Context, *tabletmanagerdata.SetReplicationSourceRequest) (*tabletmanagerdata.SetReplicationSourceResponse, error) // SetReplicationSource tells the replica to reparent SetReplicationSource(context.Context, *tabletmanagerdata.SetReplicationSourceRequest) (*tabletmanagerdata.SetReplicationSourceResponse, error) - // ReplicaWasRestarted tells the remote tablet its primary has changed + // ReplicaWasRestarted tells the remote tablet its master has changed ReplicaWasRestarted(context.Context, *tabletmanagerdata.ReplicaWasRestartedRequest) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) // StopReplicationAndGetStatus stops MySQL replication, and returns the // replication status StopReplicationAndGetStatus(context.Context, *tabletmanagerdata.StopReplicationAndGetStatusRequest) 
(*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) - // PromoteReplica makes the replica the new primary + // PromoteReplica makes the replica the new master PromoteReplica(context.Context, *tabletmanagerdata.PromoteReplicaRequest) (*tabletmanagerdata.PromoteReplicaResponse, error) Backup(*tabletmanagerdata.BackupRequest, TabletManager_BackupServer) error // RestoreFromBackup deletes all local data and restores it from the latest backup. diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 3d1754bcbb..0a6097250b 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -1848,7 +1848,7 @@ type DeleteTabletsRequest struct { // TabletAliases is the list of tablets to delete. TabletAliases []*topodata.TabletAlias `protobuf:"bytes,1,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` - // AllowPrimary allows for the primary tablet of a shard to be deleted. + // AllowPrimary allows for the master/primary tablet of a shard to be deleted. // Use with caution. AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` } diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index fe2d20a1f9..89541a7f73 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -139,7 +139,7 @@ type Keyspace struct { ShardingColumnType string `protobuf:"bytes,4,opt,name=sharding_column_type,json=shardingColumnType,proto3" json:"sharding_column_type,omitempty"` // redirects all traffic to another keyspace. If set, shards is ignored. ServedFrom string `protobuf:"bytes,5,opt,name=served_from,json=servedFrom,proto3" json:"served_from,omitempty"` - // number of replica tablets to instantiate. This includes the primary tablet. + // number of replica tablets to instantiate. This includes the master tablet. ReplicaCount int32 `protobuf:"varint,6,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // number of rdonly tablets to instantiate. RdonlyCount int32 `protobuf:"varint,7,opt,name=rdonly_count,json=rdonlyCount,proto3" json:"rdonly_count,omitempty"` diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go index a67efd09af..00f40926da 100644 --- a/go/vt/vtgate/planbuilder/show.go +++ b/go/vt/vtgate/planbuilder/show.go @@ -196,7 +196,7 @@ func buildDBPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Prim return engine.NewRowsPrimitive(rows, buildVarCharFields("Database")), nil } -// buildShowVMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. It invokes queries on _vt.schema_migrations on all primary tablets on keyspace's shards. +// buildShowVMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. It invokes queries on _vt.schema_migrations on all PRIMARY tablets on keyspace's shards. 
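Editor's note on the `buildShowVMigrationsPlan` comment just above: because the plan targets the PRIMARY tablets, any client connected through vtgate can observe online-DDL progress with a plain query. A hedged sketch using the Go driver, mirroring the `vitessdriver.Open` usage shown earlier in test/client.go; the address, the keyspace name, and the `@primary` target string are placeholder assumptions.

```go
package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/vt/vitessdriver"
)

func main() {
	// Placeholder vtgate address; "commerce@primary" pins the session to the
	// keyspace's primary tablets, where _vt.schema_migrations lives.
	db, err := vitessdriver.Open("localhost:15991", "commerce@primary")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The plan built by buildShowVMigrationsPlan fans this statement out to
	// every shard's primary and aggregates the rows.
	rows, err := db.Query("SHOW VITESS_MIGRATIONS")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		log.Fatal(err)
	}
	count := 0
	for rows.Next() {
		count++
	}
	fmt.Printf("migration columns: %v\nmigrations found: %d\n", cols, count)
}
```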
func buildShowVMigrationsPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { dest, ks, tabletType, err := vschema.TargetDestination(show.DbName.String()) if err != nil { From 46c72acfac0a7722859d106df31f149875f6cb9e Mon Sep 17 00:00:00 2001 From: Rohit Nayak Date: Mon, 9 Aug 2021 21:06:23 +0200 Subject: [PATCH 5/5] Update proto files and generate pb and vtadmin js files Signed-off-by: Rohit Nayak --- .../tabletmanagerdata/tabletmanagerdata.pb.go | 4 +- .../tabletmanagerservice_grpc.pb.go | 36 +-- go/vt/proto/vtctldata/vtctldata.pb.go | 2 +- go/vt/proto/vttest/vttest.pb.go | 2 +- proto/tabletmanagerdata.proto | 4 +- proto/tabletmanagerservice.proto | 18 +- proto/vtctldata.proto | 2 +- proto/vttest.proto | 2 +- web/vtadmin/src/proto/vtadmin.d.ts | 46 ++-- web/vtadmin/src/proto/vtadmin.js | 237 +++++++++++++----- 10 files changed, 235 insertions(+), 118 deletions(-) diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index 58186ce40f..255d9dd408 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -3752,11 +3752,11 @@ type DemotePrimaryResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Position is deprecated, and is a string representation of a demoted masters executed position. + // Position is deprecated, and is a string representation of a demoted primaries executed position. // // Deprecated: Do not use. DeprecatedPosition string `protobuf:"bytes,1,opt,name=deprecated_position,json=deprecatedPosition,proto3" json:"deprecated_position,omitempty"` - // PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a master that has been demoted. + // PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a primary that has been demoted. PrimaryStatus *replicationdata.PrimaryStatus `protobuf:"bytes,2,opt,name=primary_status,json=primaryStatus,proto3" json:"primary_status,omitempty"` } diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go index 63eefd4388..31a589ea9a 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go @@ -47,13 +47,13 @@ type TabletManagerClient interface { ExecuteFetchAsApp(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAppRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) // ReplicationStatus returns the current replication status. ReplicationStatus(ctx context.Context, in *tabletmanagerdata.ReplicationStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicationStatusResponse, error) - // MasterStatus returns the current master status. + // MasterStatus returns the current primary status. MasterStatus(ctx context.Context, in *tabletmanagerdata.PrimaryStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryStatusResponse, error) - // PrimaryStatus returns the current master status. + // PrimaryStatus returns the current primary status. 
PrimaryStatus(ctx context.Context, in *tabletmanagerdata.PrimaryStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryStatusResponse, error) - // MasterPosition returns the current master position + // MasterPosition returns the current primary position MasterPosition(ctx context.Context, in *tabletmanagerdata.PrimaryPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryPositionResponse, error) - // PrimaryPosition returns the current master position + // PrimaryPosition returns the current primary position PrimaryPosition(ctx context.Context, in *tabletmanagerdata.PrimaryPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PrimaryPositionResponse, error) // WaitForPosition waits for the position to be reached WaitForPosition(ctx context.Context, in *tabletmanagerdata.WaitForPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.WaitForPositionResponse, error) @@ -76,12 +76,12 @@ type TabletManagerClient interface { ResetReplication(ctx context.Context, in *tabletmanagerdata.ResetReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetReplicationResponse, error) // Deprecated, use InitPrimary instead InitMaster(ctx context.Context, in *tabletmanagerdata.InitPrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitPrimaryResponse, error) - // InitPrimary initializes the tablet as a master + // InitPrimary initializes the tablet as a primary InitPrimary(ctx context.Context, in *tabletmanagerdata.InitPrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitPrimaryResponse, error) // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal PopulateReparentJournal(ctx context.Context, in *tabletmanagerdata.PopulateReparentJournalRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PopulateReparentJournalResponse, error) - // InitReplica tells the tablet to reparent to the master unconditionally + // InitReplica tells the tablet to reparent to the primary unconditionally InitReplica(ctx context.Context, in *tabletmanagerdata.InitReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitReplicaResponse, error) // Deprecated, see DemotePrimary instead DemoteMaster(ctx context.Context, in *tabletmanagerdata.DemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DemotePrimaryResponse, error) @@ -91,18 +91,18 @@ type TabletManagerClient interface { UndoDemoteMaster(ctx context.Context, in *tabletmanagerdata.UndoDemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) // UndoDemotePrimary reverts all changes made by DemotePrimary UndoDemotePrimary(ctx context.Context, in *tabletmanagerdata.UndoDemotePrimaryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) - // ReplicaWasPromoted tells the remote tablet it is now the master + // ReplicaWasPromoted tells the remote tablet it is now the primary ReplicaWasPromoted(ctx context.Context, in *tabletmanagerdata.ReplicaWasPromotedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) // SetMaster tells the replica to reparent SetMaster(ctx context.Context, in *tabletmanagerdata.SetReplicationSourceRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReplicationSourceResponse, error) // SetReplicationSource tells the replica to reparent SetReplicationSource(ctx context.Context, in *tabletmanagerdata.SetReplicationSourceRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReplicationSourceResponse, error) - // 
ReplicaWasRestarted tells the remote tablet its master has changed + // ReplicaWasRestarted tells the remote tablet its primary has changed ReplicaWasRestarted(ctx context.Context, in *tabletmanagerdata.ReplicaWasRestartedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) // StopReplicationAndGetStatus stops MySQL replication, and returns the // replication status StopReplicationAndGetStatus(ctx context.Context, in *tabletmanagerdata.StopReplicationAndGetStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) - // PromoteReplica makes the replica the new master + // PromoteReplica makes the replica the new primary PromoteReplica(ctx context.Context, in *tabletmanagerdata.PromoteReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PromoteReplicaResponse, error) Backup(ctx context.Context, in *tabletmanagerdata.BackupRequest, opts ...grpc.CallOption) (TabletManager_BackupClient, error) // RestoreFromBackup deletes all local data and restores it from the latest backup. @@ -656,13 +656,13 @@ type TabletManagerServer interface { ExecuteFetchAsApp(context.Context, *tabletmanagerdata.ExecuteFetchAsAppRequest) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) // ReplicationStatus returns the current replication status. ReplicationStatus(context.Context, *tabletmanagerdata.ReplicationStatusRequest) (*tabletmanagerdata.ReplicationStatusResponse, error) - // MasterStatus returns the current master status. + // MasterStatus returns the current primary status. MasterStatus(context.Context, *tabletmanagerdata.PrimaryStatusRequest) (*tabletmanagerdata.PrimaryStatusResponse, error) - // PrimaryStatus returns the current master status. + // PrimaryStatus returns the current primary status. 
PrimaryStatus(context.Context, *tabletmanagerdata.PrimaryStatusRequest) (*tabletmanagerdata.PrimaryStatusResponse, error) - // MasterPosition returns the current master position + // MasterPosition returns the current primary position MasterPosition(context.Context, *tabletmanagerdata.PrimaryPositionRequest) (*tabletmanagerdata.PrimaryPositionResponse, error) - // PrimaryPosition returns the current master position + // PrimaryPosition returns the current primary position PrimaryPosition(context.Context, *tabletmanagerdata.PrimaryPositionRequest) (*tabletmanagerdata.PrimaryPositionResponse, error) // WaitForPosition waits for the position to be reached WaitForPosition(context.Context, *tabletmanagerdata.WaitForPositionRequest) (*tabletmanagerdata.WaitForPositionResponse, error) @@ -685,12 +685,12 @@ type TabletManagerServer interface { ResetReplication(context.Context, *tabletmanagerdata.ResetReplicationRequest) (*tabletmanagerdata.ResetReplicationResponse, error) // Deprecated, use InitPrimary instead InitMaster(context.Context, *tabletmanagerdata.InitPrimaryRequest) (*tabletmanagerdata.InitPrimaryResponse, error) - // InitPrimary initializes the tablet as a master + // InitPrimary initializes the tablet as a primary InitPrimary(context.Context, *tabletmanagerdata.InitPrimaryRequest) (*tabletmanagerdata.InitPrimaryResponse, error) // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal PopulateReparentJournal(context.Context, *tabletmanagerdata.PopulateReparentJournalRequest) (*tabletmanagerdata.PopulateReparentJournalResponse, error) - // InitReplica tells the tablet to reparent to the master unconditionally + // InitReplica tells the tablet to reparent to the primary unconditionally InitReplica(context.Context, *tabletmanagerdata.InitReplicaRequest) (*tabletmanagerdata.InitReplicaResponse, error) // Deprecated, see DemotePrimary instead DemoteMaster(context.Context, *tabletmanagerdata.DemotePrimaryRequest) (*tabletmanagerdata.DemotePrimaryResponse, error) @@ -700,18 +700,18 @@ type TabletManagerServer interface { UndoDemoteMaster(context.Context, *tabletmanagerdata.UndoDemotePrimaryRequest) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) // UndoDemotePrimary reverts all changes made by DemotePrimary UndoDemotePrimary(context.Context, *tabletmanagerdata.UndoDemotePrimaryRequest) (*tabletmanagerdata.UndoDemotePrimaryResponse, error) - // ReplicaWasPromoted tells the remote tablet it is now the master + // ReplicaWasPromoted tells the remote tablet it is now the primary ReplicaWasPromoted(context.Context, *tabletmanagerdata.ReplicaWasPromotedRequest) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) // SetMaster tells the replica to reparent SetMaster(context.Context, *tabletmanagerdata.SetReplicationSourceRequest) (*tabletmanagerdata.SetReplicationSourceResponse, error) // SetReplicationSource tells the replica to reparent SetReplicationSource(context.Context, *tabletmanagerdata.SetReplicationSourceRequest) (*tabletmanagerdata.SetReplicationSourceResponse, error) - // ReplicaWasRestarted tells the remote tablet its master has changed + // ReplicaWasRestarted tells the remote tablet its primary has changed ReplicaWasRestarted(context.Context, *tabletmanagerdata.ReplicaWasRestartedRequest) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) // StopReplicationAndGetStatus stops MySQL replication, and returns the // replication status StopReplicationAndGetStatus(context.Context, *tabletmanagerdata.StopReplicationAndGetStatusRequest) 
(*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) - // PromoteReplica makes the replica the new master + // PromoteReplica makes the replica the new primary PromoteReplica(context.Context, *tabletmanagerdata.PromoteReplicaRequest) (*tabletmanagerdata.PromoteReplicaResponse, error) Backup(*tabletmanagerdata.BackupRequest, TabletManager_BackupServer) error // RestoreFromBackup deletes all local data and restores it from the latest backup. diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 0a6097250b..3d1754bcbb 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -1848,7 +1848,7 @@ type DeleteTabletsRequest struct { // TabletAliases is the list of tablets to delete. TabletAliases []*topodata.TabletAlias `protobuf:"bytes,1,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` - // AllowPrimary allows for the master/primary tablet of a shard to be deleted. + // AllowPrimary allows for the primary tablet of a shard to be deleted. // Use with caution. AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` } diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index 89541a7f73..fe2d20a1f9 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -139,7 +139,7 @@ type Keyspace struct { ShardingColumnType string `protobuf:"bytes,4,opt,name=sharding_column_type,json=shardingColumnType,proto3" json:"sharding_column_type,omitempty"` // redirects all traffic to another keyspace. If set, shards is ignored. ServedFrom string `protobuf:"bytes,5,opt,name=served_from,json=servedFrom,proto3" json:"served_from,omitempty"` - // number of replica tablets to instantiate. This includes the master tablet. + // number of replica tablets to instantiate. This includes the primary tablet. ReplicaCount int32 `protobuf:"varint,6,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // number of rdonly tablets to instantiate. RdonlyCount int32 `protobuf:"varint,7,opt,name=rdonly_count,json=rdonlyCount,proto3" json:"rdonly_count,omitempty"` diff --git a/proto/tabletmanagerdata.proto b/proto/tabletmanagerdata.proto index e9d89ed344..f9c11a3db4 100644 --- a/proto/tabletmanagerdata.proto +++ b/proto/tabletmanagerdata.proto @@ -387,10 +387,10 @@ message DemotePrimaryRequest { } message DemotePrimaryResponse { - // Position is deprecated, and is a string representation of a demoted masters executed position. + // Position is deprecated, and is a string representation of a demoted primary's executed position. string deprecated_position = 1 [deprecated=true]; - // PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a master that has been demoted. + // PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a primary that has been demoted. replicationdata.PrimaryStatus primary_status = 2; } diff --git a/proto/tabletmanagerservice.proto b/proto/tabletmanagerservice.proto index 979cfbb1c5..2751d250fb 100644 --- a/proto/tabletmanagerservice.proto +++ b/proto/tabletmanagerservice.proto @@ -87,16 +87,16 @@ service TabletManager { // ReplicationStatus returns the current replication status. rpc ReplicationStatus(tabletmanagerdata.ReplicationStatusRequest) returns (tabletmanagerdata.ReplicationStatusResponse) {}; - // MasterStatus returns the current master status. + // MasterStatus returns the current primary status.
rpc MasterStatus(tabletmanagerdata.PrimaryStatusRequest) returns (tabletmanagerdata.PrimaryStatusResponse) {}; - // PrimaryStatus returns the current master status. + // PrimaryStatus returns the current primary status. rpc PrimaryStatus(tabletmanagerdata.PrimaryStatusRequest) returns (tabletmanagerdata.PrimaryStatusResponse) {}; - // MasterPosition returns the current master position + // MasterPosition returns the current primary position rpc MasterPosition(tabletmanagerdata.PrimaryPositionRequest) returns (tabletmanagerdata.PrimaryPositionResponse) {}; - // PrimaryPosition returns the current master position + // PrimaryPosition returns the current primary position rpc PrimaryPosition(tabletmanagerdata.PrimaryPositionRequest) returns (tabletmanagerdata.PrimaryPositionResponse) {}; // WaitForPosition waits for the position to be reached @@ -133,14 +133,14 @@ service TabletManager { // Deprecated, use InitPrimary instead rpc InitMaster(tabletmanagerdata.InitPrimaryRequest) returns (tabletmanagerdata.InitPrimaryResponse) {}; - // InitPrimary initializes the tablet as a master + // InitPrimary initializes the tablet as a primary rpc InitPrimary(tabletmanagerdata.InitPrimaryRequest) returns (tabletmanagerdata.InitPrimaryResponse) {}; // PopulateReparentJournal tells the tablet to add an entry to its // reparent journal rpc PopulateReparentJournal(tabletmanagerdata.PopulateReparentJournalRequest) returns (tabletmanagerdata.PopulateReparentJournalResponse) {}; - // InitReplica tells the tablet to reparent to the master unconditionally + // InitReplica tells the tablet to reparent to the primary unconditionally rpc InitReplica(tabletmanagerdata.InitReplicaRequest) returns (tabletmanagerdata.InitReplicaResponse) {}; // Deprecated, see DemotePrimary instead @@ -155,7 +155,7 @@ service TabletManager { // UndoDemotePrimary reverts all changes made by DemotePrimary rpc UndoDemotePrimary(tabletmanagerdata.UndoDemotePrimaryRequest) returns (tabletmanagerdata.UndoDemotePrimaryResponse) {}; - // ReplicaWasPromoted tells the remote tablet it is now the master + // ReplicaWasPromoted tells the remote tablet it is now the primary rpc ReplicaWasPromoted(tabletmanagerdata.ReplicaWasPromotedRequest) returns (tabletmanagerdata.ReplicaWasPromotedResponse) {}; // SetMaster tells the replica to reparent @@ -164,14 +164,14 @@ service TabletManager { // SetReplicationSource tells the replica to reparent rpc SetReplicationSource(tabletmanagerdata.SetReplicationSourceRequest) returns (tabletmanagerdata.SetReplicationSourceResponse) {}; - // ReplicaWasRestarted tells the remote tablet its master has changed + // ReplicaWasRestarted tells the remote tablet its primary has changed rpc ReplicaWasRestarted(tabletmanagerdata.ReplicaWasRestartedRequest) returns (tabletmanagerdata.ReplicaWasRestartedResponse) {}; // StopReplicationAndGetStatus stops MySQL replication, and returns the // replication status rpc StopReplicationAndGetStatus(tabletmanagerdata.StopReplicationAndGetStatusRequest) returns (tabletmanagerdata.StopReplicationAndGetStatusResponse) {}; - // PromoteReplica makes the replica the new master + // PromoteReplica makes the replica the new primary rpc PromoteReplica(tabletmanagerdata.PromoteReplicaRequest) returns (tabletmanagerdata.PromoteReplicaResponse) {}; // diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto index 1aa63681f6..d0159caa49 100644 --- a/proto/vtctldata.proto +++ b/proto/vtctldata.proto @@ -322,7 +322,7 @@ message DeleteShardsResponse { message DeleteTabletsRequest { // TabletAliases is 
the list of tablets to delete. repeated topodata.TabletAlias tablet_aliases = 1; - // AllowPrimary allows for the master/primary tablet of a shard to be deleted. + // AllowPrimary allows for the primary tablet of a shard to be deleted. // Use with caution. bool allow_primary = 2; } diff --git a/proto/vttest.proto b/proto/vttest.proto index 2e76c7f764..987463fa24 100644 --- a/proto/vttest.proto +++ b/proto/vttest.proto @@ -75,7 +75,7 @@ message Keyspace { // redirects all traffic to another keyspace. If set, shards is ignored. string served_from = 5; - // number of replica tablets to instantiate. This includes the master tablet. + // number of replica tablets to instantiate. This includes the primary tablet. int32 replica_count = 6; // number of rdonly tablets to instantiate. diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index 512e2458da..90518c3c8b 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -5721,6 +5721,7 @@ export namespace topodata { /** TabletType enum. */ enum TabletType { UNKNOWN = 0, + PRIMARY = 1, MASTER = 1, REPLICA = 2, RDONLY = 3, @@ -5768,8 +5769,8 @@ export namespace topodata { /** Tablet mysql_port */ mysql_port?: (number|null); - /** Tablet master_term_start_time */ - master_term_start_time?: (vttime.ITime|null); + /** Tablet primary_term_start_time */ + primary_term_start_time?: (vttime.ITime|null); } /** Represents a Tablet. */ @@ -5814,8 +5815,8 @@ export namespace topodata { /** Tablet mysql_port. */ public mysql_port: number; - /** Tablet master_term_start_time. */ - public master_term_start_time?: (vttime.ITime|null); + /** Tablet primary_term_start_time. */ + public primary_term_start_time?: (vttime.ITime|null); /** * Creates a new Tablet instance using the specified properties. @@ -5891,11 +5892,11 @@ export namespace topodata { /** Properties of a Shard. */ interface IShard { - /** Shard master_alias */ - master_alias?: (topodata.ITabletAlias|null); + /** Shard primary_alias */ + primary_alias?: (topodata.ITabletAlias|null); - /** Shard master_term_start_time */ - master_term_start_time?: (vttime.ITime|null); + /** Shard primary_term_start_time */ + primary_term_start_time?: (vttime.ITime|null); /** Shard key_range */ key_range?: (topodata.IKeyRange|null); @@ -5906,8 +5907,8 @@ export namespace topodata { /** Shard tablet_controls */ tablet_controls?: (topodata.Shard.ITabletControl[]|null); - /** Shard is_master_serving */ - is_master_serving?: (boolean|null); + /** Shard is_primary_serving */ + is_primary_serving?: (boolean|null); } /** Represents a Shard. */ @@ -5919,11 +5920,11 @@ export namespace topodata { */ constructor(properties?: topodata.IShard); - /** Shard master_alias. */ - public master_alias?: (topodata.ITabletAlias|null); + /** Shard primary_alias. */ + public primary_alias?: (topodata.ITabletAlias|null); - /** Shard master_term_start_time. */ - public master_term_start_time?: (vttime.ITime|null); + /** Shard primary_term_start_time. */ + public primary_term_start_time?: (vttime.ITime|null); /** Shard key_range. */ public key_range?: (topodata.IKeyRange|null); @@ -5934,8 +5935,8 @@ export namespace topodata { /** Shard tablet_controls. */ public tablet_controls: topodata.Shard.ITabletControl[]; - /** Shard is_master_serving. */ - public is_master_serving: boolean; + /** Shard is_primary_serving. */ + public is_primary_serving: boolean; /** * Creates a new Shard instance using the specified properties. 
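The `PRIMARY = 1, MASTER = 1` pair in the generated TabletType enum above relies on protobuf enum aliasing (`option allow_alias = true;` in topodata.proto): the old and new names share wire value 1, so nothing stored in the topology has to be rewritten during the rename. A minimal Go sketch of the effect, assuming the regenerated topodata package exposes both constants with PRIMARY declared first:

package main

import (
    "fmt"

    topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)

func main() {
    // Both names are aliases for wire value 1, so tablet records written
    // before the rename still compare equal to the new constant.
    fmt.Println(topodatapb.TabletType_PRIMARY == topodatapb.TabletType_MASTER) // true

    // For aliased values the generated code stringifies to the
    // first-declared name, so value 1 prints as PRIMARY.
    fmt.Println(topodatapb.TabletType(1)) // PRIMARY
}

Parsing keeps accepting both spellings, which is what the duplicated `case "MASTER": case 1:` branches in the regenerated JavaScript below implement for vtadmin.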
@@ -24865,6 +24866,13 @@ export namespace vtctldata { public toJSON(): { [k: string]: any }; } + /** MaterializationIntent enum. */ + enum MaterializationIntent { + CUSTOM = 0, + MOVETABLES = 1, + CREATELOOKUPINDEX = 2 + } + /** Properties of a TableMaterializeSettings. */ interface ITableMaterializeSettings { @@ -24993,6 +25001,9 @@ export namespace vtctldata { /** MaterializeSettings external_cluster */ external_cluster?: (string|null); + + /** MaterializeSettings materialization_intent */ + materialization_intent?: (vtctldata.MaterializationIntent|null); } /** Represents a MaterializeSettings. */ @@ -25028,6 +25039,9 @@ export namespace vtctldata { /** MaterializeSettings external_cluster. */ public external_cluster: string; + /** MaterializeSettings materialization_intent. */ + public materialization_intent: vtctldata.MaterializationIntent; + /** * Creates a new MaterializeSettings instance using the specified properties. * @param [properties] Properties to set diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index f8002301a9..c17b93d4d4 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -12990,6 +12990,7 @@ $root.topodata = (function() { * @name topodata.TabletType * @enum {number} * @property {number} UNKNOWN=0 UNKNOWN value + * @property {number} PRIMARY=1 PRIMARY value * @property {number} MASTER=1 MASTER value * @property {number} REPLICA=2 REPLICA value * @property {number} RDONLY=3 RDONLY value @@ -13003,7 +13004,8 @@ $root.topodata = (function() { topodata.TabletType = (function() { var valuesById = {}, values = Object.create(valuesById); values[valuesById[0] = "UNKNOWN"] = 0; - values[valuesById[1] = "MASTER"] = 1; + values[valuesById[1] = "PRIMARY"] = 1; + values["MASTER"] = 1; values[valuesById[2] = "REPLICA"] = 2; values[valuesById[3] = "RDONLY"] = 3; values["BATCH"] = 3; @@ -13032,7 +13034,7 @@ $root.topodata = (function() { * @property {Object.|null} [tags] Tablet tags * @property {string|null} [mysql_hostname] Tablet mysql_hostname * @property {number|null} [mysql_port] Tablet mysql_port - * @property {vttime.ITime|null} [master_term_start_time] Tablet master_term_start_time + * @property {vttime.ITime|null} [primary_term_start_time] Tablet primary_term_start_time */ /** @@ -13141,12 +13143,12 @@ $root.topodata = (function() { Tablet.prototype.mysql_port = 0; /** - * Tablet master_term_start_time. - * @member {vttime.ITime|null|undefined} master_term_start_time + * Tablet primary_term_start_time. + * @member {vttime.ITime|null|undefined} primary_term_start_time * @memberof topodata.Tablet * @instance */ - Tablet.prototype.master_term_start_time = null; + Tablet.prototype.primary_term_start_time = null; /** * Creates a new Tablet instance using the specified properties. 
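The master_term_start_time to primary_term_start_time rename above keeps the field number (14, as the encode/decode hunks below show), and protobuf identifies fields by number rather than name, so the binary wire format is untouched; only generated identifiers and JSON keys change, which is why fromObject/toObject are regenerated. A short round-trip sketch, assuming the regenerated Go topodata and vttime packages:

package main

import (
    "fmt"

    "google.golang.org/protobuf/proto"
    topodatapb "vitess.io/vitess/go/vt/proto/topodata"
    vttimepb "vitess.io/vitess/go/vt/proto/vttime"
)

func main() {
    // Marshal writes the term-start time under field number 14, exactly
    // as an older binary did for master_term_start_time, so bytes from
    // either version decode into the renamed field.
    in := &topodatapb.Tablet{
        PrimaryTermStartTime: &vttimepb.Time{Seconds: 1628539200},
    }
    b, err := proto.Marshal(in)
    if err != nil {
        panic(err)
    }

    out := &topodatapb.Tablet{}
    if err := proto.Unmarshal(b, out); err != nil {
        panic(err)
    }
    fmt.Println(out.GetPrimaryTermStartTime().GetSeconds()) // 1628539200
}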
@@ -13196,8 +13198,8 @@ $root.topodata = (function() { writer.uint32(/* id 12, wireType 2 =*/98).string(message.mysql_hostname); if (message.mysql_port != null && Object.hasOwnProperty.call(message, "mysql_port")) writer.uint32(/* id 13, wireType 0 =*/104).int32(message.mysql_port); - if (message.master_term_start_time != null && Object.hasOwnProperty.call(message, "master_term_start_time")) - $root.vttime.Time.encode(message.master_term_start_time, writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); + if (message.primary_term_start_time != null && Object.hasOwnProperty.call(message, "primary_term_start_time")) + $root.vttime.Time.encode(message.primary_term_start_time, writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); return writer; }; @@ -13304,7 +13306,7 @@ $root.topodata = (function() { message.mysql_port = reader.int32(); break; case 14: - message.master_term_start_time = $root.vttime.Time.decode(reader, reader.uint32()); + message.primary_term_start_time = $root.vttime.Time.decode(reader, reader.uint32()); break; default: reader.skipType(tag & 7); @@ -13374,6 +13376,7 @@ $root.topodata = (function() { return "type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -13401,10 +13404,10 @@ $root.topodata = (function() { if (message.mysql_port != null && message.hasOwnProperty("mysql_port")) if (!$util.isInteger(message.mysql_port)) return "mysql_port: integer expected"; - if (message.master_term_start_time != null && message.hasOwnProperty("master_term_start_time")) { - var error = $root.vttime.Time.verify(message.master_term_start_time); + if (message.primary_term_start_time != null && message.hasOwnProperty("primary_term_start_time")) { + var error = $root.vttime.Time.verify(message.primary_term_start_time); if (error) - return "master_term_start_time." + error; + return "primary_term_start_time." 
+ error; } return null; }; @@ -13449,6 +13452,10 @@ $root.topodata = (function() { case 0: message.type = 0; break; + case "PRIMARY": + case 1: + message.type = 1; + break; case "MASTER": case 1: message.type = 1; @@ -13499,10 +13506,10 @@ $root.topodata = (function() { message.mysql_hostname = String(object.mysql_hostname); if (object.mysql_port != null) message.mysql_port = object.mysql_port | 0; - if (object.master_term_start_time != null) { - if (typeof object.master_term_start_time !== "object") - throw TypeError(".topodata.Tablet.master_term_start_time: object expected"); - message.master_term_start_time = $root.vttime.Time.fromObject(object.master_term_start_time); + if (object.primary_term_start_time != null) { + if (typeof object.primary_term_start_time !== "object") + throw TypeError(".topodata.Tablet.primary_term_start_time: object expected"); + message.primary_term_start_time = $root.vttime.Time.fromObject(object.primary_term_start_time); } return message; }; @@ -13534,7 +13541,7 @@ $root.topodata = (function() { object.db_name_override = ""; object.mysql_hostname = ""; object.mysql_port = 0; - object.master_term_start_time = null; + object.primary_term_start_time = null; } if (message.alias != null && message.hasOwnProperty("alias")) object.alias = $root.topodata.TabletAlias.toObject(message.alias, options); @@ -13565,8 +13572,8 @@ $root.topodata = (function() { object.mysql_hostname = message.mysql_hostname; if (message.mysql_port != null && message.hasOwnProperty("mysql_port")) object.mysql_port = message.mysql_port; - if (message.master_term_start_time != null && message.hasOwnProperty("master_term_start_time")) - object.master_term_start_time = $root.vttime.Time.toObject(message.master_term_start_time, options); + if (message.primary_term_start_time != null && message.hasOwnProperty("primary_term_start_time")) + object.primary_term_start_time = $root.vttime.Time.toObject(message.primary_term_start_time, options); return object; }; @@ -13590,12 +13597,12 @@ $root.topodata = (function() { * Properties of a Shard. * @memberof topodata * @interface IShard - * @property {topodata.ITabletAlias|null} [master_alias] Shard master_alias - * @property {vttime.ITime|null} [master_term_start_time] Shard master_term_start_time + * @property {topodata.ITabletAlias|null} [primary_alias] Shard primary_alias + * @property {vttime.ITime|null} [primary_term_start_time] Shard primary_term_start_time * @property {topodata.IKeyRange|null} [key_range] Shard key_range * @property {Array.|null} [source_shards] Shard source_shards * @property {Array.|null} [tablet_controls] Shard tablet_controls - * @property {boolean|null} [is_master_serving] Shard is_master_serving + * @property {boolean|null} [is_primary_serving] Shard is_primary_serving */ /** @@ -13616,20 +13623,20 @@ $root.topodata = (function() { } /** - * Shard master_alias. - * @member {topodata.ITabletAlias|null|undefined} master_alias + * Shard primary_alias. + * @member {topodata.ITabletAlias|null|undefined} primary_alias * @memberof topodata.Shard * @instance */ - Shard.prototype.master_alias = null; + Shard.prototype.primary_alias = null; /** - * Shard master_term_start_time. - * @member {vttime.ITime|null|undefined} master_term_start_time + * Shard primary_term_start_time. + * @member {vttime.ITime|null|undefined} primary_term_start_time * @memberof topodata.Shard * @instance */ - Shard.prototype.master_term_start_time = null; + Shard.prototype.primary_term_start_time = null; /** * Shard key_range. 
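Shard gets the same treatment: primary_alias, primary_term_start_time, and is_primary_serving keep field ids 1, 8, and 7 under their new names, as the following hunks show. A sketch of a consumer written against the renamed getters the regenerated Go package would expose (describeShard is a hypothetical helper, not part of Vitess):

package main

import (
    "fmt"

    topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)

// describeShard is a hypothetical helper using the renamed Shard fields.
func describeShard(s *topodatapb.Shard) string {
    if !s.GetIsPrimaryServing() || s.GetPrimaryAlias() == nil {
        return "shard has no serving primary"
    }
    a := s.GetPrimaryAlias()
    return fmt.Sprintf("primary %s-%d, term started at %d",
        a.GetCell(), a.GetUid(), s.GetPrimaryTermStartTime().GetSeconds())
}

func main() {
    fmt.Println(describeShard(&topodatapb.Shard{})) // shard has no serving primary
}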
@@ -13656,12 +13663,12 @@ $root.topodata = (function() { Shard.prototype.tablet_controls = $util.emptyArray; /** - * Shard is_master_serving. - * @member {boolean} is_master_serving + * Shard is_primary_serving. + * @member {boolean} is_primary_serving * @memberof topodata.Shard * @instance */ - Shard.prototype.is_master_serving = false; + Shard.prototype.is_primary_serving = false; /** * Creates a new Shard instance using the specified properties. @@ -13687,8 +13694,8 @@ $root.topodata = (function() { Shard.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.master_alias != null && Object.hasOwnProperty.call(message, "master_alias")) - $root.topodata.TabletAlias.encode(message.master_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.primary_alias != null && Object.hasOwnProperty.call(message, "primary_alias")) + $root.topodata.TabletAlias.encode(message.primary_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.key_range != null && Object.hasOwnProperty.call(message, "key_range")) $root.topodata.KeyRange.encode(message.key_range, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.source_shards != null && message.source_shards.length) @@ -13697,10 +13704,10 @@ $root.topodata = (function() { if (message.tablet_controls != null && message.tablet_controls.length) for (var i = 0; i < message.tablet_controls.length; ++i) $root.topodata.Shard.TabletControl.encode(message.tablet_controls[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.is_master_serving != null && Object.hasOwnProperty.call(message, "is_master_serving")) - writer.uint32(/* id 7, wireType 0 =*/56).bool(message.is_master_serving); - if (message.master_term_start_time != null && Object.hasOwnProperty.call(message, "master_term_start_time")) - $root.vttime.Time.encode(message.master_term_start_time, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); + if (message.is_primary_serving != null && Object.hasOwnProperty.call(message, "is_primary_serving")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.is_primary_serving); + if (message.primary_term_start_time != null && Object.hasOwnProperty.call(message, "primary_term_start_time")) + $root.vttime.Time.encode(message.primary_term_start_time, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); return writer; }; @@ -13736,10 +13743,10 @@ $root.topodata = (function() { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.master_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.primary_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; case 8: - message.master_term_start_time = $root.vttime.Time.decode(reader, reader.uint32()); + message.primary_term_start_time = $root.vttime.Time.decode(reader, reader.uint32()); break; case 2: message.key_range = $root.topodata.KeyRange.decode(reader, reader.uint32()); @@ -13755,7 +13762,7 @@ $root.topodata = (function() { message.tablet_controls.push($root.topodata.Shard.TabletControl.decode(reader, reader.uint32())); break; case 7: - message.is_master_serving = reader.bool(); + message.is_primary_serving = reader.bool(); break; default: reader.skipType(tag & 7); @@ -13792,15 +13799,15 @@ $root.topodata = (function() { Shard.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.master_alias != null && message.hasOwnProperty("master_alias")) { - var 
error = $root.topodata.TabletAlias.verify(message.master_alias); + if (message.primary_alias != null && message.hasOwnProperty("primary_alias")) { + var error = $root.topodata.TabletAlias.verify(message.primary_alias); if (error) - return "master_alias." + error; + return "primary_alias." + error; } - if (message.master_term_start_time != null && message.hasOwnProperty("master_term_start_time")) { - var error = $root.vttime.Time.verify(message.master_term_start_time); + if (message.primary_term_start_time != null && message.hasOwnProperty("primary_term_start_time")) { + var error = $root.vttime.Time.verify(message.primary_term_start_time); if (error) - return "master_term_start_time." + error; + return "primary_term_start_time." + error; } if (message.key_range != null && message.hasOwnProperty("key_range")) { var error = $root.topodata.KeyRange.verify(message.key_range); @@ -13825,9 +13832,9 @@ $root.topodata = (function() { return "tablet_controls." + error; } } - if (message.is_master_serving != null && message.hasOwnProperty("is_master_serving")) - if (typeof message.is_master_serving !== "boolean") - return "is_master_serving: boolean expected"; + if (message.is_primary_serving != null && message.hasOwnProperty("is_primary_serving")) + if (typeof message.is_primary_serving !== "boolean") + return "is_primary_serving: boolean expected"; return null; }; @@ -13843,15 +13850,15 @@ $root.topodata = (function() { if (object instanceof $root.topodata.Shard) return object; var message = new $root.topodata.Shard(); - if (object.master_alias != null) { - if (typeof object.master_alias !== "object") - throw TypeError(".topodata.Shard.master_alias: object expected"); - message.master_alias = $root.topodata.TabletAlias.fromObject(object.master_alias); + if (object.primary_alias != null) { + if (typeof object.primary_alias !== "object") + throw TypeError(".topodata.Shard.primary_alias: object expected"); + message.primary_alias = $root.topodata.TabletAlias.fromObject(object.primary_alias); } - if (object.master_term_start_time != null) { - if (typeof object.master_term_start_time !== "object") - throw TypeError(".topodata.Shard.master_term_start_time: object expected"); - message.master_term_start_time = $root.vttime.Time.fromObject(object.master_term_start_time); + if (object.primary_term_start_time != null) { + if (typeof object.primary_term_start_time !== "object") + throw TypeError(".topodata.Shard.primary_term_start_time: object expected"); + message.primary_term_start_time = $root.vttime.Time.fromObject(object.primary_term_start_time); } if (object.key_range != null) { if (typeof object.key_range !== "object") @@ -13878,8 +13885,8 @@ $root.topodata = (function() { message.tablet_controls[i] = $root.topodata.Shard.TabletControl.fromObject(object.tablet_controls[i]); } } - if (object.is_master_serving != null) - message.is_master_serving = Boolean(object.is_master_serving); + if (object.is_primary_serving != null) + message.is_primary_serving = Boolean(object.is_primary_serving); return message; }; @@ -13901,13 +13908,13 @@ $root.topodata = (function() { object.tablet_controls = []; } if (options.defaults) { - object.master_alias = null; + object.primary_alias = null; object.key_range = null; - object.is_master_serving = false; - object.master_term_start_time = null; + object.is_primary_serving = false; + object.primary_term_start_time = null; } - if (message.master_alias != null && message.hasOwnProperty("master_alias")) - object.master_alias = 
$root.topodata.TabletAlias.toObject(message.master_alias, options); + if (message.primary_alias != null && message.hasOwnProperty("primary_alias")) + object.primary_alias = $root.topodata.TabletAlias.toObject(message.primary_alias, options); if (message.key_range != null && message.hasOwnProperty("key_range")) object.key_range = $root.topodata.KeyRange.toObject(message.key_range, options); if (message.source_shards && message.source_shards.length) { @@ -13920,10 +13927,10 @@ $root.topodata = (function() { for (var j = 0; j < message.tablet_controls.length; ++j) object.tablet_controls[j] = $root.topodata.Shard.TabletControl.toObject(message.tablet_controls[j], options); } - if (message.is_master_serving != null && message.hasOwnProperty("is_master_serving")) - object.is_master_serving = message.is_master_serving; - if (message.master_term_start_time != null && message.hasOwnProperty("master_term_start_time")) - object.master_term_start_time = $root.vttime.Time.toObject(message.master_term_start_time, options); + if (message.is_primary_serving != null && message.hasOwnProperty("is_primary_serving")) + object.is_primary_serving = message.is_primary_serving; + if (message.primary_term_start_time != null && message.hasOwnProperty("primary_term_start_time")) + object.primary_term_start_time = $root.vttime.Time.toObject(message.primary_term_start_time, options); return object; }; @@ -14422,6 +14429,7 @@ $root.topodata = (function() { return "tablet_type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -14469,6 +14477,10 @@ $root.topodata = (function() { case 0: message.tablet_type = 0; break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; case "MASTER": case 1: message.tablet_type = 1; @@ -15101,6 +15113,7 @@ $root.topodata = (function() { return "tablet_type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -15141,6 +15154,10 @@ $root.topodata = (function() { case 0: message.tablet_type = 0; break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; case "MASTER": case 1: message.tablet_type = 1; @@ -16575,6 +16592,7 @@ $root.topodata = (function() { return "served_type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -16623,6 +16641,10 @@ $root.topodata = (function() { case 0: message.served_type = 0; break; + case "PRIMARY": + case 1: + message.served_type = 1; + break; case "MASTER": case 1: message.served_type = 1; @@ -16881,6 +16903,7 @@ $root.topodata = (function() { return "tablet_type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -16914,6 +16937,10 @@ $root.topodata = (function() { case 0: message.tablet_type = 0; break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; case "MASTER": case 1: message.tablet_type = 1; @@ -23109,6 +23136,7 @@ $root.tabletmanagerdata = (function() { return "tablet_type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -23139,6 +23167,10 @@ $root.tabletmanagerdata = (function() { case 0: message.tablet_type = 0; break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; case "MASTER": case 1: message.tablet_type = 1; @@ -37521,6 +37553,7 @@ $root.query = (function() { return "tablet_type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -37558,6 +37591,10 @@ $root.query = (function() { case 0: message.tablet_type = 0; break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; case "MASTER": case 1: message.tablet_type = 1; @@ 
-59194,6 +59231,22 @@ $root.vtctldata = (function() { return ExecuteVtctlCommandResponse; })(); + /** + * MaterializationIntent enum. + * @name vtctldata.MaterializationIntent + * @enum {number} + * @property {number} CUSTOM=0 CUSTOM value + * @property {number} MOVETABLES=1 MOVETABLES value + * @property {number} CREATELOOKUPINDEX=2 CREATELOOKUPINDEX value + */ + vtctldata.MaterializationIntent = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CUSTOM"] = 0; + values[valuesById[1] = "MOVETABLES"] = 1; + values[valuesById[2] = "CREATELOOKUPINDEX"] = 2; + return values; + })(); + vtctldata.TableMaterializeSettings = (function() { /** @@ -59440,6 +59493,7 @@ $root.vtctldata = (function() { * @property {string|null} [cell] MaterializeSettings cell * @property {string|null} [tablet_types] MaterializeSettings tablet_types * @property {string|null} [external_cluster] MaterializeSettings external_cluster + * @property {vtctldata.MaterializationIntent|null} [materialization_intent] MaterializeSettings materialization_intent */ /** @@ -59522,6 +59576,14 @@ $root.vtctldata = (function() { */ MaterializeSettings.prototype.external_cluster = ""; + /** + * MaterializeSettings materialization_intent. + * @member {vtctldata.MaterializationIntent} materialization_intent + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.materialization_intent = 0; + /** * Creates a new MaterializeSettings instance using the specified properties. * @function create @@ -59563,6 +59625,8 @@ $root.vtctldata = (function() { writer.uint32(/* id 7, wireType 2 =*/58).string(message.tablet_types); if (message.external_cluster != null && Object.hasOwnProperty.call(message, "external_cluster")) writer.uint32(/* id 8, wireType 2 =*/66).string(message.external_cluster); + if (message.materialization_intent != null && Object.hasOwnProperty.call(message, "materialization_intent")) + writer.uint32(/* id 9, wireType 0 =*/72).int32(message.materialization_intent); return writer; }; @@ -59623,6 +59687,9 @@ $root.vtctldata = (function() { case 8: message.external_cluster = reader.string(); break; + case 9: + message.materialization_intent = reader.int32(); + break; default: reader.skipType(tag & 7); break; @@ -59688,6 +59755,15 @@ $root.vtctldata = (function() { if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) if (!$util.isString(message.external_cluster)) return "external_cluster: string expected"; + if (message.materialization_intent != null && message.hasOwnProperty("materialization_intent")) + switch (message.materialization_intent) { + default: + return "materialization_intent: enum value expected"; + case 0: + case 1: + case 2: + break; + } return null; }; @@ -59727,6 +59803,20 @@ $root.vtctldata = (function() { message.tablet_types = String(object.tablet_types); if (object.external_cluster != null) message.external_cluster = String(object.external_cluster); + switch (object.materialization_intent) { + case "CUSTOM": + case 0: + message.materialization_intent = 0; + break; + case "MOVETABLES": + case 1: + message.materialization_intent = 1; + break; + case "CREATELOOKUPINDEX": + case 2: + message.materialization_intent = 2; + break; + } return message; }; @@ -59753,6 +59843,7 @@ $root.vtctldata = (function() { object.cell = ""; object.tablet_types = ""; object.external_cluster = ""; + object.materialization_intent = options.enums === String ? 
"CUSTOM" : 0; } if (message.workflow != null && message.hasOwnProperty("workflow")) object.workflow = message.workflow; @@ -59773,6 +59864,8 @@ $root.vtctldata = (function() { object.tablet_types = message.tablet_types; if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) object.external_cluster = message.external_cluster; + if (message.materialization_intent != null && message.hasOwnProperty("materialization_intent")) + object.materialization_intent = options.enums === String ? $root.vtctldata.MaterializationIntent[message.materialization_intent] : message.materialization_intent; return object; }; @@ -64136,6 +64229,7 @@ $root.vtctldata = (function() { return "db_type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -64174,6 +64268,10 @@ $root.vtctldata = (function() { case 0: message.db_type = 0; break; + case "PRIMARY": + case 1: + message.db_type = 1; + break; case "MASTER": case 1: message.db_type = 1; @@ -83281,6 +83379,7 @@ $root.binlogdata = (function() { return "tablet_type: enum value expected"; case 0: case 1: + case 1: case 2: case 3: case 3: @@ -83351,6 +83450,10 @@ $root.binlogdata = (function() { case 0: message.tablet_type = 0; break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; case "MASTER": case 1: message.tablet_type = 1;