Mirror of https://github.com/github/vitess-gh.git

Merge branch 'master' into tal_backup_test

Signed-off-by: Arindam Nayak <arindam.nayak@outlook.com>

Commit c60b60683a
@@ -3,7 +3,7 @@ on: [push, pull_request]
jobs:

  build:
    name: Cluster End-to-End Test
    name: Cluster End-to-End Test for Shard 11
    runs-on: ubuntu-latest
    steps:

@@ -25,10 +25,7 @@ jobs:
        sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
        go mod download

    - name: Run make minimaltools
    - name: cluster_endtoend_shard_11
      run: |
        make minimaltools

    - name: cluster_endtoend
      run: |
        make e2e_test_cluster
        source build.env
        go run test.go -docker=false -print-log -shard 11
@@ -0,0 +1,31 @@
name: cluster_endtoend
on: [push, pull_request]
jobs:

  build:
    name: Cluster End-to-End Test for Shard 12
    runs-on: ubuntu-latest
    steps:

    - name: Set up Go
      uses: actions/setup-go@v1
      with:
        go-version: 1.13

    - name: Check out code
      uses: actions/checkout@v2

    - name: Get dependencies
      run: |
        sudo apt-get update
        sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget
        sudo service mysql stop
        sudo service etcd stop
        sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
        sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
        go mod download

    - name: cluster_endtoend_shard_12
      run: |
        source build.env
        go run test.go -docker=false -print-log -shard 12
@@ -0,0 +1,31 @@
name: cluster_endtoend
on: [push, pull_request]
jobs:

  build:
    name: Cluster End-to-End Test for Shard 13
    runs-on: ubuntu-latest
    steps:

    - name: Set up Go
      uses: actions/setup-go@v1
      with:
        go-version: 1.13

    - name: Check out code
      uses: actions/checkout@v2

    - name: Get dependencies
      run: |
        sudo apt-get update
        sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget
        sudo service mysql stop
        sudo service etcd stop
        sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
        sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
        go mod download

    - name: cluster_endtoend_shard_13
      run: |
        source build.env
        go run test.go -docker=false -print-log -shard 13
@@ -38,12 +38,14 @@ So far we have converted the following Python end to end test cases
- sharded tests
- tabletmanager tests
- vtgate v3 tests

### In-progress
- Initial sharding
- resharding
- vsplit
- reparent

### In-progress
- Backup
- Encryption

After a Python test is migrated to Go, it will be removed from the end-to-end CI test run by updating its shard value to 5 in `test/config.json`.
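For illustration only, a migrated test's entry in `test/config.json` would end up with its `Shard` bumped to 5 so the legacy Python CI shards stop running it. The surrounding field names below are a sketch based on the config layout, not copied from this commit:

```json
"backup": {
  "File": "unused.go",
  "Args": ["vitess.io/vitess/go/test/endtoend/backup"],
  "Command": [],
  "Manual": false,
  "Shard": 5,
  "RetryMax": 0,
  "Tags": []
}
```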
@@ -81,7 +81,7 @@ func TestMasterBackup(t *testing.T) {
	assert.Nil(t, err)

	restoreWaitForBackup(t, "replica")
	err = replica2.VttabletProcess.WaitForTabletTypeForTimeout("SERVING", 15*time.Second)
	err = replica2.VttabletProcess.WaitForTabletTypesForTimeout([]string{"SERVING"}, 15*time.Second)
	assert.Nil(t, err)

	cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
@@ -110,7 +110,7 @@ func TestMasterReplicaSameBackup(t *testing.T) {

	// now bring up the other replica, letting it restore from backup.
	restoreWaitForBackup(t, "replica")
	err = replica2.VttabletProcess.WaitForTabletTypeForTimeout("SERVING", 15*time.Second)
	err = replica2.VttabletProcess.WaitForTabletTypesForTimeout([]string{"SERVING"}, 15*time.Second)
	assert.Nil(t, err)

	// check the new replica has the data
@@ -325,7 +325,7 @@ func testBackup(t *testing.T, tabletType string) {
	_, err = master.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true)
	assert.Nil(t, err)

	err = replica2.VttabletProcess.WaitForTabletTypeForTimeout("SERVING", 15*time.Second)
	err = replica2.VttabletProcess.WaitForTabletTypesForTimeout([]string{"SERVING"}, 15*time.Second)
	assert.Nil(t, err)
	cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
@@ -396,7 +396,7 @@ func verifyRestoreTablet(t *testing.T, tablet *cluster.Vttablet, status string)
	err := tablet.VttabletProcess.Setup()
	assert.Nil(t, err)
	if status != "" {
		err = tablet.VttabletProcess.WaitForTabletTypeForTimeout(status, 15*time.Second)
		err = tablet.VttabletProcess.WaitForTabletTypesForTimeout([]string{status}, 15*time.Second)
		assert.Nil(t, err)
	}
@@ -319,11 +319,6 @@ func (cluster *LocalProcessCluster) LaunchCluster(keyspace *Keyspace, shards []S
	// Create shard
	for _, shard := range shards {
		for _, tablet := range shard.Vttablets {
			err = cluster.VtctlclientProcess.InitTablet(tablet, tablet.Cell, keyspace.Name, cluster.Hostname, shard.Name)
			if err != nil {
				log.Error(err)
				return
			}

			// Setup MysqlctlProcess
			tablet.MysqlctlProcess = *MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory)
@@ -38,6 +38,7 @@ type MysqlctlProcess struct {
	MySQLPort  int
	InitDBFile string
	ExtraArgs  []string
	InitMysql  bool
}

// InitDb executes mysqlctl command to add cell info
@@ -74,8 +75,12 @@ func (mysqlctl *MysqlctlProcess) StartProcess() (*exec.Cmd, error) {
	if len(mysqlctl.ExtraArgs) > 0 {
		tmpProcess.Args = append(tmpProcess.Args, mysqlctl.ExtraArgs...)
	}
	if mysqlctl.InitMysql {
		tmpProcess.Args = append(tmpProcess.Args, "init",
			"-init_db_sql_file", mysqlctl.InitDBFile)
	} else {
		tmpProcess.Args = append(tmpProcess.Args, "start")
	}
	return tmpProcess, tmpProcess.Start()
}
@@ -121,6 +126,7 @@ func MysqlCtlProcessInstance(tabletUID int, mySQLPort int, tmpDirectory string)
	}
	mysqlctl.MySQLPort = mySQLPort
	mysqlctl.TabletUID = tabletUID
	mysqlctl.InitMysql = true
	return mysqlctl
}
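The new `InitMysql` flag controls whether `StartProcess` runs `mysqlctl init -init_db_sql_file ...` (fresh data directory) or a plain `start` (reuse the existing one). As a hedged usage sketch mirroring how the reparent tests later in this diff restart MySQL after a teardown (the `tablet` and `t` variables are assumed, not part of the commit):

```go
// Sketch, not part of the commit: restart a tablet's mysqld without
// re-initializing its data directory.
if err := tablet.MysqlctlProcess.Stop(); err != nil {
	t.Fatal(err)
}
tablet.MysqlctlProcess.InitMysql = false // StartProcess will pass "start" instead of "init"
if err := tablet.MysqlctlProcess.Start(); err != nil {
	t.Fatal(err)
}
```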
@@ -110,6 +110,9 @@ func (vtctlclient *VtctlClientProcess) InitTablet(tablet *Vttablet, cell string,
	if tablet.MySQLPort > 0 {
		args = append(args, "-mysql_port", fmt.Sprintf("%d", tablet.MySQLPort))
	}
	if tablet.GrpcPort > 0 {
		args = append(args, "-grpc_port", fmt.Sprintf("%d", tablet.GrpcPort))
	}
	args = append(args, fmt.Sprintf("%s-%010d", cell, tablet.TabletUID), tabletType)
	return vtctlclient.ExecuteCommand(args...)
}
@@ -130,7 +130,7 @@ func (vttablet *VttabletProcess) Setup() (err error) {

	if vttablet.ServingStatus != "" {
		if err = vttablet.WaitForTabletType(vttablet.ServingStatus); err != nil {
			return fmt.Errorf("process '%s' timed out after 60s (err: %s)", vttablet.Name, <-vttablet.exit)
			return fmt.Errorf("process '%s' timed out after 10s (err: %s)", vttablet.Name, err)
		}
	}
	return nil
@@ -185,16 +185,21 @@ func (vttablet *VttabletProcess) GetTabletStatus() string {

// WaitForTabletType waits for 10 seconds until the expected type is reached
func (vttablet *VttabletProcess) WaitForTabletType(expectedType string) error {
	return vttablet.WaitForTabletTypeForTimeout(expectedType, 10*time.Second)
	return vttablet.WaitForTabletTypesForTimeout([]string{expectedType}, 10*time.Second)
}

// WaitForTabletTypeForTimeout waits for the specified duration until the tablet status reaches the desired type
func (vttablet *VttabletProcess) WaitForTabletTypeForTimeout(expectedType string, timeout time.Duration) error {
// WaitForTabletTypes waits for 10 seconds until one of the expected types is reached
func (vttablet *VttabletProcess) WaitForTabletTypes(expectedTypes []string) error {
	return vttablet.WaitForTabletTypesForTimeout(expectedTypes, 10*time.Second)
}

// WaitForTabletTypesForTimeout waits until the tablet reaches any of the provided statuses
func (vttablet *VttabletProcess) WaitForTabletTypesForTimeout(expectedTypes []string, timeout time.Duration) error {
	timeToWait := time.Now().Add(timeout)
	var currentStatus string
	var status string
	for time.Now().Before(timeToWait) {
		currentStatus = vttablet.GetTabletStatus()
		if currentStatus == expectedType {
		status = vttablet.GetTabletStatus()
		if contains(expectedTypes, status) {
			return nil
		}
		select {
@@ -204,7 +209,17 @@ func (vttablet *VttabletProcess) WaitForTabletTypeForTimeout(expectedType string
			time.Sleep(300 * time.Millisecond)
		}
	}
	return fmt.Errorf("vttablet %s, expected status [%s] not reached, current status - [%s]", vttablet.TabletPath, expectedType, currentStatus)
	return fmt.Errorf("Vttablet %s, current status = %s, expected status [%s] not reached ",
		vttablet.TabletPath, status, strings.Join(expectedTypes, ","))
}

func contains(arr []string, str string) bool {
	for _, a := range arr {
		if a == str {
			return true
		}
	}
	return false
}

// WaitForBinLogPlayerCount waits till binlog player count var matches
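A short usage sketch of the new multi-status helpers, mirroring the call sites in the backup and reparent tests in this diff (`tablet` and `t` are assumed):

```go
// Sketch, not part of the commit: a freshly started tablet may come up as either
// SERVING or NOT_SERVING, so wait for whichever state arrives first.
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
assert.Nil(t, err)

// Or with an explicit timeout:
err = tablet.VttabletProcess.WaitForTabletTypesForTimeout([]string{"SERVING"}, 15*time.Second)
assert.Nil(t, err)
```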
@@ -273,7 +288,8 @@ func (vttablet *VttabletProcess) TearDown() error {

// CreateDB creates the database for keyspace
func (vttablet *VttabletProcess) CreateDB(keyspace string) error {
	_, err := vttablet.QueryTablet(fmt.Sprintf("create database vt_%s", keyspace), keyspace, false)
	_, _ = vttablet.QueryTablet(fmt.Sprintf("drop database IF EXISTS vt_%s", keyspace), keyspace, false)
	_, err := vttablet.QueryTablet(fmt.Sprintf("create database IF NOT EXISTS vt_%s", keyspace), keyspace, false)
	return err
}
@@ -213,7 +213,7 @@ func restoreTablet(t *testing.T, tablet *cluster.Vttablet, restoreKSName string)
	err = tablet.VttabletProcess.Setup()
	assert.Nil(t, err)

	err = tablet.VttabletProcess.WaitForTabletTypeForTimeout("SERVING", 20*time.Second)
	err = tablet.VttabletProcess.WaitForTabletTypesForTimeout([]string{"SERVING"}, 20*time.Second)
	assert.Nil(t, err)
}
func listBackups(t *testing.T) []string {
@ -0,0 +1,175 @@
|
|||
/*
|
||||
Copyright 2019 The Vitess Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package reparent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/mysql"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/test/endtoend/cluster"
|
||||
tmc "vitess.io/vitess/go/vt/vttablet/grpctmclient"
|
||||
)
|
||||
|
||||
var (
|
||||
// ClusterInstance instance to be used for test with different params
|
||||
clusterInstance *cluster.LocalProcessCluster
|
||||
tmClient *tmc.Client
|
||||
keyspaceName = "ks"
|
||||
shardName = "0"
|
||||
shard1Name = "0000000000000000-ffffffffffffffff"
|
||||
keyspaceShard = keyspaceName + "/" + shardName
|
||||
dbName = "vt_" + keyspaceName
|
||||
username = "vt_dba"
|
||||
hostname = "localhost"
|
||||
cell1 = "zone1"
|
||||
cell2 = "zone2"
|
||||
insertSQL = "insert into vt_insert_test(id, msg) values (%d, 'test %d')"
|
||||
sqlSchema = `
|
||||
create table vt_insert_test (
|
||||
id bigint,
|
||||
msg varchar(64),
|
||||
primary key (id)
|
||||
) Engine=InnoDB
|
||||
`
|
||||
// Tablets for shard0
|
||||
tablet62344 *cluster.Vttablet
|
||||
tablet62044 *cluster.Vttablet
|
||||
tablet41983 *cluster.Vttablet
|
||||
tablet31981 *cluster.Vttablet
|
||||
|
||||
// Tablets for shard1
|
||||
masterTablet *cluster.Vttablet
|
||||
replicaTablet *cluster.Vttablet
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
flag.Parse()
|
||||
|
||||
exitCode := func() int {
|
||||
clusterInstance = cluster.NewCluster(cell1, hostname)
|
||||
defer clusterInstance.Teardown()
|
||||
|
||||
// Launch keyspace
|
||||
keyspace := &cluster.Keyspace{Name: keyspaceName}
|
||||
|
||||
// Start topo server
|
||||
err := clusterInstance.StartTopo()
|
||||
if err != nil {
|
||||
return 1
|
||||
}
|
||||
|
||||
// Adding another cell in the same cluster
|
||||
err = clusterInstance.TopoProcess.ManageTopoDir("mkdir", "/vitess/"+cell2)
|
||||
if err != nil {
|
||||
return 1
|
||||
}
|
||||
err = clusterInstance.VtctlProcess.AddCellInfo(cell2)
|
||||
if err != nil {
|
||||
return 1
|
||||
}
|
||||
|
||||
tablet62344 = clusterInstance.GetVttabletInstance("replica", 62344, "")
|
||||
tablet62044 = clusterInstance.GetVttabletInstance("replica", 62044, "")
|
||||
tablet41983 = clusterInstance.GetVttabletInstance("replica", 41983, "")
|
||||
tablet31981 = clusterInstance.GetVttabletInstance("replica", 31981, cell2)
|
||||
|
||||
shard0 := &cluster.Shard{Name: shardName}
|
||||
shard0.Vttablets = []*cluster.Vttablet{tablet62344, tablet62044, tablet41983, tablet31981}
|
||||
|
||||
// Initiate shard1 - required for range-based reparenting
|
||||
masterTablet = clusterInstance.GetVttabletInstance("replica", 0, "")
|
||||
replicaTablet = clusterInstance.GetVttabletInstance("replica", 0, "")
|
||||
|
||||
shard1 := &cluster.Shard{Name: shard1Name}
|
||||
shard1.Vttablets = []*cluster.Vttablet{masterTablet, replicaTablet}
|
||||
|
||||
clusterInstance.VtTabletExtraArgs = []string{
|
||||
"-lock_tables_timeout", "5s",
|
||||
"-enable_semi_sync",
|
||||
}
|
||||
|
||||
// Initialize Cluster
|
||||
err = clusterInstance.LaunchCluster(keyspace, []cluster.Shard{*shard0, *shard1})
|
||||
if err != nil {
|
||||
return 1
|
||||
}
|
||||
|
||||
//Start MySql
|
||||
var mysqlCtlProcessList []*exec.Cmd
|
||||
for _, shard := range clusterInstance.Keyspaces[0].Shards {
|
||||
for _, tablet := range shard.Vttablets {
|
||||
fmt.Println("Starting MySql for tablet ", tablet.Alias)
|
||||
if proc, err := tablet.MysqlctlProcess.StartProcess(); err != nil {
|
||||
return 1
|
||||
} else {
|
||||
mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for mysql processes to start
|
||||
for _, proc := range mysqlCtlProcessList {
|
||||
if err := proc.Wait(); err != nil {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
// We do not need semiSync for this test case.
|
||||
clusterInstance.EnableSemiSync = false
|
||||
|
||||
// create tablet manager client
|
||||
tmClient = tmc.NewClient()
|
||||
|
||||
return m.Run()
|
||||
}()
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
func getMysqlConnParam(tablet *cluster.Vttablet) mysql.ConnParams {
|
||||
connParams := mysql.ConnParams{
|
||||
Uname: username,
|
||||
DbName: dbName,
|
||||
UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/mysql.sock", tablet.TabletUID)),
|
||||
}
|
||||
return connParams
|
||||
}
|
||||
|
||||
func runSQL(ctx context.Context, t *testing.T, sql string, tablet *cluster.Vttablet) *sqltypes.Result {
|
||||
// Get Connection
|
||||
tabletParams := getMysqlConnParam(tablet)
|
||||
conn, err := mysql.Connect(ctx, &tabletParams)
|
||||
assert.Nil(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
// runSQL
|
||||
return execute(t, conn, sql)
|
||||
}
|
||||
|
||||
func execute(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
|
||||
t.Helper()
|
||||
qr, err := conn.ExecuteFetch(query, 1000, true)
|
||||
assert.Nil(t, err)
|
||||
return qr
|
||||
}
|
|
@ -0,0 +1,92 @@
|
|||
/*
|
||||
Copyright 2019 The Vitess Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package reparent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/test/endtoend/cluster"
|
||||
)
|
||||
|
||||
func TestReparentGracefulRangeBased(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*masterTablet, *replicaTablet} {
|
||||
// create database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shard1Name)
|
||||
assert.Nil(t, err)
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*masterTablet, *replicaTablet} {
|
||||
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Force the replica to reparent assuming that all the datasets are identical.
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", fmt.Sprintf("%s/%s", keyspaceName, shard1Name), masterTablet.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Validate topology
|
||||
validateTopology(t, true)
|
||||
|
||||
// create Tables
|
||||
runSQL(ctx, t, sqlSchema, masterTablet)
|
||||
|
||||
checkMasterTablet(t, masterTablet)
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
// Run this to make sure it succeeds.
|
||||
output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
|
||||
"ShardReplicationPositions", fmt.Sprintf("%s/%s", keyspaceName, shard1Name))
|
||||
assert.Nil(t, err)
|
||||
strArray := strings.Split(output, "\n")
|
||||
if strArray[len(strArray)-1] == "" {
|
||||
strArray = strArray[:len(strArray)-1] // Truncate slice, remove empty line
|
||||
}
|
||||
assert.Equal(t, 2, len(strArray)) // one master, one slave
|
||||
assert.Contains(t, strArray[0], "master") // master first
|
||||
|
||||
// Perform a graceful reparent operation
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shard1Name),
|
||||
"-new_master", replicaTablet.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Validate topology
|
||||
validateTopology(t, false)
|
||||
|
||||
checkMasterTablet(t, replicaTablet)
|
||||
|
||||
// insert data into the new master, check the connected replica work
|
||||
insertSQL := fmt.Sprintf(insertSQL, 1, 1)
|
||||
runSQL(ctx, t, insertSQL, replicaTablet)
|
||||
err = checkInsertedValues(ctx, t, masterTablet, 1)
|
||||
assert.Nil(t, err)
|
||||
}
|
|
@ -0,0 +1,899 @@
|
|||
/*
|
||||
Copyright 2019 The Vitess Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package reparent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"vitess.io/vitess/go/mysql"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/json2"
|
||||
"vitess.io/vitess/go/test/endtoend/cluster"
|
||||
querypb "vitess.io/vitess/go/vt/proto/query"
|
||||
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
|
||||
)
|
||||
|
||||
func TestMasterToSpareStateChangeImpossible(t *testing.T) {
|
||||
|
||||
args := []string{"InitTablet", "-hostname", hostname,
|
||||
"-port", fmt.Sprintf("%d", tablet62344.HTTPPort), "-allow_update", "-parent",
|
||||
"-keyspace", keyspaceName,
|
||||
"-shard", shardName,
|
||||
"-mysql_port", fmt.Sprintf("%d", tablet62344.MySQLPort),
|
||||
"-grpc_port", fmt.Sprintf("%d", tablet62344.GrpcPort)}
|
||||
args = append(args, fmt.Sprintf("%s-%010d", tablet62344.Cell, tablet62344.TabletUID), "master")
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand(args...)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet62344.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Create Database
|
||||
err = tablet62344.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// We cannot change a master to spare
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeSlaveType", tablet62344.Alias, "spare")
|
||||
assert.NotNil(t, err)
|
||||
|
||||
//kill Tablet
|
||||
err = tablet62344.VttabletProcess.TearDown()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestReparentDownMaster(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
// Create Database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Reset status, don't wait for the tablet status. We will check it later
|
||||
tablet.VttabletProcess.ServingStatus = ""
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Init Shard Master
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
// create Tables
|
||||
runSQL(ctx, t, sqlSchema, tablet62344)
|
||||
|
||||
// Make the current master agent and database unavailable.
|
||||
err = tablet62344.VttabletProcess.TearDown()
|
||||
assert.Nil(t, err)
|
||||
err = tablet62344.MysqlctlProcess.Stop()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Perform a planned reparent operation, will try to contact
|
||||
// the current master and fail somewhat quickly
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"PlannedReparentShard",
|
||||
"-wait-time", "5s",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-new_master", tablet62044.Alias)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
// Run forced reparent operation, this should now proceed unimpeded.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"EmergencyReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-new_master", tablet62044.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
checkMasterTablet(t, tablet62044)
|
||||
|
||||
// insert data into the new master, check the connected replica work
|
||||
insertSQL := fmt.Sprintf(insertSQL, 2, 2)
|
||||
runSQL(ctx, t, insertSQL, tablet62044)
|
||||
err = checkInsertedValues(ctx, t, tablet41983, 2)
|
||||
assert.Nil(t, err)
|
||||
err = checkInsertedValues(ctx, t, tablet31981, 2)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// bring back the old master as a replica, check that it catches up
|
||||
tablet62344.MysqlctlProcess.InitMysql = false
|
||||
err = tablet62344.MysqlctlProcess.Start()
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(tablet62344, tablet62344.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// As there is already a master the new replica will come directly in SERVING state
|
||||
tablet62344.VttabletProcess.ServingStatus = "SERVING"
|
||||
// Start the tablet
|
||||
err = tablet62344.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = checkInsertedValues(ctx, t, tablet62344, 2)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Kill tablets
|
||||
killTablets(t)
|
||||
}
|
||||
|
||||
func TestReparentCrossCell(t *testing.T) {
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
// create database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Force the replica to reparent assuming that all the datasets are identical.
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
checkMasterTablet(t, tablet62344)
|
||||
|
||||
// Perform a graceful reparent operation to another cell.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-new_master", tablet31981.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
checkMasterTablet(t, tablet31981)
|
||||
|
||||
// Kill tablets
|
||||
killTablets(t)
|
||||
|
||||
}
|
||||
|
||||
func TestReparentGraceful(t *testing.T) {
|
||||
reparentGraceful(t, false)
|
||||
}
|
||||
|
||||
func TestReparentGracefulRecovery(t *testing.T) {
|
||||
reparentGraceful(t, true)
|
||||
}
|
||||
|
||||
func reparentGraceful(t *testing.T, confusedMaster bool) {
|
||||
ctx := context.Background()
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
// create database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Force the replica to reparent assuming that all the datasets are identical.
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
// create Tables
|
||||
runSQL(ctx, t, sqlSchema, tablet62344)
|
||||
|
||||
checkMasterTablet(t, tablet62344)
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
// Run this to make sure it succeeds.
|
||||
output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
|
||||
"ShardReplicationPositions", fmt.Sprintf("%s/%s", keyspaceName, shardName))
|
||||
assert.Nil(t, err)
|
||||
strArray := strings.Split(output, "\n")
|
||||
if strArray[len(strArray)-1] == "" {
|
||||
strArray = strArray[:len(strArray)-1] // Truncate slice, remove empty line
|
||||
}
|
||||
assert.Equal(t, 4, len(strArray)) // one master, three replicas
|
||||
assert.Contains(t, strArray[0], "master") // master first
|
||||
|
||||
// Perform a graceful reparent operation
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName),
|
||||
"-new_master", tablet62044.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
checkMasterTablet(t, tablet62044)
|
||||
|
||||
// Simulate a master that forgets it's master and becomes replica.
|
||||
// PlannedReparentShard should be able to recover by reparenting to the same master again,
|
||||
// as long as all tablets are available to check that it's safe.
|
||||
if confusedMaster {
|
||||
tablet62044.Type = "replica"
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(tablet62044, tablet62044.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", tablet62044.Alias)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Perform a graceful reparent to the same master.
|
||||
// It should be idempotent, and should fix any inconsistencies if necessary
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName),
|
||||
"-new_master", tablet62044.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
checkMasterTablet(t, tablet62044)
|
||||
|
||||
// insert data into the new master, check the connected replica work
|
||||
insertSQL := fmt.Sprintf(insertSQL, 1, 1)
|
||||
runSQL(ctx, t, insertSQL, tablet62044)
|
||||
err = checkInsertedValues(ctx, t, tablet41983, 1)
|
||||
assert.Nil(t, err)
|
||||
err = checkInsertedValues(ctx, t, tablet62344, 1)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Kill tablets
|
||||
killTablets(t)
|
||||
}
|
||||
|
||||
func TestReparentSlaveOffline(t *testing.T) {
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
// create database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Force the replica to reparent assuming that all the datasets are identical.
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", keyspaceShard, tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
checkMasterTablet(t, tablet62344)
|
||||
|
||||
// Kill one tablet so we seem offline
|
||||
err = tablet31981.VttabletProcess.TearDown()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Perform a graceful reparent operation.
|
||||
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-new_master", tablet62044.Alias)
|
||||
assert.NotNil(t, err)
|
||||
assert.Contains(t, out, "tablet zone2-0000031981 SetMaster failed")
|
||||
|
||||
checkMasterTablet(t, tablet62044)
|
||||
|
||||
killTablets(t)
|
||||
}
|
||||
|
||||
func TestReparentAvoid(t *testing.T) {
|
||||
// Remove tablet41983 from topology as that tablet is not required for this test
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet41983.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet31981} {
|
||||
// create database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet31981} {
|
||||
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Force the replica to reparent assuming that all the datasets are identical.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", keyspaceShard, tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
checkMasterTablet(t, tablet62344)
|
||||
|
||||
// Perform a reparent operation with avoid_master pointing to non-master. It
|
||||
// should succeed without doing anything.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-avoid_master", tablet62044.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
checkMasterTablet(t, tablet62344)
|
||||
|
||||
// Perform a reparent operation with avoid_master pointing to master.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-avoid_master", tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
// 62044 is in the same cell and 31981 is in a different cell, so we must land on 62044
|
||||
checkMasterTablet(t, tablet62044)
|
||||
|
||||
// If we kill the tablet in the same cell as master then reparent -avoid_master will fail.
|
||||
err = tablet62344.VttabletProcess.TearDown()
|
||||
assert.Nil(t, err)
|
||||
|
||||
output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-avoid_master", tablet62044.Alias)
|
||||
assert.NotNil(t, err)
|
||||
assert.Contains(t, output, "cannot find a tablet to reparent to")
|
||||
|
||||
validateTopology(t, false)
|
||||
|
||||
checkMasterTablet(t, tablet62044)
|
||||
|
||||
killTablets(t)
|
||||
}
|
||||
|
||||
func TestReparentFromOutside(t *testing.T) {
|
||||
reparentFromOutside(t, false)
|
||||
}
|
||||
|
||||
func TestReparentFromOutsideWithNoMaster(t *testing.T) {
|
||||
reparentFromOutside(t, true)
|
||||
|
||||
// We will have to restart mysql to avoid hanging/locks due to external Reparent
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
fmt.Println("Restarting MySql for tablet ", tablet.Alias)
|
||||
err := tablet.MysqlctlProcess.Stop()
|
||||
assert.Nil(t, err)
|
||||
tablet.MysqlctlProcess.InitMysql = false
|
||||
err = tablet.MysqlctlProcess.Start()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func reparentFromOutside(t *testing.T, downMaster bool) {
|
||||
//This test will start a master and 3 replicas.
|
||||
//Then:
|
||||
//- one replica will be the new master
|
||||
//- one replica will be reparented to that new master
|
||||
//- one replica will be busted and dead in the water and we'll call TabletExternallyReparented.
|
||||
//Args:
|
||||
//downMaster: kills the old master first
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
// create database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Reparent as a starting point
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
checkMasterTablet(t, tablet62344)
|
||||
|
||||
// now manually reparent 1 out of 2 tablets
|
||||
// 62044 will be the new master
|
||||
// 31981 won't be re-parented, so it will be busted
|
||||
|
||||
if !downMaster {
|
||||
// commands to stop the current master
|
||||
demoteMasterCommands := "SET GLOBAL read_only = ON; FLUSH TABLES WITH READ LOCK; UNLOCK TABLES"
|
||||
runSQL(ctx, t, demoteMasterCommands, tablet62344)
|
||||
|
||||
//Get the position of the old master and wait for the new one to catch up.
|
||||
err = waitForReplicationPosition(t, tablet62344, tablet62044)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// commands to convert a replica to a master
|
||||
promoteSlaveCommands := "STOP SLAVE; RESET SLAVE ALL; SET GLOBAL read_only = OFF;"
|
||||
runSQL(ctx, t, promoteSlaveCommands, tablet62044)
|
||||
|
||||
// Get master position
|
||||
_, gtID := cluster.GetMasterPosition(t, *tablet62044, hostname)
|
||||
|
||||
// 62344 will now be a slave of 62044
|
||||
changeMasterCommands := fmt.Sprintf("RESET MASTER; RESET SLAVE; SET GLOBAL gtid_purged = '%s';"+
|
||||
"CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='vt_repl', MASTER_AUTO_POSITION = 1;"+
|
||||
"START SLAVE;", gtID, hostname, tablet62044.MySQLPort)
|
||||
runSQL(ctx, t, changeMasterCommands, tablet62344)
|
||||
|
||||
// Capture time when we made tablet62044 master
|
||||
baseTime := time.Now().UnixNano() / 1000000000
|
||||
|
||||
// 41983 will be a slave of 62044
|
||||
changeMasterCommands = fmt.Sprintf("STOP SLAVE; RESET MASTER; SET GLOBAL gtid_purged = '%s';"+
|
||||
"CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='vt_repl', MASTER_AUTO_POSITION = 1;"+
|
||||
"START SLAVE;", gtID, hostname, tablet62044.MySQLPort)
|
||||
runSQL(ctx, t, changeMasterCommands, tablet41983)
|
||||
|
||||
// To test the downMaster, we kill the old master first and delete its tablet record
|
||||
if downMaster {
|
||||
err := tablet62344.VttabletProcess.TearDown()
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet",
|
||||
"-allow_master", tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// update topology with the new server
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented",
|
||||
tablet62044.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
checkReparentFromOutside(t, tablet62044, downMaster, baseTime)
|
||||
|
||||
if !downMaster {
|
||||
err := tablet62344.VttabletProcess.TearDown()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
killTablets(t)
|
||||
}
|
||||
|
||||
func TestReparentWithDownSlave(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
// Create Database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Init Shard Master
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
// create Tables
|
||||
runSQL(ctx, t, sqlSchema, tablet62344)
|
||||
|
||||
// Stop replica mysql Process
|
||||
err = tablet41983.MysqlctlProcess.Stop()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Perform a graceful reparent operation. It will fail as one tablet is down.
|
||||
output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-new_master", tablet62044.Alias)
|
||||
assert.NotNil(t, err)
|
||||
assert.Contains(t, output, "TabletManager.SetMaster on zone1-0000041983 error")
|
||||
|
||||
// insert data into the new master, check the connected replica work
|
||||
insertSQL := fmt.Sprintf(insertSQL, 3, 3)
|
||||
runSQL(ctx, t, insertSQL, tablet62044)
|
||||
err = checkInsertedValues(ctx, t, tablet31981, 3)
|
||||
assert.Nil(t, err)
|
||||
err = checkInsertedValues(ctx, t, tablet62344, 3)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// restart mysql on the old replica, should still be connecting to the old master
|
||||
tablet41983.MysqlctlProcess.InitMysql = false
|
||||
err = tablet41983.MysqlctlProcess.Start()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Use the same PlannedReparentShard command to fix up the tablet.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-new_master", tablet62044.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// wait until it gets the data
|
||||
err = checkInsertedValues(ctx, t, tablet41983, 3)
|
||||
assert.Nil(t, err)
|
||||
|
||||
killTablets(t)
|
||||
}
|
||||
|
||||
func TestChangeTypeSemiSync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create new names for tablets, so this test is less confusing.
|
||||
master := tablet62344
|
||||
replica := tablet62044
|
||||
rdonly1 := tablet41983
|
||||
rdonly2 := tablet31981
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*master, *replica, *rdonly1, *rdonly2} {
|
||||
// Create Database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Init Shard Master
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), master.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*master, *replica, *rdonly1, *rdonly2} {
|
||||
err := tablet.VttabletProcess.WaitForTabletType("SERVING")
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Updated rdonly tablet and set tablet type to rdonly
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeSlaveType", rdonly1.Alias, "rdonly")
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeSlaveType", rdonly2.Alias, "rdonly")
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
checkMasterTablet(t, master)
|
||||
|
||||
// Stop replication on rdonly1, to make sure when we make it replica it doesn't start again.
|
||||
// Note we do a similar test for replica -> rdonly below.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopSlave", rdonly1.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Check semi-sync on replicas.
|
||||
// The flag is only an indication of the value to use next time
|
||||
// we turn replication on, so also check the status.
|
||||
// rdonly1 is not replicating, so its status is off.
|
||||
checkDBvar(ctx, t, replica, "rpl_semi_sync_slave_enabled", "ON")
|
||||
checkDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "OFF")
|
||||
checkDBvar(ctx, t, rdonly2, "rpl_semi_sync_slave_enabled", "OFF")
|
||||
checkDBstatus(ctx, t, replica, "Rpl_semi_sync_slave_status", "ON")
|
||||
checkDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF")
|
||||
checkDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "OFF")
|
||||
|
||||
// Change replica to rdonly while replicating, should turn off semi-sync, and restart replication.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeSlaveType", replica.Alias, "rdonly")
|
||||
assert.Nil(t, err)
|
||||
checkDBvar(ctx, t, replica, "rpl_semi_sync_slave_enabled", "OFF")
|
||||
checkDBstatus(ctx, t, replica, "Rpl_semi_sync_slave_status", "OFF")
|
||||
|
||||
// Change rdonly1 to replica, should turn on semi-sync, and not start replication.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeSlaveType", rdonly1.Alias, "replica")
|
||||
assert.Nil(t, err)
|
||||
checkDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "ON")
|
||||
checkDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF")
|
||||
checkSlaveStatus(ctx, t, rdonly1)
|
||||
|
||||
// Now change from replica back to rdonly, make sure replication is still not enabled.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeSlaveType", rdonly1.Alias, "rdonly")
|
||||
assert.Nil(t, err)
|
||||
checkDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "OFF")
|
||||
checkDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF")
|
||||
checkSlaveStatus(ctx, t, rdonly1)
|
||||
|
||||
// Change rdonly2 to replica, should turn on semi-sync, and restart replication.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeSlaveType", rdonly2.Alias, "replica")
|
||||
assert.Nil(t, err)
|
||||
checkDBvar(ctx, t, rdonly2, "rpl_semi_sync_slave_enabled", "ON")
|
||||
checkDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "ON")
|
||||
|
||||
killTablets(t)
|
||||
}
|
||||
|
||||
func TestReparentDoesntHangIfMasterFails(t *testing.T) {
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
// Create Database
|
||||
err := tablet.VttabletProcess.CreateDB(keyspaceName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Init Tablet
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, tablet.Cell, keyspaceName, hostname, shardName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Start the tablet
|
||||
err = tablet.VttabletProcess.Setup()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Init Shard Master
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
|
||||
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
err := tablet.VttabletProcess.WaitForTabletType("SERVING")
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
validateTopology(t, true)
|
||||
|
||||
// Change the schema of the _vt.reparent_journal table, so that
|
||||
// inserts into it will fail. That will make the master fail.
|
||||
_, err = tablet62344.VttabletProcess.QueryTabletWithDB(
|
||||
"ALTER TABLE reparent_journal DROP COLUMN replication_position", "_vt")
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Perform a planned reparent operation, the master will fail the
|
||||
// insert. The slaves should then abort right away.
|
||||
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
|
||||
"PlannedReparentShard",
|
||||
"-keyspace_shard", keyspaceShard,
|
||||
"-new_master", tablet62044.Alias)
|
||||
assert.NotNil(t, err)
|
||||
assert.Contains(t, out, "master failed to PopulateReparentJournal")
|
||||
|
||||
killTablets(t)
|
||||
}
|
||||
|
||||
// Waits for tablet B to catch up to the replication position of tablet A.
|
||||
func waitForReplicationPosition(t *testing.T, tabletA *cluster.Vttablet, tabletB *cluster.Vttablet) error {
|
||||
posA, _ := cluster.GetMasterPosition(t, *tabletA, hostname)
|
||||
timeout := time.Now().Add(5 * time.Second)
|
||||
for time.Now().Before(timeout) {
|
||||
posB, _ := cluster.GetMasterPosition(t, *tabletB, hostname)
|
||||
if positionAtLeast(t, tabletB, posA, posB) {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
return fmt.Errorf("failed to catch up on replication position")
|
||||
}
|
||||
|
||||
func positionAtLeast(t *testing.T, tablet *cluster.Vttablet, a string, b string) bool {
|
||||
isAtleast := false
|
||||
val, err := tablet.MysqlctlProcess.ExecuteCommandWithOutput("position", "at_least", a, b)
|
||||
assert.Nil(t, err)
|
||||
if strings.Contains(val, "true") {
|
||||
isAtleast = true
|
||||
}
|
||||
return isAtleast
|
||||
}
|
||||
|
||||
func checkReparentFromOutside(t *testing.T, tablet *cluster.Vttablet, downMaster bool, baseTime int64) {
|
||||
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell1, keyspaceShard)
|
||||
assert.Nil(t, err, "error should be Nil")
|
||||
if !downMaster {
|
||||
assertNodeCount(t, result, int(3))
|
||||
} else {
|
||||
assertNodeCount(t, result, int(2))
|
||||
}
|
||||
|
||||
// make sure the master status page says it's the master
|
||||
status := tablet.VttabletProcess.GetStatus()
|
||||
assert.Contains(t, status, "Tablet Type: MASTER")
|
||||
|
||||
// make sure the master health stream says it's the master too
|
||||
// (health check is disabled on these servers, force it first)
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
streamHealth, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
|
||||
"VtTabletStreamHealth",
|
||||
"-count", "1", tablet.Alias)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var streamHealthResponse querypb.StreamHealthResponse
|
||||
err = json.Unmarshal([]byte(streamHealth), &streamHealthResponse)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, streamHealthResponse.Target.TabletType, topodatapb.TabletType_MASTER)
|
||||
assert.True(t, streamHealthResponse.TabletExternallyReparentedTimestamp >= baseTime)
|
||||
|
||||
}
|
||||
|
||||
func assertNodeCount(t *testing.T, result string, want int) {
|
||||
resultMap := make(map[string]interface{})
|
||||
err := json.Unmarshal([]byte(result), &resultMap)
|
||||
assert.Nil(t, err)
|
||||
|
||||
nodes := reflect.ValueOf(resultMap["nodes"])
|
||||
got := nodes.Len()
|
||||
assert.Equal(t, want, got)
|
||||
}
|
||||
|
||||
func checkDBvar(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, variable string, status string) {
|
||||
tabletParams := getMysqlConnParam(tablet)
|
||||
conn, err := mysql.Connect(ctx, &tabletParams)
|
||||
assert.Nil(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
qr := execute(t, conn, fmt.Sprintf("show variables like '%s'", variable))
|
||||
got := fmt.Sprintf("%v", qr.Rows)
|
||||
want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", variable, status)
|
||||
assert.Equal(t, want, got)
|
||||
}
|
||||
|
||||
func checkDBstatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, variable string, status string) {
|
||||
tabletParams := getMysqlConnParam(tablet)
|
||||
conn, err := mysql.Connect(ctx, &tabletParams)
|
||||
assert.Nil(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
qr := execute(t, conn, fmt.Sprintf("show status like '%s'", variable))
|
||||
got := fmt.Sprintf("%v", qr.Rows)
|
||||
want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", variable, status)
|
||||
assert.Equal(t, want, got)
|
||||
}
|
||||
|
||||
func checkSlaveStatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet) {
|
||||
qr := runSQL(ctx, t, "show slave status", tablet)
|
||||
SlaveIORunning := fmt.Sprintf("%v", qr.Rows[0][10]) // Slave_IO_Running
|
||||
SlaveSQLRunning := fmt.Sprintf("%v", qr.Rows[0][11]) // Slave_SQL_Running
|
||||
assert.Equal(t, SlaveIORunning, "VARCHAR(\"No\")")
|
||||
assert.Equal(t, SlaveSQLRunning, "VARCHAR(\"No\")")
|
||||
}
|
||||
|
||||
// Makes sure the tablet type is master, and its health check agrees.
|
||||
func checkMasterTablet(t *testing.T, tablet *cluster.Vttablet) {
|
||||
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias)
|
||||
assert.Nil(t, err)
|
||||
var tabletInfo topodatapb.Tablet
|
||||
err = json2.Unmarshal([]byte(result), &tabletInfo)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, topodatapb.TabletType_MASTER, tabletInfo.GetType())
|
||||
|
||||
// make sure the health stream is updated
|
||||
result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "-count", "1", tablet.Alias)
|
||||
assert.Nil(t, err)
|
||||
var streamHealthResponse querypb.StreamHealthResponse
|
||||
|
||||
err = json2.Unmarshal([]byte(result), &streamHealthResponse)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.True(t, streamHealthResponse.GetServing())
|
||||
tabletType := streamHealthResponse.GetTarget().GetTabletType()
|
||||
assert.Equal(t, topodatapb.TabletType_MASTER, tabletType)
|
||||
|
||||
}
|
||||
|
||||
func checkInsertedValues(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, index int) error {
|
||||
// wait until it gets the data
|
||||
timeout := time.Now().Add(10 * time.Second)
|
||||
for time.Now().Before(timeout) {
|
||||
selectSQL := fmt.Sprintf("select msg from vt_insert_test where id=%d", index)
|
||||
qr := runSQL(ctx, t, selectSQL, tablet)
|
||||
if len(qr.Rows) == 1 {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
}
|
||||
return fmt.Errorf("data is not yet replicated")
|
||||
}
|
||||
|
||||
func validateTopology(t *testing.T, pingTablets bool) {
|
||||
if pingTablets {
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("Validate", "-ping-tablets=true")
|
||||
assert.Nil(t, err)
|
||||
} else {
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("Validate")
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func killTablets(t *testing.T) {
|
||||
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet41983, *tablet31981} {
|
||||
fmt.Println("Teardown tablet: ", tablet.Alias)
|
||||
err := tablet.VttabletProcess.TearDown()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Reset status and type
|
||||
tablet.VttabletProcess.ServingStatus = ""
|
||||
tablet.Type = "replica"
|
||||
}
|
||||
}
@@ -143,19 +143,13 @@ func initClusterForInitialSharding(keyspaceName string, shardNames []string, tot

	for i := 0; i < totalTabletsRequired; i++ {
		// instantiate vttablet object with reserved ports
		tabletUID := ClusterInstance.GetAndReserveTabletUID()
		tablet := &cluster.Vttablet{
			TabletUID: tabletUID,
			HTTPPort:  ClusterInstance.GetAndReservePort(),
			GrpcPort:  ClusterInstance.GetAndReservePort(),
			MySQLPort: ClusterInstance.GetAndReservePort(),
			Alias:     fmt.Sprintf("%s-%010d", ClusterInstance.Cell, tabletUID),
			Type:      "replica",
		}
		if i == 0 { // Make the first one as master
			tablet.Type = "master"
		} else if i == totalTabletsRequired-1 && rdonly { // Make the last one as rdonly if rdonly flag is passed
			tablet.Type = "rdonly"
		var tablet *cluster.Vttablet
		if i == totalTabletsRequired-1 && rdonly {
			tablet = ClusterInstance.GetVttabletInstance("rdonly", 0, "")
		} else if i == 0 {
			tablet = ClusterInstance.GetVttabletInstance("master", 0, "")
		} else {
			tablet = ClusterInstance.GetVttabletInstance("replica", 0, "")
		}
		// Start Mysqlctl process
		tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, ClusterInstance.TmpDirectory)
@ -518,6 +512,7 @@ func TestInitialSharding(t *testing.T, keyspace *cluster.Keyspace, keyType query
|
|||
"MultiSplitDiff",
|
||||
fmt.Sprintf("%s/%s", keyspaceName, shard1.Name))
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, shard := range []string{shard21.Name, shard22.Name} {
|
||||
err = ClusterInstance.VtworkerProcess.ExecuteVtworkerCommand(ClusterInstance.GetAndReservePort(),
|
||||
ClusterInstance.GetAndReservePort(),
|
||||
|
@ -529,9 +524,14 @@ func TestInitialSharding(t *testing.T, keyspace *cluster.Keyspace, keyType query
|
|||
}
|
||||
}
|
||||
|
||||
if isExternal {
|
||||
// get status for the destination master tablet, make sure we have it all
|
||||
sharding.CheckRunningBinlogPlayer(t, *shard21.MasterTablet(), 3956, 2002)
|
||||
sharding.CheckRunningBinlogPlayer(t, *shard22.MasterTablet(), 4048, 2002)
|
||||
} else {
|
||||
sharding.CheckRunningBinlogPlayer(t, *shard21.MasterTablet(), 3954, 2000)
|
||||
sharding.CheckRunningBinlogPlayer(t, *shard22.MasterTablet(), 4046, 2000)
|
||||
}
|
||||
|
||||
// check we can't migrate the master just yet
|
||||
err = ClusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedTypes", shard1Ks, "master")
|
||||
|
|
|

@@ -278,6 +278,10 @@ func TestResharding(t *testing.T, useVarbinaryShardingKeyType bool) {
    //Start Tablets and Wait for the Process
    for _, shard := range clusterInstance.Keyspaces[0].Shards {
        for _, tablet := range shard.Vttablets {
            // Init Tablet
            err := clusterInstance.VtctlclientProcess.InitTablet(tablet, tablet.Cell, keyspaceName, hostname, shard.Name)
            assert.Nil(t, err)

            // Start the tablet
            err = tablet.VttabletProcess.Setup()
            assert.Nil(t, err)

@@ -72,12 +72,12 @@ select name, count(*) from user group by name /* scatter aggregate */
1 ks_sharded/c0-: select name, count(*) from user group by name limit 10001 /* scatter aggregate */

----------------------------------------------------------------------
select 1, "hello", 3.14 from user limit 10 /* select constant sql values */
select 1, "hello", 3.14, null from user limit 10 /* select constant sql values */

1 ks_sharded/-40: select 1, 'hello', 3.14 from user limit 10 /* select constant sql values */
1 ks_sharded/40-80: select 1, 'hello', 3.14 from user limit 10 /* select constant sql values */
1 ks_sharded/80-c0: select 1, 'hello', 3.14 from user limit 10 /* select constant sql values */
1 ks_sharded/c0-: select 1, 'hello', 3.14 from user limit 10 /* select constant sql values */
1 ks_sharded/-40: select 1, 'hello', 3.14, null from user limit 10 /* select constant sql values */
1 ks_sharded/40-80: select 1, 'hello', 3.14, null from user limit 10 /* select constant sql values */
1 ks_sharded/80-c0: select 1, 'hello', 3.14, null from user limit 10 /* select constant sql values */
1 ks_sharded/c0-: select 1, 'hello', 3.14, null from user limit 10 /* select constant sql values */

----------------------------------------------------------------------
select * from (select id from user) s /* scatter paren select */

@@ -11,7 +11,7 @@ select count(*) from user where id = 1 /* point aggregate */;
select count(*) from user where name in ('alice','bob') /* scatter aggregate */;
select name, count(*) from user group by name /* scatter aggregate */;

select 1, "hello", 3.14 from user limit 10 /* select constant sql values */;
select 1, "hello", 3.14, null from user limit 10 /* select constant sql values */;
select * from (select id from user) s /* scatter paren select */;

select name from user where id = (select id from t1) /* non-correlated subquery as value */;

@@ -628,6 +628,9 @@ func inferColTypeFromExpr(node sqlparser.Expr, colTypeMap map[string]querypb.Typ
        colNames, colTypes = inferColTypeFromExpr(node.Expr, colTypeMap, colNames, colTypes)
    case *sqlparser.CaseExpr:
        colNames, colTypes = inferColTypeFromExpr(node.Whens[0].Val, colTypeMap, colNames, colTypes)
    case *sqlparser.NullVal:
        colNames = append(colNames, sqlparser.String(node))
        colTypes = append(colTypes, querypb.Type_NULL_TYPE)
    default:
        log.Errorf("vtexplain: unsupported select expression type +%v node %s", reflect.TypeOf(node), sqlparser.String(node))
    }

@@ -21,7 +21,6 @@ import (
    "fmt"
    "os"
    "testing"
    "time"

    "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv"
)

@@ -54,12 +53,6 @@ func TestMain(m *testing.M) {
        engine.Open(env.KeyspaceName, env.Cells[0])
        defer engine.Close()

        // GH actions machines sometimes exhibit multi-second delays on replication.
        // So, give a generously high value for heartbeat time for all tests.
        saveHeartbeat := HeartbeatTime
        defer func() { HeartbeatTime = saveHeartbeat }()
        HeartbeatTime = 10 * time.Second

        return m.Run()
    }()
    os.Exit(exitCode)

@@ -26,6 +26,7 @@ import (
    "github.com/golang/protobuf/proto"
    "vitess.io/vitess/go/mysql"
    "vitess.io/vitess/go/sqltypes"
    "vitess.io/vitess/go/sync2"
    "vitess.io/vitess/go/vt/binlog"
    "vitess.io/vitess/go/vt/log"
    "vitess.io/vitess/go/vt/sqlparser"

@@ -54,6 +55,14 @@ func NewVStreamer(ctx context.Context, cp *mysql.ConnParams, se *schema.Engine,
    return newVStreamer(ctx, cp, se, startPos, filter, &localVSchema{vschema: &vindexes.VSchema{}}, send)
}

// vschemaUpdateCount is for testing only.
// vstreamer is a mutex free data structure. So, it's not safe to access its members
// from a test. Since VSchema gets updated asynchronously, there's no way for a test
// to wait for it. Instead, the code that updates the vschema increments this atomic
// counter, which will let the tests poll for it to change.
// TODO(sougou): find a better way for this.
var vschemaUpdateCount sync2.AtomicInt64

type vstreamer struct {
    ctx    context.Context
    cancel func()

@@ -237,6 +246,8 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog
            if err := vs.rebuildPlans(); err != nil {
                return err
            }
            // Increment this counter for testing.
            vschemaUpdateCount.Add(1)
        case <-ctx.Done():
            return nil
        case <-timer.C:
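
The comment block above describes the synchronization pattern these test changes rely on: the streamer bumps an atomic counter after every asynchronous vschema rebuild, and tests only poll that counter instead of reaching into the streamer's state. A minimal, self-contained sketch of that idiom (hypothetical names, not the actual vstreamer API) could look like this:

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    // updateCount plays the role of vschemaUpdateCount: the writer increments it
    // after each asynchronous update; readers only ever poll it.
    var updateCount int64

    // applyUpdate simulates an asynchronous rebuild that signals completion
    // by bumping the counter.
    func applyUpdate() {
        go func() {
            time.Sleep(20 * time.Millisecond)
            atomic.AddInt64(&updateCount, 1)
        }()
    }

    // waitForUpdate polls until the counter moves past prev or the retries run out.
    func waitForUpdate(prev int64) bool {
        for i := 0; i < 10; i++ {
            if atomic.LoadInt64(&updateCount) != prev {
                return true
            }
            time.Sleep(10 * time.Millisecond)
        }
        return false
    }

    func main() {
        prev := atomic.LoadInt64(&updateCount)
        applyUpdate()
        fmt.Println("update observed:", waitForUpdate(prev))
    }

The setVSchema helper added to vstreamer_test.go below uses the same poll-and-sleep loop.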

@@ -20,7 +20,10 @@ import (
    "fmt"
    "strings"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/net/context"

    "vitess.io/vitess/go/mysql"

@@ -237,9 +240,7 @@ func TestREKeyRange(t *testing.T) {
    })
    engine.se.Reload(context.Background())

    if err := env.SetVSchema(shardedVSchema); err != nil {
        t.Fatal(err)
    }
    setVSchema(t, shardedVSchema)
    defer env.SetVSchema("{}")

    ctx, cancel := context.WithCancel(context.Background())

@@ -298,9 +299,7 @@ func TestREKeyRange(t *testing.T) {
        }
    }
}`
    if err := env.SetVSchema(altVSchema); err != nil {
        t.Fatal(err)
    }
    setVSchema(t, altVSchema)

    // Only the first insert should be sent.
    input = []string{

@@ -331,9 +330,7 @@ func TestInKeyRangeMultiColumn(t *testing.T) {
    })
    engine.se.Reload(context.Background())

    if err := env.SetVSchema(multicolumnVSchema); err != nil {
        t.Fatal(err)
    }
    setVSchema(t, multicolumnVSchema)
    defer env.SetVSchema("{}")

    ctx, cancel := context.WithCancel(context.Background())

@@ -388,9 +385,7 @@ func TestREMultiColumnVindex(t *testing.T) {
    })
    engine.se.Reload(context.Background())

    if err := env.SetVSchema(multicolumnVSchema); err != nil {
        t.Fatal(err)
    }
    setVSchema(t, multicolumnVSchema)
    defer env.SetVSchema("{}")

    ctx, cancel := context.WithCancel(context.Background())

@@ -1074,6 +1069,20 @@ func TestStatementMode(t *testing.T) {
    runCases(t, nil, testcases, "")
}

func TestHeartbeat(t *testing.T) {
    if testing.Short() {
        t.Skip()
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    ch := startStream(ctx, t, nil, "")
    evs := <-ch
    require.Equal(t, 1, len(evs))
    assert.Equal(t, binlogdatapb.VEventType_HEARTBEAT, evs[0].Type)
}

func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase, postion string) {
    t.Helper()
    ctx, cancel := context.WithCancel(context.Background())

@@ -1101,15 +1110,26 @@ func expectLog(ctx context.Context, t *testing.T, input interface{}, ch <-chan [
    t.Helper()
    for _, wantset := range output {
        var evs []*binlogdatapb.VEvent
        var ok bool
        for {
            select {
            case evs, ok = <-ch:
            case allevs, ok := <-ch:
                if !ok {
                    t.Fatal("stream ended early")
                }
                for _, ev := range allevs {
                    // Ignore spurious heartbeats that can happen on slow machines.
                    if ev.Type == binlogdatapb.VEventType_HEARTBEAT {
                        continue
                    }
                    evs = append(evs, ev)
                }
            case <-ctx.Done():
                t.Fatal("stream ended early")
            }
            if len(evs) != 0 {
                break
            }
        }
        if len(wantset) != len(evs) {
            t.Fatalf("%v: evs\n%v, want\n%v", input, evs, wantset)
        }

@@ -1206,3 +1226,26 @@ func masterPosition(t *testing.T) string {
    }
    return mysql.EncodePosition(pos)
}

func setVSchema(t *testing.T, vschema string) {
    t.Helper()

    curCount := vschemaUpdates.Get()

    if err := env.SetVSchema(vschema); err != nil {
        t.Fatal(err)
    }

    // Wait for curCount to go up.
    updated := false
    for i := 0; i < 10; i++ {
        if vschemaUpdates.Get() != curCount {
            updated = true
            break
        }
        time.Sleep(10 * time.Millisecond)
    }
    if !updated {
        t.Error("vschema did not get updated")
    }
}

test.go (6 changed lines)

@@ -164,8 +164,14 @@ func (t *Test) hasAnyTag(want []string) bool {
func (t *Test) run(dir, dataDir string) ([]byte, error) {
    testCmd := t.Command
    if len(testCmd) == 0 {
        if strings.Contains(fmt.Sprintf("%v", t.File), ".go") {
            testCmd = []string{"tools/e2e_go_test.sh"}
            testCmd = append(testCmd, t.Args...)
            testCmd = append(testCmd, "--skip-build", "--keep-logs")
        } else {
            testCmd = []string{"test/" + t.File, "-v", "--skip-build", "--keep-logs"}
            testCmd = append(testCmd, t.Args...)
        }
    testCmd = append(testCmd, extraArgs...)
    if *docker {
        // Teardown is unnecessary since Docker kills everything.
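
With this change, a config entry whose File ends in .go is executed through the new shell wrapper instead of the Python test runner, with the package path taken from Args and the default flags appended. For example, for the "backup" entry added to test/config.json below, the constructed command should be roughly:

    tools/e2e_go_test.sh vitess.io/vitess/go/test/endtoend/backup --skip-build --keep-logs

plus whatever extraArgs are passed on the test.go command line.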

test/config.json (335 changed lines)

@@ -1,14 +1,5 @@
{
  "Tests": {
    "backup": {
      "File": "backup.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": []
    },
    "backup_only": {
      "File": "backup_only.py",
      "Args": [],

@@ -45,17 +36,6 @@
      "RetryMax": 0,
      "Tags": []
    },
    "binlog": {
      "File": "binlog.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 4,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "cache_invalidation": {
      "File": "cache_invalidation.py",
      "Args": [],

@@ -65,28 +45,6 @@
      "RetryMax": 0,
      "Tags": []
    },
    "cell_aliases": {
      "File": "cell_aliases.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "site_test"
      ]
    },
    "cell_no_aliases": {
      "File": "cell_no_aliases.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "site_test"
      ]
    },
    "check_make_parser": {
      "File": "",
      "Args": [],

@@ -116,37 +74,6 @@
      "RetryMax": 0,
      "Tags": []
    },
    "initial_sharding": {
      "File": "initial_sharding.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "initial_sharding_bytes": {
      "File": "initial_sharding_bytes.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "initial_sharding_multi": {
      "File": "initial_sharding_multi.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": []
    },
    "java": {
      "File": "",
      "Args": [],

@@ -172,17 +99,6 @@
        "site_test"
      ]
    },
    "keyspace": {
      "File": "keyspace_test.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "site_test"
      ]
    },
    "legacy_resharding": {
      "File": "legacy_resharding.py",
      "Args": [],

@@ -265,17 +181,6 @@
      "RetryMax": 0,
      "Tags": []
    },
    "mysqlctl": {
      "File": "mysqlctl.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "site_test"
      ]
    },
    "python_client": {
      "File": "python_client_test.py",
      "Args": [],

@@ -285,57 +190,6 @@
      "RetryMax": 0,
      "Tags": []
    },
    "recovery": {
      "File": "recovery.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 4,
      "RetryMax": 0,
      "Tags": []
    },
    "reparent": {
      "File": "reparent.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 2,
      "RetryMax": 0,
      "Tags": []
    },
    "resharding": {
      "File": "resharding.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "resharding_bytes": {
      "File": "resharding_bytes.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "resharding_rbr": {
      "File": "resharding_rbr.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "schema": {
      "File": "schema.py",
      "Args": [],

@@ -345,15 +199,6 @@
      "RetryMax": 0,
      "Tags": []
    },
    "sharded": {
      "File": "sharded.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": []
    },
    "sharded_recovery": {
      "File": "sharded_recovery.py",
      "Args": [],

@@ -363,17 +208,6 @@
      "RetryMax": 0,
      "Tags": []
    },
    "tabletmanager": {
      "File": "tabletmanager.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "site_test"
      ]
    },
    "tabletmanager_consul": {
      "File": "tabletmanager.py",
      "Args": [

@@ -476,28 +310,6 @@
      "RetryMax": 0,
      "Tags": []
    },
    "vertical_split": {
      "File": "vertical_split.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "vertical_split_rbr": {
      "File": "vertical_split_rbr.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "vschema": {
      "File": "vschema.py",
      "Args": [],

@@ -611,6 +423,153 @@
      "Shard": 3,
      "RetryMax": 0,
      "Tags": []
    },
    "backup": {
      "File": "backup.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/backup"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": []
    },
    "binlog": {
      "File": "binlog.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/binlog"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": []
    },
    "cellalias": {
      "File": "cellalias.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/cellalias"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": []
    },
    "initial_sharding": {
      "File": "initial_sharding.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/sharding/initialsharding/v3"],
      "Command": [],
      "Manual": false,
      "Shard": 12,
      "RetryMax": 0,
      "Tags": []
    },
    "initial_sharding_bytes": {
      "File": "initial_sharding_bytes.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/sharding/initialsharding/bytes"],
      "Command": [],
      "Manual": false,
      "Shard": 12,
      "RetryMax": 0,
      "Tags": []
    },
    "initial_sharding_multi": {
      "File": "initial_sharding_multi.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/sharding/initialsharding/multi"],
      "Command": [],
      "Manual": false,
      "Shard": 12,
      "RetryMax": 0,
      "Tags": []
    },
    "keyspace": {
      "File": "keyspace.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/keyspace"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": [
        "site_test"
      ]
    },
    "mysqlctl": {
      "File": "mysqlctl.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/mysqlctl"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": [
        "site_test"
      ]
    },
    "recovery": {
      "File": "recovery.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/recovery"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": []
    },
    "reparent": {
      "File": "reparent.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/reparent"],
      "Command": [],
      "Manual": false,
      "Shard": 13,
      "RetryMax": 0,
      "Tags": []
    },
    "resharding": {
      "File": "resharding.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/sharding/resharding/v3"],
      "Command": [],
      "Manual": false,
      "Shard": 13,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "resharding_bytes": {
      "File": "resharding_bytes.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/sharding/resharding/string"],
      "Command": [],
      "Manual": false,
      "Shard": 13,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    },
    "sharded": {
      "File": "sharded.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/sharded"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": []
    },
    "tabletmanager": {
      "File": "tabletmanager.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": [
        "site_test"
      ]
    },
    "vertical_split": {
      "File": "vertical_split.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/sharding/verticalsplit"],
      "Command": [],
      "Manual": false,
      "Shard": 11,
      "RetryMax": 0,
      "Tags": [
        "worker_test"
      ]
    }
  }
}

@@ -0,0 +1,4 @@
#!/bin/bash
source build.env
echo "running tests for $1"
go test -v $1 -count=1
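
This new four-line script (presumably the tools/e2e_go_test.sh wrapper referenced from test.go above) just sources build.env and runs go test against the package passed as its first argument, so it can also be invoked by hand, for example:

    ./tools/e2e_go_test.sh vitess.io/vitess/go/test/endtoend/mysqlctl

Any additional arguments, such as the --skip-build and --keep-logs flags appended by test.go, are not used by the script itself since only $1 is referenced.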

@@ -62,7 +62,7 @@ for pkg in $flaky_tests; do
  max_attempts=3
  attempt=1
  # Set a timeout because some tests may deadlock when they flake.
  until go test -timeout 30s $VT_GO_PARALLEL $pkg -count=1; do
  until go test -timeout 2m $VT_GO_PARALLEL $pkg -count=1; do
    echo "FAILED (try $attempt/$max_attempts) in $pkg (return code $?). See above for errors."
    if [ $((++attempt)) -gt $max_attempts ]; then
      echo "ERROR: Flaky Go unit tests in package $pkg failed too often (after $max_attempts retries). Please reduce the flakiness."