Mirror of https://github.com/github/vitess-gh.git
Fix vtbackup upgrade/downgrade test (#12437)
* adding check for v16 for upgrade/downgrade backup tests
* removing TestReparentDoesntHangIfPrimaryFails
* Removing keep-data from upgrade/downgrade workflow
* setting -keep-data=false in workflows

---------

Signed-off-by: Rameez Sajwani <rameezwazirali@hotmail.com>
This commit is contained in:
Parent: c35032feee
Commit: 911f246149
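All of the Go changes in this commit gate test behavior on the major version of the vttablet binary, so the same test sources can run against both sides of an upgrade/downgrade pair. A minimal sketch of that pattern, built only from the VtTabletMajorVersion field and the StartVTOrc helper that appear in the hunks below (the surrounding scaffolding is illustrative, not the commit's code):

    // v16+ tablets no longer run the replication manager, so the test
    // cluster must start VTOrc to manage replication instead.
    if localCluster.VtTabletMajorVersion >= 16 {
        if err := localCluster.StartVTOrc(keyspaceName); err != nil {
            return 1, err
        }
    }
    // Conversely, checks against _vt.local_metadata apply only to
    // pre-v16 tablets, where that table still exists.
    if localCluster.VtTabletMajorVersion <= 15 {
        result, err := replica2.VttabletProcess.QueryTabletWithDB("select * from local_metadata", "_vt")
        require.Nil(t, err)
        require.NotEmpty(t, result.Rows)
    }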
@@ -188,4 +188,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 set -x
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_backups
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_backups
@@ -191,4 +191,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 set -x
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_backups
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_backups
@@ -174,7 +174,7 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
 
 # Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n
 - name: Use last release's VTGate
@@ -194,7 +194,7 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
 
 # Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n-1
 - name: Use current version VTGate, and other version VTTablet
@@ -216,4 +216,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
@@ -177,7 +177,7 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
 
 # Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n
 - name: Use next release's VTGate
@@ -197,7 +197,7 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
 
 # Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n+1
 - name: Use current version VTGate, and other version VTTablet
@@ -219,4 +219,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
@@ -174,7 +174,7 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
 
 # Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n
 - name: Use last release's VTGate
@@ -194,7 +194,7 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
 
 # Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n-1
 - name: Use current version VTGate, and other version VTTablet
@@ -216,4 +216,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
@@ -177,7 +177,7 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
 
 # Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n
 - name: Use next release's VTGate
@@ -197,7 +197,7 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
 
 # Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n+1
 - name: Use current version VTGate, and other version VTTablet
@@ -219,4 +219,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
@@ -191,4 +191,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
@@ -188,4 +188,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
@@ -188,4 +188,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
@@ -185,4 +185,4 @@ jobs:
 mkdir -p /tmp/vtdataroot
 
 source build.env
-eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
@@ -66,6 +66,9 @@ func TestTabletInitialBackup(t *testing.T) {
 	// Restore the Tablets
 	restore(t, primary, "replica", "NOT_SERVING")
-	err := localCluster.VtctlclientProcess.ExecuteCommand(
+	err := localCluster.VtctlclientProcess.ExecuteCommand(
+		"SetReadWrite", primary.Alias)
+	require.Nil(t, err)
+	err = localCluster.VtctlclientProcess.ExecuteCommand(
 		"TabletExternallyReparented", primary.Alias)
 	require.Nil(t, err)
 	restore(t, replica1, "replica", "SERVING")
@@ -152,17 +155,19 @@ func firstBackupTest(t *testing.T, tabletType string) {
 	cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
 
-	// check that the restored replica has the right local_metadata
-	result, err := replica2.VttabletProcess.QueryTabletWithDB("select * from local_metadata", "_vt")
-	require.Nil(t, err)
-	require.NotNil(t, result)
-	require.NotEmpty(t, result.Rows)
-	assert.Equal(t, replica2.Alias, result.Rows[0][1].ToString(), "Alias")
-	assert.Equal(t, "ks.0", result.Rows[1][1].ToString(), "ClusterAlias")
-	assert.Equal(t, cell, result.Rows[2][1].ToString(), "DataCenter")
-	if tabletType == "replica" {
-		assert.Equal(t, "neutral", result.Rows[3][1].ToString(), "PromotionRule")
-	} else {
-		assert.Equal(t, "must_not", result.Rows[3][1].ToString(), "PromotionRule")
-	}
+	if localCluster.VtTabletMajorVersion <= 15 {
+		// check that the restored replica has the right local_metadata
+		result, err := replica2.VttabletProcess.QueryTabletWithDB("select * from local_metadata", "_vt")
+		require.Nil(t, err)
+		require.NotNil(t, result)
+		require.NotEmpty(t, result.Rows)
+		assert.Equal(t, replica2.Alias, result.Rows[0][1].ToString(), "Alias")
+		assert.Equal(t, "ks.0", result.Rows[1][1].ToString(), "ClusterAlias")
+		assert.Equal(t, cell, result.Rows[2][1].ToString(), "DataCenter")
+		if tabletType == "replica" {
+			assert.Equal(t, "neutral", result.Rows[3][1].ToString(), "PromotionRule")
+		} else {
+			assert.Equal(t, "must_not", result.Rows[3][1].ToString(), "PromotionRule")
+		}
+	}
 
 	removeBackups(t)
@@ -135,6 +135,16 @@ func TestMain(m *testing.M) {
 		}
 	}
 
+	if localCluster.VtTabletMajorVersion >= 16 {
+		// If vttablets are any lower than version 16, then they are running the replication manager.
+		// Running VTOrc and replication manager sometimes creates the situation where VTOrc has set up semi-sync on the primary,
+		// but the replication manager starts replication on the replica without setting semi-sync. This hangs the primary.
+		// Even if VTOrc fixes it, since there is no ongoing traffic, the state remains blocked.
+		if err := localCluster.StartVTOrc(keyspaceName); err != nil {
+			return 1, err
+		}
+	}
+
 	return m.Run(), nil
 }()
 
@@ -251,6 +251,22 @@ func (cluster *LocalProcessCluster) StartTopo() (err error) {
 	return
 }
 
+// StartVTOrc starts a VTOrc instance
+func (cluster *LocalProcessCluster) StartVTOrc(keyspace string) error {
+	// Start vtorc
+	vtorcProcess := cluster.NewVTOrcProcess(VTOrcConfiguration{})
+	err := vtorcProcess.Setup()
+	if err != nil {
+		log.Error(err.Error())
+		return err
+	}
+	if keyspace != "" {
+		vtorcProcess.ExtraArgs = append(vtorcProcess.ExtraArgs, fmt.Sprintf(`--clusters_to_watch="%s"`, keyspace))
+	}
+	cluster.VTOrcProcesses = append(cluster.VTOrcProcesses, vtorcProcess)
+	return nil
+}
+
 // StartUnshardedKeyspace starts unshared keyspace with shard name as "0"
 func (cluster *LocalProcessCluster) StartUnshardedKeyspace(keyspace Keyspace, replicaCount int, rdonly bool) error {
 	return cluster.StartKeyspace(keyspace, []string{"0"}, replicaCount, rdonly)
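A hedged usage sketch of the new helper, mirroring how the TestMain change above calls it (the keyspace name here is illustrative; an empty string skips the --clusters_to_watch flag entirely):

    // Scope VTOrc to the test keyspace; pass "" to watch all keyspaces.
    if err := localCluster.StartVTOrc("ks"); err != nil {
        return 1, err
    }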
@@ -117,17 +117,25 @@ func (vttablet *VttabletProcess) Setup() (err error) {
 	if vttablet.SupportsBackup {
 		vttablet.proc.Args = append(vttablet.proc.Args, "--restore_from_backup")
 	}
+	var majorVersion int
+	majorVersion, err = GetMajorVersion("vttablet")
+	if err != nil {
+		return err
+	}
+
 	if vttablet.EnableSemiSync {
-		var majorVersion int
-		majorVersion, err = GetMajorVersion("vttablet")
-		if err != nil {
-			return err
-		}
 		// enable_semi_sync is removed in v16 and shouldn't be set on any release v16+
 		if majorVersion <= 15 {
 			vttablet.proc.Args = append(vttablet.proc.Args, "--enable_semi_sync")
 		}
 	}
+
+	// enable_semi_sync is removed in v16 and shouldn't be set on any release v16+
+	if majorVersion >= 16 {
+		disableReplicationFlag := "--disable-replication-manager"
+		vttablet.proc.Args = append(vttablet.proc.Args, disableReplicationFlag)
+	}
 
 	if vttablet.DbFlavor != "" {
 		vttablet.proc.Args = append(vttablet.proc.Args, fmt.Sprintf("--db_flavor=%s", vttablet.DbFlavor))
 	}
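For reference, the net effect of this hunk on a backup-capable v16+ tablet's command line is roughly the following; both flags are taken verbatim from the hunk above, and the remaining arguments are elided:

    vttablet --restore_from_backup --disable-replication-manager ...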
@@ -362,6 +362,11 @@ func TestReparentDoesntHangIfPrimaryFails(t *testing.T) {
 	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
+
+	// This test is no longer valid post v16
+	if clusterInstance.VtTabletMajorVersion >= 16 {
+		t.Skip("Skipping TestReparentDoesntHangIfPrimaryFails in CI environment for v16")
+	}
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
 
 	// Change the schema of the _vt.reparent_journal table, so that
@@ -369,7 +374,6 @@ func TestReparentDoesntHangIfPrimaryFails(t *testing.T) {
 	_, err := tablets[0].VttabletProcess.QueryTabletWithDB(
 		"ALTER TABLE reparent_journal DROP COLUMN replication_position", "_vt")
 	require.NoError(t, err)
-
 	// Perform a planned reparent operation, the primary will fail the
 	// insert. The replicas should then abort right away.
 	out, err := utils.Prs(t, clusterInstance, tablets[1])