зеркало из https://github.com/github/vitess-gh.git
Use GTIDSet to specify replication position.
This commit is contained in:
Родитель
1c48f52c72
Коммит
5b8415ec87
|
@ -4,21 +4,21 @@
|
|||
{"Category": 6, "Sql": "SET TIMESTAMP=1382160043"},
|
||||
{"Category": 5, "Sql": "create database sougou"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/5"
|
||||
"GTIDField": "GoogleMysql/41983-5"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
{"Category": 6, "Sql": "SET TIMESTAMP=1382160061"},
|
||||
{"Category": 5, "Sql": "create table a(id int, data varchar(128))"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/6"
|
||||
"GTIDField": "GoogleMysql/41983-6"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
{"Category": 6, "Sql": "SET TIMESTAMP=1382160107"},
|
||||
{"Category": 4, "Sql": "insert into a values(1, 'aa')"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/7"
|
||||
"GTIDField": "GoogleMysql/41983-7"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
|
@ -27,49 +27,49 @@
|
|||
{"Category": 6, "Sql": "SET TIMESTAMP=1382231687"},
|
||||
{"Category": 4, "Sql": "insert into a values(3, 'cc')"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/8"
|
||||
"GTIDField": "GoogleMysql/41983-8"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
{"Category": 6, "Sql": "SET TIMESTAMP=1382231687"},
|
||||
{"Category": 4, "Sql": "insert into a values(4, 'dd')"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/9"
|
||||
"GTIDField": "GoogleMysql/41983-9"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
{"Category": 6, "Sql": "SET TIMESTAMP=1382231687"},
|
||||
{"Category": 4, "Sql": "insert into a values(5, 'ee')"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/10"
|
||||
"GTIDField": "GoogleMysql/41983-10"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
{"Category": 6, "Sql": "SET TIMESTAMP=1382232554"},
|
||||
{"Category": 4, "Sql": "insert into a\nvalues(6, 'ff')"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/11"
|
||||
"GTIDField": "GoogleMysql/41983-11"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
{"Category": 6, "Sql": "SET TIMESTAMP=1382251656"},
|
||||
{"Category": 4, "Sql": "insert into a values(7, 'gg')"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/12"
|
||||
"GTIDField": "GoogleMysql/41983-12"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
{"Category": 6, "Sql": "SET TIMESTAMP=1382330065"},
|
||||
{"Category": 5, "Sql": "alter table a add constraint primary key(id)"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/13"
|
||||
"GTIDField": "GoogleMysql/41983-13"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
{"Category": 6, "Sql": "SET TIMESTAMP=1382330121"},
|
||||
{"Category": 4, "Sql": "UPDATE a set data='ggg' where id = 7"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/14"
|
||||
"GTIDField": "GoogleMysql/41983-14"
|
||||
},
|
||||
{
|
||||
"Statements": [
|
||||
|
@ -78,6 +78,6 @@
|
|||
{"Category": 6, "Sql": "SET TIMESTAMP=1382330252"},
|
||||
{"Category": 4, "Sql": "insert into a values(9, 'ii')"}
|
||||
],
|
||||
"GTIDField": "GoogleMysql/15"
|
||||
"GTIDField": "GoogleMysql/41983-15"
|
||||
}
|
||||
]
|
||||
|
|
|
@ -112,7 +112,7 @@ func main() {
|
|||
|
||||
type rTablet struct {
|
||||
*topo.TabletInfo
|
||||
*myproto.ReplicationPosition
|
||||
*myproto.ReplicationStatus
|
||||
}
|
||||
|
||||
type rTablets []*rTablet
|
||||
|
@ -128,10 +128,10 @@ func (rts rTablets) Less(i, j int) bool {
|
|||
l, r := rts[j], rts[i]
|
||||
// l or r ReplicationPosition would be nil if we failed to get
|
||||
// the position (put them at the beginning of the list)
|
||||
if l.ReplicationPosition == nil {
|
||||
return r.ReplicationPosition != nil
|
||||
if l.ReplicationStatus == nil {
|
||||
return r.ReplicationStatus != nil
|
||||
}
|
||||
if r.ReplicationPosition == nil {
|
||||
if r.ReplicationStatus == nil {
|
||||
return false
|
||||
}
|
||||
var lTypeMaster, rTypeMaster int
|
||||
|
@ -145,22 +145,18 @@ func (rts rTablets) Less(i, j int) bool {
|
|||
return true
|
||||
}
|
||||
if lTypeMaster == rTypeMaster {
|
||||
if l.MapKeyIo() < r.MapKeyIo() {
|
||||
return true
|
||||
}
|
||||
if l.MapKeyIo() == r.MapKeyIo() {
|
||||
if l.MapKey() < r.MapKey() {
|
||||
return true
|
||||
}
|
||||
if l.IOPosition.Equal(r.IOPosition) {
|
||||
return !l.Position.AtLeast(r.Position)
|
||||
}
|
||||
return !l.IOPosition.AtLeast(r.IOPosition)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func sortReplicatingTablets(tablets []*topo.TabletInfo, positions []*myproto.ReplicationPosition) []*rTablet {
|
||||
func sortReplicatingTablets(tablets []*topo.TabletInfo, stats []*myproto.ReplicationStatus) []*rTablet {
|
||||
rtablets := make([]*rTablet, len(tablets))
|
||||
for i, pos := range positions {
|
||||
rtablets[i] = &rTablet{tablets[i], pos}
|
||||
for i, status := range stats {
|
||||
rtablets[i] = &rTablet{tablets[i], status}
|
||||
}
|
||||
sort.Sort(rTablets(rtablets))
|
||||
return rtablets
|
||||
|
|
|
@ -110,8 +110,8 @@ Binlog player state: {{.State}}</br>
|
|||
<th>Index</th>
|
||||
<th>SourceShard</th>
|
||||
<th>State</th>
|
||||
<th>StopAtGTID</th>
|
||||
<th>LastGTID</th>
|
||||
<th>StopPosition</th>
|
||||
<th>LastPosition</th>
|
||||
<th>SecondsBehindMaster</th>
|
||||
<th>Counts</th>
|
||||
<th>Rates</th>
|
||||
|
@ -129,8 +129,8 @@ Binlog player state: {{.State}}</br>
|
|||
(from {{github_com_youtube_vitess_vtctld_tablet .SourceTablet.String}})
|
||||
{{end}}
|
||||
{{end}}</td>
|
||||
<td>{{if .StopAtGTID}}{{.StopAtGTID}}{{end}}</td>
|
||||
<td>{{.LastGTID}}</td>
|
||||
<td>{{if .StopPosition}}{{.StopPosition}}{{end}}</td>
|
||||
<td>{{.LastPosition}}</td>
|
||||
<td>{{.SecondsBehindMaster}}</td>
|
||||
<td>{{range $key, $value := .Counts}}<b>{{$key}}</b>: {{$value}}<br>{{end}}</td>
|
||||
<td>{{range $key, $values := .Rates}}<b>{{$key}}</b>: {{range $values}}{{.}} {{end}}<br>{{end}}</td>
|
||||
|
|
|
@ -9,13 +9,14 @@ import (
|
|||
"github.com/youtube/vitess/go/testfiles"
|
||||
"github.com/youtube/vitess/go/vt/binlog/proto"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl"
|
||||
myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
)
|
||||
|
||||
func BenchmarkFileStreamerParseEvents(b *testing.B) {
|
||||
filename := testfiles.Locate("binlog_test/vt-0000062347-bin.000001")
|
||||
var svm sync2.ServiceManager
|
||||
count := 0
|
||||
bls := newTestBinlogFileStreamer("vt_test_database", "", nil, func(tx *proto.BinlogTransaction) error {
|
||||
bls := newTestBinlogFileStreamer("vt_test_database", "", myproto.ReplicationPosition{}, func(tx *proto.BinlogTransaction) error {
|
||||
count++
|
||||
return nil
|
||||
})
|
||||
|
|
|
@ -23,7 +23,7 @@ type binlogConnStreamer struct {
|
|||
// dbname and mysqld are set at creation.
|
||||
dbname string
|
||||
mysqld *mysqlctl.Mysqld
|
||||
startPos myproto.GTID
|
||||
startPos myproto.ReplicationPosition
|
||||
sendTransaction sendTransactionFunc
|
||||
|
||||
conn *mysqlctl.SlaveConnection
|
||||
|
@ -35,7 +35,7 @@ type binlogConnStreamer struct {
|
|||
// newBinlogConnStreamer creates a BinlogStreamer.
|
||||
//
|
||||
// dbname specifes the db to stream events for.
|
||||
func newBinlogConnStreamer(dbname string, mysqld *mysqlctl.Mysqld, startPos myproto.GTID, sendTransaction sendTransactionFunc) BinlogStreamer {
|
||||
func newBinlogConnStreamer(dbname string, mysqld *mysqlctl.Mysqld, startPos myproto.ReplicationPosition, sendTransaction sendTransactionFunc) BinlogStreamer {
|
||||
return &binlogConnStreamer{
|
||||
dbname: dbname,
|
||||
mysqld: mysqld,
|
||||
|
@ -156,7 +156,7 @@ func (bls *binlogConnStreamer) parseEvents(ctx *sync2.ServiceContext, events <-c
|
|||
}
|
||||
}
|
||||
|
||||
switch true {
|
||||
switch {
|
||||
case ev.IsXID(): // XID_EVENT (equivalent to COMMIT)
|
||||
if err = commit(int64(ev.Timestamp())); err != nil {
|
||||
return err
|
||||
|
@ -195,8 +195,10 @@ func (bls *binlogConnStreamer) parseEvents(ctx *sync2.ServiceContext, events <-c
|
|||
statements = make([]proto.Statement, 0, 10)
|
||||
autocommit = false
|
||||
case proto.BL_ROLLBACK:
|
||||
// Rollbacks are possible under some circumstances. So, let's honor them
|
||||
// by sending an empty transaction, which will contain the new binlog position.
|
||||
// Rollbacks are possible under some circumstances. Since the stream
|
||||
// client keeps track of its replication position by updating the set
|
||||
// of GTIDs it's seen, we must commit an empty transaction so the client
|
||||
// can update its position.
|
||||
statements = nil
|
||||
fallthrough
|
||||
case proto.BL_COMMIT:
|
||||
|
|
|
@ -58,7 +58,7 @@ func TestBinlogConnStreamerParseEventsXID(t *testing.T) {
|
|||
},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
}
|
||||
var got []proto.BinlogTransaction
|
||||
|
@ -66,7 +66,7 @@ func TestBinlogConnStreamerParseEventsXID(t *testing.T) {
|
|||
got = append(got, *trans)
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -101,7 +101,7 @@ func TestBinlogConnStreamerParseEventsCommit(t *testing.T) {
|
|||
},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
}
|
||||
var got []proto.BinlogTransaction
|
||||
|
@ -109,7 +109,7 @@ func TestBinlogConnStreamerParseEventsCommit(t *testing.T) {
|
|||
got = append(got, *trans)
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -131,7 +131,7 @@ func TestBinlogConnStreamerStop(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
// Start parseEvents(), but don't send it anything, so it just waits.
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -171,7 +171,7 @@ func TestBinlogConnStreamerParseEventsClientEOF(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return io.EOF
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -196,7 +196,7 @@ func TestBinlogConnStreamerParseEventsServerEOF(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
svm := &sync2.ServiceManager{}
|
||||
svm.Go(func(ctx *sync2.ServiceContext) error {
|
||||
|
@ -226,7 +226,7 @@ func TestBinlogConnStreamerParseEventsSendErrorXID(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return fmt.Errorf("foobar")
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -258,7 +258,7 @@ func TestBinlogConnStreamerParseEventsSendErrorCommit(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return fmt.Errorf("foobar")
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -292,7 +292,7 @@ func TestBinlogConnStreamerParseEventsInvalid(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -328,7 +328,7 @@ func TestBinlogConnStreamerParseEventsInvalidFormat(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -360,7 +360,7 @@ func TestBinlogConnStreamerParseEventsNoFormat(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -396,7 +396,7 @@ func TestBinlogConnStreamerParseEventsInvalidQuery(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -433,7 +433,7 @@ func TestBinlogConnStreamerParseEventsRollback(t *testing.T) {
|
|||
Statements: nil,
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
proto.BinlogTransaction{
|
||||
Statements: []proto.Statement{
|
||||
|
@ -442,7 +442,7 @@ func TestBinlogConnStreamerParseEventsRollback(t *testing.T) {
|
|||
},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
}
|
||||
var got []proto.BinlogTransaction
|
||||
|
@ -450,7 +450,7 @@ func TestBinlogConnStreamerParseEventsRollback(t *testing.T) {
|
|||
got = append(got, *trans)
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -484,13 +484,13 @@ func TestBinlogConnStreamerParseEventsDMLWithoutBegin(t *testing.T) {
|
|||
},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
proto.BinlogTransaction{
|
||||
Statements: nil,
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
}
|
||||
var got []proto.BinlogTransaction
|
||||
|
@ -498,7 +498,7 @@ func TestBinlogConnStreamerParseEventsDMLWithoutBegin(t *testing.T) {
|
|||
got = append(got, *trans)
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -533,13 +533,13 @@ func TestBinlogConnStreamerParseEventsBeginWithoutCommit(t *testing.T) {
|
|||
},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
proto.BinlogTransaction{
|
||||
Statements: []proto.Statement{},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
}
|
||||
var got []proto.BinlogTransaction
|
||||
|
@ -547,7 +547,7 @@ func TestBinlogConnStreamerParseEventsBeginWithoutCommit(t *testing.T) {
|
|||
got = append(got, *trans)
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -584,7 +584,7 @@ func TestBinlogConnStreamerParseEventsSetInsertID(t *testing.T) {
|
|||
},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
}
|
||||
var got []proto.BinlogTransaction
|
||||
|
@ -592,7 +592,7 @@ func TestBinlogConnStreamerParseEventsSetInsertID(t *testing.T) {
|
|||
got = append(got, *trans)
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -628,7 +628,7 @@ func TestBinlogConnStreamerParseEventsInvalidIntVar(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -665,7 +665,7 @@ func TestBinlogConnStreamerParseEventsOtherDB(t *testing.T) {
|
|||
},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
}
|
||||
var got []proto.BinlogTransaction
|
||||
|
@ -673,7 +673,7 @@ func TestBinlogConnStreamerParseEventsOtherDB(t *testing.T) {
|
|||
got = append(got, *trans)
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -709,7 +709,7 @@ func TestBinlogConnStreamerParseEventsOtherDBBegin(t *testing.T) {
|
|||
},
|
||||
Timestamp: 1407805592,
|
||||
GTIDField: myproto.GTIDField{
|
||||
Value: myproto.GoogleGTID{GroupID: 0x0d}},
|
||||
Value: myproto.GoogleGTID{ServerID: 62344, GroupID: 0x0d}},
|
||||
},
|
||||
}
|
||||
var got []proto.BinlogTransaction
|
||||
|
@ -717,7 +717,7 @@ func TestBinlogConnStreamerParseEventsOtherDBBegin(t *testing.T) {
|
|||
got = append(got, *trans)
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
svm := &sync2.ServiceManager{}
|
||||
|
@ -747,7 +747,7 @@ func TestBinlogConnStreamerParseEventsBeginAgain(t *testing.T) {
|
|||
sendTransaction := func(trans *proto.BinlogTransaction) error {
|
||||
return nil
|
||||
}
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, nil, sendTransaction).(*binlogConnStreamer)
|
||||
bls := newBinlogConnStreamer("vt_test_keyspace", nil, myproto.ReplicationPosition{}, sendTransaction).(*binlogConnStreamer)
|
||||
before := binlogStreamerErrors.Counts()["ParseEvents"]
|
||||
|
||||
go sendTestEvents(events, input)
|
||||
|
|
|
@ -67,7 +67,7 @@ type binlogFileStreamer struct {
|
|||
dbname string
|
||||
dir string
|
||||
mysqld *mysqlctl.Mysqld
|
||||
gtid myproto.GTID
|
||||
startPos myproto.ReplicationPosition
|
||||
sendTransaction sendTransactionFunc
|
||||
|
||||
svm sync2.ServiceManager
|
||||
|
@ -81,12 +81,12 @@ type binlogFileStreamer struct {
|
|||
// newBinlogFileStreamer creates a BinlogStreamer.
|
||||
//
|
||||
// dbname specifes the db to stream events for.
|
||||
func newBinlogFileStreamer(dbname string, mysqld *mysqlctl.Mysqld, gtid myproto.GTID, sendTransaction sendTransactionFunc) BinlogStreamer {
|
||||
func newBinlogFileStreamer(dbname string, mysqld *mysqlctl.Mysqld, startPos myproto.ReplicationPosition, sendTransaction sendTransactionFunc) BinlogStreamer {
|
||||
return &binlogFileStreamer{
|
||||
dbname: dbname,
|
||||
dir: path.Dir(mysqld.Cnf().BinLogPath),
|
||||
mysqld: mysqld,
|
||||
gtid: gtid,
|
||||
startPos: startPos,
|
||||
sendTransaction: sendTransaction,
|
||||
}
|
||||
}
|
||||
|
@ -94,12 +94,12 @@ func newBinlogFileStreamer(dbname string, mysqld *mysqlctl.Mysqld, gtid myproto.
|
|||
// Stream implements BinlogStreamer.Stream().
|
||||
func (bls *binlogFileStreamer) Stream(ctx *sync2.ServiceContext) error {
|
||||
// Query mysqld to convert GTID to file & pos.
|
||||
rp, err := bls.mysqld.BinlogInfo(bls.gtid)
|
||||
fileName, filePos, err := bls.mysqld.BinlogInfo(bls.startPos)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to serve client request: error computing start position: %v", err)
|
||||
return fmt.Errorf("error computing start position: %v", err)
|
||||
}
|
||||
return bls.streamFilePos(ctx, rp.MasterLogFile, int64(rp.MasterLogPosition))
|
||||
return bls.streamFilePos(ctx, fileName, int64(filePos))
|
||||
}
|
||||
|
||||
// streamFilePos starts streaming events from a given file and position.
|
||||
|
@ -215,7 +215,10 @@ eventLoop:
|
|||
if values != nil {
|
||||
bls.blPos.ServerId = mustParseInt64(values[1])
|
||||
bls.file.Set(mustParseInt64(values[2]))
|
||||
bls.blPos.GTID = myproto.MustParseGTID(blsMysqlFlavor, string(values[3]))
|
||||
|
||||
// Make the fake Google GTID format we invented.
|
||||
gtid := string(values[1]) + "-" + string(values[3])
|
||||
bls.blPos.GTID = myproto.MustParseGTID(blsMysqlFlavor, gtid)
|
||||
continue
|
||||
}
|
||||
values = rotateRE.FindSubmatch(event)
|
||||
|
|
|
@ -290,10 +290,10 @@ type transaction struct {
|
|||
GTIDField myproto.GTIDField
|
||||
}
|
||||
|
||||
func newTestBinlogFileStreamer(dbname, binlogPath string, gtid myproto.GTID, sendTransaction sendTransactionFunc) *binlogFileStreamer {
|
||||
func newTestBinlogFileStreamer(dbname, binlogPath string, startPos myproto.ReplicationPosition, sendTransaction sendTransactionFunc) *binlogFileStreamer {
|
||||
return &binlogFileStreamer{
|
||||
dbname: dbname,
|
||||
gtid: gtid,
|
||||
startPos: startPos,
|
||||
dir: path.Dir(binlogPath),
|
||||
sendTransaction: sendTransaction,
|
||||
}
|
||||
|
@ -350,7 +350,7 @@ func TestStream(t *testing.T) {
|
|||
*/
|
||||
return nil
|
||||
}
|
||||
bls := newTestBinlogFileStreamer("db", testfiles.Locate("mysqlctl_test/vt-0000041983-bin"), nil, sendTx)
|
||||
bls := newTestBinlogFileStreamer("db", testfiles.Locate("mysqlctl_test/vt-0000041983-bin"), myproto.ReplicationPosition{}, sendTx)
|
||||
svm.Go(func(ctx *sync2.ServiceContext) error {
|
||||
return bls.streamFilePos(ctx, "vt-0000041983-bin.000001", 0)
|
||||
})
|
||||
|
@ -366,7 +366,7 @@ func TestRotation(t *testing.T) {
|
|||
defer cleanup(env)
|
||||
|
||||
svm := &sync2.ServiceManager{}
|
||||
bls := newTestBinlogFileStreamer("db", testfiles.Locate("mysqlctl_test/vt-0000041983-bin"), nil, func(tx *proto.BinlogTransaction) error {
|
||||
bls := newTestBinlogFileStreamer("db", testfiles.Locate("mysqlctl_test/vt-0000041983-bin"), myproto.ReplicationPosition{}, func(tx *proto.BinlogTransaction) error {
|
||||
// Launch as goroutine to prevent deadlock.
|
||||
go svm.Stop()
|
||||
return nil
|
||||
|
|
|
@ -33,10 +33,10 @@ var (
|
|||
)
|
||||
|
||||
// BinlogStreamer is an interface for requesting a stream of binlog events from
|
||||
// mysqld starting at a given GTID.
|
||||
// mysqld starting at a given position. A BinlogStreamer should only be used
|
||||
// once. To start another stream, call NewBinlogStreamer() again.
|
||||
type BinlogStreamer interface {
|
||||
// Stream starts streaming binlog events from a given GTID.
|
||||
// It calls sendTransaction() with the contens of each event.
|
||||
// Stream starts streaming binlog events using the settings from NewBinlogStreamer().
|
||||
Stream(ctx *sync2.ServiceContext) error
|
||||
}
|
||||
|
||||
|
@ -45,15 +45,17 @@ type BinlogStreamer interface {
|
|||
//
|
||||
// dbname specifes the db to stream events for.
|
||||
// mysqld is the local instance of mysqlctl.Mysqld.
|
||||
func NewBinlogStreamer(dbname string, mysqld *mysqlctl.Mysqld, gtid myproto.GTID, sendTransaction sendTransactionFunc) BinlogStreamer {
|
||||
// startPos is the position to start streaming at.
|
||||
// sendTransaction is called each time a transaction is committed or rolled back.
|
||||
func NewBinlogStreamer(dbname string, mysqld *mysqlctl.Mysqld, startPos myproto.ReplicationPosition, sendTransaction sendTransactionFunc) BinlogStreamer {
|
||||
fn := binlogStreamers[*binlogStreamer]
|
||||
if fn == nil {
|
||||
panic(fmt.Errorf("unknown BinlogStreamer implementation: %#v", *binlogStreamer))
|
||||
}
|
||||
return fn(dbname, mysqld, gtid, sendTransaction)
|
||||
return fn(dbname, mysqld, startPos, sendTransaction)
|
||||
}
|
||||
|
||||
type newBinlogStreamerFunc func(string, *mysqlctl.Mysqld, myproto.GTID, sendTransactionFunc) BinlogStreamer
|
||||
type newBinlogStreamerFunc func(string, *mysqlctl.Mysqld, myproto.ReplicationPosition, sendTransactionFunc) BinlogStreamer
|
||||
|
||||
// sendTransactionFunc is used to send binlog events.
|
||||
// reply is of type proto.BinlogTransaction.
|
||||
|
|
|
@ -18,13 +18,13 @@ func TestNewBinlogStreamer(t *testing.T) {
|
|||
|
||||
*binlogStreamer = "fake"
|
||||
binlogStreamers = map[string]newBinlogStreamerFunc{
|
||||
"fake": func(string, *mysqlctl.Mysqld, myproto.GTID, sendTransactionFunc) BinlogStreamer {
|
||||
"fake": func(string, *mysqlctl.Mysqld, myproto.ReplicationPosition, sendTransactionFunc) BinlogStreamer {
|
||||
triggered = true
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
NewBinlogStreamer("", nil, nil, nil)
|
||||
NewBinlogStreamer("", nil, myproto.ReplicationPosition{}, nil)
|
||||
|
||||
if !triggered {
|
||||
t.Errorf("NewBinlogStreamer() failed to call the right newBinlogStreamerFunc.")
|
||||
|
@ -47,7 +47,7 @@ func TestNewBinlogStreamerUnknown(t *testing.T) {
|
|||
}
|
||||
}()
|
||||
|
||||
NewBinlogStreamer("", nil, nil, nil)
|
||||
NewBinlogStreamer("", nil, myproto.ReplicationPosition{}, nil)
|
||||
}
|
||||
|
||||
func TestGetStatementCategory(t *testing.T) {
|
||||
|
|
|
@ -45,21 +45,21 @@ type BinlogPlayerStats struct {
|
|||
Rates *stats.Rates
|
||||
|
||||
// Last saved status
|
||||
lastGTID myproto.GTID
|
||||
lastGTIDMutex sync.RWMutex
|
||||
lastPosition myproto.ReplicationPosition
|
||||
lastPositionMutex sync.RWMutex
|
||||
SecondsBehindMaster sync2.AtomicInt64
|
||||
}
|
||||
|
||||
func (bps *BinlogPlayerStats) SetLastGTID(gtid myproto.GTID) {
|
||||
bps.lastGTIDMutex.Lock()
|
||||
defer bps.lastGTIDMutex.Unlock()
|
||||
bps.lastGTID = gtid
|
||||
func (bps *BinlogPlayerStats) SetLastPosition(pos myproto.ReplicationPosition) {
|
||||
bps.lastPositionMutex.Lock()
|
||||
defer bps.lastPositionMutex.Unlock()
|
||||
bps.lastPosition = pos
|
||||
}
|
||||
|
||||
func (bps *BinlogPlayerStats) GetLastGTID() myproto.GTID {
|
||||
bps.lastGTIDMutex.RLock()
|
||||
defer bps.lastGTIDMutex.RUnlock()
|
||||
return bps.lastGTID
|
||||
func (bps *BinlogPlayerStats) GetLastPosition() myproto.ReplicationPosition {
|
||||
bps.lastPositionMutex.RLock()
|
||||
defer bps.lastPositionMutex.RUnlock()
|
||||
return bps.lastPosition
|
||||
}
|
||||
|
||||
// NewBinlogPlayerStats creates a new BinlogPlayerStats structure
|
||||
|
@ -84,37 +84,37 @@ type BinlogPlayer struct {
|
|||
|
||||
// common to all
|
||||
blpPos proto.BlpPosition
|
||||
stopAtGTID myproto.GTID
|
||||
stopPosition myproto.ReplicationPosition
|
||||
blplStats *BinlogPlayerStats
|
||||
}
|
||||
|
||||
// NewBinlogPlayerKeyRange returns a new BinlogPlayer pointing at the server
|
||||
// replicating the provided keyrange, starting at the startPosition.GTID,
|
||||
// replicating the provided keyrange, starting at the startPosition,
|
||||
// and updating _vt.blp_checkpoint with uid=startPosition.Uid.
|
||||
// If stopAtGTID != nil, it will stop when reaching that GTID.
|
||||
func NewBinlogPlayerKeyRange(dbClient VtClient, addr string, keyspaceIdType key.KeyspaceIdType, keyRange key.KeyRange, startPosition *proto.BlpPosition, stopAtGTID myproto.GTID, blplStats *BinlogPlayerStats) *BinlogPlayer {
|
||||
// If !stopPosition.IsZero(), it will stop when reaching that position.
|
||||
func NewBinlogPlayerKeyRange(dbClient VtClient, addr string, keyspaceIdType key.KeyspaceIdType, keyRange key.KeyRange, startPosition *proto.BlpPosition, stopPosition myproto.ReplicationPosition, blplStats *BinlogPlayerStats) *BinlogPlayer {
|
||||
return &BinlogPlayer{
|
||||
addr: addr,
|
||||
dbClient: dbClient,
|
||||
keyspaceIdType: keyspaceIdType,
|
||||
keyRange: keyRange,
|
||||
blpPos: *startPosition,
|
||||
stopAtGTID: stopAtGTID,
|
||||
stopPosition: stopPosition,
|
||||
blplStats: blplStats,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBinlogPlayerTables returns a new BinlogPlayer pointing at the server
|
||||
// replicating the provided tables, starting at the startPosition.GTID,
|
||||
// replicating the provided tables, starting at the startPosition,
|
||||
// and updating _vt.blp_checkpoint with uid=startPosition.Uid.
|
||||
// If stopAtGTID != nil, it will stop when reaching that GTID.
|
||||
func NewBinlogPlayerTables(dbClient VtClient, addr string, tables []string, startPosition *proto.BlpPosition, stopAtGTID myproto.GTID, blplStats *BinlogPlayerStats) *BinlogPlayer {
|
||||
// If !stopPosition.IsZero(), it will stop when reaching that position.
|
||||
func NewBinlogPlayerTables(dbClient VtClient, addr string, tables []string, startPosition *proto.BlpPosition, stopPosition myproto.ReplicationPosition, blplStats *BinlogPlayerStats) *BinlogPlayer {
|
||||
return &BinlogPlayer{
|
||||
addr: addr,
|
||||
dbClient: dbClient,
|
||||
tables: tables,
|
||||
blpPos: *startPosition,
|
||||
stopAtGTID: stopAtGTID,
|
||||
stopPosition: stopPosition,
|
||||
blplStats: blplStats,
|
||||
}
|
||||
}
|
||||
|
@ -131,8 +131,8 @@ func NewBinlogPlayerTables(dbClient VtClient, addr string, tables []string, star
|
|||
func (blp *BinlogPlayer) writeRecoveryPosition(tx *proto.BinlogTransaction) error {
|
||||
now := time.Now().Unix()
|
||||
|
||||
blp.blpPos.GTIDField = tx.GTIDField
|
||||
updateRecovery := UpdateBlpCheckpoint(blp.blpPos.Uid, tx.GTIDField.Value, now, tx.Timestamp)
|
||||
blp.blpPos.Position = myproto.AppendGTID(blp.blpPos.Position, tx.GTIDField.Value)
|
||||
updateRecovery := UpdateBlpCheckpoint(blp.blpPos.Uid, blp.blpPos.Position, now, tx.Timestamp)
|
||||
|
||||
qr, err := blp.exec(updateRecovery)
|
||||
if err != nil {
|
||||
|
@ -141,7 +141,7 @@ func (blp *BinlogPlayer) writeRecoveryPosition(tx *proto.BinlogTransaction) erro
|
|||
if qr.RowsAffected != 1 {
|
||||
return fmt.Errorf("Cannot update blp_recovery table, affected %v rows", qr.RowsAffected)
|
||||
}
|
||||
blp.blplStats.SetLastGTID(tx.GTIDField.Value)
|
||||
blp.blplStats.SetLastPosition(blp.blpPos.Position)
|
||||
if tx.Timestamp != 0 {
|
||||
blp.blplStats.SecondsBehindMaster.Set(now - tx.Timestamp)
|
||||
}
|
||||
|
@ -159,13 +159,13 @@ func ReadStartPosition(dbClient VtClient, uid uint32) (*proto.BlpPosition, strin
|
|||
if qr.RowsAffected != 1 {
|
||||
return nil, "", fmt.Errorf("checkpoint information not available in db for %v", uid)
|
||||
}
|
||||
gtid, err := myproto.DecodeGTID(qr.Rows[0][0].String())
|
||||
pos, err := myproto.DecodeReplicationPosition(qr.Rows[0][0].String())
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return &proto.BlpPosition{
|
||||
Uid: uid,
|
||||
GTIDField: myproto.GTIDField{Value: gtid},
|
||||
Position: pos,
|
||||
}, string(qr.Rows[0][1].Raw()), nil
|
||||
}
|
||||
|
||||
|
@ -218,7 +218,7 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(interrupted chan struct{}) error {
|
|||
log.Infof("BinlogPlayer client %v for tables %v starting @ '%v', server: %v",
|
||||
blp.blpPos.Uid,
|
||||
blp.tables,
|
||||
blp.blpPos.GTIDField,
|
||||
blp.blpPos.Position,
|
||||
blp.addr,
|
||||
)
|
||||
} else {
|
||||
|
@ -226,26 +226,21 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(interrupted chan struct{}) error {
|
|||
blp.blpPos.Uid,
|
||||
blp.keyRange.Start.Hex(),
|
||||
blp.keyRange.End.Hex(),
|
||||
blp.blpPos.GTIDField,
|
||||
blp.blpPos.Position,
|
||||
blp.addr,
|
||||
)
|
||||
}
|
||||
if blp.stopAtGTID != nil {
|
||||
if !blp.stopPosition.IsZero() {
|
||||
// We need to stop at some point. Sanity check the point.
|
||||
//
|
||||
// TODO(enisoc): Stop trying to compare GTIDs, since we can't do that
|
||||
// reliably for some MySQL flavors.
|
||||
cmp, err := blp.blpPos.GTIDField.Value.TryCompare(blp.stopAtGTID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cmp > 0 { // blp.blpPos.GTIDField.Value > blp.stopAtGTID
|
||||
return fmt.Errorf("starting point %v greater than stopping point %v", blp.blpPos.GTIDField, blp.stopAtGTID)
|
||||
} else if cmp == 0 { // blp.blpPos.GTIDField.Value == blp.stopAtGTID
|
||||
log.Infof("Not starting BinlogPlayer, we're already at the desired position %v", blp.stopAtGTID)
|
||||
switch {
|
||||
case blp.blpPos.Position.Equal(blp.stopPosition):
|
||||
log.Infof("Not starting BinlogPlayer, we're already at the desired position %v", blp.stopPosition)
|
||||
return nil
|
||||
case blp.blpPos.Position.AtLeast(blp.stopPosition):
|
||||
return fmt.Errorf("starting point %v greater than stopping point %v", blp.blpPos.Position, blp.stopPosition)
|
||||
default:
|
||||
log.Infof("Will stop player when reaching %v", blp.stopPosition)
|
||||
}
|
||||
log.Infof("Will stop player when reaching %v", blp.stopAtGTID)
|
||||
}
|
||||
|
||||
binlogPlayerClientFactory, ok := binlogPlayerClientFactories[*binlogPlayerProtocol]
|
||||
|
@ -265,14 +260,14 @@ func (blp *BinlogPlayer) ApplyBinlogEvents(interrupted chan struct{}) error {
|
|||
if len(blp.tables) > 0 {
|
||||
req := &proto.TablesRequest{
|
||||
Tables: blp.tables,
|
||||
GTIDField: blp.blpPos.GTIDField,
|
||||
Position: blp.blpPos.Position,
|
||||
}
|
||||
resp = blplClient.StreamTables(req, responseChan)
|
||||
} else {
|
||||
req := &proto.KeyRangeRequest{
|
||||
KeyspaceIdType: blp.keyspaceIdType,
|
||||
KeyRange: blp.keyRange,
|
||||
GTIDField: blp.blpPos.GTIDField,
|
||||
Position: blp.blpPos.Position,
|
||||
}
|
||||
resp = blplClient.StreamKeyRange(req, responseChan)
|
||||
}
|
||||
|
@ -290,13 +285,8 @@ processLoop:
|
|||
return fmt.Errorf("Error in processing binlog event %v", err)
|
||||
}
|
||||
if ok {
|
||||
if blp.stopAtGTID != nil {
|
||||
// TODO(enisoc): Stop trying to compare GTIDs.
|
||||
cmp, err := blp.blpPos.GTIDField.Value.TryCompare(blp.stopAtGTID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cmp >= 0 { // blp.blpPos.GTIDField.Value >= blp.stopAtGTID
|
||||
if !blp.stopPosition.IsZero() {
|
||||
if blp.blpPos.Position.AtLeast(blp.stopPosition) {
|
||||
log.Infof("Reached stopping position, done playing logs")
|
||||
return nil
|
||||
}
|
||||
|
@ -323,7 +313,7 @@ func CreateBlpCheckpoint() []string {
|
|||
"CREATE DATABASE IF NOT EXISTS _vt",
|
||||
`CREATE TABLE IF NOT EXISTS _vt.blp_checkpoint (
|
||||
source_shard_uid INT(10) UNSIGNED NOT NULL,
|
||||
gtid VARCHAR(250) DEFAULT NULL,
|
||||
pos VARCHAR(250) DEFAULT NULL,
|
||||
time_updated BIGINT UNSIGNED NOT NULL,
|
||||
transaction_timestamp BIGINT UNSIGNED NOT NULL,
|
||||
flags VARCHAR(250) DEFAULT NULL,
|
||||
|
@ -332,33 +322,33 @@ func CreateBlpCheckpoint() []string {
|
|||
|
||||
// PopulateBlpCheckpoint returns a statement to populate the first value into
|
||||
// the _vt.blp_checkpoint table.
|
||||
func PopulateBlpCheckpoint(index uint32, gtid myproto.GTID, timeUpdated int64, flags string) string {
|
||||
func PopulateBlpCheckpoint(index uint32, pos myproto.ReplicationPosition, timeUpdated int64, flags string) string {
|
||||
return fmt.Sprintf("INSERT INTO _vt.blp_checkpoint "+
|
||||
"(source_shard_uid, gtid, time_updated, transaction_timestamp, flags) "+
|
||||
"(source_shard_uid, pos, time_updated, transaction_timestamp, flags) "+
|
||||
"VALUES (%v, '%v', %v, 0, '%v')",
|
||||
index, myproto.EncodeGTID(gtid), timeUpdated, flags)
|
||||
index, myproto.EncodeReplicationPosition(pos), timeUpdated, flags)
|
||||
}
|
||||
|
||||
// UpdateBlpCheckpoint returns a statement to update a value in the
|
||||
// _vt.blp_checkpoint table.
|
||||
func UpdateBlpCheckpoint(uid uint32, gtid myproto.GTID, timeUpdated int64, txTimestamp int64) string {
|
||||
func UpdateBlpCheckpoint(uid uint32, pos myproto.ReplicationPosition, timeUpdated int64, txTimestamp int64) string {
|
||||
if txTimestamp != 0 {
|
||||
return fmt.Sprintf(
|
||||
"UPDATE _vt.blp_checkpoint "+
|
||||
"SET gtid='%v', time_updated=%v, transaction_timestamp=%v "+
|
||||
"SET pos='%v', time_updated=%v, transaction_timestamp=%v "+
|
||||
"WHERE source_shard_uid=%v",
|
||||
myproto.EncodeGTID(gtid), timeUpdated, txTimestamp, uid)
|
||||
myproto.EncodeReplicationPosition(pos), timeUpdated, txTimestamp, uid)
|
||||
} else {
|
||||
return fmt.Sprintf(
|
||||
"UPDATE _vt.blp_checkpoint "+
|
||||
"SET gtid='%v', time_updated=%v "+
|
||||
"SET pos='%v', time_updated=%v "+
|
||||
"WHERE source_shard_uid=%v",
|
||||
myproto.EncodeGTID(gtid), timeUpdated, uid)
|
||||
myproto.EncodeReplicationPosition(pos), timeUpdated, uid)
|
||||
}
|
||||
}
|
||||
|
||||
// QueryBlpCheckpoint returns a statement to query the gtid and flags for a
|
||||
// given shard from the _vt.blp_checkpoint table.
|
||||
func QueryBlpCheckpoint(index uint32) string {
|
||||
return fmt.Sprintf("SELECT gtid, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=%v", index)
|
||||
return fmt.Sprintf("SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=%v", index)
|
||||
}
|
||||
|
|
|
@ -11,43 +11,43 @@ import (
|
|||
)
|
||||
|
||||
func TestPopulateBlpCheckpoint(t *testing.T) {
|
||||
gtid := myproto.MustParseGTID("GoogleMysql", "19283")
|
||||
gtid := myproto.MustParseGTID("GoogleMysql", "41983-19283")
|
||||
want := "INSERT INTO _vt.blp_checkpoint " +
|
||||
"(source_shard_uid, gtid, time_updated, transaction_timestamp, flags) " +
|
||||
"VALUES (18372, 'GoogleMysql/19283', 481823, 0, 'myflags')"
|
||||
"(source_shard_uid, pos, time_updated, transaction_timestamp, flags) " +
|
||||
"VALUES (18372, 'GoogleMysql/41983-19283', 481823, 0, 'myflags')"
|
||||
|
||||
got := PopulateBlpCheckpoint(18372, gtid, 481823, "myflags")
|
||||
got := PopulateBlpCheckpoint(18372, myproto.ReplicationPosition{GTIDSet: gtid.GTIDSet()}, 481823, "myflags")
|
||||
if got != want {
|
||||
t.Errorf("PopulateBlpCheckpoint() = %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateBlpCheckpoint(t *testing.T) {
|
||||
gtid := myproto.MustParseGTID("GoogleMysql", "58283")
|
||||
gtid := myproto.MustParseGTID("GoogleMysql", "41983-58283")
|
||||
want := "UPDATE _vt.blp_checkpoint " +
|
||||
"SET gtid='GoogleMysql/58283', time_updated=88822 " +
|
||||
"SET pos='GoogleMysql/41983-58283', time_updated=88822 " +
|
||||
"WHERE source_shard_uid=78522"
|
||||
|
||||
got := UpdateBlpCheckpoint(78522, gtid, 88822, 0)
|
||||
got := UpdateBlpCheckpoint(78522, myproto.ReplicationPosition{GTIDSet: gtid.GTIDSet()}, 88822, 0)
|
||||
if got != want {
|
||||
t.Errorf("UpdateBlpCheckpoint() = %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateBlpCheckpointTimestamp(t *testing.T) {
|
||||
gtid := myproto.MustParseGTID("GoogleMysql", "58283")
|
||||
gtid := myproto.MustParseGTID("GoogleMysql", "41983-58283")
|
||||
want := "UPDATE _vt.blp_checkpoint " +
|
||||
"SET gtid='GoogleMysql/58283', time_updated=88822, transaction_timestamp=481828 " +
|
||||
"SET pos='GoogleMysql/41983-58283', time_updated=88822, transaction_timestamp=481828 " +
|
||||
"WHERE source_shard_uid=78522"
|
||||
|
||||
got := UpdateBlpCheckpoint(78522, gtid, 88822, 481828)
|
||||
got := UpdateBlpCheckpoint(78522, myproto.ReplicationPosition{GTIDSet: gtid.GTIDSet()}, 88822, 481828)
|
||||
if got != want {
|
||||
t.Errorf("UpdateBlpCheckpoint() = %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryBlpCheckpoint(t *testing.T) {
|
||||
want := "SELECT gtid, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=482821"
|
||||
want := "SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=482821"
|
||||
got := QueryBlpCheckpoint(482821)
|
||||
if got != want {
|
||||
t.Errorf("QueryBlpCheckpoint(482821) = %#v, want %#v", got, want)
|
||||
|
|
|
@ -39,11 +39,11 @@ type EventStreamer struct {
|
|||
sendEvent sendEventFunc
|
||||
}
|
||||
|
||||
func NewEventStreamer(dbname string, mysqld *mysqlctl.Mysqld, gtid myproto.GTID, sendEvent sendEventFunc) *EventStreamer {
|
||||
func NewEventStreamer(dbname string, mysqld *mysqlctl.Mysqld, startPos myproto.ReplicationPosition, sendEvent sendEventFunc) *EventStreamer {
|
||||
evs := &EventStreamer{
|
||||
sendEvent: sendEvent,
|
||||
}
|
||||
evs.bls = NewBinlogStreamer(dbname, mysqld, gtid, evs.transactionToEvent)
|
||||
evs.bls = NewBinlogStreamer(dbname, mysqld, startPos, evs.transactionToEvent)
|
||||
return evs
|
||||
}
|
||||
|
||||
|
|
|
@ -102,7 +102,7 @@ func TestDMLEvent(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Timestamp: 1,
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "20")},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "41983-20")},
|
||||
}
|
||||
evs := &EventStreamer{
|
||||
sendEvent: func(event *proto.StreamEvent) error {
|
||||
|
@ -120,7 +120,7 @@ func TestDMLEvent(t *testing.T) {
|
|||
t.Errorf("got %s, want %s", got, want)
|
||||
}
|
||||
case "POS":
|
||||
want := `&{POS [] [] 1 20}`
|
||||
want := `&{POS [] [] 1 41983-20}`
|
||||
got := fmt.Sprintf("%v", event)
|
||||
if got != want {
|
||||
t.Errorf("got %s, want %s", got, want)
|
||||
|
@ -149,7 +149,7 @@ func TestDDLEvent(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Timestamp: 1,
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "20")},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "41983-20")},
|
||||
}
|
||||
evs := &EventStreamer{
|
||||
sendEvent: func(event *proto.StreamEvent) error {
|
||||
|
@ -161,7 +161,7 @@ func TestDDLEvent(t *testing.T) {
|
|||
t.Errorf("got %s, want %s", got, want)
|
||||
}
|
||||
case "POS":
|
||||
want := `&{POS [] [] 1 20}`
|
||||
want := `&{POS [] [] 1 41983-20}`
|
||||
got := fmt.Sprintf("%v", event)
|
||||
if got != want {
|
||||
t.Errorf("got %s, want %s", got, want)
|
||||
|
|
|
@ -32,7 +32,7 @@ func TestKeyRangeFilterPass(t *testing.T) {
|
|||
Sql: []byte("dml2 /* EMD keyspace_id:2 */"),
|
||||
},
|
||||
},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "1")},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "41983-1")},
|
||||
}
|
||||
var got string
|
||||
f := KeyRangeFilterFunc(key.KIT_UINT64, testKeyRange, func(reply *proto.BinlogTransaction) error {
|
||||
|
@ -40,7 +40,7 @@ func TestKeyRangeFilterPass(t *testing.T) {
|
|||
return nil
|
||||
})
|
||||
f(&input)
|
||||
want := `statement: <6, "set1"> statement: <4, "dml2 /* EMD keyspace_id:2 */"> position: "1" `
|
||||
want := `statement: <6, "set1"> statement: <4, "dml2 /* EMD keyspace_id:2 */"> position: "41983-1" `
|
||||
if want != got {
|
||||
t.Errorf("want %s, got %s", want, got)
|
||||
}
|
||||
|
@ -57,7 +57,7 @@ func TestKeyRangeFilterSkip(t *testing.T) {
|
|||
Sql: []byte("dml1 /* EMD keyspace_id:20 */"),
|
||||
},
|
||||
},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "1")},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "41983-1")},
|
||||
}
|
||||
var got string
|
||||
f := KeyRangeFilterFunc(key.KIT_UINT64, testKeyRange, func(reply *proto.BinlogTransaction) error {
|
||||
|
@ -65,7 +65,7 @@ func TestKeyRangeFilterSkip(t *testing.T) {
|
|||
return nil
|
||||
})
|
||||
f(&input)
|
||||
want := `position: "1" `
|
||||
want := `position: "41983-1" `
|
||||
if want != got {
|
||||
t.Errorf("want %s, got %s", want, got)
|
||||
}
|
||||
|
@ -82,7 +82,7 @@ func TestKeyRangeFilterDDL(t *testing.T) {
|
|||
Sql: []byte("ddl"),
|
||||
},
|
||||
},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "1")},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "41983-1")},
|
||||
}
|
||||
var got string
|
||||
f := KeyRangeFilterFunc(key.KIT_UINT64, testKeyRange, func(reply *proto.BinlogTransaction) error {
|
||||
|
@ -90,7 +90,7 @@ func TestKeyRangeFilterDDL(t *testing.T) {
|
|||
return nil
|
||||
})
|
||||
f(&input)
|
||||
want := `position: "1" `
|
||||
want := `position: "41983-1" `
|
||||
if want != got {
|
||||
t.Errorf("want %s, got %s", want, got)
|
||||
}
|
||||
|
@ -113,7 +113,7 @@ func TestKeyRangeFilterMalformed(t *testing.T) {
|
|||
Sql: []byte("dml1 /* EMD keyspace_id:2a */"),
|
||||
},
|
||||
},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "1")},
|
||||
GTIDField: myproto.GTIDField{Value: myproto.MustParseGTID(blsMysqlFlavor, "41983-1")},
|
||||
}
|
||||
var got string
|
||||
f := KeyRangeFilterFunc(key.KIT_UINT64, testKeyRange, func(reply *proto.BinlogTransaction) error {
|
||||
|
@ -121,7 +121,7 @@ func TestKeyRangeFilterMalformed(t *testing.T) {
|
|||
return nil
|
||||
})
|
||||
f(&input)
|
||||
want := `position: "1" `
|
||||
want := `position: "41983-1" `
|
||||
if want != got {
|
||||
t.Errorf("want %s, got %s", want, got)
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ import (
|
|||
// BlpPosition describes a binlog player position to start from.
|
||||
type BlpPosition struct {
|
||||
Uid uint32
|
||||
GTIDField myproto.GTIDField
|
||||
Position myproto.ReplicationPosition
|
||||
}
|
||||
|
||||
// BlpPositionList is a list of BlpPosition, not sorted.
|
||||
|
|
|
@ -20,7 +20,7 @@ func (blpPosition *BlpPosition) MarshalBson(buf *bytes2.ChunkedWriter, key strin
|
|||
lenWriter := bson.NewLenWriter(buf)
|
||||
|
||||
bson.EncodeUint32(buf, "Uid", blpPosition.Uid)
|
||||
blpPosition.GTIDField.MarshalBson(buf, "GTIDField")
|
||||
blpPosition.Position.MarshalBson(buf, "Position")
|
||||
|
||||
lenWriter.Close()
|
||||
}
|
||||
|
@ -41,8 +41,8 @@ func (blpPosition *BlpPosition) UnmarshalBson(buf *bytes.Buffer, kind byte) {
|
|||
switch bson.ReadCString(buf) {
|
||||
case "Uid":
|
||||
blpPosition.Uid = bson.DecodeUint32(buf, kind)
|
||||
case "GTIDField":
|
||||
blpPosition.GTIDField.UnmarshalBson(buf, kind)
|
||||
case "Position":
|
||||
blpPosition.Position.UnmarshalBson(buf, kind)
|
||||
default:
|
||||
bson.Skip(buf, kind)
|
||||
}
|
||||
|
|
|
@ -11,18 +11,18 @@ import (
|
|||
|
||||
// UpdateStreamRequest is used to make a request for ServeUpdateStream.
|
||||
type UpdateStreamRequest struct {
|
||||
GTIDField myproto.GTIDField
|
||||
Position myproto.ReplicationPosition
|
||||
}
|
||||
|
||||
// KeyRangeRequest is used to make a request for StreamKeyRange.
|
||||
type KeyRangeRequest struct {
|
||||
GTIDField myproto.GTIDField
|
||||
Position myproto.ReplicationPosition
|
||||
KeyspaceIdType key.KeyspaceIdType
|
||||
KeyRange key.KeyRange
|
||||
}
|
||||
|
||||
// TablesRequest is used to make a request for StreamTables.
|
||||
type TablesRequest struct {
|
||||
GTIDField myproto.GTIDField
|
||||
Position myproto.ReplicationPosition
|
||||
Tables []string
|
||||
}
|
||||
|
|
|
@ -133,7 +133,7 @@ func IsUpdateStreamEnabled() bool {
|
|||
return UpdateStreamRpcService.isEnabled()
|
||||
}
|
||||
|
||||
func GetReplicationPosition() (myproto.GTID, error) {
|
||||
func GetReplicationPosition() (myproto.ReplicationPosition, error) {
|
||||
return UpdateStreamRpcService.getReplicationPosition()
|
||||
}
|
||||
|
||||
|
@ -197,9 +197,9 @@ func (updateStream *UpdateStream) ServeUpdateStream(req *proto.UpdateStreamReque
|
|||
|
||||
streamCount.Add("Updates", 1)
|
||||
defer streamCount.Add("Updates", -1)
|
||||
log.Infof("ServeUpdateStream starting @ %#v", req.GTIDField.Value)
|
||||
log.Infof("ServeUpdateStream starting @ %#v", req.Position)
|
||||
|
||||
evs := NewEventStreamer(updateStream.dbname, updateStream.mysqld, req.GTIDField.Value, func(reply *proto.StreamEvent) error {
|
||||
evs := NewEventStreamer(updateStream.dbname, updateStream.mysqld, req.Position, func(reply *proto.StreamEvent) error {
|
||||
if reply.Category == "ERR" {
|
||||
updateStreamErrors.Add("UpdateStream", 1)
|
||||
} else {
|
||||
|
@ -234,7 +234,7 @@ func (updateStream *UpdateStream) StreamKeyRange(req *proto.KeyRangeRequest, sen
|
|||
|
||||
streamCount.Add("KeyRange", 1)
|
||||
defer streamCount.Add("KeyRange", -1)
|
||||
log.Infof("ServeUpdateStream starting @ %#v", req.GTIDField.Value)
|
||||
log.Infof("ServeUpdateStream starting @ %#v", req.Position)
|
||||
|
||||
// Calls cascade like this: BinlogStreamer->KeyRangeFilterFunc->func(*proto.BinlogTransaction)->sendReply
|
||||
f := KeyRangeFilterFunc(req.KeyspaceIdType, req.KeyRange, func(reply *proto.BinlogTransaction) error {
|
||||
|
@ -242,7 +242,7 @@ func (updateStream *UpdateStream) StreamKeyRange(req *proto.KeyRangeRequest, sen
|
|||
keyrangeTransactions.Add(1)
|
||||
return sendReply(reply)
|
||||
})
|
||||
bls := NewBinlogStreamer(updateStream.dbname, updateStream.mysqld, req.GTIDField.Value, f)
|
||||
bls := NewBinlogStreamer(updateStream.dbname, updateStream.mysqld, req.Position, f)
|
||||
|
||||
svm := &sync2.ServiceManager{}
|
||||
svm.Go(bls.Stream)
|
||||
|
@ -270,7 +270,7 @@ func (updateStream *UpdateStream) StreamTables(req *proto.TablesRequest, sendRep
|
|||
|
||||
streamCount.Add("Tables", 1)
|
||||
defer streamCount.Add("Tables", -1)
|
||||
log.Infof("ServeUpdateStream starting @ %#v", req.GTIDField.Value)
|
||||
log.Infof("ServeUpdateStream starting @ %#v", req.Position)
|
||||
|
||||
// Calls cascade like this: BinlogStreamer->TablesFilterFunc->func(*proto.BinlogTransaction)->sendReply
|
||||
f := TablesFilterFunc(req.Tables, func(reply *proto.BinlogTransaction) error {
|
||||
|
@ -278,7 +278,7 @@ func (updateStream *UpdateStream) StreamTables(req *proto.TablesRequest, sendRep
|
|||
keyrangeTransactions.Add(1)
|
||||
return sendReply(reply)
|
||||
})
|
||||
bls := NewBinlogStreamer(updateStream.dbname, updateStream.mysqld, req.GTIDField.Value, f)
|
||||
bls := NewBinlogStreamer(updateStream.dbname, updateStream.mysqld, req.Position, f)
|
||||
|
||||
svm := &sync2.ServiceManager{}
|
||||
svm.Go(bls.Stream)
|
||||
|
@ -287,16 +287,12 @@ func (updateStream *UpdateStream) StreamTables(req *proto.TablesRequest, sendRep
|
|||
return svm.Join()
|
||||
}
|
||||
|
||||
func (updateStream *UpdateStream) getReplicationPosition() (myproto.GTID, error) {
|
||||
func (updateStream *UpdateStream) getReplicationPosition() (myproto.ReplicationPosition, error) {
|
||||
updateStream.actionLock.Lock()
|
||||
defer updateStream.actionLock.Unlock()
|
||||
if !updateStream.isEnabled() {
|
||||
return nil, fmt.Errorf("update stream service is not enabled")
|
||||
return myproto.ReplicationPosition{}, fmt.Errorf("update stream service is not enabled")
|
||||
}
|
||||
|
||||
rp, err := updateStream.mysqld.MasterStatus()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rp.MasterLogGTIDField.Value, nil
|
||||
return updateStream.mysqld.MasterPosition()
|
||||
}
|
||||
|
|
|
@ -246,10 +246,10 @@ func (mysqld *Mysqld) CreateSnapshot(dbName, sourceAddr string, allowHierarchica
|
|||
sourceIsMaster := false
|
||||
readOnly = true
|
||||
|
||||
slaveStatus, slaveErr := mysqld.slaveStatus()
|
||||
if slaveErr == nil {
|
||||
slaveStartRequired = (slaveStatus["Slave_IO_Running"] == "Yes" && slaveStatus["Slave_SQL_Running"] == "Yes")
|
||||
} else if slaveErr == ErrNotSlave {
|
||||
slaveStatus, err := mysqld.SlaveStatus()
|
||||
if err == nil {
|
||||
slaveStartRequired = slaveStatus.SlaveRunning()
|
||||
} else if err == ErrNotSlave {
|
||||
sourceIsMaster = true
|
||||
} else {
|
||||
// If we can't get any data, just fail.
|
||||
|
@ -265,12 +265,12 @@ func (mysqld *Mysqld) CreateSnapshot(dbName, sourceAddr string, allowHierarchica
|
|||
// If the source is a slave use the master replication position
|
||||
// unless we are allowing hierachical replicas.
|
||||
masterAddr := ""
|
||||
var replicationPosition *proto.ReplicationPosition
|
||||
var replicationPosition proto.ReplicationPosition
|
||||
if sourceIsMaster {
|
||||
if err = mysqld.SetReadOnly(true); err != nil {
|
||||
return
|
||||
}
|
||||
replicationPosition, err = mysqld.MasterStatus()
|
||||
replicationPosition, err = mysqld.MasterPosition()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -279,10 +279,13 @@ func (mysqld *Mysqld) CreateSnapshot(dbName, sourceAddr string, allowHierarchica
|
|||
if err = mysqld.StopSlave(hookExtraEnv); err != nil {
|
||||
return
|
||||
}
|
||||
replicationPosition, err = mysqld.SlaveStatus()
|
||||
var slaveStatus *proto.ReplicationStatus
|
||||
slaveStatus, err = mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
replicationPosition = slaveStatus.Position
|
||||
|
||||
// We are a slave, check our replication strategy before
|
||||
// choosing the master address.
|
||||
if allowHierarchicalReplication {
|
||||
|
@ -306,7 +309,7 @@ func (mysqld *Mysqld) CreateSnapshot(dbName, sourceAddr string, allowHierarchica
|
|||
} else {
|
||||
var sm *SnapshotManifest
|
||||
sm, snapshotErr = newSnapshotManifest(sourceAddr, mysqld.IpAddr(),
|
||||
masterAddr, dbName, dataFiles, replicationPosition, nil)
|
||||
masterAddr, dbName, dataFiles, replicationPosition, proto.ReplicationPosition{})
|
||||
if snapshotErr != nil {
|
||||
log.Errorf("CreateSnapshot failed: %v", snapshotErr)
|
||||
} else {
|
||||
|
@ -427,7 +430,7 @@ func (mysqld *Mysqld) RestoreFromSnapshot(snapshotManifest *SnapshotManifest, fe
|
|||
return err
|
||||
}
|
||||
|
||||
cmdList, err := StartReplicationCommands(mysqld, snapshotManifest.ReplicationState)
|
||||
cmdList, err := mysqld.StartReplicationCommands(snapshotManifest.ReplicationStatus)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -285,16 +285,12 @@ type SnapshotManifest struct {
|
|||
DbName string
|
||||
Files SnapshotFiles
|
||||
|
||||
ReplicationState *proto.ReplicationState
|
||||
MasterState *proto.ReplicationState
|
||||
ReplicationStatus *proto.ReplicationStatus
|
||||
MasterPosition proto.ReplicationPosition
|
||||
}
|
||||
|
||||
func newSnapshotManifest(addr, mysqlAddr, masterAddr, dbName string, files []SnapshotFile, pos, masterPos *proto.ReplicationPosition) (*SnapshotManifest, error) {
|
||||
nrs, err := proto.NewReplicationState(masterAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mrs, err := proto.NewReplicationState(mysqlAddr)
|
||||
func newSnapshotManifest(addr, mysqlAddr, masterAddr, dbName string, files []SnapshotFile, pos, masterPos proto.ReplicationPosition) (*SnapshotManifest, error) {
|
||||
nrs, err := proto.NewReplicationStatus(masterAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -302,14 +298,11 @@ func newSnapshotManifest(addr, mysqlAddr, masterAddr, dbName string, files []Sna
|
|||
Addr: addr,
|
||||
DbName: dbName,
|
||||
Files: files,
|
||||
ReplicationState: nrs,
|
||||
MasterState: mrs,
|
||||
ReplicationStatus: nrs,
|
||||
MasterPosition: masterPos,
|
||||
}
|
||||
sort.Sort(rs.Files)
|
||||
rs.ReplicationState.ReplicationPosition = *pos
|
||||
if masterPos != nil {
|
||||
rs.MasterState.ReplicationPosition = *masterPos
|
||||
}
|
||||
rs.ReplicationStatus.Position = pos
|
||||
return rs, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
// Copyright 2014, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mysqlctl
|
||||
|
||||
import (
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
)
|
||||
|
||||
// fakeGTID is used in the mysql_flavor_*_test.go files.
|
||||
type fakeGTID struct {
|
||||
flavor, value string
|
||||
}
|
||||
|
||||
func (f fakeGTID) String() string { return f.value }
|
||||
func (f fakeGTID) Flavor() string { return f.flavor }
|
||||
func (fakeGTID) TryCompare(proto.GTID) (int, error) { return 0, nil }
|
|
@ -19,11 +19,11 @@ func (mrl *mysqlReplicationLag) Report(typ topo.TabletType) (status map[string]s
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
rp, err := mrl.mysqld.SlaveStatus()
|
||||
slaveStatus, err := mrl.mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if int(rp.SecondsBehindMaster) > mrl.allowedLagInSeconds {
|
||||
if !slaveStatus.SlaveRunning() || int(slaveStatus.SecondsBehindMaster) > mrl.allowedLagInSeconds {
|
||||
return map[string]string{health.ReplicationLag: health.ReplicationLagHigh}, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -6,8 +6,10 @@ package mysqlctl
|
|||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
blproto "github.com/youtube/vitess/go/vt/binlog/proto"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
)
|
||||
|
@ -18,27 +20,39 @@ This file handles the differences between flavors of mysql.
|
|||
|
||||
// MysqlFlavor is the abstract interface for a flavor.
|
||||
type MysqlFlavor interface {
|
||||
// MasterStatus fills in the ReplicationPosition structure.
|
||||
// It has two components: the result of a standard 'SHOW MASTER STATUS'
|
||||
// and the corresponding transaction group id.
|
||||
MasterStatus(mysqld *Mysqld) (*proto.ReplicationPosition, error)
|
||||
// MasterPosition returns the ReplicationPosition of a master.
|
||||
MasterPosition(mysqld *Mysqld) (proto.ReplicationPosition, error)
|
||||
|
||||
// SlaveStatus returns the ReplicationStatus of a slave.
|
||||
SlaveStatus(mysqld *Mysqld) (*proto.ReplicationStatus, error)
|
||||
|
||||
// PromoteSlaveCommands returns the commands to run to change
|
||||
// a slave into a master
|
||||
PromoteSlaveCommands() []string
|
||||
|
||||
// ParseGTID converts a string containing a GTID in the canonical format of
|
||||
// this MySQL flavor into a proto.GTID interface value.
|
||||
// StartReplicationCommands returns the commands to start replicating from
|
||||
// a given master and position as specified in a ReplicationStatus.
|
||||
StartReplicationCommands(params *mysql.ConnectionParams, status *proto.ReplicationStatus) ([]string, error)
|
||||
|
||||
// ParseGTID parses a GTID in the canonical format of this MySQL flavor into
|
||||
// a proto.GTID interface value.
|
||||
ParseGTID(string) (proto.GTID, error)
|
||||
|
||||
// ParseReplicationPosition parses a replication position in the canonical
|
||||
// format of this MySQL flavor into a proto.ReplicationPosition struct.
|
||||
ParseReplicationPosition(string) (proto.ReplicationPosition, error)
|
||||
|
||||
// SendBinlogDumpCommand sends the flavor-specific version of the
|
||||
// COM_BINLOG_DUMP command to start dumping raw binlog events over a slave
|
||||
// connection, starting at a given GTID.
|
||||
SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.GTID) error
|
||||
SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.ReplicationPosition) error
|
||||
|
||||
// MakeBinlogEvent takes a raw packet from the MySQL binlog stream connection
|
||||
// and returns a BinlogEvent through which the packet can be examined.
|
||||
MakeBinlogEvent(buf []byte) blproto.BinlogEvent
|
||||
|
||||
// WaitMasterPos waits until slave replication reaches at least targetPos.
|
||||
WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPosition, waitTimeout time.Duration) error
|
||||
}
|
||||
|
||||
var mysqlFlavors map[string]MysqlFlavor = make(map[string]MysqlFlavor)
|
||||
|
|
|
@ -5,9 +5,15 @@
|
|||
package mysqlctl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
blproto "github.com/youtube/vitess/go/vt/binlog/proto"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
)
|
||||
|
@ -18,47 +24,102 @@ type googleMysql51 struct {
|
|||
|
||||
const googleMysqlFlavorID = "GoogleMysql"
|
||||
|
||||
// MasterStatus implements MysqlFlavor.MasterStatus
|
||||
// MasterPosition implements MysqlFlavor.MasterPosition().
|
||||
//
|
||||
// The command looks like:
|
||||
// mysql> show master status\G
|
||||
// mysql> SHOW MASTER STATUS\G
|
||||
// **************************** 1. row ***************************
|
||||
// File: vt-000001c6-bin.000003
|
||||
// Position: 106
|
||||
// Binlog_Do_DB:
|
||||
// Binlog_Ignore_DB:
|
||||
// Group_ID:
|
||||
func (flavor *googleMysql51) MasterStatus(mysqld *Mysqld) (rp *proto.ReplicationPosition, err error) {
|
||||
qr, err := mysqld.fetchSuperQuery("SHOW MASTER STATUS")
|
||||
func (flavor *googleMysql51) MasterPosition(mysqld *Mysqld) (rp proto.ReplicationPosition, err error) {
|
||||
fields, err := mysqld.fetchSuperQueryMap("SHOW MASTER STATUS")
|
||||
if err != nil {
|
||||
return
|
||||
return rp, err
|
||||
}
|
||||
if len(qr.Rows) != 1 {
|
||||
return nil, ErrNotMaster
|
||||
groupID, ok := fields["Group_ID"]
|
||||
if !ok {
|
||||
return rp, fmt.Errorf("this db does not support group id")
|
||||
}
|
||||
if len(qr.Rows[0]) < 5 {
|
||||
return nil, fmt.Errorf("this db does not support group id")
|
||||
// Get the server_id that created this group_id.
|
||||
info, err := mysqld.fetchSuperQueryMap("SHOW BINLOG INFO FOR " + groupID)
|
||||
if err != nil {
|
||||
return proto.ReplicationPosition{}, err
|
||||
}
|
||||
rp = &proto.ReplicationPosition{}
|
||||
rp.MasterLogFile = qr.Rows[0][0].String()
|
||||
utemp, err := qr.Rows[0][1].ParseUint64()
|
||||
// Google MySQL does not define a format to describe both a server_id and
|
||||
// group_id, so we invented one.
|
||||
pos := info["Server_ID"] + "-" + groupID
|
||||
return flavor.ParseReplicationPosition(pos)
|
||||
}
|
||||
|
||||
// SlaveStatus implements MysqlFlavor.SlaveStatus().
|
||||
func (flavor *googleMysql51) SlaveStatus(mysqld *Mysqld) (*proto.ReplicationStatus, error) {
|
||||
fields, err := mysqld.fetchSuperQueryMap("SHOW SLAVE STATUS")
|
||||
if err != nil {
|
||||
return nil, ErrNotSlave
|
||||
}
|
||||
status := &proto.ReplicationStatus{
|
||||
SlaveIORunning: fields["Slave_IO_Running"] == "Yes",
|
||||
SlaveSQLRunning: fields["Slave_SQL_Running"] == "Yes",
|
||||
}
|
||||
groupID := fields["Exec_Master_Group_ID"]
|
||||
|
||||
// Get the server_id that created this group_id.
|
||||
info, err := mysqld.fetchSuperQueryMap("SHOW BINLOG INFO FOR " + groupID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rp.MasterLogPosition = uint(utemp)
|
||||
rp.MasterLogGTIDField.Value, err = flavor.ParseGTID(qr.Rows[0][4].String())
|
||||
// Create the fake Google GTID syntax we invented.
|
||||
pos := info["Server_ID"] + "-" + groupID
|
||||
|
||||
status.Position, err = flavor.ParseReplicationPosition(pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// On the master, the SQL position and IO position are at
|
||||
// necessarily the same point.
|
||||
rp.MasterLogFileIo = rp.MasterLogFile
|
||||
rp.MasterLogPositionIo = rp.MasterLogPosition
|
||||
return
|
||||
temp, _ := strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 0)
|
||||
status.SecondsBehindMaster = uint(temp)
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// PromoteSlaveCommands implements MysqlFlavor.PromoteSlaveCommands
|
||||
// WaitMasterPos implements MysqlFlavor.WaitMasterPos().
|
||||
//
|
||||
// waitTimeout of 0 means wait indefinitely.
|
||||
//
|
||||
// Google MySQL doesn't have a function to wait for a GTID. MASTER_POS_WAIT()
|
||||
// requires a file:pos, which we don't know anymore because we're passing around
|
||||
// only GTIDs internally now.
|
||||
//
|
||||
// We can't ask the local mysqld instance to convert the GTID with BinlogInfo()
|
||||
// because this instance hasn't seen that GTID yet. For now, we have to poll.
|
||||
//
|
||||
// There used to be a function called Mysqld.WaitForMinimumReplicationPosition,
|
||||
// which was the same as WaitMasterPos except it used polling because it worked
|
||||
// on GTIDs. Now that WaitMasterPos uses GTIDs too, they've been merged.
|
||||
func (*googleMysql51) WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPosition, waitTimeout time.Duration) error {
|
||||
stopTime := time.Now().Add(waitTimeout)
|
||||
for waitTimeout == 0 || time.Now().Before(stopTime) {
|
||||
status, err := mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if status.Position.AtLeast(targetPos) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !status.SlaveRunning() && waitTimeout == 0 {
|
||||
return fmt.Errorf("slave not running during WaitMasterPos and no timeout is set, status = %+v", status)
|
||||
}
|
||||
|
||||
log.Infof("WaitMasterPos got position %v, sleeping for 1s waiting for position %v", status.Position, targetPos)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return fmt.Errorf("timed out waiting for position %v", targetPos)
|
||||
}
|
||||
|
||||
// PromoteSlaveCommands implements MysqlFlavor.PromoteSlaveCommands().
|
||||
func (*googleMysql51) PromoteSlaveCommands() []string {
|
||||
return []string{
|
||||
"RESET MASTER",
|
||||
|
@ -67,28 +128,76 @@ func (*googleMysql51) PromoteSlaveCommands() []string {
|
|||
}
|
||||
}
|
||||
|
||||
// StartReplicationCommands implements MysqlFlavor.StartReplicationCommands().
|
||||
func (*googleMysql51) StartReplicationCommands(params *mysql.ConnectionParams, status *proto.ReplicationStatus) ([]string, error) {
|
||||
// Make SET binlog_group_id command. We have to cast to the Google-specific
|
||||
// struct to access the fields because there is no canonical printed format to
|
||||
// represent both a group_id and server_id in Google MySQL.
|
||||
gtid, ok := status.Position.GTIDSet.(proto.GoogleGTID)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("can't start replication at GTIDSet %#v, expected GoogleGTID", status.Position.GTIDSet)
|
||||
}
|
||||
setGroupID := fmt.Sprintf(
|
||||
"SET binlog_group_id = %d, master_server_id = %d",
|
||||
gtid.GroupID, gtid.ServerID)
|
||||
|
||||
// Make CHANGE MASTER TO command.
|
||||
args := changeMasterArgs(params, status)
|
||||
args = append(args, "CONNECT_USING_GROUP_ID")
|
||||
changeMasterTo := "CHANGE MASTER TO\n " + strings.Join(args, ",\n ")
|
||||
|
||||
return []string{
|
||||
"STOP SLAVE",
|
||||
"RESET SLAVE",
|
||||
setGroupID,
|
||||
changeMasterTo,
|
||||
"START SLAVE",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ParseGTID implements MysqlFlavor.ParseGTID().
|
||||
func (*googleMysql51) ParseGTID(s string) (proto.GTID, error) {
|
||||
return proto.ParseGTID(googleMysqlFlavorID, s)
|
||||
}
|
||||
|
||||
// SendBinlogDumpCommand implements MysqlFlavor.SendBinlogDumpCommand().
|
||||
func (*googleMysql51) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.GTID) error {
|
||||
const COM_BINLOG_DUMP = 0x12
|
||||
// ParseReplicationPosition implements MysqlFlavor.ParseReplicationPosition().
|
||||
func (*googleMysql51) ParseReplicationPosition(s string) (proto.ReplicationPosition, error) {
|
||||
return proto.ParseReplicationPosition(googleMysqlFlavorID, s)
|
||||
}
|
||||
|
||||
// We can't use Google MySQL's group_id command COM_BINLOG_DUMP2 because it
|
||||
// requires us to know the server_id of the server that generated the event,
|
||||
// to avoid connecting to a master with an alternate future. We don't know
|
||||
// that server_id, so we have to use the old file and position method, which
|
||||
// bypasses that check.
|
||||
pos, err := mysqld.BinlogInfo(startPos)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error computing start position: %v", err)
|
||||
// makeBinlogDump2Command builds a buffer containing the data for a Google MySQL
|
||||
// COM_BINLOG_DUMP2 command.
|
||||
func makeBinlogDump2Command(flags uint16, slave_id uint32, group_id uint64, source_server_id uint32) []byte {
|
||||
var buf bytes.Buffer
|
||||
buf.Grow(2 + 4 + 8 + 4)
|
||||
|
||||
// binlog_flags (2 bytes)
|
||||
binary.Write(&buf, binary.LittleEndian, flags)
|
||||
// server_id of slave (4 bytes)
|
||||
binary.Write(&buf, binary.LittleEndian, slave_id)
|
||||
// group_id (8 bytes)
|
||||
binary.Write(&buf, binary.LittleEndian, group_id)
|
||||
// server_id of the server that generated the group_id (4 bytes)
|
||||
binary.Write(&buf, binary.LittleEndian, source_server_id)
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// SendBinlogDumpCommand implements MysqlFlavor.SendBinlogDumpCommand().
|
||||
func (flavor *googleMysql51) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.ReplicationPosition) error {
|
||||
const (
|
||||
COM_BINLOG_DUMP2 = 0x27
|
||||
BINLOG_USE_GROUP_ID = 0x04
|
||||
)
|
||||
|
||||
gtid, ok := startPos.GTIDSet.(proto.GoogleGTID)
|
||||
if !ok {
|
||||
return fmt.Errorf("startPos.GTIDSet = %#v is wrong type, expected GoogleGTID", startPos.GTIDSet)
|
||||
}
|
||||
|
||||
// Build the command.
|
||||
buf := makeBinlogDumpCommand(uint32(pos.MasterLogPosition), 0, conn.slaveID, pos.MasterLogFile)
|
||||
return conn.SendCommand(COM_BINLOG_DUMP, buf)
|
||||
buf := makeBinlogDump2Command(BINLOG_USE_GROUP_ID, conn.slaveID, gtid.GroupID, gtid.ServerID)
|
||||
return conn.SendCommand(COM_BINLOG_DUMP2, buf)
|
||||
}
|
||||
|
||||
// MakeBinlogEvent implements MysqlFlavor.MakeBinlogEvent().
|
||||
|
@ -154,7 +263,7 @@ func (ev googleBinlogEvent) GTID(f blproto.BinlogFormat) (proto.GTID, error) {
|
|||
if group_id == 0 {
|
||||
return nil, fmt.Errorf("invalid group_id 0")
|
||||
}
|
||||
return proto.GoogleGTID{GroupID: group_id}, nil
|
||||
return proto.GoogleGTID{ServerID: ev.ServerID(), GroupID: group_id}, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
blproto "github.com/youtube/vitess/go/vt/binlog/proto"
|
||||
proto "github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
)
|
||||
|
@ -169,7 +170,7 @@ func TestGoogleBinlogEventGTID(t *testing.T) {
|
|||
}
|
||||
|
||||
input := googleBinlogEvent{binlogEvent: binlogEvent(googleQueryEvent)}
|
||||
want := proto.GoogleGTID{GroupID: 0xb}
|
||||
want := proto.GoogleGTID{ServerID: 62344, GroupID: 0xb}
|
||||
got, err := input.GTID(f)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
|
@ -178,3 +179,127 @@ func TestGoogleBinlogEventGTID(t *testing.T) {
|
|||
t.Errorf("%#v.GTID() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleStartReplicationCommands(t *testing.T) {
|
||||
params := &mysql.ConnectionParams{
|
||||
Uname: "username",
|
||||
Pass: "password",
|
||||
}
|
||||
status := &proto.ReplicationStatus{
|
||||
Position: proto.ReplicationPosition{GTIDSet: proto.GoogleGTID{ServerID: 41983, GroupID: 12345}},
|
||||
MasterHost: "localhost",
|
||||
MasterPort: 123,
|
||||
MasterConnectRetry: 1234,
|
||||
}
|
||||
want := []string{
|
||||
"STOP SLAVE",
|
||||
"RESET SLAVE",
|
||||
"SET binlog_group_id = 12345, master_server_id = 41983",
|
||||
`CHANGE MASTER TO
|
||||
MASTER_HOST = 'localhost',
|
||||
MASTER_PORT = 123,
|
||||
MASTER_USER = 'username',
|
||||
MASTER_PASSWORD = 'password',
|
||||
MASTER_CONNECT_RETRY = 1234,
|
||||
CONNECT_USING_GROUP_ID`,
|
||||
"START SLAVE",
|
||||
}
|
||||
|
||||
got, err := (&googleMysql51{}).StartReplicationCommands(params, status)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("(&googleMysql51{}).StartReplicationCommands(%#v, %#v) = %#v, want %#v", params, status, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleStartReplicationCommandsSSL(t *testing.T) {
|
||||
params := &mysql.ConnectionParams{
|
||||
Uname: "username",
|
||||
Pass: "password",
|
||||
SslCa: "ssl-ca",
|
||||
SslCaPath: "ssl-ca-path",
|
||||
SslCert: "ssl-cert",
|
||||
SslKey: "ssl-key",
|
||||
}
|
||||
params.EnableSSL()
|
||||
status := &proto.ReplicationStatus{
|
||||
Position: proto.ReplicationPosition{GTIDSet: proto.GoogleGTID{ServerID: 41983, GroupID: 12345}},
|
||||
MasterHost: "localhost",
|
||||
MasterPort: 123,
|
||||
MasterConnectRetry: 1234,
|
||||
}
|
||||
want := []string{
|
||||
"STOP SLAVE",
|
||||
"RESET SLAVE",
|
||||
"SET binlog_group_id = 12345, master_server_id = 41983",
|
||||
`CHANGE MASTER TO
|
||||
MASTER_HOST = 'localhost',
|
||||
MASTER_PORT = 123,
|
||||
MASTER_USER = 'username',
|
||||
MASTER_PASSWORD = 'password',
|
||||
MASTER_CONNECT_RETRY = 1234,
|
||||
MASTER_SSL = 1,
|
||||
MASTER_SSL_CA = 'ssl-ca',
|
||||
MASTER_SSL_CAPATH = 'ssl-ca-path',
|
||||
MASTER_SSL_CERT = 'ssl-cert',
|
||||
MASTER_SSL_KEY = 'ssl-key',
|
||||
CONNECT_USING_GROUP_ID`,
|
||||
"START SLAVE",
|
||||
}
|
||||
|
||||
got, err := (&googleMysql51{}).StartReplicationCommands(params, status)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("(&googleMysql51{}).StartReplicationCommands(%#v, %#v) = %#v, want %#v", params, status, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleParseGTID(t *testing.T) {
|
||||
input := "123-456"
|
||||
want := proto.GoogleGTID{ServerID: 123, GroupID: 456}
|
||||
|
||||
got, err := (&googleMysql51{}).ParseGTID(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if got != want {
|
||||
t.Errorf("(&googleMysql51{}).ParseGTID(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleParseReplicationPosition(t *testing.T) {
|
||||
input := "123-456"
|
||||
want := proto.ReplicationPosition{GTIDSet: proto.GoogleGTID{ServerID: 123, GroupID: 456}}
|
||||
|
||||
got, err := (&googleMysql51{}).ParseReplicationPosition(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("(&googleMysql51{}).ParseReplicationPosition(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMakeBinlogDump2Command(t *testing.T) {
|
||||
want := []byte{
|
||||
// binlog_flags
|
||||
0xfe, 0xca,
|
||||
// slave_server_id
|
||||
0xef, 0xbe, 0xad, 0xde,
|
||||
// group_id
|
||||
0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12,
|
||||
// event_server_id
|
||||
0x21, 0x43, 0x65, 0x87,
|
||||
}
|
||||
|
||||
got := makeBinlogDump2Command(0xcafe, 0xdeadbeef, 0x1234567812345678, 0x87654321)
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("makeBinlogDump2Command() = %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,7 +7,12 @@ package mysqlctl
|
|||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
blproto "github.com/youtube/vitess/go/vt/binlog/proto"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
)
|
||||
|
@ -18,64 +23,97 @@ type mariaDB10 struct {
|
|||
|
||||
const mariadbFlavorID = "MariaDB"
|
||||
|
||||
// MasterStatus implements MysqlFlavor.MasterStatus
|
||||
func (flavor *mariaDB10) MasterStatus(mysqld *Mysqld) (rp *proto.ReplicationPosition, err error) {
|
||||
// grab what we need from SHOW MASTER STATUS
|
||||
qr, err := mysqld.fetchSuperQuery("SHOW MASTER STATUS")
|
||||
// MasterPosition implements MysqlFlavor.MasterPosition().
|
||||
func (flavor *mariaDB10) MasterPosition(mysqld *Mysqld) (rp proto.ReplicationPosition, err error) {
|
||||
qr, err := mysqld.fetchSuperQuery("SELECT @@GLOBAL.gtid_binlog_pos")
|
||||
if err != nil {
|
||||
return
|
||||
return rp, err
|
||||
}
|
||||
if len(qr.Rows) != 1 {
|
||||
return nil, ErrNotMaster
|
||||
if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 {
|
||||
return rp, fmt.Errorf("unexpected result format for gtid_binlog_pos: %#v", qr)
|
||||
}
|
||||
if len(qr.Rows[0]) < 2 {
|
||||
return nil, fmt.Errorf("unknown format for SHOW MASTER STATUS")
|
||||
return flavor.ParseReplicationPosition(qr.Rows[0][0].String())
|
||||
}
|
||||
rp = &proto.ReplicationPosition{}
|
||||
rp.MasterLogFile = qr.Rows[0][0].String()
|
||||
utemp, err := qr.Rows[0][1].ParseUint64()
|
||||
|
||||
// SlaveStatus implements MysqlFlavor.SlaveStatus().
|
||||
func (flavor *mariaDB10) SlaveStatus(mysqld *Mysqld) (*proto.ReplicationStatus, error) {
|
||||
fields, err := mysqld.fetchSuperQueryMap("SHOW SLAVE STATUS")
|
||||
if err != nil {
|
||||
return nil, ErrNotSlave
|
||||
}
|
||||
status := &proto.ReplicationStatus{
|
||||
SlaveIORunning: fields["Slave_IO_Running"] == "Yes",
|
||||
SlaveSQLRunning: fields["Slave_SQL_Running"] == "Yes",
|
||||
}
|
||||
status.Position, err = flavor.ParseReplicationPosition(fields["Exec_Master_Group_ID"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rp.MasterLogPosition = uint(utemp)
|
||||
temp, _ := strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 0)
|
||||
status.SecondsBehindMaster = uint(temp)
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// grab the corresponding GTID
|
||||
qr, err = mysqld.fetchSuperQuery(fmt.Sprintf("SELECT BINLOG_GTID_POS('%v', %v)", rp.MasterLogFile, rp.MasterLogPosition))
|
||||
// WaitMasterPos implements MysqlFlavor.WaitMasterPos().
|
||||
//
|
||||
// Note: Unlike MASTER_POS_WAIT(), MASTER_GTID_WAIT() will continue waiting even
|
||||
// if the slave thread stops. If that is a problem, we'll have to change this.
|
||||
func (*mariaDB10) WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPosition, waitTimeout time.Duration) error {
|
||||
query := fmt.Sprintf("SELECT MASTER_GTID_WAIT('%s', %.6f)", targetPos, waitTimeout.Seconds())
|
||||
|
||||
log.Infof("Waiting for minimum replication position with query: %v", query)
|
||||
qr, err := mysqld.fetchSuperQuery(query)
|
||||
if err != nil {
|
||||
return
|
||||
return fmt.Errorf("MASTER_GTID_WAIT() failed: %v", err)
|
||||
}
|
||||
if len(qr.Rows) != 1 {
|
||||
return nil, fmt.Errorf("BINLOG_GTID_POS failed with no rows")
|
||||
if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 {
|
||||
return fmt.Errorf("unexpected result format from MASTER_GTID_WAIT(): %#v", qr)
|
||||
}
|
||||
if len(qr.Rows[0]) < 1 {
|
||||
return nil, fmt.Errorf("BINLOG_GTID_POS returned no result")
|
||||
result := qr.Rows[0][0].String()
|
||||
if result == "-1" {
|
||||
return fmt.Errorf("timed out waiting for position %v", targetPos)
|
||||
}
|
||||
rp.MasterLogGTIDField.Value, err = flavor.ParseGTID(qr.Rows[0][0].String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil
|
||||
}
|
||||
|
||||
// On the master, the SQL position and IO position are at
|
||||
// necessarily the same point.
|
||||
rp.MasterLogFileIo = rp.MasterLogFile
|
||||
rp.MasterLogPositionIo = rp.MasterLogPosition
|
||||
return
|
||||
}
|
||||
|
||||
// PromoteSlaveCommands implements MysqlFlavor.PromoteSlaveCommands
|
||||
// PromoteSlaveCommands implements MysqlFlavor.PromoteSlaveCommands().
|
||||
func (*mariaDB10) PromoteSlaveCommands() []string {
|
||||
return []string{
|
||||
"RESET SLAVE",
|
||||
}
|
||||
}
|
||||
|
||||
// StartReplicationCommands implements MysqlFlavor.StartReplicationCommands().
|
||||
func (*mariaDB10) StartReplicationCommands(params *mysql.ConnectionParams, status *proto.ReplicationStatus) ([]string, error) {
|
||||
// Make SET gtid_slave_pos command.
|
||||
setSlavePos := fmt.Sprintf("SET GLOBAL gtid_slave_pos = '%s'", status.Position)
|
||||
|
||||
// Make CHANGE MASTER TO command.
|
||||
args := changeMasterArgs(params, status)
|
||||
args = append(args, "MASTER_USE_GTID = slave_pos")
|
||||
changeMasterTo := "CHANGE MASTER TO\n " + strings.Join(args, ",\n ")
|
||||
|
||||
return []string{
|
||||
"STOP SLAVE",
|
||||
"RESET SLAVE",
|
||||
setSlavePos,
|
||||
changeMasterTo,
|
||||
"START SLAVE",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ParseGTID implements MysqlFlavor.ParseGTID().
|
||||
func (*mariaDB10) ParseGTID(s string) (proto.GTID, error) {
|
||||
return proto.ParseGTID(mariadbFlavorID, s)
|
||||
}
|
||||
|
||||
// ParseReplicationPosition implements MysqlFlavor.ParseReplicationposition().
|
||||
func (*mariaDB10) ParseReplicationPosition(s string) (proto.ReplicationPosition, error) {
|
||||
return proto.ParseReplicationPosition(mariadbFlavorID, s)
|
||||
}
|
||||
|
||||
// SendBinlogDumpCommand implements MysqlFlavor.SendBinlogDumpCommand().
|
||||
func (*mariaDB10) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.GTID) error {
|
||||
func (*mariaDB10) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.ReplicationPosition) error {
|
||||
const COM_BINLOG_DUMP = 0x12
|
||||
|
||||
// MariaDB expects the slave to set the @slave_connect_state variable before
|
||||
|
|
|
@ -4,5 +4,119 @@
|
|||
|
||||
package mysqlctl
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
)
|
||||
|
||||
// TODO(enisoc): Grab MariaDB binlog event data to make unit tests for binary
|
||||
// parser when MariaDB starts working.
|
||||
|
||||
func TestMariadbStartReplicationCommands(t *testing.T) {
|
||||
params := &mysql.ConnectionParams{
|
||||
Uname: "username",
|
||||
Pass: "password",
|
||||
}
|
||||
status := &proto.ReplicationStatus{
|
||||
Position: proto.ReplicationPosition{GTIDSet: proto.MariadbGTID{Domain: 1, Server: 41983, Sequence: 12345}},
|
||||
MasterHost: "localhost",
|
||||
MasterPort: 123,
|
||||
MasterConnectRetry: 1234,
|
||||
}
|
||||
want := []string{
|
||||
"STOP SLAVE",
|
||||
"RESET SLAVE",
|
||||
"SET GLOBAL gtid_slave_pos = '1-41983-12345'",
|
||||
`CHANGE MASTER TO
|
||||
MASTER_HOST = 'localhost',
|
||||
MASTER_PORT = 123,
|
||||
MASTER_USER = 'username',
|
||||
MASTER_PASSWORD = 'password',
|
||||
MASTER_CONNECT_RETRY = 1234,
|
||||
MASTER_USE_GTID = slave_pos`,
|
||||
"START SLAVE",
|
||||
}
|
||||
|
||||
got, err := (&mariaDB10{}).StartReplicationCommands(params, status)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("(&mariaDB10{}).StartReplicationCommands(%#v, %#v) = %#v, want %#v", params, status, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariadbStartReplicationCommandsSSL(t *testing.T) {
|
||||
params := &mysql.ConnectionParams{
|
||||
Uname: "username",
|
||||
Pass: "password",
|
||||
SslCa: "ssl-ca",
|
||||
SslCaPath: "ssl-ca-path",
|
||||
SslCert: "ssl-cert",
|
||||
SslKey: "ssl-key",
|
||||
}
|
||||
params.EnableSSL()
|
||||
status := &proto.ReplicationStatus{
|
||||
Position: proto.ReplicationPosition{GTIDSet: proto.MariadbGTID{Domain: 1, Server: 41983, Sequence: 12345}},
|
||||
MasterHost: "localhost",
|
||||
MasterPort: 123,
|
||||
MasterConnectRetry: 1234,
|
||||
}
|
||||
want := []string{
|
||||
"STOP SLAVE",
|
||||
"RESET SLAVE",
|
||||
"SET GLOBAL gtid_slave_pos = '1-41983-12345'",
|
||||
`CHANGE MASTER TO
|
||||
MASTER_HOST = 'localhost',
|
||||
MASTER_PORT = 123,
|
||||
MASTER_USER = 'username',
|
||||
MASTER_PASSWORD = 'password',
|
||||
MASTER_CONNECT_RETRY = 1234,
|
||||
MASTER_SSL = 1,
|
||||
MASTER_SSL_CA = 'ssl-ca',
|
||||
MASTER_SSL_CAPATH = 'ssl-ca-path',
|
||||
MASTER_SSL_CERT = 'ssl-cert',
|
||||
MASTER_SSL_KEY = 'ssl-key',
|
||||
MASTER_USE_GTID = slave_pos`,
|
||||
"START SLAVE",
|
||||
}
|
||||
|
||||
got, err := (&mariaDB10{}).StartReplicationCommands(params, status)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("(&mariaDB10{}).StartReplicationCommands(%#v, %#v) = %#v, want %#v", params, status, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariadbParseGTID(t *testing.T) {
|
||||
input := "12-34-5678"
|
||||
want := proto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}
|
||||
|
||||
got, err := (&mariaDB10{}).ParseGTID(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if got != want {
|
||||
t.Errorf("(&mariaDB10{}).ParseGTID(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariadbParseReplicationPosition(t *testing.T) {
|
||||
input := "12-34-5678"
|
||||
want := proto.ReplicationPosition{GTIDSet: proto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}}
|
||||
|
||||
got, err := (&mariaDB10{}).ParseReplicationPosition(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("(&mariaDB10{}).ParseReplicationPosition(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,24 +7,34 @@ package mysqlctl
|
|||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
blproto "github.com/youtube/vitess/go/vt/binlog/proto"
|
||||
"github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
)
|
||||
|
||||
type fakeMysqlFlavor struct{}
|
||||
|
||||
func (fakeMysqlFlavor) MasterStatus(mysqld *Mysqld) (*proto.ReplicationPosition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (fakeMysqlFlavor) PromoteSlaveCommands() []string { return nil }
|
||||
func (fakeMysqlFlavor) ParseGTID(string) (proto.GTID, error) { return nil, nil }
|
||||
func (fakeMysqlFlavor) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.GTID) error {
|
||||
func (fakeMysqlFlavor) MakeBinlogEvent(buf []byte) blproto.BinlogEvent { return nil }
|
||||
func (fakeMysqlFlavor) ParseReplicationPosition(string) (proto.ReplicationPosition, error) {
|
||||
return proto.ReplicationPosition{}, nil
|
||||
}
|
||||
func (fakeMysqlFlavor) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.ReplicationPosition) error {
|
||||
return nil
|
||||
}
|
||||
func (fakeMysqlFlavor) MakeBinlogEvent(buf []byte) blproto.BinlogEvent {
|
||||
func (fakeMysqlFlavor) WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPosition, waitTimeout time.Duration) error {
|
||||
return nil
|
||||
}
|
||||
func (fakeMysqlFlavor) MasterPosition(mysqld *Mysqld) (proto.ReplicationPosition, error) {
|
||||
return proto.ReplicationPosition{}, nil
|
||||
}
|
||||
func (fakeMysqlFlavor) SlaveStatus(mysqld *Mysqld) (*proto.ReplicationStatus, error) { return nil, nil }
|
||||
func (fakeMysqlFlavor) StartReplicationCommands(params *mysql.ConnectionParams, status *proto.ReplicationStatus) ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func TestDefaultMysqlFlavor(t *testing.T) {
|
||||
os.Setenv("MYSQL_FLAVOR", "")
|
||||
|
|
|
@ -7,27 +7,52 @@ package proto
|
|||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const googleMysqlFlavorID = "GoogleMysql"
|
||||
|
||||
// parseGoogleGTID is registered as a parser for ParseGTID().
|
||||
// parseGoogleGTID is registered as a GTID parser.
|
||||
func parseGoogleGTID(s string) (GTID, error) {
|
||||
id, err := strconv.ParseUint(s, 10, 64)
|
||||
// Split into parts.
|
||||
parts := strings.Split(s, "-")
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid Google MySQL GTID (%v): expecting ServerID-GroupID", s)
|
||||
}
|
||||
|
||||
server_id, err := strconv.ParseUint(parts[0], 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid Google MySQL group_id (%v): %v", s, err)
|
||||
return nil, fmt.Errorf("invalid Google MySQL server_id (%v): %v", parts[0], err)
|
||||
}
|
||||
|
||||
return GoogleGTID{GroupID: id}, nil
|
||||
group_id, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid Google MySQL group_id (%v): %v", parts[1], err)
|
||||
}
|
||||
|
||||
return GoogleGTID{ServerID: uint32(server_id), GroupID: group_id}, nil
|
||||
}
|
||||
|
||||
// parseGoogleGTIDSet is registered as a GTIDSet parser.
|
||||
func parseGoogleGTIDSet(s string) (GTIDSet, error) {
|
||||
gtid, err := parseGoogleGTID(s)
|
||||
return gtid.(GoogleGTID), err
|
||||
}
|
||||
|
||||
// GoogleGTID implements GTID and GTIDSet. In Google MySQL, a single GTID is
|
||||
// already enough to define the set of all GTIDs that came before it.
|
||||
type GoogleGTID struct {
|
||||
// ServerID is the server_id of the server that originally generated the
|
||||
// transaction.
|
||||
ServerID uint32
|
||||
// GroupID is the unique ID of a transaction group.
|
||||
GroupID uint64
|
||||
}
|
||||
|
||||
// String implements GTID.String().
|
||||
// String implements GTID.String(). Google MySQL doesn't define a canonical way
|
||||
// to represent both a group_id and a server_id together, so we've invented one.
|
||||
func (gtid GoogleGTID) String() string {
|
||||
return fmt.Sprintf("%d", gtid.GroupID)
|
||||
return fmt.Sprintf("%d-%d", gtid.ServerID, gtid.GroupID)
|
||||
}
|
||||
|
||||
// Flavor implements GTID.Flavor().
|
||||
|
@ -35,24 +60,74 @@ func (gtid GoogleGTID) Flavor() string {
|
|||
return googleMysqlFlavorID
|
||||
}
|
||||
|
||||
// TryCompare implements GTID.TryCompare().
|
||||
func (gtid GoogleGTID) TryCompare(cmp GTID) (int, error) {
|
||||
other, ok := cmp.(GoogleGTID)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("can't compare GTID, wrong type: %#v.TryCompare(%#v)",
|
||||
gtid, cmp)
|
||||
// Domain implements GTID.SequenceDomain().
|
||||
func (gtid GoogleGTID) SequenceDomain() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
switch true {
|
||||
case gtid.GroupID < other.GroupID:
|
||||
return -1, nil
|
||||
case gtid.GroupID > other.GroupID:
|
||||
return 1, nil
|
||||
default:
|
||||
return 0, nil
|
||||
// SourceServer implements GTID.SourceServer().
|
||||
func (gtid GoogleGTID) SourceServer() string {
|
||||
return strconv.FormatUint(uint64(gtid.ServerID), 10)
|
||||
}
|
||||
|
||||
// SequenceNumber implements GTID.SequenceNumber().
|
||||
func (gtid GoogleGTID) SequenceNumber() uint64 {
|
||||
return gtid.GroupID
|
||||
}
|
||||
|
||||
// GTIDSet implements GTID.GTIDSet().
|
||||
func (gtid GoogleGTID) GTIDSet() GTIDSet {
|
||||
return gtid
|
||||
}
|
||||
|
||||
// Last implements GTIDSet.Last().
|
||||
func (gtid GoogleGTID) Last() GTID {
|
||||
return gtid
|
||||
}
|
||||
|
||||
// ContainsGTID implements GTIDSet.ContainsGTID().
|
||||
func (gtid GoogleGTID) ContainsGTID(other GTID) bool {
|
||||
if other == nil {
|
||||
return true
|
||||
}
|
||||
gOther, ok := other.(GoogleGTID)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return gtid.GroupID >= gOther.GroupID
|
||||
}
|
||||
|
||||
// Contains implements GTIDSet.Contains().
|
||||
func (gtid GoogleGTID) Contains(other GTIDSet) bool {
|
||||
if other == nil {
|
||||
return true
|
||||
}
|
||||
gOther, ok := other.(GoogleGTID)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return gtid.GroupID >= gOther.GroupID
|
||||
}
|
||||
|
||||
// Equal implements GTIDSet.Equal().
|
||||
func (gtid GoogleGTID) Equal(other GTIDSet) bool {
|
||||
gOther, ok := other.(GoogleGTID)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return gtid.GroupID == gOther.GroupID
|
||||
}
|
||||
|
||||
// AddGTID implements GTIDSet.AddGTID().
|
||||
func (gtid GoogleGTID) AddGTID(other GTID) GTIDSet {
|
||||
gOther, ok := other.(GoogleGTID)
|
||||
if !ok || gtid.GroupID >= gOther.GroupID {
|
||||
return gtid
|
||||
}
|
||||
return gOther
|
||||
}
|
||||
|
||||
func init() {
|
||||
gtidParsers[googleMysqlFlavorID] = parseGoogleGTID
|
||||
gtidSetParsers[googleMysqlFlavorID] = parseGoogleGTIDSet
|
||||
}
|
||||
|
|
|
@ -10,20 +10,59 @@ import (
|
|||
)
|
||||
|
||||
func TestParseGoogleGTID(t *testing.T) {
|
||||
input := "1758283"
|
||||
want := GoogleGTID{GroupID: 1758283}
|
||||
input := "41983-1758283"
|
||||
want := GoogleGTID{ServerID: 41983, GroupID: 1758283}
|
||||
|
||||
got, err := parseGoogleGTID(input)
|
||||
if err != nil {
|
||||
t.Errorf("%v", err)
|
||||
}
|
||||
if got.(GoogleGTID) != want {
|
||||
t.Errorf("ParseGTID(%v) = %v, want %v", input, got, want)
|
||||
t.Errorf("parseGoogleGTID(%v) = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseGoogleGTIDSet(t *testing.T) {
|
||||
input := "41983-1758283"
|
||||
want := GoogleGTID{ServerID: 41983, GroupID: 1758283}
|
||||
|
||||
got, err := parseGoogleGTIDSet(input)
|
||||
if err != nil {
|
||||
t.Errorf("%v", err)
|
||||
}
|
||||
if got.(GoogleGTID) != want {
|
||||
t.Errorf("parseGoogleGTIDSet(%v) = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidGoogleGTID(t *testing.T) {
|
||||
input := "1-2-3"
|
||||
input := "12345"
|
||||
want := "invalid Google MySQL GTID"
|
||||
|
||||
_, err := parseGoogleGTID(input)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for invalid input (%v)", input)
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
t.Errorf("wrong error message, got '%v', want '%v'", err, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidGoogleServerID(t *testing.T) {
|
||||
input := "1d3-45"
|
||||
want := "invalid Google MySQL server_id"
|
||||
|
||||
_, err := parseGoogleGTID(input)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for invalid input (%v)", input)
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
t.Errorf("wrong error message, got '%v', want '%v'", err, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidGoogleGroupID(t *testing.T) {
|
||||
input := "1-2d3"
|
||||
want := "invalid Google MySQL group_id"
|
||||
|
||||
_, err := parseGoogleGTID(input)
|
||||
|
@ -36,8 +75,8 @@ func TestParseInvalidGoogleGTID(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGoogleGTIDString(t *testing.T) {
|
||||
input := GoogleGTID{GroupID: 1857273}
|
||||
want := "1857273"
|
||||
input := GoogleGTID{ServerID: 41983, GroupID: 1857273}
|
||||
want := "41983-1857273"
|
||||
|
||||
got := input.String()
|
||||
if got != want {
|
||||
|
@ -55,91 +94,252 @@ func TestGoogleGTIDFlavor(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDCompareLess(t *testing.T) {
|
||||
func TestGoogleGTIDSequenceDomain(t *testing.T) {
|
||||
input := GoogleGTID{ServerID: 41983, GroupID: 1857273}
|
||||
want := ""
|
||||
|
||||
got := input.SequenceDomain()
|
||||
if got != want {
|
||||
t.Errorf("%#v.SequenceDomain() = '%v', want '%v'", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDSourceServer(t *testing.T) {
|
||||
input := GoogleGTID{ServerID: 41983, GroupID: 1857273}
|
||||
want := "41983"
|
||||
|
||||
got := input.SourceServer()
|
||||
if got != want {
|
||||
t.Errorf("%#v.SourceServer() = '%v', want '%v'", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDSequenceNumber(t *testing.T) {
|
||||
input := GoogleGTID{ServerID: 41983, GroupID: 1857273}
|
||||
want := uint64(1857273)
|
||||
|
||||
got := input.SequenceNumber()
|
||||
if got != want {
|
||||
t.Errorf("%#v.SequenceNumber() = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDGTIDSet(t *testing.T) {
|
||||
input := GoogleGTID{ServerID: 41983, GroupID: 1857273}
|
||||
want := GTIDSet(input)
|
||||
|
||||
got := input.GTIDSet()
|
||||
if got != want {
|
||||
t.Errorf("%#v.GTIDSet() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDLast(t *testing.T) {
|
||||
input := GoogleGTID{ServerID: 41983, GroupID: 1857273}
|
||||
want := GTID(input)
|
||||
|
||||
got := input.Last()
|
||||
if got != want {
|
||||
t.Errorf("%#v.Last() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDContainsLess(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 12345}
|
||||
input2 := GoogleGTID{GroupID: 54321}
|
||||
want := false
|
||||
|
||||
cmp, err := input1.TryCompare(input2)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for %#v.TryCompare(%#v): %v", input1, input2, err)
|
||||
}
|
||||
if !(cmp < 0) {
|
||||
t.Errorf("%#v.TryCompare(%#v) = %v, want < 0", input1, input2, cmp)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDCompareGreater(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 98765}
|
||||
input2 := GoogleGTID{GroupID: 56789}
|
||||
func TestGoogleGTIDContainsGreater(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 54321}
|
||||
input2 := GoogleGTID{GroupID: 12345}
|
||||
want := true
|
||||
|
||||
cmp, err := input1.TryCompare(input2)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for %#v.TryCompare(%#v): %v", input2, input1, err)
|
||||
}
|
||||
if !(cmp > 0) {
|
||||
t.Errorf("%#v.TryCompare(%#v) = %v, want > 0", input2, input1, cmp)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDCompareEqual(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := GoogleGTID{GroupID: 41234}
|
||||
func TestGoogleGTIDContainsEqual(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 12345}
|
||||
input2 := GoogleGTID{GroupID: 12345}
|
||||
want := true
|
||||
|
||||
cmp, err := input1.TryCompare(input2)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for %#v.TryCompare(%#v): %v", input1, input2, err)
|
||||
}
|
||||
if cmp != 0 {
|
||||
t.Errorf("%#v.TryCompare(%#v) = %v, want 0", input1, input2, cmp)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDCompareWrongType(t *testing.T) {
|
||||
func TestGoogleGTIDContainsWrongType(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 123}
|
||||
input2 := fakeGTID{}
|
||||
want := "can't compare GTID, wrong type"
|
||||
want := false
|
||||
|
||||
_, err := input1.TryCompare(input2)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for %#v.TryCompare(%#v)", input1, input2)
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
t.Errorf("wrong error message for %#v.TryCompare(%#v), got %v, want %v", input1, input2, err, want)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDCompareNil(t *testing.T) {
|
||||
func TestGoogleGTIDContainsNil(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 123}
|
||||
input2 := GTIDSet(nil)
|
||||
want := true
|
||||
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDContainsGTIDLess(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 12345}
|
||||
input2 := GoogleGTID{GroupID: 54321}
|
||||
want := false
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDContainsGTIDGreater(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 54321}
|
||||
input2 := GoogleGTID{GroupID: 12345}
|
||||
want := true
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDContainsGTIDEqual(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 12345}
|
||||
input2 := GoogleGTID{GroupID: 12345}
|
||||
want := true
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDContainsGTIDWrongType(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 123}
|
||||
input2 := fakeGTID{}
|
||||
want := false
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDContainsGTIDNil(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 123}
|
||||
input2 := GTID(nil)
|
||||
want := "can't compare GTID"
|
||||
want := true
|
||||
|
||||
_, err := input1.TryCompare(input2)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for %#v.TryCompare(%#v)", input1, input2)
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
t.Errorf("wrong error message for %#v.TryCompare(%#v), got %v, want %v", input1, input2, err, want)
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDEqual(t *testing.T) {
|
||||
input1 := GTID(GoogleGTID{GroupID: 41234})
|
||||
input2 := GTID(GoogleGTID{GroupID: 41234})
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := GoogleGTID{GroupID: 41234}
|
||||
want := true
|
||||
|
||||
cmp := input1 == input2
|
||||
if cmp != want {
|
||||
t.Errorf("(%#v == %#v) = %v, want %v", input1, input2, cmp, want)
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDNotEqual(t *testing.T) {
|
||||
input1 := GTID(GoogleGTID{GroupID: 41234})
|
||||
input2 := GTID(GoogleGTID{GroupID: 51234})
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := GoogleGTID{GroupID: 51234}
|
||||
want := false
|
||||
|
||||
cmp := input1 == input2
|
||||
if cmp != want {
|
||||
t.Errorf("(%#v == %#v) = %v, want %v", input1, input2, cmp, want)
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDEqualWrongType(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := fakeGTID{}
|
||||
want := false
|
||||
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDEqualNil(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := GTIDSet(nil)
|
||||
want := false
|
||||
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDAddGTIDEqual(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := GoogleGTID{GroupID: 41234}
|
||||
want := GoogleGTID{GroupID: 41234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDAddGTIDGreater(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := GoogleGTID{GroupID: 51234}
|
||||
want := GoogleGTID{GroupID: 51234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDAddGTIDLess(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 51234}
|
||||
input2 := GoogleGTID{GroupID: 41234}
|
||||
want := GoogleGTID{GroupID: 51234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDAddGTIDWrongType(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := fakeGTID{}
|
||||
want := input1
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDAddGTIDNil(t *testing.T) {
|
||||
input1 := GoogleGTID{GroupID: 41234}
|
||||
input2 := GTID(nil)
|
||||
want := input1
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoogleGTIDAddGTIDDifferentServer(t *testing.T) {
|
||||
input1 := GoogleGTID{ServerID: 1, GroupID: 41234}
|
||||
input2 := GoogleGTID{ServerID: 2, GroupID: 51234}
|
||||
want := GoogleGTID{ServerID: 2, GroupID: 51234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,25 +21,29 @@ import (
|
|||
// Types that implement GTID should use a non-pointer receiver. This ensures
|
||||
// that comparing GTID interface values with == has the expected semantics.
|
||||
type GTID interface {
|
||||
// String returns the canonical form of the GTID as expected by a particular
|
||||
// flavor of MySQL.
|
||||
// String returns the canonical printed form of the GTID as expected by a
|
||||
// particular flavor of MySQL.
|
||||
String() string
|
||||
|
||||
// Flavor returns the key under which the corresponding GTID parser function
|
||||
// is registered in the GTIDParsers map.
|
||||
// is registered in the gtidParsers map.
|
||||
Flavor() string
|
||||
|
||||
// TryCompare tries to compare two GTIDs. Some flavors of GTID can always be
|
||||
// compared (e.g. Google MySQL group_id). Others can only be compared if they
|
||||
// came from the same master (e.g. MariaDB, MySQL 5.6).
|
||||
//
|
||||
// If the comparison is possible, a.TryCompare(b) will return an int that is:
|
||||
// < 0 if a < b (a came before b)
|
||||
// == 0 if a == b
|
||||
// > 0 if a > b (a came after b)
|
||||
//
|
||||
// If the comparison is not possible, a non-nil error will be returned.
|
||||
TryCompare(GTID) (int, error)
|
||||
// SourceServer returns the ID of the server that generated the transaction.
|
||||
SourceServer() string
|
||||
|
||||
// SequenceNumber returns the ID number that increases with each transaction.
|
||||
// It is only valid to compare the sequence numbers of two GTIDs if they have
|
||||
// the same domain value.
|
||||
SequenceNumber() uint64
|
||||
|
||||
// SequenceDomain returns the ID of the domain within which two sequence
|
||||
// numbers can be meaningfully compared.
|
||||
SequenceDomain() string
|
||||
|
||||
// GTIDSet returns a GTIDSet of the same flavor as this GTID, containing only
|
||||
// this GTID.
|
||||
GTIDSet() GTIDSet
|
||||
}
|
||||
|
||||
// gtidParsers maps flavor names to parser functions.
|
||||
|
@ -49,7 +53,7 @@ var gtidParsers = make(map[string]func(string) (GTID, error))
|
|||
func ParseGTID(flavor, value string) (GTID, error) {
|
||||
parser := gtidParsers[flavor]
|
||||
if parser == nil {
|
||||
return nil, fmt.Errorf("ParseGTID: unknown flavor '%v'", flavor)
|
||||
return nil, fmt.Errorf("parse error: unknown GTID flavor %#v", flavor)
|
||||
}
|
||||
return parser(value)
|
||||
}
|
||||
|
@ -84,7 +88,7 @@ func DecodeGTID(s string) (GTID, error) {
|
|||
parts := strings.SplitN(s, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
// There is no flavor. Try looking for a default parser.
|
||||
parts = []string{"", s}
|
||||
return ParseGTID("", s)
|
||||
}
|
||||
return ParseGTID(parts[0], parts[1])
|
||||
}
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
// Copyright 2014, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
// GTIDSet represents the set of transactions received or applied by a server.
|
||||
// In some flavors, a single GTID is enough to specify the set of all
|
||||
// transactions that came before it, but in others a more complex structure is
|
||||
// required.
|
||||
//
|
||||
// GTIDSet is wrapped by ReplicationPosition, which is a concrete struct that
|
||||
// enables JSON and BSON marshaling. Most code outside of this package should
|
||||
// use ReplicationPosition rather than GTIDSet.
|
||||
type GTIDSet interface {
|
||||
// String returns the canonical printed form of the set as expected by a
|
||||
// particular flavor of MySQL.
|
||||
String() string
|
||||
|
||||
// Flavor returns the key under which the corresponding parser function is
|
||||
// registered in the transactionSetParsers map.
|
||||
Flavor() string
|
||||
|
||||
// Last returns the GTID of the most recent transaction in the set.
|
||||
Last() GTID
|
||||
|
||||
// Contains returns true if the set contains the specified transaction.
|
||||
ContainsGTID(GTID) bool
|
||||
|
||||
// Contains returns true if the set is a superset of another set.
|
||||
Contains(GTIDSet) bool
|
||||
|
||||
// Equal returns true if the set is equal to another set.
|
||||
Equal(GTIDSet) bool
|
||||
|
||||
// AddGTID returns a new GTIDSet that is expanded to contain the given GTID.
|
||||
AddGTID(GTID) GTIDSet
|
||||
}
|
||||
|
||||
// gtidSetParsers maps flavor names to parser functions. It is used by
|
||||
// ParseReplicationPosition().
|
||||
var gtidSetParsers = make(map[string]func(string) (GTIDSet, error))
|
|
@ -39,13 +39,13 @@ func TestMustParseGTID(t *testing.T) {
|
|||
|
||||
got := MustParseGTID(flavor, input)
|
||||
if got != want {
|
||||
t.Errorf("ParseGTID(%#v, %#v) = %#v, want %#v", flavor, input, got, want)
|
||||
t.Errorf("MustParseGTID(%#v, %#v) = %#v, want %#v", flavor, input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMustParseGTIDError(t *testing.T) {
|
||||
defer func() {
|
||||
want := "ParseGTID: unknown flavor"
|
||||
want := `parse error: unknown GTID flavor "unknown flavor !@$!@"`
|
||||
err := recover()
|
||||
if err == nil {
|
||||
t.Errorf("wrong error, got %#v, want %#v", err, want)
|
||||
|
@ -60,7 +60,7 @@ func TestMustParseGTIDError(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseUnknownFlavor(t *testing.T) {
|
||||
want := "ParseGTID: unknown flavor 'foobar8675309'"
|
||||
want := `parse error: unknown GTID flavor "foobar8675309"`
|
||||
|
||||
_, err := ParseGTID("foobar8675309", "foo")
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
|
@ -111,7 +111,7 @@ func TestMustDecodeGTID(t *testing.T) {
|
|||
|
||||
func TestMustDecodeGTIDError(t *testing.T) {
|
||||
defer func() {
|
||||
want := "ParseGTID: unknown flavor"
|
||||
want := `parse error: unknown GTID flavor "unknown flavor !@$!@"`
|
||||
err := recover()
|
||||
if err == nil {
|
||||
t.Errorf("wrong error, got %#v, want %#v", err, want)
|
||||
|
@ -423,4 +423,13 @@ type fakeGTID struct {
|
|||
|
||||
func (f fakeGTID) String() string { return f.value }
|
||||
func (f fakeGTID) Flavor() string { return f.flavor }
|
||||
func (fakeGTID) TryCompare(GTID) (int, error) { return 0, nil }
|
||||
func (fakeGTID) SourceServer() string { return "" }
|
||||
func (fakeGTID) SequenceNumber() uint64 { return 0 }
|
||||
func (fakeGTID) SequenceDomain() string { return "" }
|
||||
func (f fakeGTID) GTIDSet() GTIDSet { return nil }
|
||||
|
||||
func (fakeGTID) Last() GTID { return nil }
|
||||
func (fakeGTID) ContainsGTID(GTID) bool { return false }
|
||||
func (fakeGTID) Contains(GTIDSet) bool { return false }
|
||||
func (f fakeGTID) Equal(other GTIDSet) bool { return f == other.(fakeGTID) }
|
||||
func (fakeGTID) AddGTID(GTID) GTIDSet { return nil }
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
|
||||
const mariadbFlavorID = "MariaDB"
|
||||
|
||||
// parseMariadbGTID is registered as a parser for ParseGTID().
|
||||
// parseMariadbGTID is registered as a GTID parser.
|
||||
func parseMariadbGTID(s string) (GTID, error) {
|
||||
// Split into parts.
|
||||
parts := strings.Split(s, "-")
|
||||
|
@ -45,9 +45,19 @@ func parseMariadbGTID(s string) (GTID, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// parseMariadbGTIDSet is registered as a GTIDSet parser.
|
||||
func parseMariadbGTIDSet(s string) (GTIDSet, error) {
|
||||
gtid, err := parseMariadbGTID(s)
|
||||
return gtid.(MariadbGTID), err
|
||||
}
|
||||
|
||||
// MariadbGTID implements GTID.
|
||||
type MariadbGTID struct {
|
||||
// Domain is the ID number of the domain within which sequence numbers apply.
|
||||
Domain uint32
|
||||
// Server is the ID of the server that generated the transaction.
|
||||
Server uint32
|
||||
// Sequence is the sequence number of the transaction within the domain.
|
||||
Sequence uint64
|
||||
}
|
||||
|
||||
|
@ -61,31 +71,74 @@ func (gtid MariadbGTID) Flavor() string {
|
|||
return mariadbFlavorID
|
||||
}
|
||||
|
||||
// TryCompare implements GTID.TryCompare().
|
||||
func (gtid MariadbGTID) TryCompare(cmp GTID) (int, error) {
|
||||
other, ok := cmp.(MariadbGTID)
|
||||
// SequenceDomain implements GTID.SequenceDomain().
|
||||
func (gtid MariadbGTID) SequenceDomain() string {
|
||||
return strconv.FormatUint(uint64(gtid.Domain), 10)
|
||||
}
|
||||
|
||||
// SourceServer implements GTID.SourceServer().
|
||||
func (gtid MariadbGTID) SourceServer() string {
|
||||
return strconv.FormatUint(uint64(gtid.Server), 10)
|
||||
}
|
||||
|
||||
// SequenceNumber implements GTID.SequenceNumber().
|
||||
func (gtid MariadbGTID) SequenceNumber() uint64 {
|
||||
return gtid.Sequence
|
||||
}
|
||||
|
||||
// GTIDSet implements GTID.GTIDSet().
|
||||
func (gtid MariadbGTID) GTIDSet() GTIDSet {
|
||||
return gtid
|
||||
}
|
||||
|
||||
// Last implements GTIDSet.Last().
|
||||
func (gtid MariadbGTID) Last() GTID {
|
||||
return gtid
|
||||
}
|
||||
|
||||
// ContainsGTID implements GTIDSet.ContainsGTID().
|
||||
func (gtid MariadbGTID) ContainsGTID(other GTID) bool {
|
||||
if other == nil {
|
||||
return true
|
||||
}
|
||||
mdbOther, ok := other.(MariadbGTID)
|
||||
if !ok || gtid.Domain != mdbOther.Domain {
|
||||
return false
|
||||
}
|
||||
return gtid.Sequence >= mdbOther.Sequence
|
||||
}
|
||||
|
||||
// Contains implements GTIDSet.Contains().
|
||||
func (gtid MariadbGTID) Contains(other GTIDSet) bool {
|
||||
if other == nil {
|
||||
return true
|
||||
}
|
||||
mdbOther, ok := other.(MariadbGTID)
|
||||
if !ok || gtid.Domain != mdbOther.Domain {
|
||||
return false
|
||||
}
|
||||
return gtid.Sequence >= mdbOther.Sequence
|
||||
}
|
||||
|
||||
// Equal implements GTIDSet.Equal().
|
||||
func (gtid MariadbGTID) Equal(other GTIDSet) bool {
|
||||
mdbOther, ok := other.(MariadbGTID)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("can't compare GTID, wrong type: %#v.TryCompare(%#v)",
|
||||
gtid, cmp)
|
||||
return false
|
||||
}
|
||||
return gtid == mdbOther
|
||||
}
|
||||
|
||||
if gtid.Domain != other.Domain {
|
||||
return 0, fmt.Errorf("can't compare GTID, MariaDB Domain doesn't match: %v != %v", gtid.Domain, other.Domain)
|
||||
}
|
||||
if gtid.Server != other.Server {
|
||||
return 0, fmt.Errorf("can't compare GTID, MariaDB Server doesn't match: %v != %v", gtid.Server, other.Server)
|
||||
}
|
||||
|
||||
switch true {
|
||||
case gtid.Sequence < other.Sequence:
|
||||
return -1, nil
|
||||
case gtid.Sequence > other.Sequence:
|
||||
return 1, nil
|
||||
default:
|
||||
return 0, nil
|
||||
// AddGTID implements GTIDSet.AddGTID().
|
||||
func (gtid MariadbGTID) AddGTID(other GTID) GTIDSet {
|
||||
mdbOther, ok := other.(MariadbGTID)
|
||||
if !ok || gtid.Domain != mdbOther.Domain || gtid.Sequence >= mdbOther.Sequence {
|
||||
return gtid
|
||||
}
|
||||
return mdbOther
|
||||
}
|
||||
|
||||
func init() {
|
||||
gtidParsers[mariadbFlavorID] = parseMariadbGTID
|
||||
gtidSetParsers[mariadbFlavorID] = parseMariadbGTIDSet
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ func TestParseMariaGTID(t *testing.T) {
|
|||
t.Errorf("%v", err)
|
||||
}
|
||||
if got.(MariadbGTID) != want {
|
||||
t.Errorf("ParseGTID(%v) = %v, want %v", input, got, want)
|
||||
t.Errorf("parseMariadbGTID(%v) = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -74,6 +74,19 @@ func TestParseMariaGTIDInvalidSequence(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseMariaGTIDSet(t *testing.T) {
|
||||
input := "12-34-5678"
|
||||
want := MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}
|
||||
|
||||
got, err := parseMariadbGTIDSet(input)
|
||||
if err != nil {
|
||||
t.Errorf("%v", err)
|
||||
}
|
||||
if got.(MariadbGTID) != want {
|
||||
t.Errorf("parseMariadbGTIDSet(%v) = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDString(t *testing.T) {
|
||||
input := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1737373}
|
||||
want := "5-4727-1737373"
|
||||
|
@ -94,119 +107,302 @@ func TestMariaGTIDFlavor(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDCompareLess(t *testing.T) {
|
||||
func TestMariaGTIDSequenceDomain(t *testing.T) {
|
||||
input := MariadbGTID{Domain: 12, Server: 345, Sequence: 6789}
|
||||
want := "12"
|
||||
|
||||
got := input.SequenceDomain()
|
||||
if got != want {
|
||||
t.Errorf("%#v.SequenceDomain() = '%v', want '%v'", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDSourceServer(t *testing.T) {
|
||||
input := MariadbGTID{Domain: 12, Server: 345, Sequence: 6789}
|
||||
want := "345"
|
||||
|
||||
got := input.SourceServer()
|
||||
if got != want {
|
||||
t.Errorf("%#v.SourceServer() = '%v', want '%v'", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDSequenceNumber(t *testing.T) {
|
||||
input := MariadbGTID{Domain: 12, Server: 345, Sequence: 6789}
|
||||
want := uint64(6789)
|
||||
|
||||
got := input.SequenceNumber()
|
||||
if got != want {
|
||||
t.Errorf("%#v.SequenceNumber() = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDGTIDSet(t *testing.T) {
|
||||
input := MariadbGTID{Domain: 12, Server: 345, Sequence: 6789}
|
||||
want := GTIDSet(input)
|
||||
|
||||
got := input.GTIDSet()
|
||||
if got != want {
|
||||
t.Errorf("%#v.GTIDSet() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDLast(t *testing.T) {
|
||||
input := MariadbGTID{Domain: 12, Server: 345, Sequence: 6789}
|
||||
want := GTID(input)
|
||||
|
||||
got := input.Last()
|
||||
if got != want {
|
||||
t.Errorf("%#v.Last() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDContainsLess(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 300}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 700}
|
||||
want := false
|
||||
|
||||
cmp, err := input1.TryCompare(input2)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for %#v.TryCompare(%#v): %v", input1, input2, err)
|
||||
}
|
||||
if !(cmp < 0) {
|
||||
t.Errorf("%#v.TryCompare(%#v) = %v, want < 0", input1, input2, cmp)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDCompareGreater(t *testing.T) {
|
||||
func TestMariaGTIDContainsGreater(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 9000}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 100}
|
||||
want := true
|
||||
|
||||
cmp, err := input1.TryCompare(input2)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for %#v.TryCompare(%#v): %v", input2, input1, err)
|
||||
}
|
||||
if !(cmp > 0) {
|
||||
t.Errorf("%#v.TryCompare(%#v) = %v, want > 0", input2, input1, cmp)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDCompareEqual(t *testing.T) {
|
||||
func TestMariaGTIDContainsEqual(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1234}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1234}
|
||||
want := true
|
||||
|
||||
cmp, err := input1.TryCompare(input2)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for %#v.TryCompare(%#v): %v", input1, input2, err)
|
||||
}
|
||||
if cmp != 0 {
|
||||
t.Errorf("%#v.TryCompare(%#v) = %v, want 0", input1, input2, cmp)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDCompareNil(t *testing.T) {
|
||||
func TestMariaGTIDContainsNil(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 1, Server: 2, Sequence: 123}
|
||||
input2 := GTID(nil)
|
||||
want := "can't compare GTID"
|
||||
input2 := GTIDSet(nil)
|
||||
want := true
|
||||
|
||||
_, err := input1.TryCompare(input2)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for %#v.TryCompare(%#v)", input1, input2)
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
t.Errorf("wrong error message for %#v.TryCompare(%#v), got %v, want %v", input1, input2, err, want)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDCompareWrongType(t *testing.T) {
|
||||
func TestMariaGTIDContainsWrongType(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1234}
|
||||
input2 := fakeGTID{}
|
||||
want := "can't compare GTID, wrong type"
|
||||
want := false
|
||||
|
||||
_, err := input1.TryCompare(input2)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for %#v.TryCompare(%#v)", input1, input2)
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
t.Errorf("wrong error message for %#v.TryCompare(%#v), got %v, want %v", input1, input2, err, want)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDCompareWrongDomain(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 4727, Sequence: 1234}
|
||||
func TestMariaGTIDContainsDifferentDomain(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 4727, Sequence: 1235}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1234}
|
||||
want := "can't compare GTID, MariaDB Domain doesn't match"
|
||||
want := false
|
||||
|
||||
_, err := input1.TryCompare(input2)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for %#v.TryCompare(%#v)", input1, input2)
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
t.Errorf("wrong error message for %#v.TryCompare(%#v), got %v, want %v", input1, input2, err, want)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDCompareWrongServer(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 4727, Sequence: 1234}
|
||||
func TestMariaGTIDContainsDifferentServer(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 4727, Sequence: 1235}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
want := "can't compare GTID, MariaDB Server doesn't match"
|
||||
want := true
|
||||
|
||||
_, err := input1.TryCompare(input2)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for %#v.TryCompare(%#v)", input1, input2)
|
||||
if got := input1.Contains(input2); got != want {
|
||||
t.Errorf("%#v.Contains(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), want) {
|
||||
t.Errorf("wrong error message for %#v.TryCompare(%#v), got %v, want %v", input1, input2, err, want)
|
||||
}
|
||||
|
||||
func TestMariaGTIDContainsGTIDLess(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 300}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 700}
|
||||
want := false
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDContainsGTIDGreater(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 9000}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 100}
|
||||
want := true
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDContainsGTIDEqual(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1234}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1234}
|
||||
want := true
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDContainsGTIDNil(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 1, Server: 2, Sequence: 123}
|
||||
input2 := GTID(nil)
|
||||
want := true
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDContainsGTIDWrongType(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1234}
|
||||
input2 := fakeGTID{}
|
||||
want := false
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDContainsGTIDDifferentDomain(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 4727, Sequence: 1235}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 4727, Sequence: 1234}
|
||||
want := false
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDContainsGTIDDifferentServer(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 4727, Sequence: 1235}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
want := true
|
||||
|
||||
if got := input1.ContainsGTID(input2); got != want {
|
||||
t.Errorf("%#v.ContainsGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDEqual(t *testing.T) {
|
||||
input1 := GTID(MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234})
|
||||
input2 := GTID(MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234})
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
want := true
|
||||
|
||||
cmp := input1 == input2
|
||||
if cmp != want {
|
||||
t.Errorf("(%#v == %#v) = %v, want %v", input1, input2, cmp, want)
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDNotEqual(t *testing.T) {
|
||||
input1 := GTID(MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234})
|
||||
input2 := GTID(MariadbGTID{Domain: 3, Server: 4555, Sequence: 1234})
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 4555, Sequence: 1234}
|
||||
want := false
|
||||
|
||||
cmp := input1 == input2
|
||||
if cmp != want {
|
||||
t.Errorf("(%#v == %#v) = %v, want %v", input1, input2, cmp, want)
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDEqualWrongType(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := fakeGTID{}
|
||||
want := false
|
||||
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDEqualNil(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := GTIDSet(nil)
|
||||
want := false
|
||||
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDAddGTIDEqual(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
want := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDAddGTIDGreater(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 5234}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
want := MariadbGTID{Domain: 3, Server: 5555, Sequence: 5234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDAddGTIDLess(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 5234}
|
||||
want := MariadbGTID{Domain: 3, Server: 5555, Sequence: 5234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDAddGTIDWrongType(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := fakeGTID{}
|
||||
want := input1
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDAddGTIDNil(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := GTID(nil)
|
||||
want := input1
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDAddGTIDDifferentServer(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 4444, Sequence: 5234}
|
||||
want := MariadbGTID{Domain: 3, Server: 4444, Sequence: 5234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMariaGTIDAddGTIDDifferentDomain(t *testing.T) {
|
||||
input1 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
input2 := MariadbGTID{Domain: 5, Server: 5555, Sequence: 5234}
|
||||
want := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
|
||||
if got := input1.AddGTID(input2); got != want {
|
||||
t.Errorf("%#v.AddGTID(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,72 +5,223 @@
|
|||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/youtube/vitess/go/bson"
|
||||
"github.com/youtube/vitess/go/bytes2"
|
||||
)
|
||||
|
||||
const (
	// InvalidLagSeconds is a special value for SecondsBehindMaster
	// that means replication is not running.
	InvalidLagSeconds = 0xFFFFFFFF
)
|
||||
|
||||
// NOTE(review): the two doc paragraphs and the field sets below appear to be
// two revisions (pre- and post-GTIDSet refactor) interleaved by a diff view.
// The MasterLog* fields and the GTIDSet field likely never coexisted —
// confirm against the original commit which set is current.
// ReplicationPosition tracks the replication position on both a master
// and a slave.
// ReplicationPosition represents the information necessary to describe which
// transactions a server has seen, so that it can request a replication stream
// from a new master that picks up where it left off.
//
// This must be a concrete struct because custom Unmarshalers can't be
// registered on an interface.
//
// The == operator should not be used with ReplicationPosition, because the
// underlying GTIDSet might use slices, which are not comparable. Using == in
// those cases will result in a run-time panic.
type ReplicationPosition struct {
	// MasterLogFile, MasterLogPosition and MasterLogGTID are
	// the position on the logs for transactions that have been
	// applied (SQL position):
	// - on the master, it's File, Position and Group_ID from
	// 'show master status'.
	// - on the slave, it's Relay_Master_Log_File, Exec_Master_Log_Pos
	// and Exec_Master_Group_ID from 'show slave status'.
	MasterLogFile string
	MasterLogPosition uint
	MasterLogGTIDField GTIDField
	GTIDSet GTIDSet

	// MasterLogFileIo and MasterLogPositionIo are the position on the logs
	// that have been downloaded from the master (IO position),
	// but not necessarely applied yet:
	// - on the master, same as MasterLogFile and MasterLogPosition.
	// - on the slave, it's Master_Log_File and Read_Master_Log_Pos
	// from 'show slave status'.
	MasterLogFileIo string
	MasterLogPositionIo uint
	// This is a zero byte compile-time check that no one is trying to
	// use == or != with ReplicationPosition. Without this, we won't know there's
	// a problem until the runtime panic.
	_ [0]struct{ notComparable []byte }
}
|
||||
|
||||
// SecondsBehindMaster is how far behind we are in applying logs in
|
||||
// replication. If equal to InvalidLagSeconds, it means replication
|
||||
// is not running.
|
||||
// Equal returns true if this position is equal to another.
|
||||
func (rp ReplicationPosition) Equal(other ReplicationPosition) bool {
|
||||
if rp.GTIDSet == nil {
|
||||
return other.GTIDSet == nil
|
||||
}
|
||||
return rp.GTIDSet.Equal(other.GTIDSet)
|
||||
}
|
||||
|
||||
// AtLeast returns true if this position is equal to or after another.
|
||||
func (rp ReplicationPosition) AtLeast(other ReplicationPosition) bool {
|
||||
if rp.GTIDSet == nil {
|
||||
return other.GTIDSet == nil
|
||||
}
|
||||
return rp.GTIDSet.Contains(other.GTIDSet)
|
||||
}
|
||||
|
||||
// String returns a string representation of the underlying GTIDSet.
|
||||
// If the set is nil, it returns "<nil>" in the style of Sprintf("%v", nil).
|
||||
func (rp ReplicationPosition) String() string {
|
||||
if rp.GTIDSet == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return rp.GTIDSet.String()
|
||||
}
|
||||
|
||||
// IsZero returns true if this is the zero value, ReplicationPosition{}.
|
||||
func (rp ReplicationPosition) IsZero() bool {
|
||||
return rp.GTIDSet == nil
|
||||
}
|
||||
|
||||
// AppendGTID returns a new ReplicationPosition that represents the position
|
||||
// after the given GTID is replicated.
|
||||
func AppendGTID(rp ReplicationPosition, gtid GTID) ReplicationPosition {
|
||||
if gtid == nil {
|
||||
return rp
|
||||
}
|
||||
if rp.GTIDSet == nil {
|
||||
return ReplicationPosition{GTIDSet: gtid.GTIDSet()}
|
||||
}
|
||||
return ReplicationPosition{GTIDSet: rp.GTIDSet.AddGTID(gtid)}
|
||||
}
|
||||
|
||||
// MustParseReplicationPosition calls ParseReplicationPosition and panics
|
||||
// on error.
|
||||
func MustParseReplicationPosition(flavor, value string) ReplicationPosition {
|
||||
rp, err := ParseReplicationPosition(flavor, value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return rp
|
||||
}
|
||||
|
||||
// EncodeReplicationPosition returns a string that contains both the flavor
|
||||
// and value of the ReplicationPosition, so that the correct parser can be
|
||||
// selected when that string is passed to DecodeReplicationPosition.
|
||||
func EncodeReplicationPosition(rp ReplicationPosition) string {
|
||||
if rp.GTIDSet == nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%s/%s", rp.GTIDSet.Flavor(), rp.GTIDSet.String())
|
||||
}
|
||||
|
||||
// DecodeReplicationPosition converts a string in the format returned by
|
||||
// EncodeReplicationPosition back into a ReplicationPosition value with the
|
||||
// correct underlying flavor.
|
||||
func DecodeReplicationPosition(s string) (rp ReplicationPosition, err error) {
|
||||
if s == "" {
|
||||
return rp, nil
|
||||
}
|
||||
|
||||
parts := strings.SplitN(s, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
// There is no flavor. Try looking for a default parser.
|
||||
return ParseReplicationPosition("", s)
|
||||
}
|
||||
return ParseReplicationPosition(parts[0], parts[1])
|
||||
}
|
||||
|
||||
// ParseReplicationPosition calls the parser for the specified flavor.
|
||||
func ParseReplicationPosition(flavor, value string) (rp ReplicationPosition, err error) {
|
||||
parser := gtidSetParsers[flavor]
|
||||
if parser == nil {
|
||||
return rp, fmt.Errorf("parse error: unknown GTIDSet flavor %#v", flavor)
|
||||
}
|
||||
gtidSet, err := parser(value)
|
||||
if err != nil {
|
||||
return rp, err
|
||||
}
|
||||
rp.GTIDSet = gtidSet
|
||||
return rp, err
|
||||
}
|
||||
|
||||
// MarshalBson bson-encodes ReplicationPosition.
// A nil GTIDSet encodes as an empty bson object (no fields).
func (rp ReplicationPosition) MarshalBson(buf *bytes2.ChunkedWriter, key string) {
	bson.EncodeOptionalPrefix(buf, bson.Object, key)

	lenWriter := bson.NewLenWriter(buf)

	if rp.GTIDSet != nil {
		// The name of the bson field is the MySQL flavor.
		bson.EncodeString(buf, rp.GTIDSet.Flavor(), rp.GTIDSet.String())
	}

	lenWriter.Close()
}
|
||||
|
||||
// UnmarshalBson bson-decodes into ReplicationPosition.
// It expects the object written by MarshalBson: zero or one fields, where
// the field name is the flavor and the value is the GTIDSet string.
// Panics (per this bson package's convention) on malformed input.
func (rp *ReplicationPosition) UnmarshalBson(buf *bytes.Buffer, kind byte) {
	switch kind {
	case bson.EOO, bson.Object:
		// valid
	case bson.Null:
		// Null decodes to the zero ReplicationPosition.
		return
	default:
		panic(bson.NewBsonError("unexpected kind %v for ReplicationPosition", kind))
	}
	// Skip the 4-byte document length header.
	bson.Next(buf, 4)

	// We expect exactly zero or one fields in this bson object.
	kind = bson.NextByte(buf)
	if kind == bson.EOO {
		// The value was nil, nothing to do.
		return
	}

	// The field name is the MySQL flavor.
	flavor := bson.ReadCString(buf)
	value := bson.DecodeString(buf, kind)

	// Check for and consume the end byte.
	if kind = bson.NextByte(buf); kind != bson.EOO {
		panic(bson.NewBsonError("too many fields for ReplicationPosition"))
	}

	// Parse the value.
	var err error
	*rp, err = ParseReplicationPosition(flavor, value)
	if err != nil {
		panic(bson.NewBsonError("invalid value %#v for ReplicationPosition: %v", value, err))
	}
}
|
||||
|
||||
// MarshalJSON implements encoding/json.Marshaler.
|
||||
func (rp ReplicationPosition) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(EncodeReplicationPosition(rp))
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements encoding/json.Unmarshaler.
|
||||
func (rp *ReplicationPosition) UnmarshalJSON(buf []byte) error {
|
||||
var s string
|
||||
err := json.Unmarshal(buf, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*rp, err = DecodeReplicationPosition(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReplicationStatus holds replication information from SHOW SLAVE STATUS.
type ReplicationStatus struct {
	// Position is presumably the applied (SQL) position — confirm.
	Position ReplicationPosition
	// IOPosition is presumably the downloaded (IO) position — confirm.
	IOPosition ReplicationPosition
	SlaveIORunning bool
	SlaveSQLRunning bool
	// SecondsBehindMaster equals InvalidLagSeconds when replication
	// is not running.
	SecondsBehindMaster uint
}
|
||||
|
||||
// MapKey returns a "file:position" string for the applied (SQL) position,
// usable as a map key.
// NOTE(review): MasterLogFile/MasterLogPosition belong to the pre-GTIDSet
// field set of ReplicationPosition — confirm this method is still current.
func (rp *ReplicationPosition) MapKey() string {
	return fmt.Sprintf("%v:%d", rp.MasterLogFile, rp.MasterLogPosition)
}
|
||||
|
||||
// MapKeyIo returns a "file:position" string for the downloaded (IO)
// position, usable as a map key.
// NOTE(review): MasterLogFileIo/MasterLogPositionIo belong to the pre-GTIDSet
// field set of ReplicationPosition — confirm this method is still current.
func (rp *ReplicationPosition) MapKeyIo() string {
	return fmt.Sprintf("%v:%d", rp.MasterLogFileIo, rp.MasterLogPositionIo)
}
|
||||
|
||||
// ReplicationState describes a slave's position and its master's address.
// NOTE(review): this looks like the pre-rename counterpart of
// ReplicationStatus (see NewReplicationStatus below) — confirm which of the
// two types is current.
type ReplicationState struct {
	// ReplicationPosition is not anonymous because the default json encoder has begun to fail here.
	ReplicationPosition ReplicationPosition
	// MasterHost and MasterPort identify the master address.
	MasterHost string
	MasterPort int
	// MasterConnectRetry is set to 10 by the constructor; units are not
	// shown here — presumably seconds, confirm.
	MasterConnectRetry int
}
|
||||
|
||||
func (rs *ReplicationState) MasterAddr() string {
|
||||
func (rs *ReplicationStatus) SlaveRunning() bool {
|
||||
return rs.SlaveIORunning && rs.SlaveSQLRunning
|
||||
}
|
||||
|
||||
func (rs *ReplicationStatus) MasterAddr() string {
|
||||
return fmt.Sprintf("%v:%v", rs.MasterHost, rs.MasterPort)
|
||||
}
|
||||
|
||||
func NewReplicationState(masterAddr string) (*ReplicationState, error) {
|
||||
func NewReplicationStatus(masterAddr string) (*ReplicationStatus, error) {
|
||||
addrPieces := strings.Split(masterAddr, ":")
|
||||
port, err := strconv.Atoi(addrPieces[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ReplicationState{MasterConnectRetry: 10,
|
||||
return &ReplicationStatus{MasterConnectRetry: 10,
|
||||
MasterHost: addrPieces[0], MasterPort: port}, nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,554 @@
|
|||
// Copyright 2014, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/youtube/vitess/go/bson"
|
||||
)
|
||||
|
||||
func TestReplicationPositionEqual(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
input2 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
want := true
|
||||
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionNotEqual(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
input2 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 12345}}
|
||||
want := false
|
||||
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionEqualZero(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
input2 := ReplicationPosition{}
|
||||
want := false
|
||||
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionZeroEqualZero(t *testing.T) {
|
||||
input1 := ReplicationPosition{}
|
||||
input2 := ReplicationPosition{}
|
||||
want := true
|
||||
|
||||
if got := input1.Equal(input2); got != want {
|
||||
t.Errorf("%#v.Equal(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAtLeastLess(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1233}}
|
||||
input2 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
want := false
|
||||
|
||||
if got := input1.AtLeast(input2); got != want {
|
||||
t.Errorf("%#v.AtLeast(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAtLeastEqual(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
input2 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
want := true
|
||||
|
||||
if got := input1.AtLeast(input2); got != want {
|
||||
t.Errorf("%#v.AtLeast(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAtLeastGreater(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1235}}
|
||||
input2 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
want := true
|
||||
|
||||
if got := input1.AtLeast(input2); got != want {
|
||||
t.Errorf("%#v.AtLeast(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAtLeastDifferentServer(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1235}}
|
||||
input2 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 4444, Sequence: 1234}}
|
||||
want := true
|
||||
|
||||
if got := input1.AtLeast(input2); got != want {
|
||||
t.Errorf("%#v.AtLeast(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAtLeastDifferentDomain(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1235}}
|
||||
input2 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 4, Server: 5555, Sequence: 1234}}
|
||||
want := false
|
||||
|
||||
if got := input1.AtLeast(input2); got != want {
|
||||
t.Errorf("%#v.AtLeast(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionZeroAtLeast(t *testing.T) {
|
||||
input1 := ReplicationPosition{}
|
||||
input2 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
want := false
|
||||
|
||||
if got := input1.AtLeast(input2); got != want {
|
||||
t.Errorf("%#v.AtLeast(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAtLeastZero(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
input2 := ReplicationPosition{}
|
||||
want := true
|
||||
|
||||
if got := input1.AtLeast(input2); got != want {
|
||||
t.Errorf("%#v.AtLeast(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionZeroAtLeastZero(t *testing.T) {
|
||||
input1 := ReplicationPosition{}
|
||||
input2 := ReplicationPosition{}
|
||||
want := true
|
||||
|
||||
if got := input1.AtLeast(input2); got != want {
|
||||
t.Errorf("%#v.AtLeast(%#v) = %v, want %v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionString(t *testing.T) {
|
||||
input := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
want := "3-5555-1234"
|
||||
|
||||
if got := input.String(); got != want {
|
||||
t.Errorf("%#v.String() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionStringNil(t *testing.T) {
|
||||
input := ReplicationPosition{}
|
||||
want := "<nil>"
|
||||
|
||||
if got := input.String(); got != want {
|
||||
t.Errorf("%#v.String() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionIsZero(t *testing.T) {
|
||||
input := ReplicationPosition{}
|
||||
want := true
|
||||
|
||||
if got := input.IsZero(); got != want {
|
||||
t.Errorf("%#v.IsZero() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionIsNotZero(t *testing.T) {
|
||||
input := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
want := false
|
||||
|
||||
if got := input.IsZero(); got != want {
|
||||
t.Errorf("%#v.IsZero() = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAppend(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1235}
|
||||
want := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1235}}
|
||||
|
||||
if got := AppendGTID(input1, input2); !got.Equal(want) {
|
||||
t.Errorf("AppendGTID(%#v, %#v) = %#v, want %#v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAppendNil(t *testing.T) {
|
||||
input1 := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
input2 := GTID(nil)
|
||||
want := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
|
||||
if got := AppendGTID(input1, input2); !got.Equal(want) {
|
||||
t.Errorf("AppendGTID(%#v, %#v) = %#v, want %#v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationPositionAppendToZero(t *testing.T) {
|
||||
input1 := ReplicationPosition{}
|
||||
input2 := MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}
|
||||
want := ReplicationPosition{GTIDSet: MariadbGTID{Domain: 3, Server: 5555, Sequence: 1234}}
|
||||
|
||||
if got := AppendGTID(input1, input2); !got.Equal(want) {
|
||||
t.Errorf("AppendGTID(%#v, %#v) = %#v, want %#v", input1, input2, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMustParseReplicationPosition(t *testing.T) {
|
||||
flavor := "fake flavor"
|
||||
gtidSetParsers[flavor] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{value: s}, nil
|
||||
}
|
||||
input := "12345"
|
||||
want := ReplicationPosition{GTIDSet: fakeGTID{value: "12345"}}
|
||||
|
||||
if got := MustParseReplicationPosition(flavor, input); !got.Equal(want) {
|
||||
t.Errorf("MustParseReplicationPosition(%#v, %#v) = %#v, want %#v", flavor, input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMustParseReplicationPositionError(t *testing.T) {
|
||||
defer func() {
|
||||
want := `parse error: unknown GTIDSet flavor "unknown flavor !@$!@"`
|
||||
err := recover()
|
||||
if err == nil {
|
||||
t.Errorf("wrong error, got %#v, want %#v", err, want)
|
||||
}
|
||||
got, ok := err.(error)
|
||||
if !ok || !strings.HasPrefix(got.Error(), want) {
|
||||
t.Errorf("wrong error, got %#v, want %#v", got, want)
|
||||
}
|
||||
}()
|
||||
|
||||
MustParseReplicationPosition("unknown flavor !@$!@", "yowzah")
|
||||
}
|
||||
|
||||
func TestEncodeReplicationPosition(t *testing.T) {
|
||||
input := ReplicationPosition{GTIDSet: fakeGTID{
|
||||
flavor: "myflav",
|
||||
value: "1:2:3-4-5-6",
|
||||
}}
|
||||
want := "myflav/1:2:3-4-5-6"
|
||||
|
||||
if got := EncodeReplicationPosition(input); got != want {
|
||||
t.Errorf("EncodeReplicationPosition(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeReplicationPositionZero(t *testing.T) {
|
||||
input := ReplicationPosition{}
|
||||
want := ""
|
||||
|
||||
if got := EncodeReplicationPosition(input); got != want {
|
||||
t.Errorf("EncodeReplicationPosition(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeReplicationPosition(t *testing.T) {
|
||||
gtidSetParsers["flavorflav"] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{value: s}, nil
|
||||
}
|
||||
input := "flavorflav/123-456:789"
|
||||
want := ReplicationPosition{GTIDSet: fakeGTID{value: "123-456:789"}}
|
||||
|
||||
got, err := DecodeReplicationPosition(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("DecodeReplicationPosition(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeReplicationPositionZero(t *testing.T) {
|
||||
input := ""
|
||||
want := ReplicationPosition{}
|
||||
|
||||
got, err := DecodeReplicationPosition(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("DecodeReplicationPosition(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeReplicationPositionNoFlavor(t *testing.T) {
|
||||
gtidSetParsers[""] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{value: s}, nil
|
||||
}
|
||||
input := "12345"
|
||||
want := ReplicationPosition{GTIDSet: fakeGTID{value: "12345"}}
|
||||
|
||||
got, err := DecodeReplicationPosition(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("DecodeReplicationPosition(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBsonMarshalUnmarshalReplicationPosition(t *testing.T) {
|
||||
gtidSetParsers["golf"] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{flavor: "golf", value: s}, nil
|
||||
}
|
||||
input := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
want := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
|
||||
buf, err := bson.Marshal(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
var got ReplicationPosition
|
||||
if err = bson.Unmarshal(buf, &got); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("marshal->unmarshal mismatch, got %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBsonMarshalUnmarshalReplicationPositionPointer(t *testing.T) {
|
||||
gtidSetParsers["golf"] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{flavor: "golf", value: s}, nil
|
||||
}
|
||||
input := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
want := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
|
||||
buf, err := bson.Marshal(&input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
var got ReplicationPosition
|
||||
if err = bson.Unmarshal(buf, &got); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("marshal->unmarshal mismatch, got %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBsonMarshalUnmarshalReplicationPositionInStruct(t *testing.T) {
|
||||
gtidSetParsers["golf"] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{flavor: "golf", value: s}, nil
|
||||
}
|
||||
input := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
want := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
|
||||
type mystruct struct {
|
||||
ReplicationPosition
|
||||
}
|
||||
|
||||
buf, err := bson.Marshal(&mystruct{ReplicationPosition: input})
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
var gotStruct mystruct
|
||||
if err = bson.Unmarshal(buf, &gotStruct); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if got := gotStruct.ReplicationPosition; !got.Equal(want) {
|
||||
t.Errorf("marshal->unmarshal mismatch, got %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBsonMarshalUnmarshalReplicationPositionZero(t *testing.T) {
|
||||
gtidSetParsers["golf"] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{flavor: "golf", value: s}, nil
|
||||
}
|
||||
input := ReplicationPosition{}
|
||||
want := ReplicationPosition{}
|
||||
|
||||
buf, err := bson.Marshal(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
var got ReplicationPosition
|
||||
if err = bson.Unmarshal(buf, &got); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("marshal->unmarshal mismatch, got %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonMarshalReplicationPosition(t *testing.T) {
|
||||
input := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
want := `"golf/par"`
|
||||
|
||||
buf, err := json.Marshal(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if got := string(buf); got != want {
|
||||
t.Errorf("json.Marshal(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonMarshalReplicationPositionPointer(t *testing.T) {
|
||||
input := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
want := `"golf/par"`
|
||||
|
||||
buf, err := json.Marshal(&input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if got := string(buf); got != want {
|
||||
t.Errorf("json.Marshal(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonUnmarshalReplicationPosition(t *testing.T) {
|
||||
gtidSetParsers["golf"] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{flavor: "golf", value: s}, nil
|
||||
}
|
||||
input := `"golf/par"`
|
||||
want := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
|
||||
var got ReplicationPosition
|
||||
err := json.Unmarshal([]byte(input), &got)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("json.Unmarshal(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonMarshalReplicationPositionInStruct(t *testing.T) {
|
||||
input := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
want := `{"ReplicationPosition":"golf/par"}`
|
||||
|
||||
type mystruct struct {
|
||||
ReplicationPosition ReplicationPosition
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(&mystruct{input})
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if got := string(buf); got != want {
|
||||
t.Errorf("json.Marshal(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonUnmarshalReplicationPositionInStruct(t *testing.T) {
|
||||
gtidSetParsers["golf"] = func(s string) (GTIDSet, error) {
|
||||
return fakeGTID{flavor: "golf", value: s}, nil
|
||||
}
|
||||
input := `{"ReplicationPosition":"golf/par"}`
|
||||
want := ReplicationPosition{GTIDSet: fakeGTID{flavor: "golf", value: "par"}}
|
||||
|
||||
var gotStruct struct {
|
||||
ReplicationPosition ReplicationPosition
|
||||
}
|
||||
err := json.Unmarshal([]byte(input), &gotStruct)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if got := gotStruct.ReplicationPosition; !got.Equal(want) {
|
||||
t.Errorf("json.Unmarshal(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonMarshalReplicationPositionZero(t *testing.T) {
|
||||
input := ReplicationPosition{}
|
||||
want := `""`
|
||||
|
||||
buf, err := json.Marshal(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if got := string(buf); got != want {
|
||||
t.Errorf("json.Marshal(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonUnmarshalReplicationPositionZero(t *testing.T) {
|
||||
input := `""`
|
||||
want := ReplicationPosition{}
|
||||
|
||||
var got ReplicationPosition
|
||||
err := json.Unmarshal([]byte(input), &got)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("json.Unmarshal(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationStatusSlaveRunning(t *testing.T) {
|
||||
input := &ReplicationStatus{
|
||||
SlaveIORunning: true,
|
||||
SlaveSQLRunning: true,
|
||||
}
|
||||
want := true
|
||||
if got := input.SlaveRunning(); got != want {
|
||||
t.Errorf("%#v.SlaveRunning() = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationStatusSlaveIONotRunning(t *testing.T) {
|
||||
input := &ReplicationStatus{
|
||||
SlaveIORunning: false,
|
||||
SlaveSQLRunning: true,
|
||||
}
|
||||
want := false
|
||||
if got := input.SlaveRunning(); got != want {
|
||||
t.Errorf("%#v.SlaveRunning() = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationStatusSlaveSQLNotRunning(t *testing.T) {
|
||||
input := &ReplicationStatus{
|
||||
SlaveIORunning: true,
|
||||
SlaveSQLRunning: false,
|
||||
}
|
||||
want := false
|
||||
if got := input.SlaveRunning(); got != want {
|
||||
t.Errorf("%#v.SlaveRunning() = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplicationStatusMasterAddr(t *testing.T) {
|
||||
input := &ReplicationStatus{
|
||||
MasterHost: "master-host",
|
||||
MasterPort: 1234,
|
||||
}
|
||||
want := "master-host:1234"
|
||||
if got := input.MasterAddr(); got != want {
|
||||
t.Errorf("%#v.MasterAddr() = %v, want %v", input, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewReplicationStatus(t *testing.T) {
|
||||
input := "master-host:1234"
|
||||
want := &ReplicationStatus{
|
||||
MasterHost: "master-host",
|
||||
MasterPort: 1234,
|
||||
}
|
||||
got, err := NewReplicationStatus(input)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if got.MasterHost != want.MasterHost || got.MasterPort != want.MasterPort {
|
||||
t.Errorf("NewReplicationStatus(%#v) = %#v, want %#v", input, got, want)
|
||||
}
|
||||
}
|
|
@ -14,17 +14,17 @@ import (
|
|||
|
||||
// if the master is still alive, then we need to demote it gracefully
|
||||
// make it read-only, flush the writes and get the position
|
||||
func (mysqld *Mysqld) DemoteMaster() (*proto.ReplicationPosition, error) {
|
||||
func (mysqld *Mysqld) DemoteMaster() (rp proto.ReplicationPosition, err error) {
|
||||
// label as TYPE_REPLICA
|
||||
mysqld.SetReadOnly(true)
|
||||
cmds := []string{
|
||||
"FLUSH TABLES WITH READ LOCK",
|
||||
"UNLOCK TABLES",
|
||||
}
|
||||
if err := mysqld.ExecuteSuperQueryList(cmds); err != nil {
|
||||
return nil, err
|
||||
if err = mysqld.ExecuteSuperQueryList(cmds); err != nil {
|
||||
return rp, err
|
||||
}
|
||||
return mysqld.MasterStatus()
|
||||
return mysqld.MasterPosition()
|
||||
}
|
||||
|
||||
// setReadWrite: set the new master in read-write mode.
|
||||
|
@ -32,37 +32,50 @@ func (mysqld *Mysqld) DemoteMaster() (*proto.ReplicationPosition, error) {
|
|||
// replicationState: info slaves need to reparent themselves
|
||||
// waitPosition: slaves can wait for this position when restarting replication
|
||||
// timePromoted: this timestamp (unix nanoseconds) is inserted into _vt.replication_log to verify the replication config
|
||||
func (mysqld *Mysqld) PromoteSlave(setReadWrite bool, hookExtraEnv map[string]string) (replicationState *proto.ReplicationState, waitPosition *proto.ReplicationPosition, timePromoted int64, err error) {
|
||||
func (mysqld *Mysqld) PromoteSlave(setReadWrite bool, hookExtraEnv map[string]string) (replicationStatus *proto.ReplicationStatus, waitPosition proto.ReplicationPosition, timePromoted int64, err error) {
|
||||
if err = mysqld.StopSlave(hookExtraEnv); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// If we are forced, we have to get our status as a master, not a slave.
|
||||
lastRepPos, err := mysqld.SlaveStatus()
|
||||
var lastRepPos proto.ReplicationPosition
|
||||
slaveStatus, err := mysqld.SlaveStatus()
|
||||
if err == ErrNotSlave {
|
||||
lastRepPos, err = mysqld.MasterStatus()
|
||||
}
|
||||
lastRepPos, err = mysqld.MasterPosition()
|
||||
} else {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
lastRepPos = slaveStatus.Position
|
||||
}
|
||||
|
||||
// Promote to master.
|
||||
cmds := mysqld.flavor.PromoteSlaveCommands()
|
||||
if err = mysqld.ExecuteSuperQueryList(cmds); err != nil {
|
||||
return
|
||||
}
|
||||
replicationPosition, err := mysqld.MasterStatus()
|
||||
|
||||
// Write a row so there's something in the binlog before we fetch the
|
||||
// master position. Otherwise, the slave may request a GTID that has
|
||||
// already been purged from the binlog.
|
||||
cmds = []string{
|
||||
fmt.Sprintf("INSERT INTO _vt.replication_log (time_created_ns, note) VALUES (%v, 'first binlog event')", time.Now().UnixNano()),
|
||||
}
|
||||
if err = mysqld.ExecuteSuperQueryList(cmds); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
replicationPosition, err := mysqld.MasterPosition()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
mysqldAddr := mysqld.IpAddr()
|
||||
replicationState, err = proto.NewReplicationState(mysqldAddr)
|
||||
replicationStatus, err = proto.NewReplicationStatus(mysqldAddr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
replicationState.ReplicationPosition = *replicationPosition
|
||||
lastPos := lastRepPos.MapKey()
|
||||
newAddr := replicationState.MasterAddr()
|
||||
newPos := replicationState.ReplicationPosition.MapKey()
|
||||
replicationStatus.Position = replicationPosition
|
||||
replicationStatus.IOPosition = replicationPosition
|
||||
timePromoted = time.Now().UnixNano()
|
||||
// write a row to verify that replication is functioning
|
||||
cmds = []string{
|
||||
|
@ -72,20 +85,20 @@ func (mysqld *Mysqld) PromoteSlave(setReadWrite bool, hookExtraEnv map[string]st
|
|||
return
|
||||
}
|
||||
// this is the wait-point for checking replication
|
||||
waitPosition, err = mysqld.MasterStatus()
|
||||
waitPosition, err = mysqld.MasterPosition()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if waitPosition.MasterLogFile == replicationPosition.MasterLogFile && waitPosition.MasterLogPosition == replicationPosition.MasterLogPosition {
|
||||
// we inserted a row, but our binlog position didn't
|
||||
// change. This is a serious problem. we don't want to
|
||||
// ever promote a master like that.
|
||||
if waitPosition.Equal(replicationPosition) {
|
||||
// We inserted a row, but our binlog position didn't change. This is a
|
||||
// serious problem. We don't want to ever promote a master like that.
|
||||
err = fmt.Errorf("cannot promote slave to master, non-functional binlogs")
|
||||
return
|
||||
}
|
||||
|
||||
cmds = []string{
|
||||
fmt.Sprintf("INSERT INTO _vt.reparent_log (time_created_ns, last_position, new_addr, new_position, wait_position) VALUES (%v, '%v', '%v', '%v', '%v')", timePromoted, lastPos, newAddr, newPos, waitPosition.MapKey()),
|
||||
fmt.Sprintf("INSERT INTO _vt.reparent_log (time_created_ns, last_position, new_addr, new_position, wait_position) VALUES (%v, '%v', '%v', '%v', '%v')",
|
||||
timePromoted, lastRepPos, replicationStatus.MasterAddr(), replicationPosition, waitPosition),
|
||||
}
|
||||
if err = mysqld.ExecuteSuperQueryList(cmds); err != nil {
|
||||
return
|
||||
|
@ -97,9 +110,9 @@ func (mysqld *Mysqld) PromoteSlave(setReadWrite bool, hookExtraEnv map[string]st
|
|||
return
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) RestartSlave(replicationState *proto.ReplicationState, waitPosition *proto.ReplicationPosition, timeCheck int64) error {
|
||||
func (mysqld *Mysqld) RestartSlave(replicationStatus *proto.ReplicationStatus, waitPosition proto.ReplicationPosition, timeCheck int64) error {
|
||||
log.Infof("Restart Slave")
|
||||
cmds, err := StartReplicationCommands(mysqld, replicationState)
|
||||
cmds, err := mysqld.StartReplicationCommands(replicationStatus)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"time"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"github.com/youtube/vitess/go/mysql"
|
||||
mproto "github.com/youtube/vitess/go/mysql/proto"
|
||||
"github.com/youtube/vitess/go/vt/binlog/binlogplayer"
|
||||
blproto "github.com/youtube/vitess/go/vt/binlog/proto"
|
||||
|
@ -33,62 +34,9 @@ const (
|
|||
SlaveStartDeadline = 30
|
||||
)
|
||||
|
||||
var changeMasterCmd = `CHANGE MASTER TO
|
||||
MASTER_HOST = '{{.ReplicationState.MasterHost}}',
|
||||
MASTER_PORT = {{.ReplicationState.MasterPort}},
|
||||
MASTER_USER = '{{.MasterUser}}',
|
||||
MASTER_PASSWORD = '{{.MasterPassword}}',
|
||||
MASTER_LOG_FILE = '{{.ReplicationState.ReplicationPosition.MasterLogFile}}',
|
||||
MASTER_LOG_POS = {{.ReplicationState.ReplicationPosition.MasterLogPosition}},
|
||||
MASTER_CONNECT_RETRY = {{.ReplicationState.MasterConnectRetry}}
|
||||
`
|
||||
|
||||
var masterPasswordStart = " MASTER_PASSWORD = '"
|
||||
var masterPasswordEnd = "',\n"
|
||||
|
||||
type newMasterData struct {
|
||||
ReplicationState *proto.ReplicationState
|
||||
MasterUser string
|
||||
MasterPassword string
|
||||
}
|
||||
|
||||
func StartReplicationCommands(mysqld *Mysqld, replState *proto.ReplicationState) ([]string, error) {
|
||||
params, err := dbconfigs.MysqlParams(mysqld.replParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nmd := &newMasterData{
|
||||
ReplicationState: replState,
|
||||
MasterUser: params.Uname,
|
||||
MasterPassword: params.Pass,
|
||||
}
|
||||
cmc, err := fillStringTemplate(changeMasterCmd, nmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if params.SslEnabled() {
|
||||
cmc += ",\n MASTER_SSL = 1"
|
||||
}
|
||||
if params.SslCa != "" {
|
||||
cmc += ",\n MASTER_SSL_CA = '" + params.SslCa + "'"
|
||||
}
|
||||
if params.SslCaPath != "" {
|
||||
cmc += ",\n MASTER_SSL_CAPATH = '" + params.SslCaPath + "'"
|
||||
}
|
||||
if params.SslCert != "" {
|
||||
cmc += ",\n MASTER_SSL_CERT = '" + params.SslCert + "'"
|
||||
}
|
||||
if params.SslKey != "" {
|
||||
cmc += ",\n MASTER_SSL_KEY = '" + params.SslKey + "'"
|
||||
}
|
||||
|
||||
return []string{
|
||||
"STOP SLAVE",
|
||||
"RESET SLAVE",
|
||||
cmc,
|
||||
"START SLAVE"}, nil
|
||||
}
|
||||
|
||||
func fillStringTemplate(tmpl string, vars interface{}) (string, error) {
|
||||
myTemplate := template.Must(template.New("").Parse(tmpl))
|
||||
data := new(bytes.Buffer)
|
||||
|
@ -98,15 +46,41 @@ func fillStringTemplate(tmpl string, vars interface{}) (string, error) {
|
|||
return data.String(), nil
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) WaitForSlaveStart(slaveStartDeadline int) (err error) {
|
||||
var rowMap map[string]string
|
||||
for slaveWait := 0; slaveWait < slaveStartDeadline; slaveWait++ {
|
||||
rowMap, err = mysqld.slaveStatus()
|
||||
if err != nil {
|
||||
return
|
||||
func changeMasterArgs(params *mysql.ConnectionParams, status *proto.ReplicationStatus) []string {
|
||||
var args []string
|
||||
args = append(args, fmt.Sprintf("MASTER_HOST = '%s'", status.MasterHost))
|
||||
args = append(args, fmt.Sprintf("MASTER_PORT = %d", status.MasterPort))
|
||||
args = append(args, fmt.Sprintf("MASTER_USER = '%s'", params.Uname))
|
||||
args = append(args, fmt.Sprintf("MASTER_PASSWORD = '%s'", params.Pass))
|
||||
args = append(args, fmt.Sprintf("MASTER_CONNECT_RETRY = %d", status.MasterConnectRetry))
|
||||
|
||||
if params.SslEnabled() {
|
||||
args = append(args, "MASTER_SSL = 1")
|
||||
}
|
||||
if params.SslCa != "" {
|
||||
args = append(args, fmt.Sprintf("MASTER_SSL_CA = '%s'", params.SslCa))
|
||||
}
|
||||
if params.SslCaPath != "" {
|
||||
args = append(args, fmt.Sprintf("MASTER_SSL_CAPATH = '%s'", params.SslCaPath))
|
||||
}
|
||||
if params.SslCert != "" {
|
||||
args = append(args, fmt.Sprintf("MASTER_SSL_CERT = '%s'", params.SslCert))
|
||||
}
|
||||
if params.SslKey != "" {
|
||||
args = append(args, fmt.Sprintf("MASTER_SSL_KEY = '%s'", params.SslKey))
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
if rowMap["Slave_IO_Running"] == "Yes" && rowMap["Slave_SQL_Running"] == "Yes" {
|
||||
func (mysqld *Mysqld) WaitForSlaveStart(slaveStartDeadline int) error {
|
||||
var rowMap map[string]string
|
||||
for slaveWait := 0; slaveWait < slaveStartDeadline; slaveWait++ {
|
||||
status, err := mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if status.SlaveRunning() {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
@ -146,12 +120,11 @@ func (mysqld *Mysqld) StopSlave(hookExtraEnv map[string]string) error {
|
|||
}
|
||||
|
||||
func (mysqld *Mysqld) GetMasterAddr() (string, error) {
|
||||
slaveStatus, err := mysqld.slaveStatus()
|
||||
slaveStatus, err := mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
masterAddr := slaveStatus["Master_Host"] + ":" + slaveStatus["Master_Port"]
|
||||
return masterAddr, nil
|
||||
return slaveStatus.MasterAddr(), nil
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) GetMysqlPort() (int, error) {
|
||||
|
@ -198,193 +171,118 @@ var (
|
|||
ErrNotMaster = errors.New("no master status")
|
||||
)
|
||||
|
||||
func (mysqld *Mysqld) slaveStatus() (map[string]string, error) {
|
||||
qr, err := mysqld.fetchSuperQuery("SHOW SLAVE STATUS")
|
||||
// fetchSuperQueryMap returns a map from column names to cell data for a query
|
||||
// that should return exactly 1 row.
|
||||
func (mysqld *Mysqld) fetchSuperQueryMap(query string) (map[string]string, error) {
|
||||
qr, err := mysqld.fetchSuperQuery(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(qr.Rows) != 1 {
|
||||
return nil, ErrNotSlave
|
||||
return nil, fmt.Errorf("query %#v returned %d rows, expected 1", query, len(qr.Rows))
|
||||
}
|
||||
if len(qr.Fields) != len(qr.Rows[0]) {
|
||||
return nil, fmt.Errorf("query %#v returned %d column names, expected %d", query, len(qr.Fields), len(qr.Rows[0]))
|
||||
}
|
||||
|
||||
rowMap := make(map[string]string)
|
||||
for i, column := range qr.Rows[0] {
|
||||
if i >= len(showSlaveStatusColumnNames) {
|
||||
break
|
||||
}
|
||||
rowMap[showSlaveStatusColumnNames[i]] = column.String()
|
||||
for i, value := range qr.Rows[0] {
|
||||
rowMap[qr.Fields[i].Name] = value.String()
|
||||
}
|
||||
return rowMap, nil
|
||||
}
|
||||
|
||||
// Return a replication state that will reparent a slave to the
|
||||
// correct master for a specified position.
|
||||
func (mysqld *Mysqld) ReparentPosition(slavePosition *proto.ReplicationPosition) (rs *proto.ReplicationState, waitPosition *proto.ReplicationPosition, reparentTime int64, err error) {
|
||||
qr, err := mysqld.fetchSuperQuery(fmt.Sprintf("SELECT time_created_ns, new_addr, new_position, wait_position FROM _vt.reparent_log WHERE last_position = '%v'", slavePosition.MapKey()))
|
||||
func (mysqld *Mysqld) ReparentPosition(slavePosition proto.ReplicationPosition) (rs *proto.ReplicationStatus, waitPosition proto.ReplicationPosition, reparentTime int64, err error) {
|
||||
qr, err := mysqld.fetchSuperQuery(fmt.Sprintf("SELECT time_created_ns, new_addr, new_position, wait_position FROM _vt.reparent_log WHERE last_position = '%v'", slavePosition))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if len(qr.Rows) != 1 {
|
||||
err = fmt.Errorf("no reparent for position: %v", slavePosition.MapKey())
|
||||
err = fmt.Errorf("no reparent for position: %v", slavePosition)
|
||||
return
|
||||
}
|
||||
|
||||
reparentTime, err = qr.Rows[0][0].ParseInt64()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("bad reparent time: %v %v %v", slavePosition.MapKey(), qr.Rows[0][0], err)
|
||||
err = fmt.Errorf("bad reparent time: %v %v %v", slavePosition, qr.Rows[0][0], err)
|
||||
return
|
||||
}
|
||||
|
||||
file, pos, err := parseReplicationPosition(qr.Rows[0][2].String())
|
||||
rs, err = proto.NewReplicationStatus(qr.Rows[0][1].String())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
rs, err = proto.NewReplicationState(qr.Rows[0][1].String())
|
||||
rs.Position, err = mysqld.flavor.ParseReplicationPosition(qr.Rows[0][2].String())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
rs.ReplicationPosition.MasterLogFile = file
|
||||
rs.ReplicationPosition.MasterLogPosition = uint(pos)
|
||||
|
||||
file, pos, err = parseReplicationPosition(qr.Rows[0][3].String())
|
||||
waitPosition, err = mysqld.flavor.ParseReplicationPosition(qr.Rows[0][3].String())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
waitPosition = new(proto.ReplicationPosition)
|
||||
waitPosition.MasterLogFile = file
|
||||
waitPosition.MasterLogPosition = pos
|
||||
return
|
||||
}
|
||||
|
||||
func parseReplicationPosition(rpos string) (filename string, pos uint, err error) {
|
||||
parts := strings.Split(rpos, ":")
|
||||
if len(parts) != 2 {
|
||||
return "", 0, fmt.Errorf("bad replication file position: %v", rpos)
|
||||
}
|
||||
_pos, err := strconv.ParseUint(parts[1], 10, 32)
|
||||
if err != nil {
|
||||
return "", 0, fmt.Errorf("bad replication file position: %v %v", rpos, err)
|
||||
}
|
||||
filename = parts[0]
|
||||
pos = uint(_pos)
|
||||
return
|
||||
func (mysqld *Mysqld) WaitMasterPos(targetPos proto.ReplicationPosition, waitTimeout time.Duration) error {
|
||||
return mysqld.flavor.WaitMasterPos(mysqld, targetPos, waitTimeout)
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) WaitMasterPos(rp *proto.ReplicationPosition, waitTimeout time.Duration) error {
|
||||
var timeToWait int
|
||||
if waitTimeout > 0 {
|
||||
timeToWait = int(waitTimeout / time.Second)
|
||||
if timeToWait == 0 {
|
||||
timeToWait = 1
|
||||
}
|
||||
}
|
||||
cmd := fmt.Sprintf("SELECT MASTER_POS_WAIT('%v', %v, %v)",
|
||||
rp.MasterLogFile, rp.MasterLogPosition, timeToWait)
|
||||
qr, err := mysqld.fetchSuperQuery(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(qr.Rows) != 1 {
|
||||
return fmt.Errorf("WaitMasterPos returned unexpected row count: %v", len(qr.Rows))
|
||||
}
|
||||
if qr.Rows[0][0].IsNull() {
|
||||
return fmt.Errorf("WaitMasterPos failed: replication stopped")
|
||||
} else if qr.Rows[0][0].String() == "-1" {
|
||||
return fmt.Errorf("WaitMasterPos failed: timed out")
|
||||
}
|
||||
return nil
|
||||
func (mysqld *Mysqld) SlaveStatus() (*proto.ReplicationStatus, error) {
|
||||
return mysqld.flavor.SlaveStatus(mysqld)
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) WaitForMinimumReplicationPosition(targetGTID proto.GTID, waitTimeout time.Duration) (err error) {
|
||||
// TODO(enisoc): Use MySQL "wait for gtid" commands instead of comparing GTIDs.
|
||||
for remaining := waitTimeout; remaining > 0; remaining -= time.Second {
|
||||
pos, err := mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
return err
|
||||
func (mysqld *Mysqld) MasterPosition() (rp proto.ReplicationPosition, err error) {
|
||||
return mysqld.flavor.MasterPosition(mysqld)
|
||||
}
|
||||
|
||||
cmp, err := pos.MasterLogGTIDField.Value.TryCompare(targetGTID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cmp >= 0 { // pos.MasterLogGTIDField.Value >= targetGTID
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Infof("WaitForMinimumReplicationPosition got GTID %v, sleeping for 1s waiting for GTID %v", pos.MasterLogGTIDField, targetGTID)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return fmt.Errorf("timed out waiting for GTID %v", targetGTID)
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) SlaveStatus() (*proto.ReplicationPosition, error) {
|
||||
fields, err := mysqld.slaveStatus()
|
||||
func (mysqld *Mysqld) StartReplicationCommands(status *proto.ReplicationStatus) ([]string, error) {
|
||||
params, err := dbconfigs.MysqlParams(mysqld.replParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pos := new(proto.ReplicationPosition)
|
||||
// Use Relay_Master_Log_File for the SQL thread postion.
|
||||
pos.MasterLogFile = fields["Relay_Master_Log_File"]
|
||||
pos.MasterLogFileIo = fields["Master_Log_File"]
|
||||
temp, _ := strconv.ParseUint(fields["Exec_Master_Log_Pos"], 10, 0)
|
||||
pos.MasterLogPosition = uint(temp)
|
||||
temp, _ = strconv.ParseUint(fields["Read_Master_Log_Pos"], 10, 0)
|
||||
pos.MasterLogPositionIo = uint(temp)
|
||||
pos.MasterLogGTIDField.Value, _ = mysqld.flavor.ParseGTID(fields["Exec_Master_Group_ID"])
|
||||
|
||||
if fields["Slave_IO_Running"] == "Yes" && fields["Slave_SQL_Running"] == "Yes" {
|
||||
temp, _ = strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 0)
|
||||
pos.SecondsBehindMaster = uint(temp)
|
||||
} else {
|
||||
// replications isn't running - report it as invalid since it won't resolve itself.
|
||||
pos.SecondsBehindMaster = proto.InvalidLagSeconds
|
||||
}
|
||||
return pos, nil
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) MasterStatus() (rp *proto.ReplicationPosition, err error) {
|
||||
return mysqld.flavor.MasterStatus(mysqld)
|
||||
return mysqld.flavor.StartReplicationCommands(¶ms, status)
|
||||
}
|
||||
|
||||
/*
|
||||
mysql> show binlog info for 5\G
|
||||
mysql> SHOW BINLOG INFO FOR 5\G
|
||||
*************************** 1. row ***************************
|
||||
Log_name: vt-0000041983-bin.000001
|
||||
Pos: 1194
|
||||
Server_ID: 41983
|
||||
*/
|
||||
func (mysqld *Mysqld) BinlogInfo(gtid proto.GTID) (rp *proto.ReplicationPosition, err error) {
|
||||
qr, err := mysqld.fetchSuperQuery(fmt.Sprintf("SHOW BINLOG INFO FOR %v", gtid))
|
||||
// BinlogInfo returns the filename and position for a Google MySQL group_id.
|
||||
// This command only exists in Google MySQL.
|
||||
func (mysqld *Mysqld) BinlogInfo(pos proto.ReplicationPosition) (fileName string, filePos uint, err error) {
|
||||
if pos.IsZero() {
|
||||
return fileName, filePos, fmt.Errorf("input position for BinlogInfo is uninitialized")
|
||||
}
|
||||
// Extract the group_id from the GoogleGTID. We can't just use String() on the
|
||||
// ReplicationPosition, because that includes the server_id.
|
||||
gtid, ok := pos.GTIDSet.(proto.GoogleGTID)
|
||||
if !ok {
|
||||
return "", 0, fmt.Errorf("Non-Google GTID in BinlogInfo(%#v), which is only supported on Google MySQL", pos)
|
||||
}
|
||||
info, err := mysqld.fetchSuperQueryMap(fmt.Sprintf("SHOW BINLOG INFO FOR %v", gtid.GroupID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", 0, err
|
||||
}
|
||||
if len(qr.Rows) != 1 {
|
||||
return nil, fmt.Errorf("no binlogs")
|
||||
}
|
||||
rp = &proto.ReplicationPosition{}
|
||||
rp.MasterLogFile = qr.Rows[0][0].String()
|
||||
temp, err := qr.Rows[0][1].ParseUint64()
|
||||
fileName = info["Log_name"]
|
||||
temp, err := strconv.ParseUint(info["Pos"], 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return fileName, filePos, err
|
||||
}
|
||||
rp.MasterLogPosition = uint(temp)
|
||||
rp.MasterLogGTIDField.Value, err = mysqld.flavor.ParseGTID(qr.Rows[0][1].String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// On the master, the SQL position and IO position are at
|
||||
// necessarily the same point.
|
||||
rp.MasterLogFileIo = rp.MasterLogFile
|
||||
rp.MasterLogPositionIo = rp.MasterLogPosition
|
||||
return rp, nil
|
||||
filePos = uint(temp)
|
||||
return fileName, filePos, err
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) WaitForSlave(maxLag int) (err error) {
|
||||
// FIXME(msolomon) verify that slave started based on show slave status;
|
||||
var rowMap map[string]string
|
||||
for {
|
||||
rowMap, err = mysqld.slaveStatus()
|
||||
rowMap, err = mysqld.fetchSuperQueryMap("SHOW SLAVE STATUS")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -416,91 +314,6 @@ func (mysqld *Mysqld) WaitForSlave(maxLag int) (err error) {
|
|||
return errors.New("replication stopped, it will never catch up")
|
||||
}
|
||||
|
||||
/*
|
||||
Slave_IO_State: Waiting for master to send event
|
||||
Master_Host: voltron
|
||||
Master_User: vt_repl
|
||||
Master_Port: 6600
|
||||
Connect_Retry: 10
|
||||
Master_Log_File: vt-00000001-bin.000002
|
||||
Read_Master_Log_Pos: 106
|
||||
Relay_Log_File: vt-00000002-relay-bin.000003
|
||||
Relay_Log_Pos: 257
|
||||
Relay_Master_Log_File: vt-00000001-bin.000002
|
||||
Slave_IO_Running: Yes
|
||||
Slave_SQL_Running: Yes
|
||||
Replicate_Do_DB:
|
||||
Replicate_Ignore_DB:
|
||||
Replicate_Do_Table:
|
||||
Replicate_Ignore_Table:
|
||||
Replicate_Wild_Do_Table:
|
||||
Replicate_Wild_Ignore_Table:
|
||||
Last_Errno: 0
|
||||
Last_Error:
|
||||
Skip_Counter: 0
|
||||
Exec_Master_Log_Pos: 106
|
||||
Relay_Log_Space: 569
|
||||
Until_Condition: None
|
||||
Until_Log_File:
|
||||
Until_Log_Pos: 0
|
||||
Master_SSL_Allowed: No
|
||||
Master_SSL_CA_File:
|
||||
Master_SSL_CA_Path:
|
||||
Master_SSL_Cert:
|
||||
Master_SSL_Cipher:
|
||||
Master_SSL_Key:
|
||||
Seconds_Behind_Master: 0
|
||||
Master_SSL_Verify_Server_Cert: No
|
||||
Last_IO_Errno: 0
|
||||
Last_IO_Error:
|
||||
Last_SQL_Errno: 0
|
||||
Last_SQL_Error:
|
||||
Exec_Master_Group_ID: 14
|
||||
Connect_Using_Group_ID: No
|
||||
*/
|
||||
var showSlaveStatusColumnNames = []string{
|
||||
"Slave_IO_State",
|
||||
"Master_Host",
|
||||
"Master_User",
|
||||
"Master_Port",
|
||||
"Connect_Retry",
|
||||
"Master_Log_File",
|
||||
"Read_Master_Log_Pos",
|
||||
"Relay_Log_File",
|
||||
"Relay_Log_Pos",
|
||||
"Relay_Master_Log_File",
|
||||
"Slave_IO_Running",
|
||||
"Slave_SQL_Running",
|
||||
"Replicate_Do_DB",
|
||||
"Replicate_Ignore_DB",
|
||||
"Replicate_Do_Table",
|
||||
"Replicate_Ignore_Table",
|
||||
"Replicate_Wild_Do_Table",
|
||||
"Replicate_Wild_Ignore_Table",
|
||||
"Last_Errno",
|
||||
"Last_Error",
|
||||
"Skip_Counter",
|
||||
"Exec_Master_Log_Pos",
|
||||
"Relay_Log_Space",
|
||||
"Until_Condition",
|
||||
"Until_Log_File",
|
||||
"Until_Log_Pos",
|
||||
"Master_SSL_Allowed",
|
||||
"Master_SSL_CA_File",
|
||||
"Master_SSL_CA_Path",
|
||||
"Master_SSL_Cert",
|
||||
"Master_SSL_Cipher",
|
||||
"Master_SSL_Key",
|
||||
"Seconds_Behind_Master",
|
||||
"Master_SSL_Verify_Server_Cert",
|
||||
"Last_IO_Errno",
|
||||
"Last_IO_Error",
|
||||
"Last_SQL_Errno",
|
||||
"Last_SQL_Error",
|
||||
"Exec_Master_Group_ID",
|
||||
"Connect_Using_Group_ID",
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) ExecuteSuperQuery(query string) error {
|
||||
return mysqld.ExecuteSuperQueryList([]string{query})
|
||||
}
|
||||
|
@ -646,18 +459,18 @@ func (mysqld *Mysqld) WaitBlpPos(bp *blproto.BlpPosition, waitTimeout time.Durat
|
|||
if len(qr.Rows) != 1 {
|
||||
return fmt.Errorf("WaitBlpPos(%v) returned unexpected row count: %v", bp.Uid, len(qr.Rows))
|
||||
}
|
||||
var gtid proto.GTID
|
||||
var pos proto.ReplicationPosition
|
||||
if !qr.Rows[0][0].IsNull() {
|
||||
gtid, err = proto.DecodeGTID(qr.Rows[0][0].String())
|
||||
pos, err = proto.DecodeReplicationPosition(qr.Rows[0][0].String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if gtid == bp.GTIDField.Value {
|
||||
if pos.Equal(bp.Position) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Infof("Sleeping 1 second waiting for binlog replication(%v) to catch up: %v != %v", bp.Uid, gtid, bp.GTIDField)
|
||||
log.Infof("Sleeping 1 second waiting for binlog replication(%v) to catch up: %v != %v", bp.Uid, pos, bp.Position)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ var slaveIDPool = pools.NewIDPool()
|
|||
// be sent. The stream will continue, waiting for new events if necessary,
|
||||
// until the connection is closed, either by the master or by calling
|
||||
// SlaveConnection.Close(). At that point, the channel will also be closed.
|
||||
func (sc *SlaveConnection) StartBinlogDump(startPos proto.GTID) (<-chan blproto.BinlogEvent, error) {
|
||||
func (sc *SlaveConnection) StartBinlogDump(startPos proto.ReplicationPosition) (<-chan blproto.BinlogEvent, error) {
|
||||
log.Infof("sending binlog dump command: startPos=%v, slaveID=%v", startPos, sc.slaveID)
|
||||
err := sc.mysqld.flavor.SendBinlogDumpCommand(sc.mysqld, sc, startPos)
|
||||
if err != nil {
|
||||
|
|
|
@ -142,7 +142,7 @@ type SplitSnapshotManifest struct {
|
|||
// masterAddr is the address of the server to use as master.
|
||||
// pos is the replication position to use on that master.
|
||||
// myMasterPos is the local server master position
|
||||
func NewSplitSnapshotManifest(myAddr, myMysqlAddr, masterAddr, dbName string, files []SnapshotFile, pos, myMasterPos *proto.ReplicationPosition, keyRange key.KeyRange, sd *proto.SchemaDefinition) (*SplitSnapshotManifest, error) {
|
||||
func NewSplitSnapshotManifest(myAddr, myMysqlAddr, masterAddr, dbName string, files []SnapshotFile, pos, myMasterPos proto.ReplicationPosition, keyRange key.KeyRange, sd *proto.SchemaDefinition) (*SplitSnapshotManifest, error) {
|
||||
sm, err := newSnapshotManifest(myAddr, myMysqlAddr, masterAddr, dbName, files, pos, myMasterPos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -167,23 +167,24 @@ func SanityCheckManifests(ssms []*SplitSnapshotManifest) error {
|
|||
|
||||
// getReplicationPositionForClones returns what position the clones
|
||||
// need to replicate from. Can be ours if we are a master, or our master's.
|
||||
func (mysqld *Mysqld) getReplicationPositionForClones(allowHierarchicalReplication bool) (replicationPosition *proto.ReplicationPosition, masterAddr string, err error) {
|
||||
func (mysqld *Mysqld) getReplicationPositionForClones(allowHierarchicalReplication bool) (replicationPosition proto.ReplicationPosition, masterAddr string, err error) {
|
||||
// If the source is a slave use the master replication position,
|
||||
// unless we are allowing hierachical replicas.
|
||||
replicationPosition, err = mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
if err != ErrNotSlave {
|
||||
// this is a real error
|
||||
return
|
||||
}
|
||||
var status *proto.ReplicationStatus
|
||||
status, err = mysqld.SlaveStatus()
|
||||
if err == ErrNotSlave {
|
||||
// we are really a master, so we need that position
|
||||
replicationPosition, err = mysqld.MasterStatus()
|
||||
replicationPosition, err = mysqld.MasterPosition()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
masterAddr = mysqld.IpAddr()
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
replicationPosition = status.Position
|
||||
|
||||
// we are a slave, check our replication strategy
|
||||
if allowHierarchicalReplication {
|
||||
|
@ -194,10 +195,10 @@ func (mysqld *Mysqld) getReplicationPositionForClones(allowHierarchicalReplicati
|
|||
return
|
||||
}
|
||||
|
||||
func (mysqld *Mysqld) prepareToSnapshot(allowHierarchicalReplication bool, hookExtraEnv map[string]string) (slaveStartRequired, readOnly bool, replicationPosition, myMasterPosition *proto.ReplicationPosition, masterAddr string, connToRelease dbconnpool.PoolConnection, err error) {
|
||||
func (mysqld *Mysqld) prepareToSnapshot(allowHierarchicalReplication bool, hookExtraEnv map[string]string) (slaveStartRequired, readOnly bool, replicationPosition, myMasterPosition proto.ReplicationPosition, masterAddr string, connToRelease dbconnpool.PoolConnection, err error) {
|
||||
// save initial state so we can restore on Start()
|
||||
if slaveStatus, slaveErr := mysqld.slaveStatus(); slaveErr == nil {
|
||||
slaveStartRequired = (slaveStatus["Slave_IO_Running"] == "Yes" && slaveStatus["Slave_SQL_Running"] == "Yes")
|
||||
if slaveStatus, slaveErr := mysqld.SlaveStatus(); slaveErr == nil {
|
||||
slaveStartRequired = slaveStatus.SlaveRunning()
|
||||
}
|
||||
|
||||
// For masters, set read-only so we don't write anything during snapshot
|
||||
|
@ -222,7 +223,7 @@ func (mysqld *Mysqld) prepareToSnapshot(allowHierarchicalReplication bool, hookE
|
|||
}
|
||||
|
||||
// get our master position, some targets may use it
|
||||
myMasterPosition, err = mysqld.MasterStatus()
|
||||
myMasterPosition, err = mysqld.MasterPosition()
|
||||
if err != nil && err != ErrNotMaster {
|
||||
// this is a real error
|
||||
return
|
||||
|
@ -634,13 +635,12 @@ func (mysqld *Mysqld) CreateMultiSnapshot(keyRanges []key.KeyRange, dbName, keyN
|
|||
|
||||
// Check the replication position after snapshot is done
|
||||
// hasn't changed, to be sure we haven't inserted any data
|
||||
var newReplicationPosition *proto.ReplicationPosition
|
||||
newReplicationPosition, _, err = mysqld.getReplicationPositionForClones(allowHierarchicalReplication)
|
||||
newReplicationPosition, _, err := mysqld.getReplicationPositionForClones(allowHierarchicalReplication)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if newReplicationPosition.MasterLogGTIDField != replicationPosition.MasterLogGTIDField {
|
||||
return nil, fmt.Errorf("replicationPosition position changed during snapshot, from %v to %v", replicationPosition.MasterLogGTIDField, newReplicationPosition.MasterLogGTIDField)
|
||||
if !newReplicationPosition.Equal(replicationPosition) {
|
||||
return nil, fmt.Errorf("replicationPosition position changed during snapshot, from %v to %v", replicationPosition, newReplicationPosition)
|
||||
}
|
||||
|
||||
// Write all the manifest files
|
||||
|
@ -1064,7 +1064,7 @@ func (mysqld *Mysqld) MultiRestore(destinationDbName string, keyRanges []key.Key
|
|||
flags = binlogplayer.BLP_FLAG_DONT_START
|
||||
}
|
||||
for manifestIndex, manifest := range manifests {
|
||||
queries = append(queries, binlogplayer.PopulateBlpCheckpoint(uint32(manifestIndex), manifest.Source.MasterState.ReplicationPosition.MasterLogGTIDField.Value, time.Now().Unix(), flags))
|
||||
queries = append(queries, binlogplayer.PopulateBlpCheckpoint(uint32(manifestIndex), manifest.Source.MasterPosition, time.Now().Unix(), flags))
|
||||
}
|
||||
if err = mysqld.ExecuteSuperQueryList(queries); err != nil {
|
||||
return err
|
||||
|
|
|
@ -72,7 +72,7 @@ const (
|
|||
TABLET_ACTION_BREAK_SLAVES = "BreakSlaves"
|
||||
TABLET_ACTION_MASTER_POSITION = "MasterPosition"
|
||||
TABLET_ACTION_REPARENT_POSITION = "ReparentPosition"
|
||||
TABLET_ACTION_SLAVE_POSITION = "SlavePosition"
|
||||
TABLET_ACTION_SLAVE_STATUS = "SlaveStatus"
|
||||
TABLET_ACTION_WAIT_SLAVE_POSITION = "WaitSlavePosition"
|
||||
TABLET_ACTION_WAIT_BLP_POSITION = "WaitBlpPosition"
|
||||
TABLET_ACTION_STOP_BLP = "StopBlp"
|
||||
|
@ -251,7 +251,7 @@ func ActionNodeFromJson(data, path string) (*ActionNode, error) {
|
|||
TABLET_ACTION_RELOAD_SCHEMA,
|
||||
TABLET_ACTION_EXECUTE_FETCH,
|
||||
TABLET_ACTION_GET_PERMISSIONS,
|
||||
TABLET_ACTION_SLAVE_POSITION,
|
||||
TABLET_ACTION_SLAVE_STATUS,
|
||||
TABLET_ACTION_WAIT_SLAVE_POSITION,
|
||||
TABLET_ACTION_MASTER_POSITION,
|
||||
TABLET_ACTION_STOP_SLAVE,
|
||||
|
|
|
@ -26,15 +26,15 @@ Note it's OK to rename the structures as the type name is not saved in json.
|
|||
// tablet action node structures
|
||||
|
||||
type RestartSlaveData struct {
|
||||
ReplicationState *myproto.ReplicationState
|
||||
WaitPosition *myproto.ReplicationPosition
|
||||
ReplicationStatus *myproto.ReplicationStatus
|
||||
WaitPosition myproto.ReplicationPosition
|
||||
TimePromoted int64 // used to verify replication - a row will be inserted with this timestamp
|
||||
Parent topo.TabletAlias
|
||||
Force bool
|
||||
}
|
||||
|
||||
func (rsd *RestartSlaveData) String() string {
|
||||
return fmt.Sprintf("RestartSlaveData{ReplicationState:%#v WaitPosition:%#v TimePromoted:%v Parent:%v Force:%v}", rsd.ReplicationState, rsd.WaitPosition, rsd.TimePromoted, rsd.Parent, rsd.Force)
|
||||
return fmt.Sprintf("RestartSlaveData{ReplicationStatus:%#v WaitPosition:%#v TimePromoted:%v Parent:%v Force:%v}", rsd.ReplicationStatus, rsd.WaitPosition, rsd.TimePromoted, rsd.Parent, rsd.Force)
|
||||
}
|
||||
|
||||
type SlaveWasRestartedArgs struct {
|
||||
|
|
|
@ -222,7 +222,7 @@ func (ta *TabletActor) dispatchAction(actionNode *actionnode.ActionNode) (err er
|
|||
actionnode.TABLET_ACTION_GET_SCHEMA,
|
||||
actionnode.TABLET_ACTION_RELOAD_SCHEMA,
|
||||
actionnode.TABLET_ACTION_GET_PERMISSIONS,
|
||||
actionnode.TABLET_ACTION_SLAVE_POSITION,
|
||||
actionnode.TABLET_ACTION_SLAVE_STATUS,
|
||||
actionnode.TABLET_ACTION_WAIT_SLAVE_POSITION,
|
||||
actionnode.TABLET_ACTION_MASTER_POSITION,
|
||||
actionnode.TABLET_ACTION_STOP_SLAVE,
|
||||
|
@ -314,12 +314,15 @@ func (ta *TabletActor) promoteSlave(actionNode *actionnode.ActionNode) error {
|
|||
}
|
||||
|
||||
// Perform the action.
|
||||
rsd := &actionnode.RestartSlaveData{Parent: tablet.Alias, Force: (tablet.Parent.Uid == topo.NO_TABLET)}
|
||||
rsd.ReplicationState, rsd.WaitPosition, rsd.TimePromoted, err = ta.mysqld.PromoteSlave(false, ta.hookExtraEnv())
|
||||
rsd := &actionnode.RestartSlaveData{
|
||||
Parent: tablet.Alias,
|
||||
Force: (tablet.Parent.Uid == topo.NO_TABLET),
|
||||
}
|
||||
rsd.ReplicationStatus, rsd.WaitPosition, rsd.TimePromoted, err = ta.mysqld.PromoteSlave(false, ta.hookExtraEnv())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("PromoteSlave %v", rsd.String())
|
||||
log.Infof("PromoteSlave %v", rsd)
|
||||
actionNode.Reply = rsd
|
||||
|
||||
return updateReplicationGraphForPromotedSlave(ta.ts, tablet)
|
||||
|
@ -370,18 +373,18 @@ func updateReplicationGraphForPromotedSlave(ts topo.Server, tablet *topo.TabletI
|
|||
}
|
||||
|
||||
func (ta *TabletActor) reparentPosition(actionNode *actionnode.ActionNode) error {
|
||||
slavePos := actionNode.Args.(*myproto.ReplicationPosition)
|
||||
slavePos := *actionNode.Args.(*myproto.ReplicationPosition)
|
||||
|
||||
replicationState, waitPosition, timePromoted, err := ta.mysqld.ReparentPosition(slavePos)
|
||||
replicationStatus, waitPosition, timePromoted, err := ta.mysqld.ReparentPosition(slavePos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rsd := new(actionnode.RestartSlaveData)
|
||||
rsd.ReplicationState = replicationState
|
||||
rsd.ReplicationStatus = replicationStatus
|
||||
rsd.TimePromoted = timePromoted
|
||||
rsd.WaitPosition = waitPosition
|
||||
rsd.Parent = ta.tabletAlias
|
||||
log.V(6).Infof("reparentPosition %v", rsd.String())
|
||||
log.V(6).Infof("reparentPosition: %v", rsd)
|
||||
actionNode.Reply = rsd
|
||||
return nil
|
||||
}
|
||||
|
@ -410,7 +413,7 @@ func (ta *TabletActor) restartSlave(actionNode *actionnode.ActionNode) error {
|
|||
if tablet.Type == topo.TYPE_LAG {
|
||||
tablet.Type = topo.TYPE_LAG_ORPHAN
|
||||
} else {
|
||||
err = ta.mysqld.RestartSlave(rsd.ReplicationState, rsd.WaitPosition, rsd.TimePromoted)
|
||||
err = ta.mysqld.RestartSlave(rsd.ReplicationStatus, rsd.WaitPosition, rsd.TimePromoted)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -422,7 +425,7 @@ func (ta *TabletActor) restartSlave(actionNode *actionnode.ActionNode) error {
|
|||
return err
|
||||
}
|
||||
} else if rsd.Force {
|
||||
err = ta.mysqld.RestartSlave(rsd.ReplicationState, rsd.WaitPosition, rsd.TimePromoted)
|
||||
err = ta.mysqld.RestartSlave(rsd.ReplicationStatus, rsd.WaitPosition, rsd.TimePromoted)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -437,11 +440,11 @@ func (ta *TabletActor) restartSlave(actionNode *actionnode.ActionNode) error {
|
|||
} else {
|
||||
// There is nothing to safely reparent, so check replication. If
|
||||
// either replication thread is not running, report an error.
|
||||
replicationPos, err := ta.mysqld.SlaveStatus()
|
||||
status, err := ta.mysqld.SlaveStatus()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot verify replication for slave: %v", err)
|
||||
}
|
||||
if replicationPos.SecondsBehindMaster == myproto.InvalidLagSeconds {
|
||||
if !status.SlaveRunning() {
|
||||
return fmt.Errorf("replication not running for slave")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -61,8 +61,8 @@ type BinlogPlayerController struct {
|
|||
// done is the channel to wait for to be sure the player is done
|
||||
done chan struct{}
|
||||
|
||||
// stopAtGTID contains the stopping point for this player, if any
|
||||
stopAtGTID myproto.GTID
|
||||
// stopPosition contains the stopping point for this player, if any
|
||||
stopPosition myproto.ReplicationPosition
|
||||
|
||||
// information about the individual tablet we're replicating from
|
||||
sourceTablet topo.TabletAlias
|
||||
|
@ -101,21 +101,21 @@ func (bpc *BinlogPlayerController) Start() {
|
|||
log.Infof("%v: starting binlog player", bpc)
|
||||
bpc.interrupted = make(chan struct{}, 1)
|
||||
bpc.done = make(chan struct{}, 1)
|
||||
bpc.stopAtGTID = nil // run forever
|
||||
bpc.stopPosition = myproto.ReplicationPosition{} // run forever
|
||||
go bpc.Loop()
|
||||
}
|
||||
|
||||
// StartUntil will start the Player until we reach the given GTID.
|
||||
func (bpc *BinlogPlayerController) StartUntil(gtid myproto.GTID) error {
|
||||
// StartUntil will start the Player until we reach the given position.
|
||||
func (bpc *BinlogPlayerController) StartUntil(stopPos myproto.ReplicationPosition) error {
|
||||
bpc.playerMutex.Lock()
|
||||
defer bpc.playerMutex.Unlock()
|
||||
if bpc.interrupted != nil {
|
||||
return fmt.Errorf("%v: already started", bpc)
|
||||
}
|
||||
log.Infof("%v: starting binlog player until %v", bpc, gtid)
|
||||
log.Infof("%v: starting binlog player until %v", bpc, stopPos)
|
||||
bpc.interrupted = make(chan struct{}, 1)
|
||||
bpc.done = make(chan struct{}, 1)
|
||||
bpc.stopAtGTID = gtid
|
||||
bpc.stopPosition = stopPos
|
||||
go bpc.Loop()
|
||||
return nil
|
||||
}
|
||||
|
@ -269,7 +269,7 @@ func (bpc *BinlogPlayerController) Iteration() (err error) {
|
|||
}
|
||||
|
||||
// tables, just get them
|
||||
player := binlogplayer.NewBinlogPlayerTables(vtClient, addr, tables, startPosition, bpc.stopAtGTID, bpc.binlogPlayerStats)
|
||||
player := binlogplayer.NewBinlogPlayerTables(vtClient, addr, tables, startPosition, bpc.stopPosition, bpc.binlogPlayerStats)
|
||||
return player.ApplyBinlogEvents(bpc.interrupted)
|
||||
} else {
|
||||
// the data we have to replicate is the intersection of the
|
||||
|
@ -279,7 +279,7 @@ func (bpc *BinlogPlayerController) Iteration() (err error) {
|
|||
return fmt.Errorf("Source shard %v doesn't overlap destination shard %v", bpc.sourceShard.KeyRange, bpc.keyRange)
|
||||
}
|
||||
|
||||
player := binlogplayer.NewBinlogPlayerKeyRange(vtClient, addr, bpc.keyspaceIdType, overlap, startPosition, bpc.stopAtGTID, bpc.binlogPlayerStats)
|
||||
player := binlogplayer.NewBinlogPlayerKeyRange(vtClient, addr, bpc.keyspaceIdType, overlap, startPosition, bpc.stopPosition, bpc.binlogPlayerStats)
|
||||
return player.ApplyBinlogEvents(bpc.interrupted)
|
||||
}
|
||||
}
|
||||
|
@ -533,18 +533,18 @@ func (blm *BinlogPlayerMap) RunUntil(blpPositionList *blproto.BlpPositionList, w
|
|||
|
||||
// find the exact stop position for all players, to be sure
|
||||
// we're not doing anything wrong
|
||||
gtids := make(map[uint32]myproto.GTID)
|
||||
posMap := make(map[uint32]myproto.ReplicationPosition)
|
||||
for _, bpc := range blm.players {
|
||||
pos, err := blpPositionList.FindBlpPositionById(bpc.sourceShard.Uid)
|
||||
blpPos, err := blpPositionList.FindBlpPositionById(bpc.sourceShard.Uid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("No binlog position passed in for player Uid %v", bpc.sourceShard.Uid)
|
||||
}
|
||||
gtids[bpc.sourceShard.Uid] = pos.GTIDField.Value
|
||||
posMap[bpc.sourceShard.Uid] = blpPos.Position
|
||||
}
|
||||
|
||||
// start all the players giving them where to stop
|
||||
for _, bpc := range blm.players {
|
||||
if err := bpc.StartUntil(gtids[bpc.sourceShard.Uid]); err != nil {
|
||||
if err := bpc.StartUntil(posMap[bpc.sourceShard.Uid]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -573,10 +573,10 @@ type BinlogPlayerControllerStatus struct {
|
|||
// configuration values
|
||||
Index uint32
|
||||
SourceShard topo.SourceShard
|
||||
StopAtGTID myproto.GTID
|
||||
StopPosition myproto.ReplicationPosition
|
||||
|
||||
// stats and current values
|
||||
LastGTID myproto.GTID
|
||||
LastPosition myproto.ReplicationPosition
|
||||
SecondsBehindMaster int64
|
||||
Counts map[string]int64
|
||||
Rates map[string][]float64
|
||||
|
@ -630,8 +630,8 @@ func (blm *BinlogPlayerMap) Status() *BinlogPlayerMapStatus {
|
|||
bpcs := &BinlogPlayerControllerStatus{
|
||||
Index: i,
|
||||
SourceShard: bpc.sourceShard,
|
||||
StopAtGTID: bpc.stopAtGTID,
|
||||
LastGTID: bpc.binlogPlayerStats.GetLastGTID(),
|
||||
StopPosition: bpc.stopPosition,
|
||||
LastPosition: bpc.binlogPlayerStats.GetLastPosition(),
|
||||
SecondsBehindMaster: bpc.binlogPlayerStats.SecondsBehindMaster.Get(),
|
||||
Counts: bpc.binlogPlayerStats.Timings.Counts(),
|
||||
Rates: bpc.binlogPlayerStats.Rates.Get(),
|
||||
|
|
|
@ -38,12 +38,12 @@ type SetBlacklistedTablesArgs struct {
|
|||
}
|
||||
|
||||
type WaitSlavePositionArgs struct {
|
||||
ReplicationPosition myproto.ReplicationPosition
|
||||
Position myproto.ReplicationPosition
|
||||
WaitTimeout time.Duration // pass in zero to wait indefinitely
|
||||
}
|
||||
|
||||
type StopSlaveMinimumArgs struct {
|
||||
GTIDField myproto.GTIDField
|
||||
Position myproto.ReplicationPosition
|
||||
WaitTime time.Duration
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,6 @@ type GoRpcTabletManagerConn struct {
|
|||
}
|
||||
|
||||
func (client *GoRpcTabletManagerConn) rpcCallTablet(tablet *topo.TabletInfo, name string, args, reply interface{}, waitTime time.Duration) error {
|
||||
|
||||
// create the RPC client, using waitTime as the connect
|
||||
// timeout, and starting the overall timeout as well
|
||||
timer := time.After(waitTime)
|
||||
|
@ -118,31 +117,31 @@ func (client *GoRpcTabletManagerConn) ExecuteFetch(tablet *topo.TabletInfo, quer
|
|||
// Replication related methods
|
||||
//
|
||||
|
||||
func (client *GoRpcTabletManagerConn) SlavePosition(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
var rp myproto.ReplicationPosition
|
||||
if err := client.rpcCallTablet(tablet, actionnode.TABLET_ACTION_SLAVE_POSITION, "", &rp, waitTime); err != nil {
|
||||
func (client *GoRpcTabletManagerConn) SlaveStatus(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationStatus, error) {
|
||||
var status myproto.ReplicationStatus
|
||||
if err := client.rpcCallTablet(tablet, actionnode.TABLET_ACTION_SLAVE_STATUS, "", &status, waitTime); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &rp, nil
|
||||
return &status, nil
|
||||
}
|
||||
|
||||
func (client *GoRpcTabletManagerConn) WaitSlavePosition(tablet *topo.TabletInfo, replicationPosition *myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
var rp myproto.ReplicationPosition
|
||||
func (client *GoRpcTabletManagerConn) WaitSlavePosition(tablet *topo.TabletInfo, waitPos myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationStatus, error) {
|
||||
var status myproto.ReplicationStatus
|
||||
if err := client.rpcCallTablet(tablet, actionnode.TABLET_ACTION_WAIT_SLAVE_POSITION, &gorpcproto.WaitSlavePositionArgs{
|
||||
ReplicationPosition: *replicationPosition,
|
||||
Position: waitPos,
|
||||
WaitTimeout: waitTime,
|
||||
}, &rp, waitTime); err != nil {
|
||||
}, &status, waitTime); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &rp, nil
|
||||
return &status, nil
|
||||
}
|
||||
|
||||
func (client *GoRpcTabletManagerConn) MasterPosition(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
func (client *GoRpcTabletManagerConn) MasterPosition(tablet *topo.TabletInfo, waitTime time.Duration) (myproto.ReplicationPosition, error) {
|
||||
var rp myproto.ReplicationPosition
|
||||
if err := client.rpcCallTablet(tablet, actionnode.TABLET_ACTION_MASTER_POSITION, "", &rp, waitTime); err != nil {
|
||||
return nil, err
|
||||
return rp, err
|
||||
}
|
||||
return &rp, nil
|
||||
return rp, nil
|
||||
}
|
||||
|
||||
func (client *GoRpcTabletManagerConn) StopSlave(tablet *topo.TabletInfo, waitTime time.Duration) error {
|
||||
|
@ -150,15 +149,15 @@ func (client *GoRpcTabletManagerConn) StopSlave(tablet *topo.TabletInfo, waitTim
|
|||
return client.rpcCallTablet(tablet, actionnode.TABLET_ACTION_STOP_SLAVE, "", &noOutput, waitTime)
|
||||
}
|
||||
|
||||
func (client *GoRpcTabletManagerConn) StopSlaveMinimum(tablet *topo.TabletInfo, gtid myproto.GTID, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
var pos myproto.ReplicationPosition
|
||||
func (client *GoRpcTabletManagerConn) StopSlaveMinimum(tablet *topo.TabletInfo, minPos myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationStatus, error) {
|
||||
var status myproto.ReplicationStatus
|
||||
if err := client.rpcCallTablet(tablet, actionnode.TABLET_ACTION_STOP_SLAVE_MINIMUM, &gorpcproto.StopSlaveMinimumArgs{
|
||||
GTIDField: myproto.GTIDField{Value: gtid},
|
||||
Position: minPos,
|
||||
WaitTime: waitTime,
|
||||
}, &pos, waitTime); err != nil {
|
||||
}, &status, waitTime); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pos, nil
|
||||
return &status, nil
|
||||
}
|
||||
|
||||
func (client *GoRpcTabletManagerConn) StartSlave(tablet *topo.TabletInfo, waitTime time.Duration) error {
|
||||
|
@ -200,15 +199,15 @@ func (client *GoRpcTabletManagerConn) StartBlp(tablet *topo.TabletInfo, waitTime
|
|||
return client.rpcCallTablet(tablet, actionnode.TABLET_ACTION_START_BLP, "", &noOutput, waitTime)
|
||||
}
|
||||
|
||||
func (client *GoRpcTabletManagerConn) RunBlpUntil(tablet *topo.TabletInfo, positions *blproto.BlpPositionList, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
func (client *GoRpcTabletManagerConn) RunBlpUntil(tablet *topo.TabletInfo, positions *blproto.BlpPositionList, waitTime time.Duration) (myproto.ReplicationPosition, error) {
|
||||
var pos myproto.ReplicationPosition
|
||||
if err := client.rpcCallTablet(tablet, actionnode.TABLET_ACTION_RUN_BLP_UNTIL, &gorpcproto.RunBlpUntilArgs{
|
||||
BlpPositionList: positions,
|
||||
WaitTimeout: waitTime,
|
||||
}, &pos, waitTime); err != nil {
|
||||
return nil, err
|
||||
return myproto.ReplicationPosition{}, err
|
||||
}
|
||||
return &pos, nil
|
||||
return pos, nil
|
||||
}
|
||||
|
||||
//
|
||||
|
|
|
@ -102,25 +102,25 @@ func (tm *TabletManager) ExecuteFetch(context *rpcproto.Context, args *gorpcprot
|
|||
// Replication related methods
|
||||
//
|
||||
|
||||
func (tm *TabletManager) SlavePosition(context *rpcproto.Context, args *rpc.UnusedRequest, reply *myproto.ReplicationPosition) error {
|
||||
return tm.agent.RpcWrap(context.RemoteAddr, actionnode.TABLET_ACTION_SLAVE_POSITION, args, reply, func() error {
|
||||
position, err := tm.agent.Mysqld.SlaveStatus()
|
||||
func (tm *TabletManager) SlaveStatus(context *rpcproto.Context, args *rpc.UnusedRequest, reply *myproto.ReplicationStatus) error {
|
||||
return tm.agent.RpcWrap(context.RemoteAddr, actionnode.TABLET_ACTION_SLAVE_STATUS, args, reply, func() error {
|
||||
status, err := tm.agent.Mysqld.SlaveStatus()
|
||||
if err == nil {
|
||||
*reply = *position
|
||||
*reply = *status
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (tm *TabletManager) WaitSlavePosition(context *rpcproto.Context, args *gorpcproto.WaitSlavePositionArgs, reply *myproto.ReplicationPosition) error {
|
||||
func (tm *TabletManager) WaitSlavePosition(context *rpcproto.Context, args *gorpcproto.WaitSlavePositionArgs, reply *myproto.ReplicationStatus) error {
|
||||
return tm.agent.RpcWrap(context.RemoteAddr, actionnode.TABLET_ACTION_WAIT_SLAVE_POSITION, args, reply, func() error {
|
||||
if err := tm.agent.Mysqld.WaitMasterPos(&args.ReplicationPosition, args.WaitTimeout); err != nil {
|
||||
if err := tm.agent.Mysqld.WaitMasterPos(args.Position, args.WaitTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
position, err := tm.agent.Mysqld.SlaveStatus()
|
||||
status, err := tm.agent.Mysqld.SlaveStatus()
|
||||
if err == nil {
|
||||
*reply = *position
|
||||
*reply = *status
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
@ -128,9 +128,9 @@ func (tm *TabletManager) WaitSlavePosition(context *rpcproto.Context, args *gorp
|
|||
|
||||
func (tm *TabletManager) MasterPosition(context *rpcproto.Context, args *rpc.UnusedRequest, reply *myproto.ReplicationPosition) error {
|
||||
return tm.agent.RpcWrap(context.RemoteAddr, actionnode.TABLET_ACTION_MASTER_POSITION, args, reply, func() error {
|
||||
position, err := tm.agent.Mysqld.MasterStatus()
|
||||
position, err := tm.agent.Mysqld.MasterPosition()
|
||||
if err == nil {
|
||||
*reply = *position
|
||||
*reply = position
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
@ -142,17 +142,17 @@ func (tm *TabletManager) StopSlave(context *rpcproto.Context, args *rpc.UnusedRe
|
|||
})
|
||||
}
|
||||
|
||||
func (tm *TabletManager) StopSlaveMinimum(context *rpcproto.Context, args *gorpcproto.StopSlaveMinimumArgs, reply *myproto.ReplicationPosition) error {
|
||||
func (tm *TabletManager) StopSlaveMinimum(context *rpcproto.Context, args *gorpcproto.StopSlaveMinimumArgs, reply *myproto.ReplicationStatus) error {
|
||||
return tm.agent.RpcWrapLock(context.RemoteAddr, actionnode.TABLET_ACTION_STOP_SLAVE_MINIMUM, args, reply, func() error {
|
||||
if err := tm.agent.Mysqld.WaitForMinimumReplicationPosition(args.GTIDField.Value, args.WaitTime); err != nil {
|
||||
if err := tm.agent.Mysqld.WaitMasterPos(args.Position, args.WaitTime); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tm.agent.Mysqld.StopSlave(map[string]string{"TABLET_ALIAS": tm.agent.TabletAlias.String()}); err != nil {
|
||||
return err
|
||||
}
|
||||
position, err := tm.agent.Mysqld.SlaveStatus()
|
||||
status, err := tm.agent.Mysqld.SlaveStatus()
|
||||
if err == nil {
|
||||
*reply = *position
|
||||
*reply = *status
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
@ -220,9 +220,9 @@ func (tm *TabletManager) RunBlpUntil(context *rpcproto.Context, args *gorpcproto
|
|||
if err := tm.agent.BinlogPlayerMap.RunUntil(args.BlpPositionList, args.WaitTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
position, err := tm.agent.Mysqld.MasterStatus()
|
||||
position, err := tm.agent.Mysqld.MasterPosition()
|
||||
if err == nil {
|
||||
*reply = *position
|
||||
*reply = position
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
|
|
@ -156,33 +156,33 @@ func (ai *ActionInitiator) RpcSlaveWasRestarted(tablet *topo.TabletInfo, args *a
|
|||
return ai.rpc.SlaveWasRestarted(tablet, args, waitTime)
|
||||
}
|
||||
|
||||
func (ai *ActionInitiator) ReparentPosition(tabletAlias topo.TabletAlias, slavePos *myproto.ReplicationPosition) (actionPath string, err error) {
|
||||
return ai.writeTabletAction(tabletAlias, &actionnode.ActionNode{Action: actionnode.TABLET_ACTION_REPARENT_POSITION, Args: slavePos})
|
||||
func (ai *ActionInitiator) ReparentPosition(tabletAlias topo.TabletAlias, slavePos myproto.ReplicationPosition) (actionPath string, err error) {
|
||||
return ai.writeTabletAction(tabletAlias, &actionnode.ActionNode{Action: actionnode.TABLET_ACTION_REPARENT_POSITION, Args: &slavePos})
|
||||
}
|
||||
|
||||
func (ai *ActionInitiator) MasterPosition(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
func (ai *ActionInitiator) MasterPosition(tablet *topo.TabletInfo, waitTime time.Duration) (myproto.ReplicationPosition, error) {
|
||||
return ai.rpc.MasterPosition(tablet, waitTime)
|
||||
}
|
||||
|
||||
func (ai *ActionInitiator) SlavePosition(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
return ai.rpc.SlavePosition(tablet, waitTime)
|
||||
func (ai *ActionInitiator) SlaveStatus(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationStatus, error) {
|
||||
return ai.rpc.SlaveStatus(tablet, waitTime)
|
||||
}
|
||||
|
||||
func (ai *ActionInitiator) WaitSlavePosition(tablet *topo.TabletInfo, replicationPosition *myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
return ai.rpc.WaitSlavePosition(tablet, replicationPosition, waitTime)
|
||||
func (ai *ActionInitiator) WaitSlavePosition(tablet *topo.TabletInfo, waitPos myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationStatus, error) {
|
||||
return ai.rpc.WaitSlavePosition(tablet, waitPos, waitTime)
|
||||
}
|
||||
|
||||
func (ai *ActionInitiator) StopSlave(tablet *topo.TabletInfo, waitTime time.Duration) error {
|
||||
return ai.rpc.StopSlave(tablet, waitTime)
|
||||
}
|
||||
|
||||
func (ai *ActionInitiator) StopSlaveMinimum(tabletAlias topo.TabletAlias, gtid myproto.GTID, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
func (ai *ActionInitiator) StopSlaveMinimum(tabletAlias topo.TabletAlias, minPos myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationStatus, error) {
|
||||
tablet, err := ai.ts.GetTablet(tabletAlias)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ai.rpc.StopSlaveMinimum(tablet, gtid, waitTime)
|
||||
return ai.rpc.StopSlaveMinimum(tablet, minPos, waitTime)
|
||||
}
|
||||
|
||||
func (ai *ActionInitiator) StartSlave(tabletAlias topo.TabletAlias, waitTime time.Duration) error {
|
||||
|
@ -230,10 +230,10 @@ func (ai *ActionInitiator) StartBlp(tabletAlias topo.TabletAlias, waitTime time.
|
|||
return ai.rpc.StartBlp(tablet, waitTime)
|
||||
}
|
||||
|
||||
func (ai *ActionInitiator) RunBlpUntil(tabletAlias topo.TabletAlias, positions *blproto.BlpPositionList, waitTime time.Duration) (*myproto.ReplicationPosition, error) {
|
||||
func (ai *ActionInitiator) RunBlpUntil(tabletAlias topo.TabletAlias, positions *blproto.BlpPositionList, waitTime time.Duration) (myproto.ReplicationPosition, error) {
|
||||
tablet, err := ai.ts.GetTablet(tabletAlias)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return myproto.ReplicationPosition{}, err
|
||||
}
|
||||
|
||||
return ai.rpc.RunBlpUntil(tablet, positions, waitTime)
|
||||
|
|
|
@ -51,22 +51,22 @@ type TabletManagerConn interface {
|
|||
// Replication related methods
|
||||
//
|
||||
|
||||
// SlavePosition returns the tablet's mysql slave position
|
||||
SlavePosition(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationPosition, error)
|
||||
// SlaveStatus returns the tablet's mysql slave status.
|
||||
SlaveStatus(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationStatus, error)
|
||||
|
||||
// WaitSlavePosition asks the tablet to wait until it reaches that
|
||||
// position in mysql replication
|
||||
WaitSlavePosition(tablet *topo.TabletInfo, replicationPosition *myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationPosition, error)
|
||||
WaitSlavePosition(tablet *topo.TabletInfo, waitPos myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationStatus, error)
|
||||
|
||||
// MasterPosition returns the tablet's master position
|
||||
MasterPosition(tablet *topo.TabletInfo, waitTime time.Duration) (*myproto.ReplicationPosition, error)
|
||||
MasterPosition(tablet *topo.TabletInfo, waitTime time.Duration) (myproto.ReplicationPosition, error)
|
||||
|
||||
// StopSlave stops the mysql replication
|
||||
StopSlave(tablet *topo.TabletInfo, waitTime time.Duration) error
|
||||
|
||||
// StopSlaveMinimum stops the mysql replication after it reaches
|
||||
// the provided minimum point
|
||||
StopSlaveMinimum(tablet *topo.TabletInfo, gtid myproto.GTID, waitTime time.Duration) (*myproto.ReplicationPosition, error)
|
||||
StopSlaveMinimum(tablet *topo.TabletInfo, stopPos myproto.ReplicationPosition, waitTime time.Duration) (*myproto.ReplicationStatus, error)
|
||||
|
||||
// StartSlave starts the mysql replication
|
||||
StartSlave(tablet *topo.TabletInfo, waitTime time.Duration) error
|
||||
|
@ -90,7 +90,7 @@ type TabletManagerConn interface {
|
|||
|
||||
// RunBlpUntil asks the tablet to restart its binlog players until
|
||||
// it reaches the given positions, if not there yet.
|
||||
RunBlpUntil(tablet *topo.TabletInfo, positions *blproto.BlpPositionList, waitTime time.Duration) (*myproto.ReplicationPosition, error)
|
||||
RunBlpUntil(tablet *topo.TabletInfo, positions *blproto.BlpPositionList, waitTime time.Duration) (myproto.ReplicationPosition, error)
|
||||
|
||||
//
|
||||
// Reparenting related functions
|
||||
|
|
|
@ -29,29 +29,36 @@ type RowcacheInvalidator struct {
|
|||
|
||||
svm sync2.ServiceManager
|
||||
|
||||
gtidMutex sync.Mutex
|
||||
gtid myproto.GTID
|
||||
posMutex sync.Mutex
|
||||
pos myproto.ReplicationPosition
|
||||
lagSeconds sync2.AtomicInt64
|
||||
}
|
||||
|
||||
func (rci *RowcacheInvalidator) SetGTID(gtid myproto.GTID) {
|
||||
rci.gtidMutex.Lock()
|
||||
defer rci.gtidMutex.Unlock()
|
||||
rci.gtid = gtid
|
||||
// AppendGTID updates the current replication position by appending a GTID to
|
||||
// the set of transactions that have been processed.
|
||||
func (rci *RowcacheInvalidator) AppendGTID(gtid myproto.GTID) {
|
||||
rci.posMutex.Lock()
|
||||
defer rci.posMutex.Unlock()
|
||||
rci.pos = myproto.AppendGTID(rci.pos, gtid)
|
||||
}
|
||||
|
||||
func (rci *RowcacheInvalidator) GTID() myproto.GTID {
|
||||
rci.gtidMutex.Lock()
|
||||
defer rci.gtidMutex.Unlock()
|
||||
return rci.gtid
|
||||
// SetPosition sets the current ReplicationPosition.
|
||||
func (rci *RowcacheInvalidator) SetPosition(rp myproto.ReplicationPosition) {
|
||||
rci.posMutex.Lock()
|
||||
defer rci.posMutex.Unlock()
|
||||
rci.pos = rp
|
||||
}
|
||||
|
||||
func (rci *RowcacheInvalidator) GTIDString() string {
|
||||
gtid := rci.GTID()
|
||||
if gtid == nil {
|
||||
return "<nil>"
|
||||
// Position returns the current ReplicationPosition.
|
||||
func (rci *RowcacheInvalidator) Position() myproto.ReplicationPosition {
|
||||
rci.posMutex.Lock()
|
||||
defer rci.posMutex.Unlock()
|
||||
return rci.pos
|
||||
}
|
||||
return gtid.String()
|
||||
|
||||
// PositionString returns the current ReplicationPosition as a string.
|
||||
func (rci *RowcacheInvalidator) PositionString() string {
|
||||
return rci.Position().String()
|
||||
}
|
||||
|
||||
// NewRowcacheInvalidator creates a new RowcacheInvalidator.
|
||||
|
@ -60,14 +67,14 @@ func (rci *RowcacheInvalidator) GTIDString() string {
|
|||
func NewRowcacheInvalidator(qe *QueryEngine) *RowcacheInvalidator {
|
||||
rci := &RowcacheInvalidator{qe: qe}
|
||||
stats.Publish("RowcacheInvalidatorState", stats.StringFunc(rci.svm.StateName))
|
||||
stats.Publish("RowcacheInvalidatorPosition", stats.StringFunc(rci.GTIDString))
|
||||
stats.Publish("RowcacheInvalidatorPosition", stats.StringFunc(rci.PositionString))
|
||||
stats.Publish("RowcacheInvalidatorLagSeconds", stats.IntFunc(rci.lagSeconds.Get))
|
||||
return rci
|
||||
}
|
||||
|
||||
// Open runs the invalidation loop.
|
||||
func (rci *RowcacheInvalidator) Open(dbname string, mysqld *mysqlctl.Mysqld) {
|
||||
rp, err := mysqld.MasterStatus()
|
||||
rp, err := mysqld.MasterPosition()
|
||||
if err != nil {
|
||||
panic(NewTabletError(FATAL, "Rowcache invalidator aborting: cannot determine replication position: %v", err))
|
||||
}
|
||||
|
@ -76,12 +83,11 @@ func (rci *RowcacheInvalidator) Open(dbname string, mysqld *mysqlctl.Mysqld) {
|
|||
}
|
||||
rci.dbname = dbname
|
||||
rci.mysqld = mysqld
|
||||
rci.SetGTID(rp.MasterLogGTIDField.Value)
|
||||
// We'll set gtid inside the run function.
|
||||
rci.SetPosition(rp)
|
||||
|
||||
ok := rci.svm.Go(rci.run)
|
||||
if ok {
|
||||
log.Infof("Rowcache invalidator starting, dbname: %s, path: %s, logfile: %s, position: %d", dbname, mysqld.Cnf().BinLogPath, rp.MasterLogFile, rp.MasterLogPosition)
|
||||
log.Infof("Rowcache invalidator starting, dbname: %s, path: %s, position: %v", dbname, mysqld.Cnf().BinLogPath, rp)
|
||||
} else {
|
||||
log.Infof("Rowcache invalidator already running")
|
||||
}
|
||||
|
@ -95,7 +101,7 @@ func (rci *RowcacheInvalidator) Close() {
|
|||
|
||||
func (rci *RowcacheInvalidator) run(ctx *sync2.ServiceContext) error {
|
||||
for {
|
||||
evs := binlog.NewEventStreamer(rci.dbname, rci.mysqld, rci.GTID(), rci.processEvent)
|
||||
evs := binlog.NewEventStreamer(rci.dbname, rci.mysqld, rci.Position(), rci.processEvent)
|
||||
// We wrap this code in a func so we can catch all panics.
|
||||
// If an error is returned, we log it, wait 1 second, and retry.
|
||||
// This loop can only be stopped by calling Close.
|
||||
|
@ -142,7 +148,7 @@ func (rci *RowcacheInvalidator) processEvent(event *blproto.StreamEvent) error {
|
|||
case "ERR":
|
||||
rci.qe.InvalidateForUnrecognized(event.Sql)
|
||||
case "POS":
|
||||
rci.SetGTID(event.GTIDField.Value)
|
||||
rci.AppendGTID(event.GTIDField.Value)
|
||||
default:
|
||||
log.Errorf("unknown event: %#v", event)
|
||||
internalErrors.Add("Invalidation", 1)
|
||||
|
|
|
@ -1254,19 +1254,19 @@ func commandShardReplicationPositions(wr *wrangler.Wrangler, subFlags *flag.Flag
|
|||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
tablets, positions, err := wr.ShardReplicationPositions(keyspace, shard)
|
||||
tablets, stats, err := wr.ShardReplicationStatuses(keyspace, shard)
|
||||
if tablets == nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
lines := make([]string, 0, 24)
|
||||
for _, rt := range sortReplicatingTablets(tablets, positions) {
|
||||
pos := rt.ReplicationPosition
|
||||
for _, rt := range sortReplicatingTablets(tablets, stats) {
|
||||
status := rt.ReplicationStatus
|
||||
ti := rt.TabletInfo
|
||||
if pos == nil {
|
||||
if status == nil {
|
||||
lines = append(lines, fmtTabletAwkable(ti)+" <err> <err> <err>")
|
||||
} else {
|
||||
lines = append(lines, fmtTabletAwkable(ti)+fmt.Sprintf(" %v:%010d %v:%010d %v", pos.MasterLogFile, pos.MasterLogPosition, pos.MasterLogFileIo, pos.MasterLogPositionIo, pos.SecondsBehindMaster))
|
||||
lines = append(lines, fmtTabletAwkable(ti)+fmt.Sprintf(" %v %v %v", status.Position, status.IOPosition, status.SecondsBehindMaster))
|
||||
}
|
||||
}
|
||||
for _, l := range lines {
|
||||
|
@ -2134,7 +2134,7 @@ func commandGetShardReplication(wr *wrangler.Wrangler, subFlags *flag.FlagSet, a
|
|||
|
||||
type rTablet struct {
|
||||
*topo.TabletInfo
|
||||
*myproto.ReplicationPosition
|
||||
*myproto.ReplicationStatus
|
||||
}
|
||||
|
||||
type rTablets []*rTablet
|
||||
|
@ -2148,12 +2148,12 @@ func (rts rTablets) Swap(i, j int) { rts[i], rts[j] = rts[j], rts[i] }
|
|||
func (rts rTablets) Less(i, j int) bool {
|
||||
// NOTE: Swap order of unpack to reverse sort
|
||||
l, r := rts[j], rts[i]
|
||||
// l or r ReplicationPosition would be nil if we failed to get
|
||||
// l or r ReplicationStatus would be nil if we failed to get
|
||||
// the position (put them at the beginning of the list)
|
||||
if l.ReplicationPosition == nil {
|
||||
return r.ReplicationPosition != nil
|
||||
if l.ReplicationStatus == nil {
|
||||
return r.ReplicationStatus != nil
|
||||
}
|
||||
if r.ReplicationPosition == nil {
|
||||
if r.ReplicationStatus == nil {
|
||||
return false
|
||||
}
|
||||
var lTypeMaster, rTypeMaster int
|
||||
|
@ -2167,22 +2167,18 @@ func (rts rTablets) Less(i, j int) bool {
|
|||
return true
|
||||
}
|
||||
if lTypeMaster == rTypeMaster {
|
||||
if l.MapKeyIo() < r.MapKeyIo() {
|
||||
return true
|
||||
}
|
||||
if l.MapKeyIo() == r.MapKeyIo() {
|
||||
if l.MapKey() < r.MapKey() {
|
||||
return true
|
||||
}
|
||||
if l.IOPosition.Equal(r.IOPosition) {
|
||||
return !l.Position.AtLeast(r.Position)
|
||||
}
|
||||
return !l.IOPosition.AtLeast(r.IOPosition)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func sortReplicatingTablets(tablets []*topo.TabletInfo, positions []*myproto.ReplicationPosition) []*rTablet {
|
||||
func sortReplicatingTablets(tablets []*topo.TabletInfo, stats []*myproto.ReplicationStatus) []*rTablet {
|
||||
rtablets := make([]*rTablet, len(tablets))
|
||||
for i, pos := range positions {
|
||||
rtablets[i] = &rTablet{tablets[i], pos}
|
||||
for i, status := range stats {
|
||||
rtablets[i] = &rTablet{TabletInfo: tablets[i], ReplicationStatus: status}
|
||||
}
|
||||
sort.Sort(rTablets(rtablets))
|
||||
return rtablets
|
||||
|
|
|
@ -274,19 +274,19 @@ func (sdw *SplitDiffWorker) synchronizeReplication() error {
|
|||
}
|
||||
for i, ss := range sdw.shardInfo.SourceShards {
|
||||
// find where we should be stopping
|
||||
pos, err := blpPositionList.FindBlpPositionById(ss.Uid)
|
||||
blpPos, err := blpPositionList.FindBlpPositionById(ss.Uid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("no binlog position on the master for Uid %v", ss.Uid)
|
||||
}
|
||||
|
||||
// stop replication
|
||||
sdw.wr.Logger().Infof("Stopping slave[%v] %v at a minimum of %v", i, sdw.sourceAliases[i], pos.GTIDField)
|
||||
stoppedAt, err := sdw.wr.ActionInitiator().StopSlaveMinimum(sdw.sourceAliases[i], pos.GTIDField.Value, 30*time.Second)
|
||||
sdw.wr.Logger().Infof("Stopping slave[%v] %v at a minimum of %v", i, sdw.sourceAliases[i], blpPos.Position)
|
||||
stoppedAt, err := sdw.wr.ActionInitiator().StopSlaveMinimum(sdw.sourceAliases[i], blpPos.Position, 30*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", sdw.sourceAliases[i], pos.GTIDField, err)
|
||||
return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", sdw.sourceAliases[i], blpPos.Position, err)
|
||||
}
|
||||
stopPositionList.Entries[i].Uid = ss.Uid
|
||||
stopPositionList.Entries[i].GTIDField = stoppedAt.MasterLogGTIDField
|
||||
stopPositionList.Entries[i].Position = stoppedAt.Position
|
||||
|
||||
// change the cleaner actions from ChangeSlaveType(rdonly)
|
||||
// to StartSlave() + ChangeSlaveType(spare)
|
||||
|
@ -308,10 +308,10 @@ func (sdw *SplitDiffWorker) synchronizeReplication() error {
|
|||
|
||||
// 4 - wait until the destination checker is equal or passed
|
||||
// that master binlog position, and stop its replication.
|
||||
sdw.wr.Logger().Infof("Waiting for destination checker %v to catch up to %v", sdw.destinationAlias, masterPos.MasterLogGTIDField)
|
||||
_, err = sdw.wr.ActionInitiator().StopSlaveMinimum(sdw.destinationAlias, masterPos.MasterLogGTIDField.Value, 30*time.Second)
|
||||
sdw.wr.Logger().Infof("Waiting for destination checker %v to catch up to %v", sdw.destinationAlias, masterPos)
|
||||
_, err = sdw.wr.ActionInitiator().StopSlaveMinimum(sdw.destinationAlias, masterPos, 30*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("StopSlaveMinimum for %v at %v failed: %v", sdw.destinationAlias, masterPos.MasterLogGTIDField, err)
|
||||
return fmt.Errorf("StopSlaveMinimum for %v at %v failed: %v", sdw.destinationAlias, masterPos, err)
|
||||
}
|
||||
wrangler.RecordStartSlaveAction(sdw.cleaner, sdw.destinationAlias, 30*time.Second)
|
||||
action, err := wrangler.FindChangeSlaveTypeActionByTarget(sdw.cleaner, sdw.destinationAlias)
|
||||
|
|
|
@ -582,7 +582,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error {
|
|||
// then create and populate the blp_checkpoint table
|
||||
if strings.Index(vscw.strategy, "populateBlpCheckpoint") != -1 {
|
||||
// get the current position from the source
|
||||
pos, err := vscw.wr.ActionInitiator().SlavePosition(vscw.sourceTablet, 30*time.Second)
|
||||
status, err := vscw.wr.ActionInitiator().SlaveStatus(vscw.sourceTablet, 30*time.Second)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -593,7 +593,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error {
|
|||
if strings.Index(vscw.strategy, "dontStartBinlogPlayer") != -1 {
|
||||
flags = binlogplayer.BLP_FLAG_DONT_START
|
||||
}
|
||||
queries = append(queries, binlogplayer.PopulateBlpCheckpoint(0, pos.MasterLogGTIDField.Value, time.Now().Unix(), flags))
|
||||
queries = append(queries, binlogplayer.PopulateBlpCheckpoint(0, status.Position, time.Now().Unix(), flags))
|
||||
for _, tabletAlias := range vscw.destinationAliases {
|
||||
destinationWaitGroup.Add(1)
|
||||
go func(ti *topo.TabletInfo) {
|
||||
|
|
|
@ -286,13 +286,13 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error {
|
|||
}
|
||||
|
||||
// stop replication
|
||||
vsdw.wr.Logger().Infof("Stopping slave %v at a minimum of %v", vsdw.sourceAlias, pos.GTIDField)
|
||||
stoppedAt, err := vsdw.wr.ActionInitiator().StopSlaveMinimum(vsdw.sourceAlias, pos.GTIDField.Value, 30*time.Second)
|
||||
vsdw.wr.Logger().Infof("Stopping slave %v at a minimum of %v", vsdw.sourceAlias, pos.Position)
|
||||
stoppedAt, err := vsdw.wr.ActionInitiator().StopSlaveMinimum(vsdw.sourceAlias, pos.Position, 30*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", vsdw.sourceAlias, pos.GTIDField, err)
|
||||
return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", vsdw.sourceAlias, pos.Position, err)
|
||||
}
|
||||
stopPositionList.Entries[0].Uid = ss.Uid
|
||||
stopPositionList.Entries[0].GTIDField = stoppedAt.MasterLogGTIDField
|
||||
stopPositionList.Entries[0].Position = stoppedAt.Position
|
||||
|
||||
// change the cleaner actions from ChangeSlaveType(rdonly)
|
||||
// to StartSlave() + ChangeSlaveType(spare)
|
||||
|
@ -313,10 +313,10 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error {
|
|||
|
||||
// 4 - wait until the destination checker is equal or passed
|
||||
// that master binlog position, and stop its replication.
|
||||
vsdw.wr.Logger().Infof("Waiting for destination checker %v to catch up to %v", vsdw.destinationAlias, masterPos.MasterLogGTIDField)
|
||||
_, err = vsdw.wr.ActionInitiator().StopSlaveMinimum(vsdw.destinationAlias, masterPos.MasterLogGTIDField.Value, 30*time.Second)
|
||||
vsdw.wr.Logger().Infof("Waiting for destination checker %v to catch up to %v", vsdw.destinationAlias, masterPos)
|
||||
_, err = vsdw.wr.ActionInitiator().StopSlaveMinimum(vsdw.destinationAlias, masterPos, 30*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("StopSlaveMinimum on %v at %v failed: %v", vsdw.destinationAlias, masterPos.MasterLogGTIDField, err)
|
||||
return fmt.Errorf("StopSlaveMinimum on %v at %v failed: %v", vsdw.destinationAlias, masterPos, err)
|
||||
}
|
||||
wrangler.RecordStartSlaveAction(vsdw.cleaner, vsdw.destinationAlias, 30*time.Second)
|
||||
action, err = wrangler.FindChangeSlaveTypeActionByTarget(vsdw.cleaner, vsdw.destinationAlias)
|
||||
|
|
|
@ -226,9 +226,9 @@ func (wr *Wrangler) makeMastersReadOnly(shards []*topo.ShardInfo) error {
|
|||
return rec.Error()
|
||||
}
|
||||
|
||||
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]*myproto.ReplicationPosition, error) {
|
||||
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]myproto.ReplicationPosition, error) {
|
||||
mu := sync.Mutex{}
|
||||
result := make(map[*topo.ShardInfo]*myproto.ReplicationPosition)
|
||||
result := make(map[*topo.ShardInfo]myproto.ReplicationPosition)
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
rec := concurrency.AllErrorRecorder{}
|
||||
|
@ -259,7 +259,7 @@ func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.Shar
|
|||
return result, rec.Error()
|
||||
}
|
||||
|
||||
func (wr *Wrangler) waitForFilteredReplication(sourcePositions map[*topo.ShardInfo]*myproto.ReplicationPosition, destinationShards []*topo.ShardInfo) error {
|
||||
func (wr *Wrangler) waitForFilteredReplication(sourcePositions map[*topo.ShardInfo]myproto.ReplicationPosition, destinationShards []*topo.ShardInfo) error {
|
||||
wg := sync.WaitGroup{}
|
||||
rec := concurrency.AllErrorRecorder{}
|
||||
for _, si := range destinationShards {
|
||||
|
@ -272,9 +272,9 @@ func (wr *Wrangler) waitForFilteredReplication(sourcePositions map[*topo.ShardIn
|
|||
}
|
||||
|
||||
// find the position it should be at
|
||||
for s, rp := range sourcePositions {
|
||||
for s, pos := range sourcePositions {
|
||||
if s.Keyspace() == sourceShard.Keyspace && s.ShardName() == sourceShard.Shard {
|
||||
blpPosition.GTIDField = rp.MasterLogGTIDField
|
||||
blpPosition.Position = pos
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -602,7 +602,7 @@ func (wr *Wrangler) migrateServedFrom(ki *topo.KeyspaceInfo, si *topo.ShardInfo,
|
|||
event.DispatchUpdate(ev, "waiting for destination master to catch up to source master")
|
||||
if err := wr.ai.WaitBlpPosition(si.MasterAlias, blproto.BlpPosition{
|
||||
Uid: 0,
|
||||
GTIDField: masterPosition.MasterLogGTIDField,
|
||||
Position: masterPosition,
|
||||
}, wr.ActionTimeout()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -146,9 +146,8 @@ func (wr *Wrangler) reparentShardLocked(keyspace, shard string, masterElectTable
|
|||
return err
|
||||
}
|
||||
|
||||
// ShardReplicationPositions returns the ReplicationPositions for all
|
||||
// the tablets in a shard.
|
||||
func (wr *Wrangler) ShardReplicationPositions(keyspace, shard string) ([]*topo.TabletInfo, []*myproto.ReplicationPosition, error) {
|
||||
// ShardReplicationStatuses returns the ReplicationStatus for each tablet in a shard.
|
||||
func (wr *Wrangler) ShardReplicationStatuses(keyspace, shard string) ([]*topo.TabletInfo, []*myproto.ReplicationStatus, error) {
|
||||
shardInfo, err := wr.ts.GetShard(keyspace, shard)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
@ -161,19 +160,19 @@ func (wr *Wrangler) ShardReplicationPositions(keyspace, shard string) ([]*topo.T
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
tabletMap, posMap, err := wr.shardReplicationPositions(shardInfo)
|
||||
tabletMap, posMap, err := wr.shardReplicationStatuses(shardInfo)
|
||||
return tabletMap, posMap, wr.unlockShard(keyspace, shard, actionNode, lockPath, err)
|
||||
}
|
||||
|
||||
func (wr *Wrangler) shardReplicationPositions(shardInfo *topo.ShardInfo) ([]*topo.TabletInfo, []*myproto.ReplicationPosition, error) {
|
||||
func (wr *Wrangler) shardReplicationStatuses(shardInfo *topo.ShardInfo) ([]*topo.TabletInfo, []*myproto.ReplicationStatus, error) {
|
||||
// FIXME(msolomon) this assumes no hierarchical replication, which is currently the case.
|
||||
tabletMap, err := topo.GetTabletMapForShard(wr.ts, shardInfo.Keyspace(), shardInfo.ShardName())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
tablets := topotools.CopyMapValues(tabletMap, []*topo.TabletInfo{}).([]*topo.TabletInfo)
|
||||
positions, err := wr.tabletReplicationPositions(tablets)
|
||||
return tablets, positions, err
|
||||
stats, err := wr.tabletReplicationStatuses(tablets)
|
||||
return tablets, stats, err
|
||||
}
|
||||
|
||||
// ReparentTablet attempts to reparent this tablet to the current
|
||||
|
@ -213,13 +212,13 @@ func (wr *Wrangler) ReparentTablet(tabletAlias topo.TabletAlias) error {
|
|||
return fmt.Errorf("master %v and potential slave not in same keyspace/shard", shardInfo.MasterAlias)
|
||||
}
|
||||
|
||||
pos, err := wr.ai.SlavePosition(ti, wr.ActionTimeout())
|
||||
status, err := wr.ai.SlaveStatus(ti, wr.ActionTimeout())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wr.Logger().Infof("slave tablet position: %v %v %v", tabletAlias, ti.MysqlAddr(), pos.MapKey())
|
||||
wr.Logger().Infof("slave tablet position: %v %v %v", tabletAlias, ti.MysqlAddr(), status.Position)
|
||||
|
||||
actionPath, err := wr.ai.ReparentPosition(masterTi.Alias, pos)
|
||||
actionPath, err := wr.ai.ReparentPosition(masterTi.Alias, status.Position)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -229,7 +228,7 @@ func (wr *Wrangler) ReparentTablet(tabletAlias topo.TabletAlias) error {
|
|||
}
|
||||
rsd := result.(*actionnode.RestartSlaveData)
|
||||
|
||||
wr.Logger().Infof("master tablet position: %v %v %v", shardInfo.MasterAlias, masterTi.MysqlAddr(), rsd.ReplicationState.ReplicationPosition.MapKey())
|
||||
wr.Logger().Infof("master tablet position: %v %v %v", shardInfo.MasterAlias, masterTi.MysqlAddr(), rsd.ReplicationStatus.Position)
|
||||
// An orphan is already in the replication graph but it is
|
||||
// disconnected, hence we have to force this action.
|
||||
rsd.Force = ti.Type == topo.TYPE_LAG_ORPHAN
|
||||
|
|
|
@ -17,7 +17,7 @@ import (
|
|||
// helper struct to queue up results
|
||||
type rpcContext struct {
|
||||
tablet *topo.TabletInfo
|
||||
position *myproto.ReplicationPosition
|
||||
status *myproto.ReplicationStatus
|
||||
err error
|
||||
}
|
||||
|
||||
|
@ -64,7 +64,7 @@ func (wr *Wrangler) checkSlaveReplication(tabletMap map[topo.TabletAlias]*topo.T
|
|||
return
|
||||
}
|
||||
|
||||
replPos, err := wr.ai.SlavePosition(tablet, wr.ActionTimeout())
|
||||
status, err := wr.ai.SlaveStatus(tablet, wr.ActionTimeout())
|
||||
if err != nil {
|
||||
if tablet.Type == topo.TYPE_BACKUP {
|
||||
wr.logger.Warningf(" failed to get slave position from backup tablet %v, either wait for backup to finish or scrap tablet (%v)", tablet.Alias, err)
|
||||
|
@ -75,15 +75,13 @@ func (wr *Wrangler) checkSlaveReplication(tabletMap map[topo.TabletAlias]*topo.T
|
|||
}
|
||||
|
||||
if !masterIsDead {
|
||||
// This case used to be handled by the timeout check below, but checking
|
||||
// it explicitly provides a more informative error message.
|
||||
if replPos.SecondsBehindMaster == myproto.InvalidLagSeconds {
|
||||
if !status.SlaveRunning() {
|
||||
err = fmt.Errorf("slave %v is not replicating (Slave_IO or Slave_SQL not running), can't complete reparent in time", tablet.Alias)
|
||||
wr.logger.Errorf(" %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
var dur time.Duration = time.Duration(uint(time.Second) * replPos.SecondsBehindMaster)
|
||||
var dur time.Duration = time.Duration(uint(time.Second) * status.SecondsBehindMaster)
|
||||
if dur > wr.ActionTimeout() {
|
||||
err = fmt.Errorf("slave is too far behind to complete reparent in time (%v>%v), either increase timeout using 'vtctl -wait-time XXX ReparentShard ...' or scrap tablet %v", dur, wr.ActionTimeout(), tablet.Alias)
|
||||
wr.logger.Errorf(" %v", err)
|
||||
|
@ -101,7 +99,7 @@ func (wr *Wrangler) checkSlaveReplication(tabletMap map[topo.TabletAlias]*topo.T
|
|||
// Check all the tablets to see if we can proceed with reparenting.
|
||||
// masterPosition is supplied from the demoted master if we are doing
|
||||
// this gracefully.
|
||||
func (wr *Wrangler) checkSlaveConsistency(tabletMap map[uint32]*topo.TabletInfo, masterPosition *myproto.ReplicationPosition) error {
|
||||
func (wr *Wrangler) checkSlaveConsistency(tabletMap map[uint32]*topo.TabletInfo, masterPosition myproto.ReplicationPosition) error {
|
||||
wr.logger.Infof("checkSlaveConsistency %v %#v", topotools.MapKeys(tabletMap), masterPosition)
|
||||
|
||||
// FIXME(msolomon) Something still feels clumsy here and I can't put my finger on it.
|
||||
|
@ -112,31 +110,28 @@ func (wr *Wrangler) checkSlaveConsistency(tabletMap map[uint32]*topo.TabletInfo,
|
|||
calls <- ctx
|
||||
}()
|
||||
|
||||
var args *myproto.ReplicationPosition
|
||||
if masterPosition != nil {
|
||||
var waitPos myproto.ReplicationPosition
|
||||
if !masterPosition.IsZero() {
|
||||
// If the master position is known, do our best to wait for replication to catch up.
|
||||
args = masterPosition
|
||||
waitPos = masterPosition
|
||||
} else {
|
||||
// In the case where a master is down, look for the last bit of data copied and wait
|
||||
// for that to apply. That gives us a chance to wait for all data.
|
||||
replPos, err := wr.ai.SlavePosition(ti, wr.ActionTimeout())
|
||||
status, err := wr.ai.SlaveStatus(ti, wr.ActionTimeout())
|
||||
if err != nil {
|
||||
ctx.err = err
|
||||
return
|
||||
}
|
||||
args = &myproto.ReplicationPosition{
|
||||
MasterLogFile: replPos.MasterLogFileIo,
|
||||
MasterLogPositionIo: replPos.MasterLogPositionIo,
|
||||
}
|
||||
waitPos = status.IOPosition
|
||||
}
|
||||
|
||||
// This option waits for the SQL thread to apply all changes to this instance.
|
||||
rp, err := wr.ai.WaitSlavePosition(ti, args, wr.ActionTimeout())
|
||||
status, err := wr.ai.WaitSlavePosition(ti, waitPos, wr.ActionTimeout())
|
||||
if err != nil {
|
||||
ctx.err = err
|
||||
return
|
||||
}
|
||||
ctx.position = rp
|
||||
ctx.status = status
|
||||
}
|
||||
|
||||
for _, tablet := range tabletMap {
|
||||
|
@ -150,7 +145,7 @@ func (wr *Wrangler) checkSlaveConsistency(tabletMap map[uint32]*topo.TabletInfo,
|
|||
ctx := <-calls
|
||||
mapKey := "unavailable-tablet-error"
|
||||
if ctx.err == nil {
|
||||
mapKey = ctx.position.MapKey()
|
||||
mapKey = ctx.status.Position.String()
|
||||
}
|
||||
if _, ok := positionMap[mapKey]; !ok {
|
||||
positionMap[mapKey] = make([]uint32, 0, 32)
|
||||
|
@ -161,8 +156,8 @@ func (wr *Wrangler) checkSlaveConsistency(tabletMap map[uint32]*topo.TabletInfo,
|
|||
if len(positionMap) == 1 {
|
||||
// great, everyone agrees
|
||||
// demotedMasterReplicationState is nil if demotion failed
|
||||
if masterPosition != nil {
|
||||
demotedMapKey := masterPosition.MapKey()
|
||||
if !masterPosition.IsZero() {
|
||||
demotedMapKey := masterPosition.String()
|
||||
if _, ok := positionMap[demotedMapKey]; !ok {
|
||||
for slaveMapKey := range positionMap {
|
||||
return fmt.Errorf("slave position doesn't match demoted master: %v != %v", demotedMapKey,
|
||||
|
@ -214,11 +209,11 @@ func (wr *Wrangler) stopSlaves(tabletMap map[topo.TabletAlias]*topo.TabletInfo)
|
|||
return nil
|
||||
}
|
||||
|
||||
// Return a list of corresponding replication positions.
|
||||
// Handles masters and slaves, but it's up to the caller to guarantee
|
||||
// all tablets are in the same shard.
|
||||
func (wr *Wrangler) tabletReplicationPositions(tablets []*topo.TabletInfo) ([]*myproto.ReplicationPosition, error) {
|
||||
wr.logger.Infof("tabletReplicationPositions %v", tablets)
|
||||
// tabletReplicationStatuses returns the ReplicationStatus of each tablet in
|
||||
// tablets. It handles masters and slaves, but it's up to the caller to
|
||||
// guarantee all tablets are in the same shard.
|
||||
func (wr *Wrangler) tabletReplicationStatuses(tablets []*topo.TabletInfo) ([]*myproto.ReplicationStatus, error) {
|
||||
wr.logger.Infof("tabletReplicationStatuses: %v", tablets)
|
||||
calls := make([]*rpcContext, len(tablets))
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
|
@ -228,9 +223,16 @@ func (wr *Wrangler) tabletReplicationPositions(tablets []*topo.TabletInfo) ([]*m
|
|||
ctx := &rpcContext{tablet: ti}
|
||||
calls[idx] = ctx
|
||||
if ti.Type == topo.TYPE_MASTER {
|
||||
ctx.position, ctx.err = wr.ai.MasterPosition(ti, wr.ActionTimeout())
|
||||
pos, err := wr.ai.MasterPosition(ti, wr.ActionTimeout())
|
||||
ctx.err = err
|
||||
if err == nil {
|
||||
ctx.status = &myproto.ReplicationStatus{
|
||||
Position: pos,
|
||||
IOPosition: pos,
|
||||
}
|
||||
}
|
||||
} else if ti.IsSlaveType() {
|
||||
ctx.position, ctx.err = wr.ai.SlavePosition(ti, wr.ActionTimeout())
|
||||
ctx.status, ctx.err = wr.ai.SlaveStatus(ti, wr.ActionTimeout())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -247,33 +249,33 @@ func (wr *Wrangler) tabletReplicationPositions(tablets []*topo.TabletInfo) ([]*m
|
|||
wg.Wait()
|
||||
|
||||
someErrors := false
|
||||
positions := make([]*myproto.ReplicationPosition, len(tablets))
|
||||
stats := make([]*myproto.ReplicationStatus, len(tablets))
|
||||
for i, ctx := range calls {
|
||||
if ctx == nil {
|
||||
continue
|
||||
}
|
||||
if ctx.err != nil {
|
||||
wr.logger.Warningf("could not get replication position for tablet %v %v", ctx.tablet.Alias, ctx.err)
|
||||
wr.logger.Warningf("could not get replication status for tablet %v %v", ctx.tablet.Alias, ctx.err)
|
||||
someErrors = true
|
||||
} else {
|
||||
positions[i] = ctx.position
|
||||
stats[i] = ctx.status
|
||||
}
|
||||
}
|
||||
if someErrors {
|
||||
return positions, fmt.Errorf("partial position map, some errors")
|
||||
return stats, fmt.Errorf("partial position map, some errors")
|
||||
}
|
||||
return positions, nil
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func (wr *Wrangler) demoteMaster(ti *topo.TabletInfo) (*myproto.ReplicationPosition, error) {
|
||||
func (wr *Wrangler) demoteMaster(ti *topo.TabletInfo) (myproto.ReplicationPosition, error) {
|
||||
wr.logger.Infof("demote master %v", ti.Alias)
|
||||
actionPath, err := wr.ai.DemoteMaster(ti.Alias)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return myproto.ReplicationPosition{}, err
|
||||
}
|
||||
err = wr.WaitForCompletion(actionPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return myproto.ReplicationPosition{}, err
|
||||
}
|
||||
return wr.ai.MasterPosition(ti, wr.ActionTimeout())
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/youtube/vitess/go/event"
|
||||
myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto"
|
||||
"github.com/youtube/vitess/go/vt/topo"
|
||||
"github.com/youtube/vitess/go/vt/topotools"
|
||||
"github.com/youtube/vitess/go/vt/topotools/events"
|
||||
|
@ -50,7 +51,7 @@ func (wr *Wrangler) reparentShardBrutal(ev *events.Reparent, si *topo.ShardInfo,
|
|||
event.DispatchUpdate(ev, "checking slave consistency")
|
||||
wr.logger.Infof("check slaves %v/%v", masterElectTablet.Keyspace, masterElectTablet.Shard)
|
||||
restartableSlaveTabletMap := wr.restartableTabletMap(slaveTabletMap)
|
||||
err = wr.checkSlaveConsistency(restartableSlaveTabletMap, nil)
|
||||
err = wr.checkSlaveConsistency(restartableSlaveTabletMap, myproto.ReplicationPosition{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -8,16 +8,16 @@ from net import bsonrpc
|
|||
from vtdb import dbexceptions
|
||||
|
||||
class Coord(object):
|
||||
GTIDField = None
|
||||
Position = None
|
||||
ServerId = None
|
||||
|
||||
def __init__(self, gtid, server_id = None):
|
||||
self.GTIDField = gtid
|
||||
def __init__(self, replPos, server_id = None):
|
||||
self.Position = replPos
|
||||
self.ServerId = server_id
|
||||
|
||||
|
||||
class EventData(object):
|
||||
Cateory = None
|
||||
Category = None
|
||||
TableName = None
|
||||
PKColNames = None
|
||||
PKValues = None
|
||||
|
@ -50,9 +50,9 @@ class UpdateStreamConnection(object):
|
|||
def close(self):
|
||||
self.client.close()
|
||||
|
||||
def stream_start(self, gtid):
|
||||
def stream_start(self, replPos):
|
||||
try:
|
||||
self.client.stream_call('UpdateStream.ServeUpdateStream', {"GTIDField": gtid})
|
||||
self.client.stream_call('UpdateStream.ServeUpdateStream', {"Position": replPos})
|
||||
response = self.client.stream_next()
|
||||
if response is None:
|
||||
return None
|
||||
|
|
|
@ -25,6 +25,9 @@ class MysqlFlavor(object):
|
|||
'CHANGE MASTER TO MASTER_HOST = ""',
|
||||
]
|
||||
|
||||
def change_master_commands(self, host, port, pos):
|
||||
return None
|
||||
|
||||
def extra_my_cnf(self):
|
||||
"""Returns the path to an extra my_cnf file, or None."""
|
||||
return None
|
||||
|
@ -33,6 +36,26 @@ class MysqlFlavor(object):
|
|||
"""Returns the name of the bootstrap archive for mysqlctl, relative to vitess/data/bootstrap/"""
|
||||
return "mysql-db-dir.tbz"
|
||||
|
||||
def master_position(self, tablet):
|
||||
"""Returns the position from SHOW MASTER STATUS"""
|
||||
return None
|
||||
|
||||
def position_equal(self, a, b):
|
||||
"""Returns true if position 'a' is equal to 'b'"""
|
||||
return None
|
||||
|
||||
def position_at_least(self, a, b):
|
||||
"""Returns true if position 'a' is at least as far along as 'b'."""
|
||||
return None
|
||||
|
||||
def position_after(self, a, b):
|
||||
"""Returns true if position 'a' is after 'b'"""
|
||||
return self.position_at_least(a, b) and not self.position_equal(a, b)
|
||||
|
||||
def position_append(self, pos, gtid):
|
||||
"""Returns a new position with the given GTID appended"""
|
||||
return None
|
||||
|
||||
|
||||
class GoogleMysql(MysqlFlavor):
|
||||
"""Overrides specific to Google MySQL"""
|
||||
|
@ -40,6 +63,44 @@ class GoogleMysql(MysqlFlavor):
|
|||
def extra_my_cnf(self):
|
||||
return environment.vttop + "/config/mycnf/master_google.cnf"
|
||||
|
||||
def master_position(self, tablet):
|
||||
conn, cursor = tablet.connect_dict("")
|
||||
try:
|
||||
cursor.execute("SHOW MASTER STATUS")
|
||||
group_id = cursor.fetchall()[0]["Group_ID"]
|
||||
|
||||
cursor.execute("SHOW BINLOG INFO FOR " + str(group_id))
|
||||
server_id = cursor.fetchall()[0]["Server_ID"]
|
||||
|
||||
return {"GoogleMysql": "%u-%u" % (server_id, group_id)}
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
def position_equal(self, a, b):
|
||||
return int(a["GoogleMysql"].split("-")[1]) == int(
|
||||
b["GoogleMysql"].split("-")[1])
|
||||
|
||||
def position_at_least(self, a, b):
|
||||
return int(a["GoogleMysql"].split("-")[1]) >= int(
|
||||
b["GoogleMysql"].split("-")[1])
|
||||
|
||||
def position_append(self, pos, gtid):
|
||||
if self.position_at_least(pos, gtid):
|
||||
return pos
|
||||
else:
|
||||
return gtid
|
||||
|
||||
def change_master_commands(self, host, port, pos):
|
||||
parts = pos["GoogleMysql"].split("-")
|
||||
server_id = parts[0]
|
||||
group_id = parts[1]
|
||||
return [
|
||||
"SET binlog_group_id = %s, master_server_id = %s" %
|
||||
(group_id, server_id),
|
||||
"CHANGE MASTER TO "
|
||||
"MASTER_HOST='%s', MASTER_PORT=%u, CONNECT_USING_GROUP_ID" %
|
||||
(host, port)]
|
||||
|
||||
|
||||
class MariaDB(MysqlFlavor):
|
||||
"""Overrides specific to MariaDB"""
|
||||
|
@ -71,4 +132,5 @@ elif environment.mysql_flavor == "GoogleMysql":
|
|||
mysql_flavor = GoogleMysql()
|
||||
else:
|
||||
mysql_flavor = MysqlFlavor()
|
||||
logging.warning("Unknown MYSQL_FLAVOR '%s', using defaults" % environment.mysql_flavor)
|
||||
logging.warning(
|
||||
"Unknown MYSQL_FLAVOR '%s', using defaults", environment.mysql_flavor)
|
||||
|
|
102
test/reparent.py
102
test/reparent.py
|
@ -3,7 +3,7 @@
|
|||
import warnings
|
||||
# Dropping a table inexplicably produces a warning despite
|
||||
# the "IF EXISTS" clause. Squelch these warnings.
|
||||
warnings.simplefilter("ignore")
|
||||
warnings.simplefilter('ignore')
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
@ -23,6 +23,7 @@ tablet_62044 = tablet.Tablet(62044)
|
|||
tablet_41983 = tablet.Tablet(41983)
|
||||
tablet_31981 = tablet.Tablet(31981)
|
||||
|
||||
|
||||
def setUpModule():
|
||||
try:
|
||||
environment.topo_server_setup()
|
||||
|
@ -40,6 +41,7 @@ def setUpModule():
|
|||
tearDownModule()
|
||||
raise
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
|
@ -75,14 +77,17 @@ class TestReparent(unittest.TestCase):
|
|||
def _check_db_addr(self, shard, db_type, expected_port, cell='test_nj'):
|
||||
ep = utils.run_vtctl_json(['GetEndPoints', cell, 'test_keyspace/' + shard,
|
||||
db_type])
|
||||
self.assertEqual(len(ep['entries']), 1 , 'Wrong number of entries: %s' % str(ep))
|
||||
self.assertEqual(
|
||||
len(ep['entries']), 1, 'Wrong number of entries: %s' % str(ep))
|
||||
port = ep['entries'][0]['named_port_map']['_vtocc']
|
||||
self.assertEqual(port, expected_port,
|
||||
'Unexpected port: %u != %u from %s' % (port, expected_port,
|
||||
str(ep)))
|
||||
host = ep['entries'][0]['host']
|
||||
if not host.startswith(utils.hostname):
|
||||
self.fail('Invalid hostname %s was expecting something starting with %s' % (host, utils.hostname))
|
||||
self.fail(
|
||||
'Invalid hostname %s was expecting something starting with %s' %
|
||||
(host, utils.hostname))
|
||||
|
||||
def test_master_to_spare_state_change_impossible(self):
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
|
@ -121,9 +126,10 @@ class TestReparent(unittest.TestCase):
|
|||
|
||||
# wait for all tablets to start
|
||||
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
|
||||
t.wait_for_vttablet_state("SERVING")
|
||||
t.wait_for_vttablet_state('SERVING')
|
||||
|
||||
# Recompute the shard layout node - until you do that, it might not be valid.
|
||||
# Recompute the shard layout node - until you do that, it might not be
|
||||
# valid.
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/0'])
|
||||
utils.validate_topology()
|
||||
|
||||
|
@ -146,15 +152,17 @@ class TestReparent(unittest.TestCase):
|
|||
'test_keyspace/0',
|
||||
tablet_62044.tablet_alias],
|
||||
expect_fail=True)
|
||||
logging.debug("Failed ReparentShard output:\n" + stderr)
|
||||
logging.debug('Failed ReparentShard output:\n' + stderr)
|
||||
if 'ValidateShard verification failed' not in stderr:
|
||||
self.fail("didn't find the right error strings in failed ReparentShard: " + stderr)
|
||||
self.fail(
|
||||
"didn't find the right error strings in failed ReparentShard: " +
|
||||
stderr)
|
||||
|
||||
# Should timeout and fail
|
||||
stdout, stderr = utils.run_vtctl(['-wait-time', '5s', 'ScrapTablet',
|
||||
tablet_62344.tablet_alias],
|
||||
expect_fail=True)
|
||||
logging.debug("Failed ScrapTablet output:\n" + stderr)
|
||||
logging.debug('Failed ScrapTablet output:\n' + stderr)
|
||||
if 'deadline exceeded' not in stderr:
|
||||
self.fail("didn't find the right error strings in failed ScrapTablet: " +
|
||||
stderr)
|
||||
|
@ -172,7 +180,7 @@ class TestReparent(unittest.TestCase):
|
|||
os.kill(sp.pid, signal.SIGINT)
|
||||
stdout, stderr = sp.communicate()
|
||||
|
||||
logging.debug("Failed ScrapTablet output:\n" + stderr)
|
||||
logging.debug('Failed ScrapTablet output:\n' + stderr)
|
||||
if 'interrupted' not in stderr:
|
||||
self.fail("didn't find the right error strings in failed ScrapTablet: " +
|
||||
stderr)
|
||||
|
@ -233,18 +241,21 @@ class TestReparent(unittest.TestCase):
|
|||
tablet_31981.init_tablet('replica', 'test_keyspace', shard_id, start=True,
|
||||
wait_for_start=False)
|
||||
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
|
||||
t.wait_for_vttablet_state("SERVING")
|
||||
t.wait_for_vttablet_state('SERVING')
|
||||
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/' + shard_id])
|
||||
self.assertEqual(shard['Cells'], ['test_nj', 'test_ny'], 'wrong list of cell in Shard: %s' % str(shard['Cells']))
|
||||
self.assertEqual(
|
||||
shard['Cells'], ['test_nj', 'test_ny'],
|
||||
'wrong list of cell in Shard: %s' % str(shard['Cells']))
|
||||
|
||||
# Recompute the shard layout node - until you do that, it might not be valid.
|
||||
# Recompute the shard layout node - until you do that, it might not be
|
||||
# valid.
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/' + shard_id])
|
||||
utils.validate_topology()
|
||||
|
||||
# Force the slaves to reparent assuming that all the datasets are identical.
|
||||
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
|
||||
t.reset_replication()
|
||||
utils.pause("force ReparentShard?")
|
||||
utils.pause('force ReparentShard?')
|
||||
utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/' + shard_id,
|
||||
tablet_62344.tablet_alias])
|
||||
utils.validate_topology(ping_tablets=True)
|
||||
|
@ -260,7 +271,7 @@ class TestReparent(unittest.TestCase):
|
|||
self.assertEqual(srvShard['MasterCell'], 'test_nj')
|
||||
|
||||
# Perform a graceful reparent operation to another cell.
|
||||
utils.pause("graceful ReparentShard?")
|
||||
utils.pause('graceful ReparentShard?')
|
||||
utils.run_vtctl(['ReparentShard', 'test_keyspace/' + shard_id,
|
||||
tablet_31981.tablet_alias], auto_log=True)
|
||||
utils.validate_topology()
|
||||
|
@ -278,7 +289,6 @@ class TestReparent(unittest.TestCase):
|
|||
tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983,
|
||||
tablet_31981])
|
||||
|
||||
|
||||
def test_reparent_graceful_range_based(self):
|
||||
shard_id = '0000000000000000-FFFFFFFFFFFFFFFF'
|
||||
self._test_reparent_graceful(shard_id)
|
||||
|
@ -311,20 +321,21 @@ class TestReparent(unittest.TestCase):
|
|||
tablet_31981.init_tablet('replica', 'test_keyspace', shard_id, start=True,
|
||||
wait_for_start=False)
|
||||
for t in [tablet_62044, tablet_41983, tablet_31981]:
|
||||
t.wait_for_vttablet_state("SERVING")
|
||||
t.wait_for_vttablet_state('SERVING')
|
||||
if environment.topo_server_implementation == 'zookeeper':
|
||||
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/' + shard_id])
|
||||
self.assertEqual(shard['Cells'], ['test_nj', 'test_ny'],
|
||||
'wrong list of cell in Shard: %s' % str(shard['Cells']))
|
||||
|
||||
# Recompute the shard layout node - until you do that, it might not be valid.
|
||||
# Recompute the shard layout node - until you do that, it might not be
|
||||
# valid.
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/' + shard_id])
|
||||
utils.validate_topology()
|
||||
|
||||
# Force the slaves to reparent assuming that all the datasets are identical.
|
||||
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
|
||||
t.reset_replication()
|
||||
utils.pause("force ReparentShard?")
|
||||
utils.pause('force ReparentShard?')
|
||||
utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/' + shard_id,
|
||||
tablet_62344.tablet_alias])
|
||||
utils.validate_topology(ping_tablets=True)
|
||||
|
@ -351,7 +362,7 @@ class TestReparent(unittest.TestCase):
|
|||
stdout=utils.devnull)
|
||||
|
||||
# Perform a graceful reparent operation.
|
||||
utils.pause("graceful ReparentShard?")
|
||||
utils.pause('graceful ReparentShard?')
|
||||
utils.run_vtctl(['ReparentShard', 'test_keyspace/' + shard_id,
|
||||
tablet_62044.tablet_alias], auto_log=True)
|
||||
utils.validate_topology()
|
||||
|
@ -379,7 +390,6 @@ class TestReparent(unittest.TestCase):
|
|||
|
||||
tablet_62044.kill_vttablet()
|
||||
|
||||
|
||||
# This is a manual test to check error formatting.
|
||||
def _test_reparent_slave_offline(self, shard_id='0'):
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
|
@ -404,9 +414,10 @@ class TestReparent(unittest.TestCase):
|
|||
|
||||
# wait for all tablets to start
|
||||
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
|
||||
t.wait_for_vttablet_state("SERVING")
|
||||
t.wait_for_vttablet_state('SERVING')
|
||||
|
||||
# Recompute the shard layout node - until you do that, it might not be valid.
|
||||
# Recompute the shard layout node - until you do that, it might not be
|
||||
# valid.
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/' + shard_id])
|
||||
utils.validate_topology()
|
||||
|
||||
|
@ -428,7 +439,6 @@ class TestReparent(unittest.TestCase):
|
|||
|
||||
tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983])
|
||||
|
||||
|
||||
# assume a different entity is doing the reparent, and telling us it was done
|
||||
def test_reparent_from_outside(self):
|
||||
self._test_reparent_from_outside(brutal=False)
|
||||
|
@ -473,7 +483,7 @@ class TestReparent(unittest.TestCase):
|
|||
|
||||
# wait for all tablets to start
|
||||
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
|
||||
t.wait_for_vttablet_state("SERVING")
|
||||
t.wait_for_vttablet_state('SERVING')
|
||||
|
||||
# Reparent as a starting point
|
||||
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
|
||||
|
@ -485,23 +495,22 @@ class TestReparent(unittest.TestCase):
|
|||
# 62044 will be the new master
|
||||
# 31981 won't be re-parented, so it will be busted
|
||||
tablet_62044.mquery('', mysql_flavor.promote_slave_commands())
|
||||
new_pos = tablet_62044.mquery('', 'show master status')
|
||||
logging.debug("New master position: %s" % str(new_pos))
|
||||
new_pos = mysql_flavor.master_position(tablet_62044)
|
||||
logging.debug('New master position: %s', str(new_pos))
|
||||
changeMasterCmds = mysql_flavor.change_master_commands(
|
||||
utils.hostname,
|
||||
tablet_62044.mysql_port,
|
||||
new_pos)
|
||||
|
||||
# 62344 will now be a slave of 62044
|
||||
tablet_62344.mquery('', [
|
||||
"RESET MASTER",
|
||||
"RESET SLAVE",
|
||||
"change master to master_host='%s', master_port=%u, master_log_file='%s', master_log_pos=%u" % (utils.hostname, tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
|
||||
'start slave'
|
||||
])
|
||||
tablet_62344.mquery('', ['RESET MASTER', 'RESET SLAVE'] +
|
||||
changeMasterCmds +
|
||||
['START SLAVE'])
|
||||
|
||||
# 41983 will be a slave of 62044
|
||||
tablet_41983.mquery('', [
|
||||
'stop slave',
|
||||
"change master to master_port=%u, master_log_file='%s', master_log_pos=%u" % (tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
|
||||
'start slave'
|
||||
])
|
||||
tablet_41983.mquery('', ['STOP SLAVE'] +
|
||||
changeMasterCmds +
|
||||
['START SLAVE'])
|
||||
|
||||
# in brutal mode, we scrap the old master first
|
||||
if brutal:
|
||||
|
@ -536,15 +545,15 @@ class TestReparent(unittest.TestCase):
|
|||
'test_keyspace/0'])
|
||||
hashed_links = {}
|
||||
for rl in shard_replication['ReplicationLinks']:
|
||||
key = rl['TabletAlias']['Cell'] + "-" + str(rl['TabletAlias']['Uid'])
|
||||
value = rl['Parent']['Cell'] + "-" + str(rl['Parent']['Uid'])
|
||||
key = rl['TabletAlias']['Cell'] + '-' + str(rl['TabletAlias']['Uid'])
|
||||
value = rl['Parent']['Cell'] + '-' + str(rl['Parent']['Uid'])
|
||||
hashed_links[key] = value
|
||||
logging.debug("Got replication links: %s", str(hashed_links))
|
||||
logging.debug('Got replication links: %s', str(hashed_links))
|
||||
expected_links = {'test_nj-41983': 'test_nj-62044'}
|
||||
if not brutal:
|
||||
expected_links['test_nj-62344'] = 'test_nj-62044'
|
||||
self.assertEqual(expected_links, hashed_links,
|
||||
"Got unexpected links: %s != %s" % (str(expected_links),
|
||||
'Got unexpected links: %s != %s' % (str(expected_links),
|
||||
str(hashed_links)))
|
||||
|
||||
_create_vt_insert_test = '''create table vt_insert_test (
|
||||
|
@ -581,10 +590,11 @@ class TestReparent(unittest.TestCase):
|
|||
|
||||
# wait for all tablets to start
|
||||
for t in [tablet_62344, tablet_62044, tablet_31981]:
|
||||
t.wait_for_vttablet_state("SERVING")
|
||||
tablet_41983.wait_for_vttablet_state("NOT_SERVING")
|
||||
t.wait_for_vttablet_state('SERVING')
|
||||
tablet_41983.wait_for_vttablet_state('NOT_SERVING')
|
||||
|
||||
# Recompute the shard layout node - until you do that, it might not be valid.
|
||||
# Recompute the shard layout node - until you do that, it might not be
|
||||
# valid.
|
||||
utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/' + shard_id])
|
||||
utils.validate_topology()
|
||||
|
||||
|
@ -608,7 +618,7 @@ class TestReparent(unittest.TestCase):
|
|||
tablet_41983.mquery('', 'start slave')
|
||||
time.sleep(1)
|
||||
|
||||
utils.pause("check orphan")
|
||||
utils.pause('check orphan')
|
||||
|
||||
utils.run_vtctl(['ReparentTablet', tablet_41983.tablet_alias])
|
||||
|
||||
|
@ -617,7 +627,7 @@ class TestReparent(unittest.TestCase):
|
|||
if len(result) != 1:
|
||||
self.fail('expected 1 row from vt_insert_test: %s' % str(result))
|
||||
|
||||
utils.pause("check lag reparent")
|
||||
utils.pause('check lag reparent')
|
||||
|
||||
tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983,
|
||||
tablet_31981])
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
import warnings
|
||||
# Dropping a table inexplicably produces a warning despite
|
||||
# the "IF EXISTS" clause. Squelch these warnings.
|
||||
warnings.simplefilter("ignore")
|
||||
warnings.simplefilter('ignore')
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
@ -22,11 +22,11 @@ from vtdb import topology
|
|||
from vtdb import update_stream_service
|
||||
from vtdb import vtclient
|
||||
from zk import zkocc
|
||||
|
||||
from mysql_flavor import mysql_flavor
|
||||
|
||||
master_tablet = tablet.Tablet()
|
||||
replica_tablet = tablet.Tablet()
|
||||
master_host = "localhost:%u" % master_tablet.port
|
||||
master_host = 'localhost:%u' % master_tablet.port
|
||||
|
||||
vtgate_server = None
|
||||
vtgate_port = None
|
||||
|
@ -53,37 +53,13 @@ foo varbinary(128),
|
|||
primary key(eid, name)
|
||||
) Engine=InnoDB'''
|
||||
|
||||
# Return a GTID in the format expected for interacting with the Go service
|
||||
# over bsonrpc. The key name is the MySQL flavor, and the value is the GTID.
|
||||
def _make_default_gtid(value):
|
||||
# Flavor defaults to GoogleMysql for now since that's all we support.
|
||||
return {'GoogleMysql': str(value)}
|
||||
|
||||
|
||||
# Compare two GTIDs (if possible) and return an integer that is:
|
||||
# < 0 if a < b
|
||||
# == 0 if a == b
|
||||
# > 0 if a > b
|
||||
def _gtidcmp(a, b):
|
||||
if 'GoogleMysql' not in a or 'GoogleMysql' not in b:
|
||||
raise RuntimeError("only the GoogleMysql flavor of GTID can be compared")
|
||||
|
||||
return int(a['GoogleMysql']) - int(b['GoogleMysql'])
|
||||
|
||||
def _get_master_current_position():
|
||||
return _make_default_gtid(utils.mysql_query(master_tablet.tablet_uid,
|
||||
'vt_test_keyspace',
|
||||
'show master status')[0][4])
|
||||
return mysql_flavor.master_position(master_tablet)
|
||||
|
||||
|
||||
def _get_repl_current_position():
|
||||
conn = MySQLdb.Connect(user='vt_dba',
|
||||
unix_socket=os.path.join(environment.vtdataroot, 'vt_%010d/mysql.sock' % replica_tablet.tablet_uid),
|
||||
db='vt_test_keyspace')
|
||||
cursor = MySQLdb.cursors.DictCursor(conn)
|
||||
cursor.execute('show master status')
|
||||
res = cursor.fetchall()
|
||||
return _make_default_gtid(res[0]['Group_ID'])
|
||||
return mysql_flavor.master_position(replica_tablet)
|
||||
|
||||
|
||||
def setUpModule():
|
||||
|
@ -97,12 +73,11 @@ def setUpModule():
|
|||
|
||||
# start mysql instance external to the test
|
||||
setup_procs = [master_tablet.init_mysql(),
|
||||
replica_tablet.init_mysql()
|
||||
]
|
||||
replica_tablet.init_mysql()]
|
||||
utils.wait_procs(setup_procs)
|
||||
|
||||
# Start up a master mysql and vttablet
|
||||
logging.debug("Setting up tablets")
|
||||
logging.debug('Setting up tablets')
|
||||
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
|
||||
master_tablet.init_tablet('master', 'test_keyspace', '0')
|
||||
replica_tablet.init_tablet('replica', 'test_keyspace', '0')
|
||||
|
@ -116,7 +91,8 @@ def setUpModule():
|
|||
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'])
|
||||
|
||||
vtgate_socket_file = environment.tmproot + '/vtgate.sock'
|
||||
vtgate_server, vtgate_port = utils.vtgate_start(socket_file=vtgate_socket_file)
|
||||
vtgate_server, vtgate_port = utils.vtgate_start(
|
||||
socket_file=vtgate_socket_file)
|
||||
|
||||
master_tablet.start_vttablet()
|
||||
replica_tablet.start_vttablet()
|
||||
|
@ -143,10 +119,11 @@ def setUpModule():
|
|||
tearDownModule()
|
||||
raise
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
if utils.options.skip_teardown:
|
||||
return
|
||||
logging.debug("Tearing down the servers and setup")
|
||||
logging.debug('Tearing down the servers and setup')
|
||||
tablet.Tablet.tablets_running = 2
|
||||
tablet.kill_tablets([master_tablet, replica_tablet])
|
||||
teardown_procs = [master_tablet.teardown_mysql(),
|
||||
|
@ -160,22 +137,24 @@ def tearDownModule():
|
|||
master_tablet.remove_tree()
|
||||
replica_tablet.remove_tree()
|
||||
|
||||
|
||||
class TestUpdateStream(unittest.TestCase):
|
||||
_populate_vt_insert_test = [
|
||||
"insert into vt_insert_test (msg) values ('test %s')" % x
|
||||
for x in xrange(4)]
|
||||
|
||||
def _populate_vt_a(self, count):
|
||||
return ["insert into vt_a (eid, id) values (%d, %d)" % (x, x)
|
||||
return ['insert into vt_a (eid, id) values (%d, %d)' % (x, x)
|
||||
for x in xrange(count + 1) if x > 0]
|
||||
|
||||
def _populate_vt_b(self, count):
|
||||
return ["insert into vt_b (eid, name, foo) values (%d, 'name %s', 'foo %s')" % (x, x, x)
|
||||
for x in xrange(count)]
|
||||
return [
|
||||
"insert into vt_b (eid, name, foo) values (%d, 'name %s', 'foo %s')" %
|
||||
(x, x, x) for x in xrange(count)]
|
||||
|
||||
def setUp(self):
|
||||
self.vtgate_client = zkocc.ZkOccConnection(vtgate_socket_file,
|
||||
"test_nj", 30.0)
|
||||
'test_nj', 30.0)
|
||||
topology.read_topology(self.vtgate_client)
|
||||
|
||||
def tearDown(self):
|
||||
|
@ -185,28 +164,28 @@ class TestUpdateStream(unittest.TestCase):
|
|||
return update_stream_service.UpdateStreamConnection(master_host, 30)
|
||||
|
||||
def _get_replica_stream_conn(self):
|
||||
return update_stream_service.UpdateStreamConnection("localhost:%u" %
|
||||
return update_stream_service.UpdateStreamConnection('localhost:%u' %
|
||||
replica_tablet.port, 30)
|
||||
|
||||
|
||||
def _test_service_disabled(self):
|
||||
start_position = _get_repl_current_position()
|
||||
logging.debug("_test_service_disabled starting @ %s" % start_position)
|
||||
logging.debug('_test_service_disabled starting @ %s', start_position)
|
||||
self._exec_vt_txn(self._populate_vt_insert_test)
|
||||
self._exec_vt_txn(['delete from vt_insert_test',])
|
||||
self._exec_vt_txn(['delete from vt_insert_test'])
|
||||
utils.run_vtctl(['ChangeSlaveType', replica_tablet.tablet_alias, 'spare'])
|
||||
# time.sleep(20)
|
||||
replica_conn = self._get_replica_stream_conn()
|
||||
logging.debug("dialing replica update stream service")
|
||||
logging.debug('dialing replica update stream service')
|
||||
replica_conn.dial()
|
||||
try:
|
||||
data = replica_conn.stream_start(start_position)
|
||||
except Exception, e:
|
||||
logging.debug(str(e))
|
||||
if str(e) == "update stream service is not enabled":
|
||||
logging.debug("Test Service Disabled: Pass")
|
||||
if str(e) == 'update stream service is not enabled':
|
||||
logging.debug('Test Service Disabled: Pass')
|
||||
else:
|
||||
self.fail("Test Service Disabled: Fail - did not throw the correct exception")
|
||||
self.fail(
|
||||
'Test Service Disabled: Fail - did not throw the correct exception')
|
||||
|
||||
v = utils.get_vars(replica_tablet.port)
|
||||
if v['UpdateStreamState'] != 'Disabled':
|
||||
|
@ -216,14 +195,13 @@ class TestUpdateStream(unittest.TestCase):
|
|||
def perform_writes(self, count):
|
||||
for i in xrange(count):
|
||||
self._exec_vt_txn(self._populate_vt_insert_test)
|
||||
self._exec_vt_txn(['delete from vt_insert_test',])
|
||||
|
||||
self._exec_vt_txn(['delete from vt_insert_test'])
|
||||
|
||||
def _test_service_enabled(self):
|
||||
start_position = _get_repl_current_position()
|
||||
logging.debug("_test_service_enabled starting @ %s" % start_position)
|
||||
logging.debug('_test_service_enabled starting @ %s', start_position)
|
||||
utils.run_vtctl(['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
|
||||
logging.debug("sleeping a bit for the replica action to complete")
|
||||
logging.debug('sleeping a bit for the replica action to complete')
|
||||
time.sleep(10)
|
||||
thd = threading.Thread(target=self.perform_writes, name='write_thd',
|
||||
args=(400,))
|
||||
|
@ -237,10 +215,10 @@ class TestUpdateStream(unittest.TestCase):
|
|||
for i in xrange(10):
|
||||
data = replica_conn.stream_next()
|
||||
if data['Category'] == 'DML' and utils.options.verbose == 2:
|
||||
logging.debug("Test Service Enabled: Pass")
|
||||
logging.debug('Test Service Enabled: Pass')
|
||||
break
|
||||
except Exception, e:
|
||||
self.fail("Exception in getting stream from replica: %s\n Traceback %s" %
|
||||
self.fail('Exception in getting stream from replica: %s\n Traceback %s' %
|
||||
(str(e), traceback.print_exc()))
|
||||
thd.join(timeout=30)
|
||||
|
||||
|
@ -251,7 +229,7 @@ class TestUpdateStream(unittest.TestCase):
|
|||
self.assertTrue('DML' in v['UpdateStreamEvents'])
|
||||
self.assertTrue('POS' in v['UpdateStreamEvents'])
|
||||
|
||||
logging.debug("Testing enable -> disable switch starting @ %s" %
|
||||
logging.debug('Testing enable -> disable switch starting @ %s',
|
||||
start_position)
|
||||
replica_conn = self._get_replica_stream_conn()
|
||||
replica_conn.dial()
|
||||
|
@ -266,15 +244,18 @@ class TestUpdateStream(unittest.TestCase):
|
|||
data = replica_conn.stream_next()
|
||||
if data is not None and data['Category'] == 'POS':
|
||||
txn_count += 1
|
||||
logging.error("Test Service Switch: FAIL")
|
||||
logging.error('Test Service Switch: FAIL')
|
||||
return
|
||||
except dbexceptions.DatabaseError, e:
|
||||
self.assertEqual("Fatal Service Error: Disconnecting because the Update Stream service has been disabled", str(e))
|
||||
self.assertEqual(
|
||||
'Fatal Service Error: Disconnecting because the Update Stream '
|
||||
'service has been disabled',
|
||||
str(e))
|
||||
except Exception, e:
|
||||
logging.error("Exception: %s", str(e))
|
||||
logging.error("Traceback: %s", traceback.print_exc())
|
||||
logging.error('Exception: %s', str(e))
|
||||
logging.error('Traceback: %s', traceback.print_exc())
|
||||
self.fail("Update stream returned error '%s'" % str(e))
|
||||
logging.debug("Streamed %d transactions before exiting" % txn_count)
|
||||
logging.debug('Streamed %d transactions before exiting', txn_count)
|
||||
|
||||
def _vtdb_conn(self, host):
|
||||
conn = vtclient.VtOCCConnection(self.vtgate_client, 'test_keyspace', '0',
|
||||
|
@ -285,7 +266,7 @@ class TestUpdateStream(unittest.TestCase):
|
|||
def _exec_vt_txn(self, query_list=None):
|
||||
if not query_list:
|
||||
return
|
||||
vtdb_conn = self._vtdb_conn("localhost:%u" % master_tablet.port)
|
||||
vtdb_conn = self._vtdb_conn('localhost:%u' % master_tablet.port)
|
||||
vtdb_cursor = vtdb_conn.cursor()
|
||||
vtdb_conn.begin()
|
||||
for q in query_list:
|
||||
|
@ -298,14 +279,14 @@ class TestUpdateStream(unittest.TestCase):
|
|||
def test_stream_parity(self):
|
||||
master_start_position = _get_master_current_position()
|
||||
replica_start_position = _get_repl_current_position()
|
||||
logging.debug("run_test_stream_parity starting @ %s" %
|
||||
logging.debug('run_test_stream_parity starting @ %s',
|
||||
master_start_position)
|
||||
master_txn_count = 0
|
||||
replica_txn_count = 0
|
||||
self._exec_vt_txn(self._populate_vt_a(15))
|
||||
self._exec_vt_txn(self._populate_vt_b(14))
|
||||
self._exec_vt_txn(['delete from vt_a',])
|
||||
self._exec_vt_txn(['delete from vt_b',])
|
||||
self._exec_vt_txn(['delete from vt_a'])
|
||||
self._exec_vt_txn(['delete from vt_b'])
|
||||
master_conn = self._get_master_stream_conn()
|
||||
master_conn.dial()
|
||||
master_events = []
|
||||
|
@ -329,18 +310,22 @@ class TestUpdateStream(unittest.TestCase):
|
|||
replica_txn_count += 1
|
||||
break
|
||||
if len(master_events) != len(replica_events):
|
||||
logging.debug("Test Failed - # of records mismatch, master %s replica %s" % (master_events, replica_events))
|
||||
logging.debug(
|
||||
'Test Failed - # of records mismatch, master %s replica %s',
|
||||
master_events, replica_events)
|
||||
for master_val, replica_val in zip(master_events, replica_events):
|
||||
master_data = master_val
|
||||
replica_data = replica_val
|
||||
self.assertEqual(master_data, replica_data, "Test failed, data mismatch - master '%s' and replica position '%s'" % (master_data, replica_data))
|
||||
logging.debug("Test Writes: PASS")
|
||||
|
||||
self.assertEqual(
|
||||
master_data, replica_data,
|
||||
"Test failed, data mismatch - master '%s' and replica position '%s'" %
|
||||
(master_data, replica_data))
|
||||
logging.debug('Test Writes: PASS')
|
||||
|
||||
def test_ddl(self):
|
||||
global master_start_position
|
||||
start_position = master_start_position
|
||||
logging.debug("test_ddl: starting @ %s" % start_position)
|
||||
logging.debug('test_ddl: starting @ %s', start_position)
|
||||
master_conn = self._get_master_stream_conn()
|
||||
master_conn.dial()
|
||||
data = master_conn.stream_start(start_position)
|
||||
|
@ -350,7 +335,7 @@ class TestUpdateStream(unittest.TestCase):
|
|||
def test_set_insert_id(self):
|
||||
start_position = _get_master_current_position()
|
||||
self._exec_vt_txn(['SET INSERT_ID=1000000'] + self._populate_vt_insert_test)
|
||||
logging.debug("test_set_insert_id: starting @ %s" % start_position)
|
||||
logging.debug('test_set_insert_id: starting @ %s', start_position)
|
||||
master_conn = self._get_master_stream_conn()
|
||||
master_conn.dial()
|
||||
data = master_conn.stream_start(start_position)
|
||||
|
@ -366,14 +351,16 @@ class TestUpdateStream(unittest.TestCase):
|
|||
start_position = _get_master_current_position()
|
||||
master_tablet.mquery('other_database', _create_vt_insert_test)
|
||||
self._exec_vt_txn(self._populate_vt_insert_test)
|
||||
logging.debug("test_database_filter: starting @ %s" % start_position)
|
||||
logging.debug('test_database_filter: starting @ %s', start_position)
|
||||
master_conn = self._get_master_stream_conn()
|
||||
master_conn.dial()
|
||||
data = master_conn.stream_start(start_position)
|
||||
while data:
|
||||
if data['Category'] == 'POS':
|
||||
break
|
||||
self.assertNotEqual(data['Category'], 'DDL', "query using other_database wasn't filted out")
|
||||
self.assertNotEqual(
|
||||
data['Category'], 'DDL',
|
||||
"query using other_database wasn't filted out")
|
||||
data = master_conn.stream_next()
|
||||
|
||||
# This tests the service switch from disable -> enable -> disable
|
||||
|
@ -385,9 +372,10 @@ class TestUpdateStream(unittest.TestCase):
|
|||
|
||||
def test_log_rotation(self):
|
||||
start_position = _get_master_current_position()
|
||||
position = start_position
|
||||
master_tablet.mquery('vt_test_keyspace', 'flush logs')
|
||||
self._exec_vt_txn(self._populate_vt_a(15))
|
||||
self._exec_vt_txn(['delete from vt_a',])
|
||||
self._exec_vt_txn(['delete from vt_a'])
|
||||
master_conn = self._get_master_stream_conn()
|
||||
master_conn.dial()
|
||||
data = master_conn.stream_start(start_position)
|
||||
|
@ -397,9 +385,10 @@ class TestUpdateStream(unittest.TestCase):
|
|||
data = master_conn.stream_next()
|
||||
if data['Category'] == 'POS':
|
||||
master_txn_count += 1
|
||||
if _gtidcmp(start_position, data['GTIDField']) < 0:
|
||||
position = mysql_flavor.position_append(position, data['GTIDField'])
|
||||
if mysql_flavor.position_after(position, start_position):
|
||||
logs_correct = True
|
||||
logging.debug("Log rotation correctly interpreted")
|
||||
logging.debug('Log rotation correctly interpreted')
|
||||
break
|
||||
if not logs_correct:
|
||||
self.fail("Flush logs didn't get properly interpreted")
|
||||
|
|
Загрузка…
Ссылка в новой задаче