Merge pull request #4491 from planetscale/ss-vrepl

VReplication: VStreamer
This commit is contained in:
Sugu Sougoumarane 2019-01-14 08:44:25 -08:00 коммит произвёл GitHub
Родитель f1f9cca729 0e37f1f019
Коммит e9e91635d7
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 4AEE18F83AFDEB23
12 изменённых файлов: 3875 добавлений и 99 удалений

Просмотреть файл

@ -20,6 +20,68 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// VEventType enumerates the event types.
// This list is comprehensive. Many of these types
// will not be encountered in RBR mode.
type VEventType int32

const (
	VEventType_UNKNOWN  VEventType = 0
	VEventType_GTID     VEventType = 1
	VEventType_BEGIN    VEventType = 2
	VEventType_COMMIT   VEventType = 3
	VEventType_ROLLBACK VEventType = 4
	VEventType_DDL      VEventType = 5
	VEventType_INSERT   VEventType = 6
	VEventType_REPLACE  VEventType = 7
	VEventType_UPDATE   VEventType = 8
	VEventType_DELETE   VEventType = 9
	VEventType_SET      VEventType = 10
	VEventType_OTHER    VEventType = 11
	VEventType_ROW      VEventType = 12
	VEventType_FIELD    VEventType = 13
)

// VEventType_name maps each VEventType value to its proto enum name.
var VEventType_name = map[int32]string{
	0:  "UNKNOWN",
	1:  "GTID",
	2:  "BEGIN",
	3:  "COMMIT",
	4:  "ROLLBACK",
	5:  "DDL",
	6:  "INSERT",
	7:  "REPLACE",
	8:  "UPDATE",
	9:  "DELETE",
	10: "SET",
	11: "OTHER",
	12: "ROW",
	13: "FIELD",
}

// VEventType_value maps each proto enum name back to its VEventType value.
var VEventType_value = map[string]int32{
	"UNKNOWN":  0,
	"GTID":     1,
	"BEGIN":    2,
	"COMMIT":   3,
	"ROLLBACK": 4,
	"DDL":      5,
	"INSERT":   6,
	"REPLACE":  7,
	"UPDATE":   8,
	"DELETE":   9,
	"SET":      10,
	"OTHER":    11,
	"ROW":      12,
	"FIELD":    13,
}

// String returns the proto enum name of x.
func (x VEventType) String() string {
	return proto.EnumName(VEventType_name, int32(x))
}

// EnumDescriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this enum within it.
func (VEventType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{0}
}
type BinlogTransaction_Statement_Category int32
const (
@ -65,7 +127,7 @@ func (x BinlogTransaction_Statement_Category) String() string {
return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x))
}
func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{1, 0, 0}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{1, 0, 0}
}
// Charset is the per-statement charset info from a QUERY_EVENT binlog entry.
@ -85,7 +147,7 @@ func (m *Charset) Reset() { *m = Charset{} }
func (m *Charset) String() string { return proto.CompactTextString(m) }
func (*Charset) ProtoMessage() {}
func (*Charset) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{0}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{0}
}
func (m *Charset) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Charset.Unmarshal(m, b)
@ -142,7 +204,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} }
func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) }
func (*BinlogTransaction) ProtoMessage() {}
func (*BinlogTransaction) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{1}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{1}
}
func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b)
@ -192,7 +254,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S
func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) }
func (*BinlogTransaction_Statement) ProtoMessage() {}
func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{1, 0}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{1, 0}
}
func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b)
@ -250,7 +312,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} }
func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) }
func (*StreamKeyRangeRequest) ProtoMessage() {}
func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{2}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{2}
}
func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b)
@ -303,7 +365,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{}
func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) }
func (*StreamKeyRangeResponse) ProtoMessage() {}
func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{3}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{3}
}
func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b)
@ -347,7 +409,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} }
func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) }
func (*StreamTablesRequest) ProtoMessage() {}
func (*StreamTablesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{4}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{4}
}
func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b)
@ -400,7 +462,7 @@ func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} }
func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) }
func (*StreamTablesResponse) ProtoMessage() {}
func (*StreamTablesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{5}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{5}
}
func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b)
@ -427,6 +489,98 @@ func (m *StreamTablesResponse) GetBinlogTransaction() *BinlogTransaction {
return nil
}
// Rule represents one rule.
type Rule struct {
	// match can be a table name or a regular expression
	// delineated by '/' and '/'.
	Match string `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"`
	// filter can be an empty string or keyrange if the match
	// is a regular expression. Otherwise, it must be a select
	// query.
	Filter               string   `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset resets the message to the empty state.
func (m *Rule) Reset() { *m = Rule{} }

// String returns the compact text representation of the message.
func (m *Rule) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *Rule as a proto message.
func (*Rule) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this message within it.
func (*Rule) Descriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{6}
}

// XXX_Unmarshal is part of the internal proto plumbing.
func (m *Rule) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Rule.Unmarshal(m, b)
}

// XXX_Marshal is part of the internal proto plumbing.
func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Rule.Marshal(b, m, deterministic)
}

// XXX_Merge is part of the internal proto plumbing.
func (dst *Rule) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Rule.Merge(dst, src)
}

// XXX_Size is part of the internal proto plumbing.
func (m *Rule) XXX_Size() int {
	return xxx_messageInfo_Rule.Size(m)
}

// XXX_DiscardUnknown is part of the internal proto plumbing.
func (m *Rule) XXX_DiscardUnknown() {
	xxx_messageInfo_Rule.DiscardUnknown(m)
}

var xxx_messageInfo_Rule proto.InternalMessageInfo

// GetMatch returns the Match field, or "" if the receiver is nil.
func (m *Rule) GetMatch() string {
	if m != nil {
		return m.Match
	}
	return ""
}

// GetFilter returns the Filter field, or "" if the receiver is nil.
func (m *Rule) GetFilter() string {
	if m != nil {
		return m.Filter
	}
	return ""
}
// Filter represents a list of ordered rules. First match
// wins.
type Filter struct {
	Rules                []*Rule  `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset resets the message to the empty state.
func (m *Filter) Reset() { *m = Filter{} }

// String returns the compact text representation of the message.
func (m *Filter) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *Filter as a proto message.
func (*Filter) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this message within it.
func (*Filter) Descriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{7}
}

// XXX_Unmarshal is part of the internal proto plumbing.
func (m *Filter) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Filter.Unmarshal(m, b)
}

// XXX_Marshal is part of the internal proto plumbing.
func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Filter.Marshal(b, m, deterministic)
}

// XXX_Merge is part of the internal proto plumbing.
func (dst *Filter) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Filter.Merge(dst, src)
}

// XXX_Size is part of the internal proto plumbing.
func (m *Filter) XXX_Size() int {
	return xxx_messageInfo_Filter.Size(m)
}

// XXX_DiscardUnknown is part of the internal proto plumbing.
func (m *Filter) XXX_DiscardUnknown() {
	xxx_messageInfo_Filter.DiscardUnknown(m)
}

var xxx_messageInfo_Filter proto.InternalMessageInfo

// GetRules returns the ordered rule list, or nil if the receiver is nil.
func (m *Filter) GetRules() []*Rule {
	if m != nil {
		return m.Rules
	}
	return nil
}
// BinlogSource specifies the source and filter parameters for
// Filtered Replication. It currently supports a keyrange
// or a list of tables.
@ -440,7 +594,10 @@ type BinlogSource struct {
// key_range is set if the request is for a keyrange
KeyRange *topodata.KeyRange `protobuf:"bytes,4,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"`
// tables is set if the request is for a list of tables
Tables []string `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"`
Tables []string `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"`
// filter is set if we're using the generalized representation
// for the filter.
Filter *Filter `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -450,7 +607,7 @@ func (m *BinlogSource) Reset() { *m = BinlogSource{} }
func (m *BinlogSource) String() string { return proto.CompactTextString(m) }
func (*BinlogSource) ProtoMessage() {}
func (*BinlogSource) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_599cd600856cd9fe, []int{6}
return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{8}
}
func (m *BinlogSource) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogSource.Unmarshal(m, b)
@ -505,6 +662,310 @@ func (m *BinlogSource) GetTables() []string {
return nil
}
// GetFilter returns the generalized filter, or nil if the receiver is nil.
func (m *BinlogSource) GetFilter() *Filter {
	if m != nil {
		return m.Filter
	}
	return nil
}
// RowChange represents one row change.
type RowChange struct {
	// Before is the row image prior to the change; nil for inserts.
	Before *query.Row `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"`
	// After is the row image following the change; nil for deletes.
	After                *query.Row `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"`
	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
	XXX_unrecognized     []byte     `json:"-"`
	XXX_sizecache        int32      `json:"-"`
}

// Reset resets the message to the empty state.
func (m *RowChange) Reset() { *m = RowChange{} }

// String returns the compact text representation of the message.
func (m *RowChange) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *RowChange as a proto message.
func (*RowChange) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this message within it.
func (*RowChange) Descriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{9}
}

// XXX_Unmarshal is part of the internal proto plumbing.
func (m *RowChange) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RowChange.Unmarshal(m, b)
}

// XXX_Marshal is part of the internal proto plumbing.
func (m *RowChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RowChange.Marshal(b, m, deterministic)
}

// XXX_Merge is part of the internal proto plumbing.
func (dst *RowChange) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RowChange.Merge(dst, src)
}

// XXX_Size is part of the internal proto plumbing.
func (m *RowChange) XXX_Size() int {
	return xxx_messageInfo_RowChange.Size(m)
}

// XXX_DiscardUnknown is part of the internal proto plumbing.
func (m *RowChange) XXX_DiscardUnknown() {
	xxx_messageInfo_RowChange.DiscardUnknown(m)
}

var xxx_messageInfo_RowChange proto.InternalMessageInfo

// GetBefore returns the Before row, or nil if the receiver is nil.
func (m *RowChange) GetBefore() *query.Row {
	if m != nil {
		return m.Before
	}
	return nil
}

// GetAfter returns the After row, or nil if the receiver is nil.
func (m *RowChange) GetAfter() *query.Row {
	if m != nil {
		return m.After
	}
	return nil
}
// RowEvent represents row events for one table.
type RowEvent struct {
	TableName            string       `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
	RowChanges           []*RowChange `protobuf:"bytes,2,rep,name=row_changes,json=rowChanges,proto3" json:"row_changes,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

// Reset resets the message to the empty state.
func (m *RowEvent) Reset() { *m = RowEvent{} }

// String returns the compact text representation of the message.
func (m *RowEvent) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *RowEvent as a proto message.
func (*RowEvent) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this message within it.
func (*RowEvent) Descriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{10}
}

// XXX_Unmarshal is part of the internal proto plumbing.
func (m *RowEvent) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RowEvent.Unmarshal(m, b)
}

// XXX_Marshal is part of the internal proto plumbing.
func (m *RowEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RowEvent.Marshal(b, m, deterministic)
}

// XXX_Merge is part of the internal proto plumbing.
func (dst *RowEvent) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RowEvent.Merge(dst, src)
}

// XXX_Size is part of the internal proto plumbing.
func (m *RowEvent) XXX_Size() int {
	return xxx_messageInfo_RowEvent.Size(m)
}

// XXX_DiscardUnknown is part of the internal proto plumbing.
func (m *RowEvent) XXX_DiscardUnknown() {
	xxx_messageInfo_RowEvent.DiscardUnknown(m)
}

var xxx_messageInfo_RowEvent proto.InternalMessageInfo

// GetTableName returns the table name, or "" if the receiver is nil.
func (m *RowEvent) GetTableName() string {
	if m != nil {
		return m.TableName
	}
	return ""
}

// GetRowChanges returns the row changes, or nil if the receiver is nil.
func (m *RowEvent) GetRowChanges() []*RowChange {
	if m != nil {
		return m.RowChanges
	}
	return nil
}
// FieldEvent carries the field (column) definitions for one table.
type FieldEvent struct {
	TableName            string         `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
	Fields               []*query.Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

// Reset resets the message to the empty state.
func (m *FieldEvent) Reset() { *m = FieldEvent{} }

// String returns the compact text representation of the message.
func (m *FieldEvent) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *FieldEvent as a proto message.
func (*FieldEvent) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this message within it.
func (*FieldEvent) Descriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{11}
}

// XXX_Unmarshal is part of the internal proto plumbing.
func (m *FieldEvent) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_FieldEvent.Unmarshal(m, b)
}

// XXX_Marshal is part of the internal proto plumbing.
func (m *FieldEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_FieldEvent.Marshal(b, m, deterministic)
}

// XXX_Merge is part of the internal proto plumbing.
func (dst *FieldEvent) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FieldEvent.Merge(dst, src)
}

// XXX_Size is part of the internal proto plumbing.
func (m *FieldEvent) XXX_Size() int {
	return xxx_messageInfo_FieldEvent.Size(m)
}

// XXX_DiscardUnknown is part of the internal proto plumbing.
func (m *FieldEvent) XXX_DiscardUnknown() {
	xxx_messageInfo_FieldEvent.DiscardUnknown(m)
}

var xxx_messageInfo_FieldEvent proto.InternalMessageInfo

// GetTableName returns the table name, or "" if the receiver is nil.
func (m *FieldEvent) GetTableName() string {
	if m != nil {
		return m.TableName
	}
	return ""
}

// GetFields returns the field list, or nil if the receiver is nil.
func (m *FieldEvent) GetFields() []*query.Field {
	if m != nil {
		return m.Fields
	}
	return nil
}
// VEvent represents a vstream event.
// Only the fields relevant to Type are populated: Gtid for GTID events,
// Ddl for DDL events, RowEvent for ROW events, FieldEvent for FIELD events.
type VEvent struct {
	Type                 VEventType  `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"`
	Gtid                 string      `protobuf:"bytes,2,opt,name=gtid,proto3" json:"gtid,omitempty"`
	Ddl                  string      `protobuf:"bytes,3,opt,name=ddl,proto3" json:"ddl,omitempty"`
	RowEvent             *RowEvent   `protobuf:"bytes,4,opt,name=row_event,json=rowEvent,proto3" json:"row_event,omitempty"`
	FieldEvent           *FieldEvent `protobuf:"bytes,5,opt,name=field_event,json=fieldEvent,proto3" json:"field_event,omitempty"`
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}

// Reset resets the message to the empty state.
func (m *VEvent) Reset() { *m = VEvent{} }

// String returns the compact text representation of the message.
func (m *VEvent) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *VEvent as a proto message.
func (*VEvent) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this message within it.
func (*VEvent) Descriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{12}
}

// XXX_Unmarshal is part of the internal proto plumbing.
func (m *VEvent) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_VEvent.Unmarshal(m, b)
}

// XXX_Marshal is part of the internal proto plumbing.
func (m *VEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_VEvent.Marshal(b, m, deterministic)
}

// XXX_Merge is part of the internal proto plumbing.
func (dst *VEvent) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VEvent.Merge(dst, src)
}

// XXX_Size is part of the internal proto plumbing.
func (m *VEvent) XXX_Size() int {
	return xxx_messageInfo_VEvent.Size(m)
}

// XXX_DiscardUnknown is part of the internal proto plumbing.
func (m *VEvent) XXX_DiscardUnknown() {
	xxx_messageInfo_VEvent.DiscardUnknown(m)
}

var xxx_messageInfo_VEvent proto.InternalMessageInfo

// GetType returns the event type, or VEventType_UNKNOWN if the receiver is nil.
func (m *VEvent) GetType() VEventType {
	if m != nil {
		return m.Type
	}
	return VEventType_UNKNOWN
}

// GetGtid returns the GTID string, or "" if the receiver is nil.
func (m *VEvent) GetGtid() string {
	if m != nil {
		return m.Gtid
	}
	return ""
}

// GetDdl returns the DDL statement, or "" if the receiver is nil.
func (m *VEvent) GetDdl() string {
	if m != nil {
		return m.Ddl
	}
	return ""
}

// GetRowEvent returns the row event, or nil if the receiver is nil.
func (m *VEvent) GetRowEvent() *RowEvent {
	if m != nil {
		return m.RowEvent
	}
	return nil
}

// GetFieldEvent returns the field event, or nil if the receiver is nil.
func (m *VEvent) GetFieldEvent() *FieldEvent {
	if m != nil {
		return m.FieldEvent
	}
	return nil
}
// VStreamRequest is the payload for VStream.
type VStreamRequest struct {
	// Position is the replication position at which to start streaming.
	Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"`
	// Filter selects which events to stream.
	Filter               *Filter  `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset resets the message to the empty state.
func (m *VStreamRequest) Reset() { *m = VStreamRequest{} }

// String returns the compact text representation of the message.
func (m *VStreamRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *VStreamRequest as a proto message.
func (*VStreamRequest) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this message within it.
func (*VStreamRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{13}
}

// XXX_Unmarshal is part of the internal proto plumbing.
func (m *VStreamRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_VStreamRequest.Unmarshal(m, b)
}

// XXX_Marshal is part of the internal proto plumbing.
func (m *VStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic)
}

// XXX_Merge is part of the internal proto plumbing.
func (dst *VStreamRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VStreamRequest.Merge(dst, src)
}

// XXX_Size is part of the internal proto plumbing.
func (m *VStreamRequest) XXX_Size() int {
	return xxx_messageInfo_VStreamRequest.Size(m)
}

// XXX_DiscardUnknown is part of the internal proto plumbing.
func (m *VStreamRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_VStreamRequest.DiscardUnknown(m)
}

var xxx_messageInfo_VStreamRequest proto.InternalMessageInfo

// GetPosition returns the start position, or "" if the receiver is nil.
func (m *VStreamRequest) GetPosition() string {
	if m != nil {
		return m.Position
	}
	return ""
}

// GetFilter returns the filter, or nil if the receiver is nil.
func (m *VStreamRequest) GetFilter() *Filter {
	if m != nil {
		return m.Filter
	}
	return nil
}
// VStreamResponse is the response from VStream.
type VStreamResponse struct {
	// Event is the batch of events in this response.
	Event                []*VEvent `protobuf:"bytes,1,rep,name=event,proto3" json:"event,omitempty"`
	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
	XXX_unrecognized     []byte    `json:"-"`
	XXX_sizecache        int32     `json:"-"`
}

// Reset resets the message to the empty state.
func (m *VStreamResponse) Reset() { *m = VStreamResponse{} }

// String returns the compact text representation of the message.
func (m *VStreamResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *VStreamResponse as a proto message.
func (*VStreamResponse) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for the file
// and the index path of this message within it.
func (*VStreamResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_binlogdata_e1edbb575eea20d0, []int{14}
}

// XXX_Unmarshal is part of the internal proto plumbing.
func (m *VStreamResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_VStreamResponse.Unmarshal(m, b)
}

// XXX_Marshal is part of the internal proto plumbing.
func (m *VStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is part of the internal proto plumbing.
func (dst *VStreamResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VStreamResponse.Merge(dst, src)
}

// XXX_Size is part of the internal proto plumbing.
func (m *VStreamResponse) XXX_Size() int {
	return xxx_messageInfo_VStreamResponse.Size(m)
}

// XXX_DiscardUnknown is part of the internal proto plumbing.
func (m *VStreamResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_VStreamResponse.DiscardUnknown(m)
}

var xxx_messageInfo_VStreamResponse proto.InternalMessageInfo

// GetEvent returns the event batch, or nil if the receiver is nil.
func (m *VStreamResponse) GetEvent() []*VEvent {
	if m != nil {
		return m.Event
	}
	return nil
}
func init() {
proto.RegisterType((*Charset)(nil), "binlogdata.Charset")
proto.RegisterType((*BinlogTransaction)(nil), "binlogdata.BinlogTransaction")
@ -513,52 +974,85 @@ func init() {
proto.RegisterType((*StreamKeyRangeResponse)(nil), "binlogdata.StreamKeyRangeResponse")
proto.RegisterType((*StreamTablesRequest)(nil), "binlogdata.StreamTablesRequest")
proto.RegisterType((*StreamTablesResponse)(nil), "binlogdata.StreamTablesResponse")
proto.RegisterType((*Rule)(nil), "binlogdata.Rule")
proto.RegisterType((*Filter)(nil), "binlogdata.Filter")
proto.RegisterType((*BinlogSource)(nil), "binlogdata.BinlogSource")
proto.RegisterType((*RowChange)(nil), "binlogdata.RowChange")
proto.RegisterType((*RowEvent)(nil), "binlogdata.RowEvent")
proto.RegisterType((*FieldEvent)(nil), "binlogdata.FieldEvent")
proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent")
proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest")
proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse")
proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value)
proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, BinlogTransaction_Statement_Category_value)
}
func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_599cd600856cd9fe) }
func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_e1edbb575eea20d0) }
var fileDescriptor_binlogdata_599cd600856cd9fe = []byte{
// 640 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcd, 0x6e, 0xda, 0x4a,
0x14, 0xbe, 0xc6, 0x40, 0xec, 0xe3, 0xdc, 0x64, 0x98, 0xfc, 0x08, 0x21, 0x5d, 0x09, 0xb1, 0x09,
0x77, 0x71, 0xcd, 0x95, 0xab, 0x3e, 0x40, 0x8c, 0xad, 0x88, 0xc4, 0x90, 0x68, 0x70, 0x36, 0xd9,
0x58, 0xc6, 0x99, 0x12, 0x04, 0xf1, 0x38, 0x9e, 0x09, 0xaa, 0x9f, 0xa3, 0x4f, 0xd1, 0xb7, 0xe8,
0xaa, 0x6f, 0xd2, 0xf7, 0xa8, 0x3c, 0x36, 0x86, 0xa4, 0x52, 0x9b, 0x2e, 0xba, 0x3b, 0xdf, 0x99,
0xef, 0x9c, 0x39, 0xdf, 0x37, 0x47, 0x03, 0x68, 0xb6, 0x88, 0x57, 0x6c, 0x7e, 0x1f, 0x8a, 0xd0,
0x4c, 0x52, 0x26, 0x18, 0x86, 0x6d, 0xa6, 0x63, 0x3c, 0x3d, 0xd3, 0x34, 0x2b, 0x0e, 0x3a, 0x07,
0x82, 0x25, 0x6c, 0x4b, 0xec, 0x8d, 0x61, 0x6f, 0xf8, 0x10, 0xa6, 0x9c, 0x0a, 0x7c, 0x0a, 0xcd,
0x68, 0xb5, 0xa0, 0xb1, 0x68, 0x2b, 0x5d, 0xa5, 0xdf, 0x20, 0x25, 0xc2, 0x18, 0xea, 0x11, 0x8b,
0xe3, 0x76, 0x4d, 0x66, 0x65, 0x9c, 0x73, 0x39, 0x4d, 0xd7, 0x34, 0x6d, 0xab, 0x05, 0xb7, 0x40,
0xbd, 0x6f, 0x2a, 0xb4, 0x6c, 0x79, 0xb5, 0x9f, 0x86, 0x31, 0x0f, 0x23, 0xb1, 0x60, 0x31, 0xbe,
0x00, 0xe0, 0x22, 0x14, 0xf4, 0x91, 0xc6, 0x82, 0xb7, 0x95, 0xae, 0xda, 0x37, 0xac, 0x33, 0x73,
0x67, 0xe8, 0x1f, 0x4a, 0xcc, 0xe9, 0x86, 0x4f, 0x76, 0x4a, 0xb1, 0x05, 0x06, 0x5d, 0xd3, 0x58,
0x04, 0x82, 0x2d, 0x69, 0xdc, 0xae, 0x77, 0x95, 0xbe, 0x61, 0xb5, 0xcc, 0x42, 0xa0, 0x9b, 0x9f,
0xf8, 0xf9, 0x01, 0x01, 0x5a, 0xc5, 0x9d, 0xaf, 0x35, 0xd0, 0xab, 0x6e, 0xd8, 0x03, 0x2d, 0x0a,
0x05, 0x9d, 0xb3, 0x34, 0x93, 0x32, 0x0f, 0xac, 0xff, 0xdf, 0x38, 0x88, 0x39, 0x2c, 0xeb, 0x48,
0xd5, 0x01, 0xff, 0x07, 0x7b, 0x51, 0xe1, 0x9e, 0x74, 0xc7, 0xb0, 0x8e, 0x76, 0x9b, 0x95, 0xc6,
0x92, 0x0d, 0x07, 0x23, 0x50, 0xf9, 0xd3, 0x4a, 0x5a, 0xb6, 0x4f, 0xf2, 0xb0, 0xf7, 0x59, 0x01,
0x6d, 0xd3, 0x17, 0x1f, 0xc1, 0xa1, 0xed, 0x05, 0xb7, 0x13, 0xe2, 0x0e, 0xaf, 0x2f, 0x26, 0xa3,
0x3b, 0xd7, 0x41, 0x7f, 0xe1, 0x7d, 0xd0, 0x6c, 0x2f, 0xb0, 0xdd, 0x8b, 0xd1, 0x04, 0x29, 0xf8,
0x6f, 0xd0, 0x6d, 0x2f, 0x18, 0x5e, 0x8f, 0xc7, 0x23, 0x1f, 0xd5, 0xf0, 0x21, 0x18, 0xb6, 0x17,
0x90, 0x6b, 0xcf, 0xb3, 0xcf, 0x87, 0x57, 0x48, 0xc5, 0x27, 0xd0, 0xb2, 0xbd, 0xc0, 0x19, 0x7b,
0x81, 0xe3, 0xde, 0x10, 0x77, 0x78, 0xee, 0xbb, 0x0e, 0xaa, 0x63, 0x80, 0x66, 0x9e, 0x76, 0x3c,
0xd4, 0x28, 0xe3, 0xa9, 0xeb, 0xa3, 0x66, 0xd9, 0x6e, 0x34, 0x99, 0xba, 0xc4, 0x47, 0x7b, 0x25,
0xbc, 0xbd, 0x71, 0xce, 0x7d, 0x17, 0x69, 0x25, 0x74, 0x5c, 0xcf, 0xf5, 0x5d, 0xa4, 0x5f, 0xd6,
0xb5, 0x1a, 0x52, 0x2f, 0xeb, 0x9a, 0x8a, 0xea, 0xbd, 0x4f, 0x0a, 0x9c, 0x4c, 0x45, 0x4a, 0xc3,
0xc7, 0x2b, 0x9a, 0x91, 0x30, 0x9e, 0x53, 0x42, 0x9f, 0x9e, 0x29, 0x17, 0xb8, 0x03, 0x5a, 0xc2,
0xf8, 0x22, 0xf7, 0x4e, 0x1a, 0xac, 0x93, 0x0a, 0xe3, 0x01, 0xe8, 0x4b, 0x9a, 0x05, 0x69, 0xce,
0x2f, 0x0d, 0xc3, 0x66, 0xb5, 0x90, 0x55, 0x27, 0x6d, 0x59, 0x46, 0xbb, 0xfe, 0xaa, 0xbf, 0xf6,
0xb7, 0xf7, 0x01, 0x4e, 0x5f, 0x0f, 0xc5, 0x13, 0x16, 0x73, 0x8a, 0x3d, 0xc0, 0x45, 0x61, 0x20,
0xb6, 0x6f, 0x2b, 0xe7, 0x33, 0xac, 0x7f, 0x7e, 0xba, 0x00, 0xa4, 0x35, 0x7b, 0x9d, 0xea, 0x7d,
0x84, 0xa3, 0xe2, 0x1e, 0x3f, 0x9c, 0xad, 0x28, 0x7f, 0x8b, 0xf4, 0x53, 0x68, 0x0a, 0x49, 0x6e,
0xd7, 0xba, 0x6a, 0x5f, 0x27, 0x25, 0xfa, 0x5d, 0x85, 0xf7, 0x70, 0xfc, 0xf2, 0xe6, 0x3f, 0xa2,
0xef, 0x8b, 0x02, 0xfb, 0x05, 0x71, 0xca, 0x9e, 0xd3, 0x88, 0xe6, 0xca, 0x96, 0x34, 0xe3, 0x49,
0x18, 0xd1, 0x8d, 0xb2, 0x0d, 0xc6, 0xc7, 0xd0, 0xe0, 0x0f, 0x61, 0x7a, 0x2f, 0x1f, 0x54, 0x27,
0x05, 0xc0, 0xef, 0xc1, 0x90, 0x0a, 0x45, 0x20, 0xb2, 0x84, 0x4a, 0x6d, 0x07, 0xd6, 0xf1, 0xf6,
0xb1, 0xe5, 0xfc, 0xc2, 0xcf, 0x12, 0x4a, 0x40, 0x54, 0xf1, 0xcb, 0x0d, 0xa9, 0xbf, 0x61, 0x43,
0xb6, 0xbe, 0x36, 0x76, 0x7d, 0xb5, 0xff, 0xbd, 0x3b, 0x5b, 0x2f, 0x04, 0xe5, 0xdc, 0x5c, 0xb0,
0x41, 0x11, 0x0d, 0xe6, 0x6c, 0xb0, 0x16, 0x03, 0xf9, 0xef, 0x0d, 0xb6, 0x96, 0xcc, 0x9a, 0x32,
0xf3, 0xee, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x08, 0xae, 0x13, 0x46, 0x05, 0x00, 0x00,
var fileDescriptor_binlogdata_e1edbb575eea20d0 = []byte{
// 1017 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdb, 0x6e, 0xdb, 0x46,
0x13, 0x0e, 0x45, 0x8a, 0x22, 0x87, 0x8e, 0xbd, 0x5e, 0x1f, 0x20, 0x18, 0x08, 0x60, 0x10, 0x3f,
0xfe, 0xb8, 0x06, 0x2a, 0xa5, 0xea, 0xe9, 0xa2, 0x57, 0x96, 0x44, 0xbb, 0x8a, 0x69, 0xc9, 0x59,
0xd3, 0x49, 0x91, 0x1b, 0x82, 0x96, 0xd6, 0xb2, 0x60, 0x89, 0x94, 0xc9, 0x95, 0x5d, 0x3d, 0x47,
0x9f, 0xa2, 0x7d, 0x90, 0xbc, 0x49, 0xef, 0xfa, 0x10, 0xc5, 0x1e, 0x48, 0x49, 0x0e, 0x90, 0xaa,
0x17, 0xbd, 0x9b, 0xd3, 0x7e, 0x3b, 0xf3, 0xcd, 0x70, 0x87, 0x80, 0x6e, 0x46, 0xf1, 0x38, 0x19,
0x0e, 0x22, 0x16, 0xd5, 0xa6, 0x69, 0xc2, 0x12, 0x0c, 0x0b, 0xcb, 0x81, 0xf3, 0x30, 0xa3, 0xe9,
0x5c, 0x3a, 0x0e, 0x36, 0x59, 0x32, 0x4d, 0x16, 0x81, 0xee, 0x05, 0x54, 0x5a, 0x77, 0x51, 0x9a,
0x51, 0x86, 0xf7, 0xc1, 0xec, 0x8f, 0x47, 0x34, 0x66, 0x55, 0xed, 0x50, 0x3b, 0x2a, 0x13, 0xa5,
0x61, 0x0c, 0x46, 0x3f, 0x89, 0xe3, 0x6a, 0x49, 0x58, 0x85, 0xcc, 0x63, 0x33, 0x9a, 0x3e, 0xd2,
0xb4, 0xaa, 0xcb, 0x58, 0xa9, 0xb9, 0x7f, 0xea, 0xb0, 0xdd, 0x14, 0x57, 0x07, 0x69, 0x14, 0x67,
0x51, 0x9f, 0x8d, 0x92, 0x18, 0x9f, 0x01, 0x64, 0x2c, 0x62, 0x74, 0x42, 0x63, 0x96, 0x55, 0xb5,
0x43, 0xfd, 0xc8, 0x69, 0xbc, 0xae, 0x2d, 0x25, 0xfd, 0xd9, 0x91, 0xda, 0x55, 0x1e, 0x4f, 0x96,
0x8e, 0xe2, 0x06, 0x38, 0xf4, 0x91, 0xc6, 0x2c, 0x64, 0xc9, 0x3d, 0x8d, 0xab, 0xc6, 0xa1, 0x76,
0xe4, 0x34, 0xb6, 0x6b, 0xb2, 0x40, 0x8f, 0x7b, 0x02, 0xee, 0x20, 0x40, 0x0b, 0xf9, 0xe0, 0x53,
0x09, 0xec, 0x02, 0x0d, 0xfb, 0x60, 0xf5, 0x23, 0x46, 0x87, 0x49, 0x3a, 0x17, 0x65, 0x6e, 0x36,
0xde, 0xac, 0x99, 0x48, 0xad, 0xa5, 0xce, 0x91, 0x02, 0x01, 0x7f, 0x0d, 0x95, 0xbe, 0x64, 0x4f,
0xb0, 0xe3, 0x34, 0x76, 0x96, 0xc1, 0x14, 0xb1, 0x24, 0x8f, 0xc1, 0x08, 0xf4, 0xec, 0x61, 0x2c,
0x28, 0xdb, 0x20, 0x5c, 0x74, 0x7f, 0xd7, 0xc0, 0xca, 0x71, 0xf1, 0x0e, 0x6c, 0x35, 0xfd, 0xf0,
0xba, 0x4b, 0xbc, 0x56, 0xef, 0xac, 0xdb, 0xf9, 0xe8, 0xb5, 0xd1, 0x0b, 0xbc, 0x01, 0x56, 0xd3,
0x0f, 0x9b, 0xde, 0x59, 0xa7, 0x8b, 0x34, 0xfc, 0x12, 0xec, 0xa6, 0x1f, 0xb6, 0x7a, 0x17, 0x17,
0x9d, 0x00, 0x95, 0xf0, 0x16, 0x38, 0x4d, 0x3f, 0x24, 0x3d, 0xdf, 0x6f, 0x9e, 0xb4, 0xce, 0x91,
0x8e, 0xf7, 0x60, 0xbb, 0xe9, 0x87, 0xed, 0x0b, 0x3f, 0x6c, 0x7b, 0x97, 0xc4, 0x6b, 0x9d, 0x04,
0x5e, 0x1b, 0x19, 0x18, 0xc0, 0xe4, 0xe6, 0xb6, 0x8f, 0xca, 0x4a, 0xbe, 0xf2, 0x02, 0x64, 0x2a,
0xb8, 0x4e, 0xf7, 0xca, 0x23, 0x01, 0xaa, 0x28, 0xf5, 0xfa, 0xb2, 0x7d, 0x12, 0x78, 0xc8, 0x52,
0x6a, 0xdb, 0xf3, 0xbd, 0xc0, 0x43, 0xf6, 0x5b, 0xc3, 0x2a, 0x21, 0xfd, 0xad, 0x61, 0xe9, 0xc8,
0x70, 0x7f, 0xd3, 0x60, 0xef, 0x8a, 0xa5, 0x34, 0x9a, 0x9c, 0xd3, 0x39, 0x89, 0xe2, 0x21, 0x25,
0xf4, 0x61, 0x46, 0x33, 0x86, 0x0f, 0xc0, 0x9a, 0x26, 0xd9, 0x88, 0x73, 0x27, 0x08, 0xb6, 0x49,
0xa1, 0xe3, 0x3a, 0xd8, 0xf7, 0x74, 0x1e, 0xa6, 0x3c, 0x5e, 0x11, 0x86, 0x6b, 0xc5, 0x40, 0x16,
0x48, 0xd6, 0xbd, 0x92, 0x96, 0xf9, 0xd5, 0xff, 0x99, 0x5f, 0xf7, 0x16, 0xf6, 0x9f, 0x27, 0x95,
0x4d, 0x93, 0x38, 0xa3, 0xd8, 0x07, 0x2c, 0x0f, 0x86, 0x6c, 0xd1, 0x5b, 0x91, 0x9f, 0xd3, 0x78,
0xf5, 0xc5, 0x01, 0x20, 0xdb, 0x37, 0xcf, 0x4d, 0xee, 0xaf, 0xb0, 0x23, 0xef, 0x09, 0xa2, 0x9b,
0x31, 0xcd, 0xd6, 0x29, 0x7d, 0x1f, 0x4c, 0x26, 0x82, 0xab, 0xa5, 0x43, 0xfd, 0xc8, 0x26, 0x4a,
0xfb, 0xb7, 0x15, 0x0e, 0x60, 0x77, 0xf5, 0xe6, 0xff, 0xa4, 0xbe, 0xef, 0xc0, 0x20, 0xb3, 0x31,
0xc5, 0xbb, 0x50, 0x9e, 0x44, 0xac, 0x7f, 0xa7, 0xaa, 0x91, 0x0a, 0x2f, 0xe5, 0x76, 0x34, 0x66,
0x34, 0x15, 0x2d, 0xb4, 0x89, 0xd2, 0xdc, 0x37, 0x60, 0x9e, 0x0a, 0x09, 0xff, 0x1f, 0xca, 0xe9,
0x8c, 0xd7, 0x2a, 0x3f, 0x75, 0xb4, 0x9c, 0x00, 0x07, 0x26, 0xd2, 0xed, 0xfe, 0xa5, 0xc1, 0x86,
0x4c, 0xe8, 0x2a, 0x99, 0xa5, 0x7d, 0xca, 0x19, 0xbc, 0xa7, 0xf3, 0x6c, 0x1a, 0xf5, 0x69, 0xce,
0x60, 0xae, 0xf3, 0x64, 0xb2, 0xbb, 0x28, 0x1d, 0xa8, 0x5b, 0xa5, 0x82, 0xbf, 0x07, 0x47, 0x30,
0xc9, 0x42, 0x36, 0x9f, 0x52, 0xc1, 0xe1, 0x66, 0x63, 0x77, 0x31, 0x54, 0x82, 0x27, 0x16, 0xcc,
0xa7, 0x94, 0x00, 0x2b, 0xe4, 0xd5, 0x49, 0x34, 0xd6, 0x98, 0xc4, 0x45, 0xff, 0xca, 0x2b, 0xfd,
0x3b, 0x2e, 0xc8, 0x30, 0x15, 0xca, 0x52, 0xad, 0x92, 0x8e, 0x82, 0xa0, 0x77, 0x60, 0x93, 0xe4,
0xa9, 0x75, 0x27, 0x00, 0x5d, 0x30, 0x6f, 0xe8, 0x6d, 0x92, 0x52, 0xd5, 0x25, 0x50, 0xaf, 0x18,
0x49, 0x9e, 0x88, 0xf2, 0xe0, 0x43, 0x28, 0x47, 0xb7, 0x39, 0xd1, 0xab, 0x21, 0xd2, 0xe1, 0x46,
0x60, 0x91, 0xe4, 0x49, 0xbc, 0x7c, 0xf8, 0x15, 0xc8, 0x0a, 0xc3, 0x38, 0x9a, 0xe4, 0xf4, 0xd9,
0xc2, 0xd2, 0x8d, 0x26, 0x14, 0xff, 0x00, 0x4e, 0x9a, 0x3c, 0x85, 0x7d, 0x71, 0xbd, 0x1c, 0x43,
0xa7, 0xb1, 0xb7, 0xd2, 0x9a, 0x3c, 0x39, 0x02, 0x69, 0x2e, 0x66, 0xee, 0x3b, 0x80, 0xd3, 0x11,
0x1d, 0x0f, 0xd6, 0xba, 0xe4, 0x7f, 0x9c, 0x0e, 0x3a, 0x1e, 0xe4, 0xf8, 0x1b, 0x2a, 0x65, 0x81,
0x40, 0x94, 0xcf, 0xfd, 0xa4, 0x81, 0xf9, 0x5e, 0xe2, 0x1d, 0x83, 0x21, 0x1a, 0x27, 0xdf, 0xe2,
0xfd, 0xe5, 0x74, 0x64, 0x84, 0x68, 0x9d, 0x88, 0xe1, 0x8b, 0x68, 0xc8, 0x46, 0xf9, 0x00, 0x08,
0x99, 0x3f, 0xa9, 0x83, 0x81, 0x7c, 0x52, 0x6d, 0xc2, 0x45, 0xfc, 0x0d, 0xd8, 0xbc, 0x4e, 0xb1,
0x01, 0x54, 0x6b, 0x77, 0x9f, 0x55, 0x29, 0x80, 0x89, 0x95, 0xe6, 0xcc, 0xfd, 0x08, 0x8e, 0xc8,
0x4c, 0x1d, 0x2a, 0x8b, 0x43, 0xfb, 0xab, 0x9d, 0xcc, 0x19, 0x20, 0x70, 0x5b, 0xc8, 0xee, 0x2f,
0xb0, 0xf9, 0x5e, 0x7e, 0x8f, 0xeb, 0xbc, 0x01, 0xc7, 0x2b, 0x1f, 0xce, 0x97, 0x67, 0xe5, 0x27,
0xd8, 0x2a, 0x90, 0xd5, 0x37, 0x7e, 0x04, 0x65, 0x99, 0x9f, 0xfc, 0xaa, 0xf0, 0xe7, 0x5c, 0x11,
0x19, 0x70, 0xfc, 0x87, 0x06, 0xb0, 0x60, 0x0f, 0x3b, 0x50, 0xb9, 0xee, 0x9e, 0x77, 0x7b, 0x1f,
0xba, 0xe8, 0x05, 0xb6, 0xc0, 0x38, 0x0b, 0x3a, 0x6d, 0xa4, 0x61, 0x1b, 0xca, 0x72, 0xad, 0x94,
0xf8, 0x4e, 0x50, 0x3b, 0x45, 0xe7, 0x0b, 0xa7, 0x58, 0x28, 0x06, 0xae, 0x80, 0x5e, 0xac, 0x0d,
0xb5, 0x27, 0x4c, 0x0e, 0x48, 0xbc, 0x4b, 0xff, 0xa4, 0xe5, 0xa1, 0x0a, 0x77, 0x14, 0x1b, 0x03,
0xc0, 0xcc, 0xd7, 0x05, 0x3f, 0xc9, 0x97, 0x0c, 0xf0, 0x7b, 0x7a, 0xc1, 0xcf, 0x1e, 0x41, 0x0e,
0xb7, 0x91, 0xde, 0x07, 0xb4, 0xc1, 0x6d, 0xa7, 0x1d, 0xcf, 0x6f, 0xa3, 0x97, 0xcd, 0xaf, 0x3e,
0xbe, 0x7e, 0x1c, 0x31, 0x9a, 0x65, 0xb5, 0x51, 0x52, 0x97, 0x52, 0x7d, 0x98, 0xd4, 0x1f, 0x59,
0x5d, 0xfc, 0xa1, 0xd4, 0x17, 0x55, 0xde, 0x98, 0xc2, 0xf2, 0xed, 0xdf, 0x01, 0x00, 0x00, 0xff,
0xff, 0xda, 0xbf, 0xad, 0x34, 0xf0, 0x08, 0x00, 0x00,
}

Просмотреть файл

@ -31,6 +31,7 @@ import (
)
// These constants are used to identify the SQL statement type.
// Changing this list will require reviewing all calls to Preview.
const (
StmtSelect = iota
StmtStream

Просмотреть файл

@ -46,6 +46,7 @@ import (
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/srvtopo"
"vitess.io/vitess/go/vt/tableacl"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vterrors"
@ -60,7 +61,9 @@ import (
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
"vitess.io/vitess/go/vt/vttablet/tabletserver/txserializer"
"vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler"
"vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
@ -170,6 +173,7 @@ type TabletServer struct {
hr *heartbeat.Reader
messager *messager.Engine
watcher *ReplicationWatcher
vstreamer *vstreamer.Engine
updateStreamList *binlog.StreamList
// checkMySQLThrottler is used to throttle the number of
@ -209,6 +213,7 @@ func NewServer(topoServer *topo.Server, alias topodatapb.TabletAlias) *TabletSer
}
var tsOnce sync.Once
var srvTopoServer srvtopo.Server
// NewTabletServerWithNilTopoServer is typically used in tests that
// don't need a topoServer member.
@ -243,6 +248,7 @@ func NewTabletServer(config tabletenv.TabletConfig, topoServer *topo.Server, ali
// So that vtcombo doesn't even call it once, on the first tablet.
// And we can remove the tsOnce variable.
tsOnce.Do(func() {
srvTopoServer = srvtopo.NewResilientServer(topoServer, "TabletSrvTopo")
stats.NewGaugeFunc("TabletState", "Tablet server state", func() int64 {
tsv.mu.Lock()
state := tsv.state
@ -260,6 +266,8 @@ func NewTabletServer(config tabletenv.TabletConfig, topoServer *topo.Server, ali
stats.NewGaugeDurationFunc("QueryPoolTimeout", "Tablet server timeout to get a connection from the query pool", tsv.qe.connTimeout.Get)
stats.NewGaugeDurationFunc("BeginTimeout", "Tablet server begin timeout", tsv.BeginTimeout.Get)
})
// TODO(sougou): move this up once the stats naming problem is fixed.
tsv.vstreamer = vstreamer.NewEngine(srvTopoServer, tsv.se)
return tsv
}
@ -348,6 +356,7 @@ func (tsv *TabletServer) InitDBConfig(target querypb.Target, dbcfgs *dbconfigs.D
tsv.hr.InitDBConfig(tsv.dbconfigs)
tsv.messager.InitDBConfig(tsv.dbconfigs)
tsv.watcher.InitDBConfig(tsv.dbconfigs)
tsv.vstreamer.InitDBConfig(tsv.dbconfigs)
return nil
}
@ -513,6 +522,7 @@ func (tsv *TabletServer) fullStart() (err error) {
}
tsv.hr.Init(tsv.target)
tsv.updateStreamList.Init()
tsv.vstreamer.Open(tsv.target.Keyspace, tsv.alias.Cell)
return tsv.serveNewType()
}
@ -573,6 +583,7 @@ func (tsv *TabletServer) StopService() {
log.Infof("Executing complete shutdown.")
tsv.waitForShutdown()
tsv.vstreamer.Close()
tsv.qe.Close()
tsv.se.Close()
tsv.hw.Close()
@ -1263,6 +1274,40 @@ func (tsv *TabletServer) execDML(ctx context.Context, target *querypb.Target, qu
return int64(qr.RowsAffected), nil
}
// VStream streams VReplication events.
func (tsv *TabletServer) VStream(ctx context.Context, target *querypb.Target, startPos mysql.Position, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error {
	// This code is partially duplicated from startRequest. This is because
	// VStream is allowed even if the tablet is in a non-serving state.
	err := func() error {
		tsv.mu.Lock()
		defer tsv.mu.Unlock()
		if target != nil {
			// A valid target must match this tablet's keyspace and shard,
			// and its tablet type must be either the current serving type
			// or one of the additionally allowed types.
			switch {
			case target.Keyspace != tsv.target.Keyspace:
				return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid keyspace %v", target.Keyspace)
			case target.Shard != tsv.target.Shard:
				return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid shard %v", target.Shard)
			case target.TabletType != tsv.target.TabletType:
				for _, otherType := range tsv.alsoAllow {
					if target.TabletType == otherType {
						return nil
					}
				}
				return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow)
			}
		} else if !tabletenv.IsLocalContext(ctx) {
			// A nil target is only acceptable for local (in-process) callers.
			return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No target")
		}
		return nil
	}()
	if err != nil {
		return err
	}
	// The target check above holds tsv.mu; streaming itself runs unlocked.
	return tsv.vstreamer.Stream(ctx, startPos, filter, send)
}
// SplitQuery splits a query + bind variables into smaller queries that return a
// subset of rows from the original query. This is the new version that supports multiple
// split columns and multiple split algortihms.

Просмотреть файл

@ -0,0 +1,237 @@
/*
Copyright 2018 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vstreamer
import (
"bytes"
"context"
"encoding/json"
"errors"
"net/http"
"sync"
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/srvtopo"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vtgate/vindexes"
"vitess.io/vitess/go/vt/vttablet/tabletserver/schema"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
)
var (
once sync.Once
vschemaErrors *stats.Counter
vschemaUpdates *stats.Counter
)
// Engine is the engine for handling vreplication streaming requests.
type Engine struct {
	// cp is initialized by InitDBConfig and supplies the MySQL
	// connection parameters used by each vstreamer.
	cp *mysql.ConnParams

	// mu protects isOpen, streamers, streamIdx and kschema.
	mu     sync.Mutex
	isOpen bool
	// wg is incremented for every Stream, and decremented on end.
	// Close waits for all current streams to end by waiting on wg.
	wg        sync.WaitGroup
	streamers map[int]*vstreamer
	// streamIdx is a monotonically increasing key into streamers.
	streamIdx int

	// watcherOnce is used for initializing kschema
	// and setting up the vschema watch. It's guaranteed that
	// no stream will start until kschema is initialized by
	// the first call through watcherOnce.
	watcherOnce sync.Once
	kschema     *vindexes.KeyspaceSchema

	// The following members are initialized once at the beginning.
	ts       srvtopo.Server
	se       *schema.Engine
	keyspace string
	cell     string
}
// NewEngine creates a new Engine.
// Initialization sequence is: NewEngine->InitDBConfig->Open.
// Open and Close can be called multiple times and are idempotent.
func NewEngine(ts srvtopo.Server, se *schema.Engine) *Engine {
	engine := &Engine{
		ts:        ts,
		se:        se,
		streamers: make(map[int]*vstreamer),
		kschema:   &vindexes.KeyspaceSchema{},
	}
	// Stats counters and the debug HTTP handler are process-wide:
	// register them only once, for the first engine created.
	once.Do(func() {
		vschemaErrors = stats.NewCounter("VSchemaErrors", "Count of VSchema errors")
		vschemaUpdates = stats.NewCounter("VSchemaUpdates", "Count of VSchema updates. Does not include errors")
		http.Handle("/debug/tablet_vschema", engine)
	})
	return engine
}
// InitDBConfig saves the required info from dbconfigs for future use.
// It must be called after NewEngine and before Open.
func (vse *Engine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) {
	vse.cp = dbcfgs.DbaWithDB()
}
// Open starts the Engine service for the given keyspace and cell.
// It is idempotent: calling it while already open is a no-op.
func (vse *Engine) Open(keyspace, cell string) error {
	vse.mu.Lock()
	defer vse.mu.Unlock()
	if !vse.isOpen {
		vse.keyspace = keyspace
		vse.cell = cell
		vse.isOpen = true
	}
	return nil
}
// Close closes the Engine service: it cancels all live streams and
// waits for them to finish before returning.
func (vse *Engine) Close() {
	vse.mu.Lock()
	if !vse.isOpen {
		vse.mu.Unlock()
		return
	}
	for _, s := range vse.streamers {
		// Cancel is non-blocking.
		s.Cancel()
	}
	vse.isOpen = false
	// The lock must be released before waiting: the end of every
	// stream takes the lock to remove its entry from streamers.
	vse.mu.Unlock()
	vse.wg.Wait()
}
// vschema returns the current keyspace schema. The read is performed
// under mu so a concurrent swap by the watcher is never observed
// mid-update.
func (vse *Engine) vschema() *vindexes.KeyspaceSchema {
	vse.mu.Lock()
	defer vse.mu.Unlock()
	return vse.kschema
}
// Stream starts a new stream and blocks until it ends or errors.
// Registration (under mu, with wg incremented before the lock is
// released) and deregistration (also under mu) are ordered so that
// Close can cancel every live stream and then wait on wg without
// racing a concurrent registration.
func (vse *Engine) Stream(ctx context.Context, startPos mysql.Position, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error {
	// Ensure kschema is initialized and the watcher is started.
	// Starting of the watcher has to be delayed till the first call to Stream
	// because this overhead should be incurred only if someone uses this feature.
	vse.watcherOnce.Do(vse.setWatch)

	// Create stream and add it to the map.
	streamer, idx, err := func() (*vstreamer, int, error) {
		vse.mu.Lock()
		defer vse.mu.Unlock()
		if !vse.isOpen {
			return nil, 0, errors.New("VStreamer is not open")
		}
		streamer := newVStreamer(ctx, vse.cp, vse.se, startPos, filter, vse.kschema, send)
		idx := vse.streamIdx
		vse.streamers[idx] = streamer
		vse.streamIdx++
		// Now that we've added the stream, increment wg.
		// This must be done before releasing the lock.
		vse.wg.Add(1)
		return streamer, idx, nil
	}()
	if err != nil {
		return err
	}

	// Remove stream from map and decrement wg when it ends.
	defer func() {
		vse.mu.Lock()
		defer vse.mu.Unlock()
		delete(vse.streamers, idx)
		vse.wg.Done()
	}()

	// No lock is held while streaming, but wg is incremented.
	return streamer.Stream()
}
// ServeHTTP shows the current VSchema as indented, HTML-escaped JSON.
// Registered at /debug/tablet_vschema; access is gated by the
// DEBUGGING ACL.
func (vse *Engine) ServeHTTP(response http.ResponseWriter, request *http.Request) {
	if err := acl.CheckAccessHTTP(request, acl.DEBUGGING); err != nil {
		acl.SendError(response, err)
		return
	}
	response.Header().Set("Content-Type", "application/json; charset=utf-8")
	vs := vse.vschema()
	if vs == nil || vs.Keyspace == nil {
		// No vschema yet: emit an empty object and stop. The missing
		// return here previously let execution fall through and marshal
		// vs anyway, appending a second JSON document to the response.
		response.Write([]byte("{}"))
		return
	}
	b, err := json.MarshalIndent(vs, "", " ")
	if err != nil {
		response.Write([]byte(err.Error()))
		return
	}
	buf := bytes.NewBuffer(nil)
	json.HTMLEscape(buf, b)
	response.Write(buf.Bytes())
}
// setWatch installs the SrvVSchema watch that keeps kschema current.
// It is invoked exactly once, via watcherOnce, on the first Stream call.
func (vse *Engine) setWatch() {
	// WatchSrvVSchema does not return until the inner func has been called at least once,
	// which guarantees kschema is initialized before the first stream starts.
	vse.ts.WatchSrvVSchema(context.TODO(), vse.cell, func(v *vschemapb.SrvVSchema, err error) {
		var kschema *vindexes.KeyspaceSchema
		switch {
		case err == nil:
			// Build the schema for just this engine's keyspace.
			kschema, err = vindexes.BuildKeyspaceSchema(v.Keyspaces[vse.keyspace], vse.keyspace)
			if err != nil {
				log.Errorf("Error building vschema %s: %v", vse.keyspace, err)
				vschemaErrors.Add(1)
				return
			}
		case topo.IsErrType(err, topo.NoNode):
			// No vschema in the topo yet: fall through to the default below.
		default:
			log.Errorf("Error fetching vschema %s: %v", vse.keyspace, err)
			vschemaErrors.Add(1)
			return
		}
		if kschema == nil {
			// Default: a bare keyspace with no tables or vindexes.
			kschema = &vindexes.KeyspaceSchema{
				Keyspace: &vindexes.Keyspace{
					Name: vse.keyspace,
				},
			}
		}

		// Broadcast the change to all streamers.
		vse.mu.Lock()
		defer vse.mu.Unlock()
		vse.kschema = kschema
		// Marshal error deliberately ignored: this is logging only.
		b, _ := json.MarshalIndent(kschema, "", " ")
		log.Infof("Updated KSchema: %s", b)
		for _, s := range vse.streamers {
			s.SetKSchema(kschema)
		}
		vschemaUpdates.Add(1)
	})
}

Просмотреть файл

@ -0,0 +1,127 @@
/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vstreamer
import (
"encoding/json"
"testing"
"time"
"golang.org/x/net/context"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)
var shardedVSchema = `{
"sharded": true,
"vindexes": {
"hash": {
"type": "hash"
}
},
"tables": {
"t1": {
"column_vindexes": [
{
"column": "id1",
"name": "hash"
}
]
}
}
}`
// TestUpdateVSchema verifies that a SaveVSchema in the topo is picked
// up by the engine's watcher and reflected in engine.vschema().
func TestUpdateVSchema(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	// Restore a trivial vschema when the test finishes.
	defer setVSchema("{}")

	// We have to start at least one stream to start the vschema watcher.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	filter := &binlogdatapb.Filter{
		Rules: []*binlogdatapb.Rule{{
			Match: "/.*/",
		}},
	}
	_ = startStream(ctx, t, filter)
	cancel()
	// Wait for the initial vschema load, then push the sharded one and
	// wait for the resulting update to land.
	startCount := expectUpdateCount(t, 1)
	if err := setVSchema(shardedVSchema); err != nil {
		t.Fatal(err)
	}
	expectUpdateCount(t, startCount+1)
	want := `{
"sharded": true,
"tables": {
"t1": {
"name": "t1",
"column_vindexes": [
{
"columns": [
"id1"
],
"type": "hash",
"name": "hash",
"vindex": {}
}
],
"ordered": [
{
"columns": [
"id1"
],
"type": "hash",
"name": "hash",
"vindex": {}
}
]
}
},
"vindexes": {
"hash": {}
}
}`
	b, err := json.MarshalIndent(engine.vschema(), "", " ")
	if err != nil {
		t.Fatal(err)
	}
	if got := string(b); got != want {
		t.Errorf("vschema:\n%s, want:\n%s", got, want)
	}
}
// expectUpdateCount polls the vschemaUpdates counter until it reaches
// wantCount, failing the test if it does not get there within ~100ms.
// It returns the observed count.
func expectUpdateCount(t *testing.T, wantCount int64) int64 {
	var gotCount int64
	for attempt := 0; attempt < 10; attempt++ {
		gotCount = vschemaUpdates.Get()
		if gotCount >= wantCount {
			return gotCount
		}
		if attempt == 9 {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatalf("update count: %d, want %d", gotCount, wantCount)
	panic("unreachable")
}

Просмотреть файл

@ -0,0 +1,156 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vstreamer
import (
"flag"
"fmt"
"os"
"path"
"testing"
"golang.org/x/net/context"
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/srvtopo"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/topotools"
"vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
"vitess.io/vitess/go/vt/vttablet/tabletserver/schema"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
"vitess.io/vitess/go/vt/vttest"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
vttestpb "vitess.io/vitess/go/vt/proto/vttest"
)
var (
engine *Engine
mysqld *mysqlctl.Mysqld
connParams mysql.ConnParams
connAppDebugParams mysql.ConnParams
topoServ *topo.Server
keyspaceName = "vttest"
cells = []string{"cell1"}
)
// checker is a no-op connection checker for tests.
type checker struct{}

// Assert at compile time that checker satisfies MySQLChecker.
var _ = connpool.MySQLChecker(checker{})

// CheckMySQL is a no-op.
func (checker) CheckMySQL() {}
// TestMain launches a local MySQL cluster (unless -short) and
// initializes the package-level engine before running the tests.
func TestMain(m *testing.M) {
	flag.Parse() // Do not remove this comment, import into google3 depends on it

	if testing.Short() {
		os.Exit(m.Run())
	}

	// Wrap the setup in a func so the defers run before os.Exit.
	exitCode := func() int {
		// Launch MySQL.
		// We need a Keyspace in the topology, so the DbName is set.
		// We need a Shard too, so the database 'vttest' is created.
		cfg := vttest.Config{
			Topology: &vttestpb.VTTestTopology{
				Keyspaces: []*vttestpb.Keyspace{
					{
						Name: keyspaceName,
						Shards: []*vttestpb.Shard{
							{
								Name:           "0",
								DbNameOverride: "vttest",
							},
						},
					},
				},
			},
			// Row-based replication is required for vstreaming.
			ExtraMyCnf: []string{path.Join(os.Getenv("VTTOP"), "config/mycnf/rbr.cnf")},
			OnlyMySQL:  true,
		}
		defer os.RemoveAll(cfg.SchemaDir)
		cluster := vttest.LocalCluster{
			Config: cfg,
		}
		if err := cluster.Setup(); err != nil {
			fmt.Fprintf(os.Stderr, "could not launch mysql: %v\n", err)
			return 1
		}
		defer cluster.TearDown()

		// initTopo initializes topoServ.
		if err := initEngine(&cluster); err != nil {
			fmt.Fprintf(os.Stderr, "%v", err)
			return 1
		}
		defer engine.Close()

		return m.Run()
	}()
	os.Exit(exitCode)
}
// initEngine boots the test topo server, then creates and opens the
// package-level engine against the test cluster's MySQL.
func initEngine(cluster *vttest.LocalCluster) error {
	if err := initTopo(); err != nil {
		return err
	}
	se := schema.NewEngine(checker{}, tabletenv.DefaultQsConfig)
	srvTopoServer := srvtopo.NewResilientServer(topoServ, "TestTopo")
	engine = NewEngine(srvTopoServer, se)
	dbcfgs := dbconfigs.NewTestDBConfigs(cluster.MySQLConnParams(), cluster.MySQLAppDebugConnParams(), cluster.DbName())
	mysqld = mysqlctl.NewMysqld(dbcfgs)
	se.InitDBConfig(dbcfgs)
	engine.InitDBConfig(dbcfgs)
	// Propagate the Open error instead of silently dropping it: a
	// failed Open would otherwise surface only as confusing downstream
	// test failures.
	if err := engine.Open(keyspaceName, cells[0]); err != nil {
		return err
	}
	return nil
}
// initTopo creates the in-memory topo server with the test keyspace
// and stores an initial non-empty vschema.
func initTopo() error {
	ctx := context.Background()

	topoServ = memorytopo.NewServer(cells...)
	if err := topoServ.CreateKeyspace(ctx, keyspaceName, &topodatapb.Keyspace{}); err != nil {
		return err
	}
	// The first vschema should not be empty. Leads to Node not found error.
	// TODO(sougou): need to fix the bug.
	return setVSchema(`{"sharded": true}`)
}
// setVSchema saves the given JSON vschema for the test keyspace and
// rebuilds the SrvVSchema in all cells so watchers see the change.
func setVSchema(vs string) error {
	ctx := context.Background()
	logger := logutil.NewConsoleLogger()
	var kspb vschemapb.Keyspace
	if err := json2.Unmarshal([]byte(vs), &kspb); err != nil {
		return fmt.Errorf("Unmarshal failed: %v", err)
	}
	if err := topoServ.SaveVSchema(ctx, keyspaceName, &kspb); err != nil {
		return fmt.Errorf("SaveVSchema failed: %v", err)
	}
	if err := topotools.RebuildVSchema(ctx, logger, topoServ, cells); err != nil {
		return fmt.Errorf("RebuildVSchema failed: %v", err)
	}
	return nil
}

Просмотреть файл

@ -0,0 +1,367 @@
/*
Copyright 2018 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vstreamer
import (
"fmt"
"regexp"
"strings"
"time"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/vindexes"
"vitess.io/vitess/go/vt/vttablet/tabletserver/schema"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
// Plan represents the streaming plan for a table.
type Plan struct {
	Table    *Table
	ColExprs []ColExpr
	// VindexColumn is the index into ColExprs of the column used for
	// keyrange filtering. Vindex and KeyRange are nil when no keyrange
	// filtering was requested.
	VindexColumn int
	Vindex       vindexes.Vindex
	KeyRange     *topodatapb.KeyRange
}

// ColExpr represents a column expression.
type ColExpr struct {
	// ColNum is the index of the source column in the table row.
	ColNum int
	Alias  sqlparser.ColIdent
	Type   querypb.Type
	// Operation is the transformation to apply; OpNone passes the
	// value through unchanged.
	Operation Operation
}

// Operation represents the operation to be performed on a column.
type Operation int

// The following are the supported operations on a column.
const (
	OpNone = Operation(iota)
	OpMonth
	OpDay
	OpHour
)

// Table contains the metadata for a table. The
// name is derived from mysql's Table_map_log_event.
type Table struct {
	*mysql.TableMap
	Columns []schema.TableColumn
}
// filter evaluates the plan's column expressions against a row and,
// when a Vindex is configured, checks whether the row's keyspace id
// falls in the plan's KeyRange. It returns (keep, transformed row, err).
//
// The filter function needs the ability to perform expression evaluations
// because the consumer of vstream is not just VPlayer: it can also be a
// dumb client, like a mysql client subscribing to changes, that pulls
// events with a complex select query. The same reasoning applies to
// where clauses. For now, only simple functions like hour are supported,
// but this can be expanded in the future.
func (plan *Plan) filter(values []sqltypes.Value) (bool, []sqltypes.Value, error) {
	result := make([]sqltypes.Value, len(plan.ColExprs))
	for i, colExpr := range plan.ColExprs {
		switch colExpr.Operation {
		case OpMonth, OpDay, OpHour:
			// Interpret the column as a unix timestamp and format the
			// requested granularity as a string, e.g. 201801, 20180101
			// or 2018010101.
			epoch, _ := sqltypes.ToInt64(values[colExpr.ColNum])
			ts := time.Unix(epoch, 0).UTC()
			var formatted string
			switch colExpr.Operation {
			case OpMonth:
				formatted = fmt.Sprintf("%d%02d", ts.Year(), ts.Month())
			case OpDay:
				formatted = fmt.Sprintf("%d%02d%02d", ts.Year(), ts.Month(), ts.Day())
			case OpHour:
				formatted = fmt.Sprintf("%d%02d%02d%02d", ts.Year(), ts.Month(), ts.Day(), ts.Hour())
			}
			result[i] = sqltypes.NewVarBinary(formatted)
		default:
			result[i] = values[colExpr.ColNum]
		}
	}
	if plan.Vindex == nil {
		return true, result, nil
	}

	// Filter by Vindex.
	destinations, err := plan.Vindex.Map(nil, []sqltypes.Value{result[plan.VindexColumn]})
	if err != nil {
		return false, nil, err
	}
	if len(destinations) != 1 {
		return false, nil, fmt.Errorf("mapping row to keyspace id returned an invalid array of destinations: %v", key.DestinationsString(destinations))
	}
	ksid, ok := destinations[0].(key.DestinationKeyspaceID)
	if !ok || len(ksid) == 0 {
		return false, nil, fmt.Errorf("could not map %v to a keyspace id, got destination %v", result[plan.VindexColumn], destinations[0])
	}
	if !key.KeyRangeContains(plan.KeyRange, ksid) {
		return false, nil, nil
	}
	return true, result, nil
}
// buildPlan returns a streaming plan for ti using the first filter rule
// that matches it: a "/regex/" rule yields a regex-based plan, an exact
// name match yields a query-based plan. A nil plan with nil error means
// no rule matched and the table should not be streamed.
func buildPlan(ti *Table, kschema *vindexes.KeyspaceSchema, filter *binlogdatapb.Filter) (*Plan, error) {
	for _, rule := range filter.Rules {
		if strings.HasPrefix(rule.Match, "/") {
			// Regex rule: strip the surrounding slashes and match
			// against the table name.
			pattern := strings.Trim(rule.Match, "/")
			matched, err := regexp.MatchString(pattern, ti.Name)
			if err != nil {
				return nil, err
			}
			if matched {
				return buildREPlan(ti, kschema, rule.Filter)
			}
			continue
		}
		if rule.Match == ti.Name {
			return buildTablePlan(ti, kschema, rule.Filter)
		}
	}
	return nil, nil
}
// buildREPlan builds a plan for a table matched by a regex rule.
// All columns are passed through unchanged. If filter is non-empty it
// is a keyrange spec (e.g. "-80"), and rows are additionally filtered
// through the table's Primary Vindex.
func buildREPlan(ti *Table, kschema *vindexes.KeyspaceSchema, filter string) (*Plan, error) {
	plan := &Plan{
		Table: ti,
	}
	plan.ColExprs = make([]ColExpr, len(ti.Columns))
	for i, col := range ti.Columns {
		plan.ColExprs[i].ColNum = i
		plan.ColExprs[i].Alias = col.Name
		plan.ColExprs[i].Type = col.Type
	}
	if filter == "" {
		return plan, nil
	}

	// We need to additionally set VindexColumn, Vindex and KeyRange
	// based on the Primary Vindex of the table.
	// Find table in kschema.
	table := kschema.Tables[ti.Name]
	if table == nil {
		return nil, fmt.Errorf("no vschema definition for table %s", ti.Name)
	}
	// Get Primary Vindex.
	if len(table.ColumnVindexes) == 0 {
		return nil, fmt.Errorf("table %s has no primary vindex", ti.Name)
	}
	// findColumn can be used here because result column list is same
	// as source.
	colnum, err := findColumn(ti, table.ColumnVindexes[0].Columns[0])
	if err != nil {
		return nil, err
	}
	plan.VindexColumn = colnum
	plan.Vindex = table.ColumnVindexes[0].Vindex

	// Parse keyrange.
	keyranges, err := key.ParseShardingSpec(filter)
	if err != nil {
		return nil, err
	}
	if len(keyranges) != 1 {
		return nil, fmt.Errorf("error parsing keyrange: %v", filter)
	}
	plan.KeyRange = keyranges[0]
	return plan, nil
}
// buildTablePlan builds a plan for a rule whose Match named this table
// exactly. The query is a SELECT that defines the column projection and
// an optional "where in_keyrange(col, vindex_type, keyrange)" clause.
func buildTablePlan(ti *Table, kschema *vindexes.KeyspaceSchema, query string) (*Plan, error) {
	statement, err := sqlparser.Parse(query)
	if err != nil {
		return nil, err
	}
	plan := &Plan{
		Table: ti,
	}
	sel, ok := statement.(*sqlparser.Select)
	if !ok {
		return nil, fmt.Errorf("unexpected: %v", sqlparser.String(statement))
	}
	// The FROM clause must be a single, unqualified reference to this table.
	if len(sel.From) > 1 {
		return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel))
	}
	node, ok := sel.From[0].(*sqlparser.AliasedTableExpr)
	if !ok {
		return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel))
	}
	fromTable := sqlparser.GetTableName(node.Expr)
	if fromTable.IsEmpty() {
		return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel))
	}
	if fromTable.String() != ti.Name {
		return nil, fmt.Errorf("unexpected: select expression table %v does not match the table entry name %s", sqlparser.String(fromTable), ti.Name)
	}

	if _, ok := sel.SelectExprs[0].(*sqlparser.StarExpr); !ok {
		// Explicit column list: analyze each select expression.
		for _, expr := range sel.SelectExprs {
			cExpr, err := analyzeExpr(ti, expr)
			if err != nil {
				return nil, err
			}
			plan.ColExprs = append(plan.ColExprs, cExpr)
		}
	} else {
		// 'select *': pass through all table columns in order.
		if len(sel.SelectExprs) != 1 {
			return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel))
		}
		plan.ColExprs = make([]ColExpr, len(ti.Columns))
		for i, col := range ti.Columns {
			plan.ColExprs[i].ColNum = i
			plan.ColExprs[i].Alias = col.Name
			plan.ColExprs[i].Type = col.Type
		}
	}

	if sel.Where == nil {
		return plan, nil
	}

	// Filter by Vindex. The only supported where clause is
	// in_keyrange(column, vindex_type, keyrange).
	funcExpr, ok := sel.Where.Expr.(*sqlparser.FuncExpr)
	if !ok {
		return nil, fmt.Errorf("unexpected where clause: %v", sqlparser.String(sel.Where))
	}
	if !funcExpr.Name.EqualString("in_keyrange") {
		return nil, fmt.Errorf("unexpected where clause: %v", sqlparser.String(sel.Where))
	}
	if len(funcExpr.Exprs) != 3 {
		return nil, fmt.Errorf("unexpected where clause: %v", sqlparser.String(sel.Where))
	}
	// Arg 1: the column to filter on; it must appear in the select list.
	aexpr, ok := funcExpr.Exprs[0].(*sqlparser.AliasedExpr)
	if !ok {
		return nil, fmt.Errorf("unexpected: %v", sqlparser.String(funcExpr))
	}
	colname, ok := aexpr.Expr.(*sqlparser.ColName)
	if !ok {
		return nil, fmt.Errorf("unsupported: %v", sqlparser.String(funcExpr))
	}
	found := false
	for i, cExpr := range plan.ColExprs {
		if cExpr.Alias.Equal(colname.Name) {
			found = true
			plan.VindexColumn = i
			break
		}
	}
	if !found {
		return nil, fmt.Errorf("keyrange expression does not reference a column in the select list: %v", sqlparser.String(colname))
	}
	// Arg 2: the vindex type, instantiated with no params.
	vtype, err := selString(funcExpr.Exprs[1])
	if err != nil {
		return nil, err
	}
	plan.Vindex, err = vindexes.CreateVindex(vtype, vtype, map[string]string{})
	if err != nil {
		return nil, err
	}
	if !plan.Vindex.IsUnique() || !plan.Vindex.IsFunctional() {
		return nil, fmt.Errorf("vindex must be Unique and Functional to be used for VReplication: %s", vtype)
	}
	// Arg 3: the keyrange spec, which must describe exactly one range.
	kr, err := selString(funcExpr.Exprs[2])
	if err != nil {
		return nil, err
	}
	keyranges, err := key.ParseShardingSpec(kr)
	if err != nil {
		return nil, err
	}
	if len(keyranges) != 1 {
		return nil, fmt.Errorf("unexpected where clause: %v", sqlparser.String(sel.Where))
	}
	plan.KeyRange = keyranges[0]
	return plan, nil
}
// analyzeExpr converts one select expression into a ColExpr. Supported
// forms are a bare column reference and the single-argument functions
// month(col), day(col) and hour(col).
func analyzeExpr(ti *Table, expr sqlparser.SelectExpr) (cExpr ColExpr, err error) {
	aexpr, ok := expr.(*sqlparser.AliasedExpr)
	if !ok {
		return ColExpr{}, fmt.Errorf("unexpected: %v", sqlparser.String(expr))
	}
	switch expr := aexpr.Expr.(type) {
	case *sqlparser.ColName:
		// Plain column: pass through with the source column's type.
		colnum, err := findColumn(ti, expr.Name)
		if err != nil {
			return ColExpr{}, err
		}
		return ColExpr{ColNum: colnum, Alias: expr.Name, Type: ti.Columns[colnum].Type}, nil
	case *sqlparser.FuncExpr:
		if expr.Distinct || len(expr.Exprs) != 1 {
			return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(expr))
		}
		switch fname := expr.Name.Lowered(); fname {
		case "month", "day", "hour":
			// The single argument must be a plain column reference.
			aInner, ok := expr.Exprs[0].(*sqlparser.AliasedExpr)
			if !ok {
				return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(expr))
			}
			innerCol, ok := aInner.Expr.(*sqlparser.ColName)
			if !ok {
				return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(expr))
			}
			// Without an explicit alias, the full expression text
			// (e.g. "hour(val)") becomes the alias.
			as := aexpr.As
			if as.IsEmpty() {
				as = sqlparser.NewColIdent(sqlparser.String(expr))
			}
			colnum, err := findColumn(ti, innerCol.Name)
			if err != nil {
				return ColExpr{}, err
			}
			switch fname {
			case "month":
				return ColExpr{ColNum: colnum, Alias: as, Type: sqltypes.VarBinary, Operation: OpMonth}, nil
			case "day":
				return ColExpr{ColNum: colnum, Alias: as, Type: sqltypes.VarBinary, Operation: OpDay}, nil
			case "hour":
				return ColExpr{ColNum: colnum, Alias: as, Type: sqltypes.VarBinary, Operation: OpHour}, nil
			default:
				// fname was already matched by the enclosing case.
				panic("unreachable")
			}
		default:
			return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(expr))
		}
	default:
		return ColExpr{}, fmt.Errorf("unexpected: %v", sqlparser.String(expr))
	}
}
// selString extracts the string value of a literal select expression,
// e.g. the 'hash' or '-80' arguments of in_keyrange().
func selString(expr sqlparser.SelectExpr) (string, error) {
	aliased, ok := expr.(*sqlparser.AliasedExpr)
	if !ok {
		return "", fmt.Errorf("unexpected: %v", sqlparser.String(expr))
	}
	literal, ok := aliased.Expr.(*sqlparser.SQLVal)
	if !ok {
		return "", fmt.Errorf("unexpected: %v", sqlparser.String(expr))
	}
	return string(literal.Val), nil
}
// findColumn returns the index of the named column in ti.Columns, or an
// error if the table has no such column.
func findColumn(ti *Table, name sqlparser.ColIdent) (int, error) {
	for idx := range ti.Columns {
		if name.Equal(ti.Columns[idx].Name) {
			return idx, nil
		}
	}
	return 0, fmt.Errorf("column %s not found in table %s", sqlparser.String(name), ti.Name)
}

Просмотреть файл

@ -0,0 +1,383 @@
/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vstreamer
import (
"fmt"
"reflect"
"testing"
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/vindexes"
"vitess.io/vitess/go/vt/vttablet/tabletserver/schema"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
)
var testKSChema *vindexes.KeyspaceSchema
// init builds the shared test keyspace schema (testKSChema) used by the
// planbuilder tests: table t1 with a unique 'hash' primary vindex on
// id, plus a non-unique 'lookup' vindex for negative tests.
func init() {
	input := `{
"sharded": true,
"vindexes": {
"hash": {
"type": "hash"
},
"lookup": {
"type": "lookup"
}
},
"tables": {
"t1": {
"column_vindexes": [
{
"column": "id",
"name": "hash"
}
]
}
}
}`
	var kspb vschemapb.Keyspace
	if err := json2.Unmarshal([]byte(input), &kspb); err != nil {
		panic(fmt.Errorf("Unmarshal failed: %v", err))
	}
	kschema, err := vindexes.BuildKeyspaceSchema(&kspb, keyspaceName)
	if err != nil {
		panic(err)
	}
	testKSChema = kschema
}
func TestPlanbuilder(t *testing.T) {
t1 := &Table{
TableMap: &mysql.TableMap{
Name: "t1",
},
Columns: []schema.TableColumn{{
Name: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}, {
Name: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}},
}
// t1alt has no id column
t1alt := &Table{
TableMap: &mysql.TableMap{
Name: "t1",
},
Columns: []schema.TableColumn{{
Name: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}},
}
t2 := &Table{
TableMap: &mysql.TableMap{
Name: "t2",
},
Columns: []schema.TableColumn{{
Name: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}, {
Name: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}},
}
testcases := []struct {
inTable *Table
inRule *binlogdatapb.Rule
outPlan *Plan
outErr string
}{{
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "/.*/"},
outPlan: &Plan{
ColExprs: []ColExpr{{
ColNum: 0,
Alias: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}, {
ColNum: 1,
Alias: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}},
},
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "-80"},
outPlan: &Plan{
ColExprs: []ColExpr{{
ColNum: 0,
Alias: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}, {
ColNum: 1,
Alias: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}},
VindexColumn: 0,
},
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from t1"},
outPlan: &Plan{
ColExprs: []ColExpr{{
ColNum: 0,
Alias: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}, {
ColNum: 1,
Alias: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}},
},
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1"},
outPlan: &Plan{
ColExprs: []ColExpr{{
ColNum: 0,
Alias: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}, {
ColNum: 1,
Alias: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}},
},
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select val, id from t1"},
outPlan: &Plan{
ColExprs: []ColExpr{{
ColNum: 1,
Alias: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}, {
ColNum: 0,
Alias: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}},
},
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select val, id from t1 where in_keyrange(id, 'hash', '-80')"},
outPlan: &Plan{
ColExprs: []ColExpr{{
ColNum: 1,
Alias: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}, {
ColNum: 0,
Alias: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}},
VindexColumn: 1,
},
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, month(val) m, day(id), hour(val) from t1 where in_keyrange(m, 'hash', '-80')"},
outPlan: &Plan{
ColExprs: []ColExpr{{
ColNum: 0,
Alias: sqlparser.NewColIdent("id"),
Type: sqltypes.Int64,
}, {
ColNum: 1,
Alias: sqlparser.NewColIdent("val"),
Type: sqltypes.VarBinary,
}, {
ColNum: 1,
Alias: sqlparser.NewColIdent("m"),
Type: sqltypes.VarBinary,
Operation: OpMonth,
}, {
ColNum: 0,
Alias: sqlparser.NewColIdent("day(id)"),
Type: sqltypes.VarBinary,
Operation: OpDay,
}, {
ColNum: 1,
Alias: sqlparser.NewColIdent("hour(val)"),
Type: sqltypes.VarBinary,
Operation: OpHour,
}},
VindexColumn: 2,
},
}, {
inTable: t2,
inRule: &binlogdatapb.Rule{Match: "/t1/"},
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "/*/"},
outErr: "error parsing regexp: missing argument to repetition operator: `*`",
}, {
inTable: t2,
inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "-80"},
outErr: `no vschema definition for table t2`,
}, {
inTable: t1alt,
inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "-80"},
outErr: `column id not found in table t1`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "80"},
outErr: `malformed spec: doesn't define a range: "80"`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "/.*/", Filter: "-80-"},
outErr: `error parsing keyrange: -80-`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "bad query"},
outErr: `syntax error at position 4 near 'bad'`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "delete from t1"},
outErr: `unexpected: delete from t1`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from t1, t2"},
outErr: `unexpected: select * from t1, t2`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from t1 join t2"},
outErr: `unexpected: select * from t1 join t2`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from a.t1"},
outErr: `unexpected: select * from a.t1`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select * from t2"},
outErr: `unexpected: select expression table t2 does not match the table entry name t1`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select *, id from t1"},
outErr: `unexpected: select *, id from t1`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where id=1"},
outErr: `unexpected where clause: where id = 1`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where max(id)"},
outErr: `unexpected where clause: where max(id)`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id)"},
outErr: `unexpected where clause: where in_keyrange(id)`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(*, 'hash', '-80')"},
outErr: `unexpected: in_keyrange(*, 'hash', '-80')`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(1, 'hash', '-80')"},
outErr: `unsupported: in_keyrange(1, 'hash', '-80')`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(none, 'hash', '-80')"},
outErr: `keyrange expression does not reference a column in the select list: none`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 'lookup', '-80')"},
outErr: `vindex must be Unique and Functional to be used for VReplication: lookup`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 'hash', '80')"},
outErr: `malformed spec: doesn't define a range: "80"`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 'hash', '-80-')"},
outErr: `unexpected where clause: where in_keyrange(id, 'hash', '-80-')`,
}, {
// analyzeExpr tests.
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, * from t1"},
outErr: `unexpected: *`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select none from t1"},
outErr: `column none not found in table t1`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(distinct a) from t1"},
outErr: `unsupported: hour(distinct a)`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(a, b) from t1"},
outErr: `unsupported: hour(a, b)`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(*) from t1"},
outErr: `unsupported: hour(*)`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(val+1) from t1"},
outErr: `unsupported: hour(val + 1)`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, hour(none) from t1"},
outErr: `column none not found in table t1`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val, max(val) from t1"},
outErr: `unsupported: max(val)`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id+1, val from t1"},
outErr: `unexpected: id + 1`,
}, {
// selString
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, *, '-80')"},
outErr: `unexpected: *`,
}, {
inTable: t1,
inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 1+1, '-80')"},
outErr: `unexpected: 1 + 1`,
}}
for _, tcase := range testcases {
plan, err := buildPlan(tcase.inTable, testKSChema, &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{tcase.inRule},
})
if plan != nil {
plan.Table = nil
plan.Vindex = nil
plan.KeyRange = nil
if !reflect.DeepEqual(tcase.outPlan, plan) {
t.Errorf("Plan(%v, %v):\n%v, want\n%v", tcase.inTable, tcase.inRule, plan, tcase.outPlan)
}
}
gotErr := ""
if err != nil {
gotErr = err.Error()
}
if gotErr != tcase.outErr {
t.Errorf("Plan(%v, %v) err: %v, want %v", tcase.inTable, tcase.inRule, err, tcase.outErr)
}
}
}

Просмотреть файл

@ -0,0 +1,431 @@
/*
Copyright 2018 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vstreamer
import (
"context"
"flag"
"fmt"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/binlog"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/vindexes"
"vitess.io/vitess/go/vt/vttablet/tabletserver/schema"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
querypb "vitess.io/vitess/go/vt/proto/query"
)
// packetSize caps the approximate payload of one send() batch, measured as
// the byte length of buffered row values (see bufferAndTransmit in
// parseEvents). It is advisory: a single oversized row still goes out alone.
var packetSize = flag.Int("vstream_packet_size", 10000, "Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount.")
// vstreamer streams binlog events as VEvents, starting at startPos and
// filtered by filter/kschema, delivering batches through send. All mutable
// state is owned by the single-threaded Stream loop; external callers
// interact only via SetKSchema and Cancel.
type vstreamer struct {
	// ctx is canceled by Cancel, or when Stream exits.
	ctx context.Context
	cancel func()
	cp *mysql.ConnParams
	se *schema.Engine
	startPos mysql.Position
	filter *binlogdatapb.Filter
	// send delivers one batch of events to the consumer.
	send func([]*binlogdatapb.VEvent) error
	// A kschema is a VSchema for just one keyspace.
	// kevents carries kschema updates from SetKSchema into the Stream loop.
	kevents chan *vindexes.KeyspaceSchema
	kschema *vindexes.KeyspaceSchema
	// plans maps binlog table IDs to streaming plans. A nil entry means
	// the table is known but not streamed.
	plans map[uint64]*Plan
	// format and pos are updated by parseEvent.
	format mysql.BinlogFormat
	pos mysql.Position
}
// newVStreamer builds a vstreamer that will stream binlog events from
// startPos, filtered by filter and kschema, handing batches to send.
// Call Stream to start it and Cancel to stop it.
func newVStreamer(ctx context.Context, cp *mysql.ConnParams, se *schema.Engine, startPos mysql.Position, filter *binlogdatapb.Filter, kschema *vindexes.KeyspaceSchema, send func([]*binlogdatapb.VEvent) error) *vstreamer {
	// Derive a cancelable context so Cancel can stop the stream.
	streamCtx, cancelFunc := context.WithCancel(ctx)
	vs := &vstreamer{
		ctx:      streamCtx,
		cancel:   cancelFunc,
		cp:       cp,
		se:       se,
		startPos: startPos,
		filter:   filter,
		send:     send,
		kevents:  make(chan *vindexes.KeyspaceSchema, 1),
		kschema:  kschema,
		plans:    make(map[uint64]*Plan),
	}
	return vs
}
// SetKSchema updates all existing plans against the new kschema.
// If the stream has already ended, this returns without blocking.
func (vs *vstreamer) SetKSchema(kschema *vindexes.KeyspaceSchema) {
	// Since vs.Stream is a single-threaded loop. We just send an event to
	// that thread, which helps us avoid mutexes to update the plans.
	select {
	case vs.kevents <- kschema:
	case <-vs.ctx.Done():
	}
}
// Cancel stops the stream by canceling its context. It is safe to call
// from any goroutine.
func (vs *vstreamer) Cancel() {
	vs.cancel()
}
// Stream runs the single-threaded event loop until the context is
// canceled or an error occurs. Any returned error is annotated (via
// wrapError) with the position at which streaming stopped.
func (vs *vstreamer) Stream() error {
	defer vs.cancel()

	vs.pos = vs.startPos
	// Make sure the schema engine is open. If vttablet came up in a
	// non_serving role, it may not have been initialized yet.
	if err := vs.se.Open(); err != nil {
		return wrapError(err, vs.pos)
	}
	sconn, err := binlog.NewSlaveConnection(vs.cp)
	if err != nil {
		return wrapError(err, vs.pos)
	}
	defer sconn.Close()
	events, err := sconn.StartBinlogDumpFromPosition(vs.ctx, vs.pos)
	if err != nil {
		return wrapError(err, vs.pos)
	}
	// vs.pos advances as events are parsed, so read it only after
	// parseEvents returns.
	err = vs.parseEvents(vs.ctx, events)
	return wrapError(err, vs.pos)
}
// parseEvents consumes raw binlog events, converts them to VEvents via
// parseEvent, and delivers them in batches through vs.send. It also
// services kschema updates (rebuilding plans) and returns nil when ctx
// is canceled.
func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.BinlogEvent) error {
	// bufferAndTransmit uses bufferedEvents and curSize to buffer events.
	var (
		bufferedEvents []*binlogdatapb.VEvent
		curSize int
	)
	// Buffering only takes row lengths into consideration.
	// Length of other events is considered negligible.
	// If a new row event causes the packet size to be exceeded,
	// all existing rows are sent without the new row.
	// If a single row exceeds the packet size, it will be in its own packet.
	bufferAndTransmit := func(vevent *binlogdatapb.VEvent) error {
		switch vevent.Type {
		case binlogdatapb.VEventType_GTID, binlogdatapb.VEventType_BEGIN, binlogdatapb.VEventType_FIELD:
			// We never have to send GTID, BEGIN or FIELD events on their own.
			bufferedEvents = append(bufferedEvents, vevent)
		case binlogdatapb.VEventType_COMMIT, binlogdatapb.VEventType_ROLLBACK, binlogdatapb.VEventType_DDL:
			// COMMIT, ROLLBACK and DDL are terminal. There may be no more events after
			// these for a long time. So, we have to send whatever we have.
			bufferedEvents = append(bufferedEvents, vevent)
			vevents := bufferedEvents
			bufferedEvents = nil
			curSize = 0
			return vs.send(vevents)
		case binlogdatapb.VEventType_ROW:
			// ROW events happen inside transactions. So, we can chunk them.
			// Buffer everything until packet size is reached, and then send.
			// newSize is the byte length of this event's row values.
			newSize := 0
			for _, rowChange := range vevent.RowEvent.RowChanges {
				if rowChange.Before != nil {
					newSize += len(rowChange.Before.Values)
				}
				if rowChange.After != nil {
					newSize += len(rowChange.After.Values)
				}
			}
			if curSize+newSize > *packetSize {
				// Flush what's buffered; the new row starts the next
				// packet (and goes alone if itself oversized).
				vevents := bufferedEvents
				bufferedEvents = []*binlogdatapb.VEvent{vevent}
				curSize = newSize
				return vs.send(vevents)
			}
			curSize += newSize
			bufferedEvents = append(bufferedEvents, vevent)
		default:
			return fmt.Errorf("unexpected event: %v", vevent)
		}
		return nil
	}
	// Main loop: calls bufferAndTransmit as events arrive.
	for {
		select {
		case ev, ok := <-events:
			if !ok {
				// Channel closed. If the context was canceled this is a
				// clean shutdown; otherwise the server ended the stream.
				select {
				case <-ctx.Done():
					return nil
				default:
				}
				return fmt.Errorf("unexpected server EOF")
			}
			vevents, err := vs.parseEvent(ev)
			if err != nil {
				return err
			}
			for _, vevent := range vevents {
				if err := bufferAndTransmit(vevent); err != nil {
					return fmt.Errorf("error sending event: %v", err)
				}
			}
		case vs.kschema = <-vs.kevents:
			// The vschema changed: rebuild plans against the new kschema.
			if err := vs.rebuildPlans(); err != nil {
				return err
			}
		case <-ctx.Done():
			return nil
		}
	}
}
// parseEvent transforms a single binlog event into zero or more VEvents.
// It updates vs.format, vs.pos and vs.plans as side effects. A nil slice
// with nil error means the event produced nothing to send.
func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, error) {
	// Validate the buffer before reading fields from it.
	if !ev.IsValid() {
		return nil, fmt.Errorf("can't parse binlog event: invalid data: %#v", ev)
	}
	// We need to keep checking for FORMAT_DESCRIPTION_EVENT even after we've
	// seen one, because another one might come along (e.g. on log rotate due to
	// binlog settings change) that changes the format.
	if ev.IsFormatDescription() {
		var err error
		vs.format, err = ev.Format()
		if err != nil {
			return nil, fmt.Errorf("can't parse FORMAT_DESCRIPTION_EVENT: %v, event data: %#v", err, ev)
		}
		return nil, nil
	}
	// We can't parse anything until we get a FORMAT_DESCRIPTION_EVENT that
	// tells us the size of the event header.
	if vs.format.IsZero() {
		// The only thing that should come before the FORMAT_DESCRIPTION_EVENT
		// is a fake ROTATE_EVENT, which the master sends to tell us the name
		// of the current log file.
		if ev.IsRotate() {
			return nil, nil
		}
		return nil, fmt.Errorf("got a real event before FORMAT_DESCRIPTION_EVENT: %#v", ev)
	}
	// Strip the checksum, if any. We don't actually verify the checksum, so discard it.
	ev, _, err := ev.StripChecksum(vs.format)
	if err != nil {
		return nil, fmt.Errorf("can't strip checksum from binlog event: %v, event data: %#v", err, ev)
	}
	var vevents []*binlogdatapb.VEvent
	switch {
	case ev.IsPseudo() || ev.IsGTID():
		gtid, hasBegin, err := ev.GTID(vs.format)
		if err != nil {
			return nil, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev)
		}
		if hasBegin {
			// The GTID event can embed a BEGIN; surface it before the GTID.
			vevents = append(vevents, &binlogdatapb.VEvent{
				Type: binlogdatapb.VEventType_BEGIN,
			})
		}
		vs.pos = mysql.AppendGTID(vs.pos, gtid)
		vevents = append(vevents, &binlogdatapb.VEvent{
			Type: binlogdatapb.VEventType_GTID,
			Gtid: mysql.EncodePosition(vs.pos),
		})
	case ev.IsXID():
		// XID marks a transaction commit in row-based replication.
		vevents = append(vevents, &binlogdatapb.VEvent{
			Type: binlogdatapb.VEventType_COMMIT,
		})
	case ev.IsQuery():
		q, err := ev.Query(vs.format)
		if err != nil {
			return nil, fmt.Errorf("can't get query from binlog event: %v, event data: %#v", err, ev)
		}
		switch cat := sqlparser.Preview(q.SQL); cat {
		case sqlparser.StmtBegin:
			vevents = append(vevents, &binlogdatapb.VEvent{
				Type: binlogdatapb.VEventType_BEGIN,
			})
		case sqlparser.StmtCommit:
			vevents = append(vevents, &binlogdatapb.VEvent{
				Type: binlogdatapb.VEventType_COMMIT,
			})
		case sqlparser.StmtRollback:
			vevents = append(vevents, &binlogdatapb.VEvent{
				Type: binlogdatapb.VEventType_ROLLBACK,
			})
		case sqlparser.StmtDDL:
			vevents = append(vevents, &binlogdatapb.VEvent{
				Type: binlogdatapb.VEventType_DDL,
				Ddl: q.SQL,
			})
			// Proactively reload schema.
			// If the DDL adds a column, comparing with an older snapshot of the
			// schema will make us think that a column was dropped and error out.
			// NOTE(review): the Reload error is intentionally ignored here —
			// a failed reload surfaces later via the column-count check below.
			vs.se.Reload(vs.ctx)
		case sqlparser.StmtOther:
			// These are DBA statements like REPAIR that can be ignored.
		default:
			return nil, fmt.Errorf("unexpected statement type %s in row-based replication: %q", sqlparser.StmtType(cat), q.SQL)
		}
	case ev.IsTableMap():
		// This is very frequent. It precedes every row event.
		id := ev.TableID(vs.format)
		tm, err := ev.TableMap(vs.format)
		if err != nil {
			return nil, err
		}
		// We have to build a plan only for new ids.
		if _, ok := vs.plans[id]; ok {
			return nil, nil
		}
		if tm.Database != "" && tm.Database != vs.cp.DbName {
			// Table from another database: remember to skip it.
			vs.plans[id] = nil
			return nil, nil
		}
		ti := vs.se.GetTable(sqlparser.NewTableIdent(tm.Name))
		if ti == nil {
			return nil, fmt.Errorf("unknown table %v in schema", tm.Name)
		}
		if len(ti.Columns) < len(tm.Types) {
			// The cached schema has fewer columns than the event, which
			// means a column was dropped without a schema reload.
			return nil, fmt.Errorf("cannot determine table columns for %s: event has %d columns, current schema has %d: %#v", tm.Name, len(tm.Types), len(ti.Columns), ev)
		}
		table := &Table{
			TableMap: tm,
			// Columns should be truncated to match those in tm.
			Columns: ti.Columns[:len(tm.Types)],
		}
		plan, err := buildPlan(table, vs.kschema, vs.filter)
		if err != nil {
			return nil, err
		}
		vs.plans[id] = plan
		if plan == nil {
			// The filter excludes this table.
			return nil, nil
		}
		// Announce the column set of the new plan to the consumer.
		fields := make([]*querypb.Field, len(plan.ColExprs))
		for i, ce := range plan.ColExprs {
			fields[i] = &querypb.Field{
				Name: ce.Alias.String(),
				Type: ce.Type,
			}
		}
		vevents = append(vevents, &binlogdatapb.VEvent{
			Type: binlogdatapb.VEventType_FIELD,
			FieldEvent: &binlogdatapb.FieldEvent{
				TableName: plan.Table.Name,
				Fields: fields,
			},
		})
	case ev.IsWriteRows() || ev.IsDeleteRows() || ev.IsUpdateRows():
		// The existence of before and after images can be used to
		// identify statement types. It's also possible that the
		// before and after images end up going to different shards.
		// If so, an update will be treated as delete on one shard
		// and insert on the other.
		id := ev.TableID(vs.format)
		plan := vs.plans[id]
		if plan == nil {
			// Table filtered out (or from another database): skip.
			return nil, nil
		}
		rows, err := ev.Rows(vs.format, plan.Table.TableMap)
		if err != nil {
			return nil, err
		}
		rowChanges := make([]*binlogdatapb.RowChange, 0, len(rows.Rows))
		for _, row := range rows.Rows {
			beforeOK, beforeValues, err := vs.extractRowAndFilter(plan, row.Identify, rows.IdentifyColumns, row.NullIdentifyColumns)
			if err != nil {
				return nil, err
			}
			afterOK, afterValues, err := vs.extractRowAndFilter(plan, row.Data, rows.DataColumns, row.NullColumns)
			if err != nil {
				return nil, err
			}
			if !beforeOK && !afterOK {
				// Neither image passes the filter: drop the row.
				continue
			}
			rowChange := &binlogdatapb.RowChange{}
			if beforeOK {
				rowChange.Before = sqltypes.RowToProto3(beforeValues)
			}
			if afterOK {
				rowChange.After = sqltypes.RowToProto3(afterValues)
			}
			rowChanges = append(rowChanges, rowChange)
		}
		if len(rowChanges) != 0 {
			vevents = append(vevents, &binlogdatapb.VEvent{
				Type: binlogdatapb.VEventType_ROW,
				RowEvent: &binlogdatapb.RowEvent{
					TableName: plan.Table.Name,
					RowChanges: rowChanges,
				},
			})
		}
	}
	return vevents, nil
}
// rebuildPlans regenerates the streaming plan for every table that
// currently has one, using the latest kschema. It is invoked from the
// Stream loop when SetKSchema delivers a new keyspace schema.
func (vs *vstreamer) rebuildPlans() error {
	for id, prevPlan := range vs.plans {
		if prevPlan == nil {
			// If a table has no plan, a kschema change will not
			// cause that to change.
			continue
		}
		newPlan, err := buildPlan(prevPlan.Table, vs.kschema, vs.filter)
		if err != nil {
			return err
		}
		vs.plans[id] = newPlan
	}
	return nil
}
// extractRowAndFilter decodes one row image from raw binlog data and runs
// it through the plan's filter. It returns (false, nil, nil) when the
// image is absent (e.g. no "before" image for an insert) or the row is
// filtered out; otherwise it returns the filtered values.
func (vs *vstreamer) extractRowAndFilter(plan *Plan, data []byte, dataColumns, nullColumns mysql.Bitmap) (bool, []sqltypes.Value, error) {
	if len(data) == 0 {
		// No image of this kind in the event.
		return false, nil, nil
	}
	values := make([]sqltypes.Value, dataColumns.Count())
	valueIndex := 0
	pos := 0
	for colNum := 0; colNum < dataColumns.Count(); colNum++ {
		if !dataColumns.Bit(colNum) {
			// All columns must be present; partial images can't be decoded.
			return false, nil, fmt.Errorf("partial row image encountered: ensure binlog_row_image is set to 'full'")
		}
		if nullColumns.Bit(valueIndex) {
			// NULL columns occupy no bytes in data; leave the zero Value.
			valueIndex++
			continue
		}
		value, l, err := mysql.CellValue(data, pos, plan.Table.Types[colNum], plan.Table.Metadata[colNum], plan.Table.Columns[colNum].Type)
		if err != nil {
			return false, nil, err
		}
		// Advance past the cell's encoded bytes.
		pos += l
		values[colNum] = value
		valueIndex++
	}
	return plan.filter(values)
}
// wrapError annotates a stream error with the position at which
// streaming stopped and logs the outcome. A nil err means the stream
// ended cleanly; that is logged at info level and nil is returned.
func wrapError(err error, stopPos mysql.Position) error {
	if err == nil {
		log.Infof("stream ended @ %v", stopPos)
		return nil
	}
	wrapped := fmt.Errorf("stream error @ %v: %v", stopPos, err)
	log.Error(wrapped)
	return wrapped
}

Просмотреть файл

@ -0,0 +1,978 @@
/*
Copyright 2018 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vstreamer
import (
"fmt"
"strings"
"testing"
"golang.org/x/net/context"
"vitess.io/vitess/go/mysql"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)
// testcase pairs an input (a single SQL statement as a string, or a
// []string of statements) with the expected stream output: one inner
// slice of expected event strings per packet sent.
type testcase struct {
	input interface{}
	output [][]string
}
// TestStatements verifies basic statement handling: row changes inside a
// transaction, DDLs (plain, comment-padded, truncate), multi-table and
// multi-row statements, DBA statements that are ignored, and statements
// that never reach the binlog.
func TestStatements(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatements(t, []string{
		"create table stream1(id int, val varbinary(128), primary key(id))",
		"create table stream2(id int, val varbinary(128), primary key(id))",
	})
	defer execStatements(t, []string{
		"drop table stream1",
		"drop table stream2",
	})
	engine.se.Reload(context.Background())
	testcases := []testcase{{
		input: []string{
			"begin",
			"insert into stream1 values (1, 'aaa')",
			"update stream1 set val='bbb' where id = 1",
			"commit",
		},
		// MySQL issues GTID->BEGIN.
		// MariaDB issues BEGIN->GTID.
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"stream1" fields:<name:"id" type:INT32 > fields:<name:"val" type:VARBINARY > > `,
			`type:ROW row_event:<table_name:"stream1" row_changes:<after:<lengths:1 lengths:3 values:"1aaa" > > > `,
			`type:ROW row_event:<table_name:"stream1" row_changes:<before:<lengths:1 lengths:3 values:"1aaa" > after:<lengths:1 lengths:3 values:"1bbb" > > > `,
			`commit`,
		}},
	}, {
		// Normal DDL.
		input: "alter table stream1 change column val val varbinary(128)",
		output: [][]string{{
			`gtid`,
			`type:DDL ddl:"alter table stream1 change column val val varbinary(128)" `,
		}},
	}, {
		// DDL padded with comments.
		input: " /* prefix */ alter table stream1 change column val val varbinary(256) /* suffix */ ",
		output: [][]string{{
			`gtid`,
			`type:DDL ddl:"/* prefix */ alter table stream1 change column val val varbinary(256) /* suffix */" `,
		}},
	}, {
		// Multiple tables, and multiple rows changed per statement.
		input: []string{
			"begin",
			"insert into stream1 values (2, 'bbb')",
			"insert into stream2 values (1, 'aaa')",
			"update stream1 set val='ccc'",
			"delete from stream1",
			"commit",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"stream1" fields:<name:"id" type:INT32 > fields:<name:"val" type:VARBINARY > > `,
			`type:ROW row_event:<table_name:"stream1" row_changes:<after:<lengths:1 lengths:3 values:"2bbb" > > > `,
			`type:FIELD field_event:<table_name:"stream2" fields:<name:"id" type:INT32 > fields:<name:"val" type:VARBINARY > > `,
			`type:ROW row_event:<table_name:"stream2" row_changes:<after:<lengths:1 lengths:3 values:"1aaa" > > > `,
			`type:ROW row_event:<table_name:"stream1" ` +
				`row_changes:<before:<lengths:1 lengths:3 values:"1bbb" > after:<lengths:1 lengths:3 values:"1ccc" > > ` +
				`row_changes:<before:<lengths:1 lengths:3 values:"2bbb" > after:<lengths:1 lengths:3 values:"2ccc" > > > `,
			`type:ROW row_event:<table_name:"stream1" ` +
				`row_changes:<before:<lengths:1 lengths:3 values:"1ccc" > > ` +
				`row_changes:<before:<lengths:1 lengths:3 values:"2ccc" > > > `,
			`commit`,
		}},
	}, {
		// truncate is a DDL
		input: "truncate table stream2",
		output: [][]string{{
			`gtid`,
			`type:DDL ddl:"truncate table stream2" `,
		}},
	}, {
		// repair, optimize and analyze show up in binlog stream, but ignored by vitess.
		input: "repair table stream2",
	}, {
		input: "optimize table stream2",
	}, {
		input: "analyze table stream2",
	}, {
		// select, set, show, analyze and describe don't get logged.
		input: "select * from stream1",
	}, {
		input: "set @val=1",
	}, {
		input: "show tables",
	}, {
		input: "analyze table stream1",
	}, {
		input: "describe stream1",
	}}
	runCases(t, nil, testcases)
}
// TestRegexp verifies that a regexp Match rule ("/yes.*/") streams only
// the tables whose names match, dropping events for non-matching tables.
func TestRegexp(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatements(t, []string{
		"create table yes_stream(id int, val varbinary(128), primary key(id))",
		"create table no_stream(id int, val varbinary(128), primary key(id))",
	})
	defer execStatements(t, []string{
		"drop table yes_stream",
		"drop table no_stream",
	})
	engine.se.Reload(context.Background())
	filter := &binlogdatapb.Filter{
		Rules: []*binlogdatapb.Rule{{
			Match: "/yes.*/",
		}},
	}
	testcases := []testcase{{
		input: []string{
			"begin",
			"insert into yes_stream values (1, 'aaa')",
			"insert into no_stream values (2, 'bbb')",
			"update yes_stream set val='bbb' where id = 1",
			"update no_stream set val='bbb' where id = 2",
			"commit",
		},
		// Only yes_stream changes should appear in the output.
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"yes_stream" fields:<name:"id" type:INT32 > fields:<name:"val" type:VARBINARY > > `,
			`type:ROW row_event:<table_name:"yes_stream" row_changes:<after:<lengths:1 lengths:3 values:"1aaa" > > > `,
			`type:ROW row_event:<table_name:"yes_stream" row_changes:<before:<lengths:1 lengths:3 values:"1aaa" > after:<lengths:1 lengths:3 values:"1bbb" > > > `,
			`commit`,
		}},
	}}
	runCases(t, filter, testcases)
}
// TestREKeyrange verifies keyrange filtering ("-80") on a regexp rule:
// only rows whose primary vindex maps into the keyrange are streamed,
// and a live vschema change (switching the vindex column) takes effect
// on the running stream.
func TestREKeyrange(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatements(t, []string{
		"create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))",
	})
	defer execStatements(t, []string{
		"drop table t1",
	})
	engine.se.Reload(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	filter := &binlogdatapb.Filter{
		Rules: []*binlogdatapb.Rule{{
			Match: "/.*/",
			Filter: "-80",
		}},
	}
	ch := startStream(ctx, t, filter)
	if err := setVSchema(shardedVSchema); err != nil {
		t.Fatal(err)
	}
	defer setVSchema("{}")
	// 1, 2, 3 and 5 are in shard -80.
	// 4 and 6 are in shard 80-.
	input := []string{
		"begin",
		"insert into t1 values (1, 4, 'aaa')",
		"insert into t1 values (4, 1, 'bbb')",
		// Stay in shard.
		"update t1 set id1 = 2 where id1 = 1",
		// Move from -80 to 80-.
		"update t1 set id1 = 6 where id1 = 2",
		// Move from 80- to -80.
		"update t1 set id1 = 3 where id1 = 4",
		"commit",
	}
	execStatements(t, input)
	// Cross-shard updates show up as a lone "before" (left the shard)
	// or a lone "after" (entered the shard).
	expectLog(ctx, t, input, ch, [][]string{{
		`gtid|begin`,
		`gtid|begin`,
		`type:FIELD field_event:<table_name:"t1" fields:<name:"id1" type:INT32 > fields:<name:"id2" type:INT32 > fields:<name:"val" type:VARBINARY > > `,
		`type:ROW row_event:<table_name:"t1" row_changes:<after:<lengths:1 lengths:1 lengths:3 values:"14aaa" > > > `,
		`type:ROW row_event:<table_name:"t1" row_changes:<before:<lengths:1 lengths:1 lengths:3 values:"14aaa" > after:<lengths:1 lengths:1 lengths:3 values:"24aaa" > > > `,
		`type:ROW row_event:<table_name:"t1" row_changes:<before:<lengths:1 lengths:1 lengths:3 values:"24aaa" > > > `,
		`type:ROW row_event:<table_name:"t1" row_changes:<after:<lengths:1 lengths:1 lengths:3 values:"31bbb" > > > `,
		`commit`,
	}})
	// Switch the vschema to make id2 the primary vindex.
	altVSchema := `{
		"sharded": true,
		"vindexes": {
			"hash": {
				"type": "hash"
			}
		},
		"tables": {
			"t1": {
				"column_vindexes": [
					{
						"column": "id2",
						"name": "hash"
					}
				]
			}
		}
	}`
	if err := setVSchema(altVSchema); err != nil {
		t.Fatal(err)
	}
	// Only the first insert should be sent.
	input = []string{
		"begin",
		"insert into t1 values (4, 1, 'aaa')",
		"insert into t1 values (1, 4, 'aaa')",
		"commit",
	}
	execStatements(t, input)
	expectLog(ctx, t, input, ch, [][]string{{
		`gtid|begin`,
		`gtid|begin`,
		`type:ROW row_event:<table_name:"t1" row_changes:<after:<lengths:1 lengths:1 lengths:3 values:"41aaa" > > > `,
		`commit`,
	}})
}
// TestSelectFilter verifies a select-based rule: column projection
// (only id2 and val are streamed) combined with an in_keyrange where
// clause that drops rows outside the -80 keyrange.
func TestSelectFilter(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatements(t, []string{
		"create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))",
	})
	defer execStatements(t, []string{
		"drop table t1",
	})
	engine.se.Reload(context.Background())
	filter := &binlogdatapb.Filter{
		Rules: []*binlogdatapb.Rule{{
			Match: "t1",
			Filter: "select id2, val from t1 where in_keyrange(id2, 'hash', '-80')",
		}},
	}
	testcases := []testcase{{
		input: []string{
			"begin",
			"insert into t1 values (4, 1, 'aaa')",
			"insert into t1 values (2, 4, 'aaa')",
			"commit",
		},
		// MySQL issues GTID->BEGIN.
		// MariaDB issues BEGIN->GTID.
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"t1" fields:<name:"id2" type:INT32 > fields:<name:"val" type:VARBINARY > > `,
			`type:ROW row_event:<table_name:"t1" row_changes:<after:<lengths:1 lengths:3 values:"1aaa" > > > `,
			`commit`,
		}},
	}}
	runCases(t, filter, testcases)
}
// TestSelectExpressions verifies select expressions in a rule filter:
// month/day/hour functions applied to a column are computed and
// streamed as additional VARBINARY fields.
func TestSelectExpressions(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatements(t, []string{
		"create table expr_test(id int, val bigint, primary key(id))",
	})
	defer execStatements(t, []string{
		"drop table expr_test",
	})
	engine.se.Reload(context.Background())
	filter := &binlogdatapb.Filter{
		Rules: []*binlogdatapb.Rule{{
			Match: "expr_test",
			Filter: "select id, val, month(val), day(val), hour(val) from expr_test",
		}},
	}
	testcases := []testcase{{
		input: []string{
			"begin",
			"insert into expr_test values (1, 1546392881)",
			"commit",
		},
		// MySQL issues GTID->BEGIN.
		// MariaDB issues BEGIN->GTID.
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"expr_test" ` +
				`fields:<name:"id" type:INT32 > ` +
				`fields:<name:"val" type:INT64 > ` +
				`fields:<name:"month(val)" type:VARBINARY > ` +
				`fields:<name:"day(val)" type:VARBINARY > ` +
				`fields:<name:"hour(val)" type:VARBINARY > > `,
			`type:ROW row_event:<table_name:"expr_test" row_changes:<after:<lengths:1 lengths:10 lengths:6 lengths:8 lengths:10 values:"` +
				`1` +
				`1546392881` +
				`201901` +
				`20190102` +
				`2019010201` +
				`" > > > `,
			`commit`,
		}},
	}}
	runCases(t, filter, testcases)
}
// TestDDLAddColumn verifies that adding a column mid-stream works: rows
// written before the DDL are truncated to the old column set, and rows
// written after it pick up the new column, for both a select-based rule
// and a regexp rule.
func TestDDLAddColumn(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatements(t, []string{
		"create table ddl_test1(id int, val1 varbinary(128), primary key(id))",
		"create table ddl_test2(id int, val1 varbinary(128), primary key(id))",
	})
	defer execStatements(t, []string{
		"drop table ddl_test1",
		"drop table ddl_test2",
	})
	// Record position before the next few statements.
	pos, err := mysqld.MasterPosition()
	if err != nil {
		t.Fatal(err)
	}
	execStatements(t, []string{
		"begin",
		"insert into ddl_test1 values(1, 'aaa')",
		"insert into ddl_test2 values(1, 'aaa')",
		"commit",
		// Adding columns is allowed.
		"alter table ddl_test1 add column val2 varbinary(128)",
		"alter table ddl_test2 add column val2 varbinary(128)",
		"begin",
		"insert into ddl_test1 values(2, 'bbb', 'ccc')",
		"insert into ddl_test2 values(2, 'bbb', 'ccc')",
		"commit",
	})
	engine.se.Reload(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Test RE as well as select-based filters.
	filter := &binlogdatapb.Filter{
		Rules: []*binlogdatapb.Rule{{
			Match: "ddl_test2",
			Filter: "select * from ddl_test2",
		}, {
			Match: "/.*/",
		}},
	}
	ch := make(chan []*binlogdatapb.VEvent)
	go func() {
		defer close(ch)
		// t.Fatal must not be called from a goroutine other than the one
		// running the test (it calls runtime.Goexit on the wrong
		// goroutine); use t.Error to record the failure instead.
		if err := vstream(ctx, t, pos, filter, ch); err != nil {
			t.Error(err)
		}
	}()
	expectLog(ctx, t, "ddls", ch, [][]string{{
		// Current schema has 3 columns, but they'll be truncated to match the two columns in the event.
		`gtid|begin`,
		`gtid|begin`,
		`type:FIELD field_event:<table_name:"ddl_test1" fields:<name:"id" type:INT32 > fields:<name:"val1" type:VARBINARY > > `,
		`type:ROW row_event:<table_name:"ddl_test1" row_changes:<after:<lengths:1 lengths:3 values:"1aaa" > > > `,
		`type:FIELD field_event:<table_name:"ddl_test2" fields:<name:"id" type:INT32 > fields:<name:"val1" type:VARBINARY > > `,
		`type:ROW row_event:<table_name:"ddl_test2" row_changes:<after:<lengths:1 lengths:3 values:"1aaa" > > > `,
		`commit`,
	}, {
		`gtid`,
		`type:DDL ddl:"alter table ddl_test1 add column val2 varbinary(128)" `,
	}, {
		`gtid`,
		`type:DDL ddl:"alter table ddl_test2 add column val2 varbinary(128)" `,
	}, {
		// The plan will be updated to now include the third column
		// because the new table map will have three columns.
		`gtid|begin`,
		`gtid|begin`,
		`type:FIELD field_event:<table_name:"ddl_test1" fields:<name:"id" type:INT32 > fields:<name:"val1" type:VARBINARY > fields:<name:"val2" type:VARBINARY > > `,
		`type:ROW row_event:<table_name:"ddl_test1" row_changes:<after:<lengths:1 lengths:3 lengths:3 values:"2bbbccc" > > > `,
		`type:FIELD field_event:<table_name:"ddl_test2" fields:<name:"id" type:INT32 > fields:<name:"val1" type:VARBINARY > fields:<name:"val2" type:VARBINARY > > `,
		`type:ROW row_event:<table_name:"ddl_test2" row_changes:<after:<lengths:1 lengths:3 lengths:3 values:"2bbbccc" > > > `,
		`commit`,
	}})
}
// TestDDLDropColumn verifies that dropping a column mid-stream causes
// the stream to fail: the reloaded schema has fewer columns than the
// pre-drop row events, which parseEvent detects as an error.
func TestDDLDropColumn(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatement(t, "create table ddl_test2(id int, val1 varbinary(128), val2 varbinary(128), primary key(id))")
	defer execStatement(t, "drop table ddl_test2")
	// Record position before the next few statements.
	pos, err := mysqld.MasterPosition()
	if err != nil {
		t.Fatal(err)
	}
	execStatements(t, []string{
		"insert into ddl_test2 values(1, 'aaa', 'ccc')",
		// Dropping a column is not allowed: streaming the pre-drop
		// insert against the post-drop schema must error out below.
		"alter table ddl_test2 drop column val2",
		"insert into ddl_test2 values(2, 'bbb')",
	})
	engine.se.Reload(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	ch := make(chan []*binlogdatapb.VEvent)
	// Drain all events; only the returned error matters for this test.
	go func() {
		for range ch {
		}
	}()
	defer close(ch)
	err = vstream(ctx, t, pos, nil, ch)
	want := "cannot determine table columns"
	if err == nil || !strings.Contains(err.Error(), want) {
		t.Errorf("err: %v, must contain %s", err, want)
	}
}
// TestBuffering shrinks packetSize to 10 bytes and verifies the
// packetization rules: rows are chunked when the budget is exceeded,
// an oversized row travels alone, updates count both images, and DDL
// terminates (flushes) the current packet.
func TestBuffering(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	// Temporarily shrink the global packet size so small rows overflow it.
	savedSize := *packetSize
	*packetSize = 10
	defer func() { *packetSize = savedSize }()
	execStatement(t, "create table packet_test(id int, val varbinary(128), primary key(id))")
	defer execStatement(t, "drop table packet_test")
	engine.se.Reload(context.Background())
	testcases := []testcase{{
		// All rows in one packet.
		input: []string{
			"begin",
			"insert into packet_test values (1, '123')",
			"insert into packet_test values (2, '456')",
			"commit",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"packet_test" fields:<name:"id" type:INT32 > fields:<name:"val" type:VARBINARY > > `,
			`type:ROW row_event:<table_name:"packet_test" row_changes:<after:<lengths:1 lengths:3 values:"1123" > > > `,
			`type:ROW row_event:<table_name:"packet_test" row_changes:<after:<lengths:1 lengths:3 values:"2456" > > > `,
			`commit`,
		}},
	}, {
		// A new row causes packet size to be exceeded.
		// Also test deletes
		input: []string{
			"begin",
			"insert into packet_test values (3, '123456')",
			"insert into packet_test values (4, '789012')",
			"delete from packet_test where id=3",
			"delete from packet_test where id=4",
			"commit",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:ROW row_event:<table_name:"packet_test" row_changes:<after:<lengths:1 lengths:6 values:"3123456" > > > `,
		}, {
			`type:ROW row_event:<table_name:"packet_test" row_changes:<after:<lengths:1 lengths:6 values:"4789012" > > > `,
		}, {
			`type:ROW row_event:<table_name:"packet_test" row_changes:<before:<lengths:1 lengths:6 values:"3123456" > > > `,
		}, {
			`type:ROW row_event:<table_name:"packet_test" row_changes:<before:<lengths:1 lengths:6 values:"4789012" > > > `,
			`commit`,
		}},
	}, {
		// A single row is itself bigger than the packet size.
		input: []string{
			"begin",
			"insert into packet_test values (5, '123456')",
			"insert into packet_test values (6, '12345678901')",
			"insert into packet_test values (7, '23456')",
			"commit",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:ROW row_event:<table_name:"packet_test" row_changes:<after:<lengths:1 lengths:6 values:"5123456" > > > `,
		}, {
			`type:ROW row_event:<table_name:"packet_test" row_changes:<after:<lengths:1 lengths:11 values:"612345678901" > > > `,
		}, {
			`type:ROW row_event:<table_name:"packet_test" row_changes:<after:<lengths:1 lengths:5 values:"723456" > > > `,
			`commit`,
		}},
	}, {
		// An update packet is bigger because it has a before and after image.
		input: []string{
			"begin",
			"insert into packet_test values (8, '123')",
			"update packet_test set val='456' where id=8",
			"commit",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:ROW row_event:<table_name:"packet_test" row_changes:<after:<lengths:1 lengths:3 values:"8123" > > > `,
		}, {
			`type:ROW row_event:<table_name:"packet_test" row_changes:<before:<lengths:1 lengths:3 values:"8123" > after:<lengths:1 lengths:3 values:"8456" > > > `,
			`commit`,
		}},
	}, {
		// DDL is in its own packet
		input: []string{
			"alter table packet_test change val val varchar(128)",
		},
		output: [][]string{{
			`gtid`,
			`type:DDL ddl:"alter table packet_test change val val varchar(128)" `,
		}},
	}}
	runCases(t, nil, testcases)
}
// TestTypes streams inserts covering the common MySQL column-type
// families (integers, fractionals, strings, misc, and NULL values) and
// verifies the FIELD and ROW events generated for each.
func TestTypes(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	// Modeled after vttablet endtoend compatibility tests.
	execStatements(t, []string{
		"create table vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny))",
		"create table vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id))",
		"create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb))",
		"create table vitess_misc(id int, b bit(8), d date, dt datetime, t time, g geometry, primary key(id))",
		"create table vitess_null(id int, val varbinary(128), primary key(id))",
	})
	defer execStatements(t, []string{
		"drop table vitess_ints",
		"drop table vitess_fracts",
		"drop table vitess_strings",
		"drop table vitess_misc",
		"drop table vitess_null",
	})
	// Refresh the schema so the streamer sees the newly created tables.
	engine.se.Reload(context.Background())
	testcases := []testcase{{
		// Integer types: values at the extremes of each signed/unsigned range.
		input: []string{
			"insert into vitess_ints values(-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			// One FIELD event per table before its first ROW event.
			`type:FIELD field_event:<table_name:"vitess_ints" ` +
				`fields:<name:"tiny" type:INT8 > ` +
				`fields:<name:"tinyu" type:UINT8 > ` +
				`fields:<name:"small" type:INT16 > ` +
				`fields:<name:"smallu" type:UINT16 > ` +
				`fields:<name:"medium" type:INT24 > ` +
				`fields:<name:"mediumu" type:UINT24 > ` +
				`fields:<name:"normal" type:INT32 > ` +
				`fields:<name:"normalu" type:UINT32 > ` +
				`fields:<name:"big" type:INT64 > ` +
				`fields:<name:"bigu" type:UINT64 > ` +
				`fields:<name:"y" type:YEAR > > `,
			// Row values are concatenated; the lengths fields delimit them.
			`type:ROW row_event:<table_name:"vitess_ints" row_changes:<after:<lengths:4 lengths:3 lengths:6 lengths:5 lengths:8 lengths:8 lengths:11 lengths:10 lengths:20 lengths:20 lengths:4 values:"` +
				`-128` +
				`255` +
				`-32768` +
				`65535` +
				`-8388608` +
				`16777215` +
				`-2147483648` +
				`4294967295` +
				`-9223372036854775808` +
				`18446744073709551615` +
				`2012` +
				`" > > > `,
			`commit`,
		}},
	}, {
		// Fractional types: decimal, numeric, float, double.
		input: []string{
			"insert into vitess_fracts values(1, 1.99, 2.99, 3.99, 4.99)",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"vitess_fracts" ` +
				`fields:<name:"id" type:INT32 > ` +
				`fields:<name:"deci" type:DECIMAL > ` +
				`fields:<name:"num" type:DECIMAL > ` +
				`fields:<name:"f" type:FLOAT32 > ` +
				`fields:<name:"d" type:FLOAT64 > > `,
			// Floats are rendered in exponent notation.
			`type:ROW row_event:<table_name:"vitess_fracts" row_changes:<after:<lengths:1 lengths:4 lengths:4 lengths:8 lengths:8 values:"` +
				`1` +
				`1.99` +
				`2.99` +
				`3.99E+00` +
				`4.99E+00` +
				`" > > > `,
			`commit`,
		}},
	}, {
		// TODO(sougou): validate that binary and char data generate correct DMLs on the other end.
		input: []string{
			"insert into vitess_strings values('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a', 'a,b')",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"vitess_strings" ` +
				`fields:<name:"vb" type:VARBINARY > ` +
				`fields:<name:"c" type:CHAR > ` +
				`fields:<name:"vc" type:VARCHAR > ` +
				`fields:<name:"b" type:BINARY > ` +
				`fields:<name:"tb" type:BLOB > ` +
				`fields:<name:"bl" type:BLOB > ` +
				`fields:<name:"ttx" type:TEXT > ` +
				`fields:<name:"tx" type:TEXT > ` +
				`fields:<name:"en" type:ENUM > ` +
				`fields:<name:"s" type:SET > > `,
			// enum/set values arrive as their numeric indexes ("1" and "3").
			`type:ROW row_event:<table_name:"vitess_strings" row_changes:<after:<lengths:1 lengths:1 lengths:1 lengths:1 lengths:1 lengths:1 lengths:1 lengths:1 lengths:1 lengths:1 ` +
				`values:"abcdefgh13" > > > `,
			`commit`,
		}},
	}, {
		// TODO(sougou): validate that the geometry value generates the correct DMLs on the other end.
		input: []string{
			"insert into vitess_misc values(1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"vitess_misc" ` +
				`fields:<name:"id" type:INT32 > ` +
				`fields:<name:"b" type:BIT > ` +
				`fields:<name:"d" type:DATE > ` +
				`fields:<name:"dt" type:DATETIME > ` +
				`fields:<name:"t" type:TIME > ` +
				`fields:<name:"g" type:GEOMETRY > > `,
			// bit and geometry values arrive as raw binary (octal-escaped here).
			`type:ROW row_event:<table_name:"vitess_misc" row_changes:<after:<lengths:1 lengths:1 lengths:10 lengths:19 lengths:8 lengths:25 values:"` +
				`1` +
				`\001` +
				`2012-01-01` +
				`2012-01-01 15:45:45` +
				`15:45:45` +
				`\000\000\000\000\001\001\000\000\000\000\000\000\000\000\000\360?\000\000\000\000\000\000\000@` +
				`" > > > `,
			`commit`,
		}},
	}, {
		// A NULL column is encoded with lengths:-1 and no bytes in values.
		input: []string{
			"insert into vitess_null values(1, null)",
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"vitess_null" fields:<name:"id" type:INT32 > fields:<name:"val" type:VARBINARY > > `,
			`type:ROW row_event:<table_name:"vitess_null" row_changes:<after:<lengths:1 lengths:-1 values:"1" > > > `,
			`commit`,
		}},
	}}
	runCases(t, nil, testcases)
}
// TestJSON streams an insert into a JSON column and verifies the
// resulting FIELD and ROW events. JSON columns exist only in MySQL 5.7+,
// so the test is skipped on older servers.
func TestJSON(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	// JSON is supported only after mysql57.
	if err := mysqld.ExecuteSuperQuery(context.Background(), "create table vitess_json(id int default 1, val json, primary key(id))"); err != nil {
		// If it's a syntax error, MySQL is an older version.
		// Mark the test as skipped instead of silently returning,
		// so the skip shows up in the test report.
		if strings.Contains(err.Error(), "syntax") {
			t.Skip("JSON column type is not supported by this MySQL version")
		}
		t.Fatal(err)
	}
	defer execStatement(t, "drop table vitess_json")
	// Refresh the schema so the streamer sees the new table.
	engine.se.Reload(context.Background())
	testcases := []testcase{{
		input: []string{
			`insert into vitess_json values(1, '{"foo": "bar"}')`,
		},
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`type:FIELD field_event:<table_name:"vitess_json" fields:<name:"id" type:INT32 > fields:<name:"val" type:JSON > > `,
			// The JSON document is rendered as a JSON_OBJECT expression.
			`type:ROW row_event:<table_name:"vitess_json" row_changes:<after:<lengths:1 lengths:24 values:"1JSON_OBJECT('foo','bar')" > > > `,
			`commit`,
		}},
	}}
	runCases(t, nil, testcases)
}
// TestExternalTable verifies that changes to tables in a database other
// than the streamed one produce no row events: only the surrounding
// gtid/begin/commit events are sent.
func TestExternalTable(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	setup := []string{
		"create database external",
		"create table external.ext(id int, val varbinary(128), primary key(id))",
	}
	execStatements(t, setup)
	defer execStatements(t, []string{
		"drop database external",
	})
	// Refresh the schema after the DDL.
	engine.se.Reload(context.Background())

	cases := []testcase{{
		input: []string{
			"begin",
			"insert into external.ext values (1, 'aaa')",
			"commit",
		},
		// External table events don't get sent.
		output: [][]string{{
			`gtid|begin`,
			`gtid|begin`,
			`commit`,
		}},
	}}
	runCases(t, nil, cases)
}
// TestMinimalMode verifies that the streamer errors out when it sees
// binlog events written with binlog_row_image=minimal: partial row
// images cannot be turned into vstream row events.
func TestMinimalMode(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatements(t, []string{
		"create table t1(id int, val1 varbinary(128), val2 varbinary(128), primary key(id))",
		"insert into t1 values(1, 'aaa', 'bbb')",
	})
	defer execStatements(t, []string{
		"drop table t1",
	})
	engine.se.Reload(context.Background())

	// Record position before the next few statements.
	startPos, err := mysqld.MasterPosition()
	if err != nil {
		t.Fatal(err)
	}
	// Produce an update with a minimal row image, then restore full mode.
	execStatements(t, []string{
		"set @@session.binlog_row_image='minimal'",
		"update t1 set val1='bbb' where id=1",
		"set @@session.binlog_row_image='full'",
	})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	events := make(chan []*binlogdatapb.VEvent)
	defer close(events)
	// Nothing should make it through; any event received is a failure.
	go func() {
		for got := range events {
			t.Errorf("received: %v", got)
		}
	}()
	err = vstream(ctx, t, startPos, nil, events)
	want := "partial row image encountered"
	if err == nil || !strings.Contains(err.Error(), want) {
		t.Errorf("err: %v, must contain '%s'", err, want)
	}
}
// TestStatementMode verifies that the streamer errors out when it sees
// statement-based binlog events: vstream requires row-based replication.
func TestStatementMode(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	execStatements(t, []string{
		"create table t1(id int, val1 varbinary(128), val2 varbinary(128), primary key(id))",
		"insert into t1 values(1, 'aaa', 'bbb')",
	})
	defer execStatements(t, []string{
		"drop table t1",
	})
	engine.se.Reload(context.Background())

	// Record position before the next few statements.
	startPos, err := mysqld.MasterPosition()
	if err != nil {
		t.Fatal(err)
	}
	// Produce a statement-format update, then restore row format.
	execStatements(t, []string{
		"set @@session.binlog_format='statement'",
		"update t1 set val1='bbb' where id=1",
		"set @@session.binlog_format='row'",
	})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	events := make(chan []*binlogdatapb.VEvent)
	defer close(events)
	// Nothing should make it through; any event received is a failure.
	go func() {
		for got := range events {
			t.Errorf("received: %v", got)
		}
	}()
	err = vstream(ctx, t, startPos, nil, events)
	want := "unexpected statement type"
	if err == nil || !strings.Contains(err.Error(), want) {
		t.Errorf("err: %v, must contain '%s'", err, want)
	}
}
// runCases starts a stream, executes each testcase's input statements
// against MySQL, and verifies the events received. After all cases run,
// the stream is canceled and the channel must close without extra events.
func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase) {
	t.Helper()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	ch := startStream(ctx, t, filter)

	for _, tcase := range testcases {
		// input may be a single statement or a batch.
		switch stmts := tcase.input.(type) {
		case string:
			execStatement(t, stmts)
		case []string:
			execStatements(t, stmts)
		default:
			t.Fatalf("unexpected input: %#v", stmts)
		}
		expectLog(ctx, t, tcase.input, ch, tcase.output)
	}

	// Cancel the stream and verify the channel closes cleanly.
	cancel()
	if evs, ok := <-ch; ok {
		t.Fatalf("unexpected evs: %v", evs)
	}
}
// expectLog reads one event batch from ch for every expected set in
// output and compares them. The markers "gtid|begin", "gtid" and
// "commit" match on event type only; any other expectation must match
// the event's string form exactly.
func expectLog(ctx context.Context, t *testing.T, input interface{}, ch <-chan []*binlogdatapb.VEvent, output [][]string) {
	t.Helper()
	for _, wantset := range output {
		var evs []*binlogdatapb.VEvent
		select {
		case received, ok := <-ch:
			if !ok {
				t.Fatal("stream ended early")
			}
			evs = received
		case <-ctx.Done():
			t.Fatal("stream ended early")
		}
		if len(evs) != len(wantset) {
			t.Fatalf("%v: evs\n%v, want\n%v", input, evs, wantset)
		}
		for i, want := range wantset {
			ev := evs[i]
			switch want {
			case "gtid|begin":
				// Either event type is acceptable at this position.
				if ev.Type != binlogdatapb.VEventType_GTID && ev.Type != binlogdatapb.VEventType_BEGIN {
					t.Fatalf("%v (%d): event: %v, want gtid or begin", input, i, ev)
				}
			case "gtid":
				if ev.Type != binlogdatapb.VEventType_GTID {
					t.Fatalf("%v (%d): event: %v, want gtid", input, i, ev)
				}
			case "commit":
				if ev.Type != binlogdatapb.VEventType_COMMIT {
					t.Fatalf("%v (%d): event: %v, want commit", input, i, ev)
				}
			default:
				if got := fmt.Sprintf("%v", ev); got != want {
					t.Fatalf("%v (%d): event:\n%q, want\n%q", input, i, got, want)
				}
			}
		}
	}
}
// startStream records the current master position and starts streaming
// events from it into the returned channel. The channel is closed when
// the stream terminates.
func startStream(ctx context.Context, t *testing.T, filter *binlogdatapb.Filter) <-chan []*binlogdatapb.VEvent {
	pos, err := mysqld.MasterPosition()
	if err != nil {
		t.Fatal(err)
	}
	ch := make(chan []*binlogdatapb.VEvent)
	go func() {
		defer close(ch)
		// t.Fatal (FailNow) must be called from the goroutine running
		// the test, not from here; report the failure with t.Errorf.
		if err := vstream(ctx, t, pos, filter, ch); err != nil {
			t.Errorf("vstream error: %v", err)
		}
	}()
	return ch
}
// vstream starts an engine stream from pos, forwarding each event batch
// to ch. A nil filter defaults to matching every table.
func vstream(ctx context.Context, t *testing.T, pos mysql.Position, filter *binlogdatapb.Filter, ch chan []*binlogdatapb.VEvent) error {
	if filter == nil {
		// Default: stream all tables.
		filter = &binlogdatapb.Filter{
			Rules: []*binlogdatapb.Rule{{
				Match: "/.*/",
			}},
		}
	}
	send := func(evs []*binlogdatapb.VEvent) error {
		t.Logf("Received events: %v", evs)
		select {
		case <-ctx.Done():
			return fmt.Errorf("stream ended early")
		case ch <- evs:
		}
		return nil
	}
	return engine.Stream(ctx, pos, filter, send)
}
// execStatement runs one query against MySQL with super privileges,
// failing the test on error.
func execStatement(t *testing.T, query string) {
	t.Helper()
	err := mysqld.ExecuteSuperQuery(context.Background(), query)
	if err != nil {
		t.Fatal(err)
	}
}
// execStatements runs a batch of queries against MySQL with super
// privileges, failing the test on the first error.
func execStatements(t *testing.T, queries []string) {
	t.Helper()
	err := mysqld.ExecuteSuperQueryList(context.Background(), queries)
	if err != nil {
		t.Fatal(err)
	}
}

Просмотреть файл

@ -114,6 +114,23 @@ message StreamTablesResponse {
BinlogTransaction binlog_transaction = 1;
}
// Rule represents one rule in a Filter.
message Rule {
  // match can be a table name or a regular expression
  // delineated by '/' and '/'.
  string match = 1;
  // filter can be an empty string or keyrange if the match
  // is a regular expression. Otherwise, it must be a select
  // query.
  string filter = 2;
}
// Filter represents a list of ordered rules. Rules are
// evaluated in order and the first match wins.
message Filter {
  repeated Rule rules = 1;
}
// BinlogSource specifies the source and filter parameters for
// Filtered Replication. It currently supports a keyrange
// or a list of tables.
@ -132,4 +149,65 @@ message BinlogSource {
// tables is set if the request is for a list of tables
repeated string tables = 5;
// filter is set if we're using the generalized representation
// for the filter.
Filter filter = 6;
}
// VEventType enumerates the event types.
// This list is comprehensive. Many of these types
// will not be encountered in RBR mode.
enum VEventType {
  UNKNOWN = 0;
  GTID = 1;
  BEGIN = 2;
  COMMIT = 3;
  ROLLBACK = 4;
  DDL = 5;
  INSERT = 6;
  REPLACE = 7;
  UPDATE = 8;
  DELETE = 9;
  SET = 10;
  OTHER = 11;
  // ROW and FIELD carry row images and field metadata for RBR events.
  ROW = 12;
  FIELD = 13;
}
// RowChange represents one row change.
// Only after set: insert. Only before set: delete.
// Both set: update.
message RowChange {
  query.Row before = 1;
  query.Row after = 2;
}
// RowEvent represents row events for one table.
message RowEvent {
  string table_name = 1;
  repeated RowChange row_changes = 2;
}
// FieldEvent represents the field (column) information for one table.
message FieldEvent {
  string table_name = 1;
  repeated query.Field fields = 2;
}
// VEvent represents a vstream event.
message VEvent {
  VEventType type = 1;
  // gtid is set if the event type is GTID.
  string gtid = 2;
  // ddl is set if the event type is DDL.
  string ddl = 3;
  // row_event is set if the event type is ROW.
  RowEvent row_event = 4;
  // field_event is set if the event type is FIELD.
  FieldEvent field_event = 5;
}
// VStreamRequest is the payload for VStream.
message VStreamRequest {
  // position is the replication position to start streaming from.
  string position = 1;
  string filter = 2 [deprecated = false]; // placeholder comment removed below
}
// VStreamResponse is the response from VStream.
message VStreamResponse {
  repeated VEvent event = 1;
}

Просмотреть файл

@ -3,11 +3,11 @@
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
@ -21,10 +21,96 @@ DESCRIPTOR = _descriptor.FileDescriptor(
name='binlogdata.proto',
package='binlogdata',
syntax='proto3',
serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"\x91\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\tB)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3')
serialized_options=_b('Z\'vitess.io/vitess/go/vt/proto/binlogdata'),
serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xb5\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 
\x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\"\x9f\x01\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x0c\n\x04gtid\x18\x02 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x03 \x01(\t\x12\'\n\trow_event\x18\x04 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x05 \x01(\x0b\x32\x16.binlogdata.FieldEvent\"F\n\x0eVStreamRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\"\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x12.binlogdata.Filter\"4\n\x0fVStreamResponse\x12!\n\x05\x65vent\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent*\xaa\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\rB)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3')
,
dependencies=[query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,])
# Descriptor for the top-level VEventType enum.
# Generated by the protocol buffer compiler from binlogdata.proto;
# do not edit by hand.
_VEVENTTYPE = _descriptor.EnumDescriptor(
  name='VEventType',
  full_name='binlogdata.VEventType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='GTID', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='BEGIN', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='COMMIT', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ROLLBACK', index=4, number=4,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DDL', index=5, number=5,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='INSERT', index=6, number=6,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='REPLACE', index=7, number=7,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UPDATE', index=8, number=8,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DELETE', index=9, number=9,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SET', index=10, number=10,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='OTHER', index=11, number=11,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ROW', index=12, number=12,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FIELD', index=13, number=13,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=1704,
  serialized_end=1874,
)
_sym_db.RegisterEnumDescriptor(_VEVENTTYPE)

VEventType = enum_type_wrapper.EnumTypeWrapper(_VEVENTTYPE)
# Module-level aliases for the enum values.
UNKNOWN = 0
GTID = 1
BEGIN = 2
COMMIT = 3
ROLLBACK = 4
DDL = 5
INSERT = 6
REPLACE = 7
UPDATE = 8
DELETE = 9
SET = 10
OTHER = 11
ROW = 12
FIELD = 13
_BINLOGTRANSACTION_STATEMENT_CATEGORY = _descriptor.EnumDescriptor(
@ -35,47 +121,47 @@ _BINLOGTRANSACTION_STATEMENT_CATEGORY = _descriptor.EnumDescriptor(
values=[
_descriptor.EnumValueDescriptor(
name='BL_UNRECOGNIZED', index=0, number=0,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_BEGIN', index=1, number=1,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_COMMIT', index=2, number=2,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_ROLLBACK', index=3, number=3,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_DML_DEPRECATED', index=4, number=4,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_DDL', index=5, number=5,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_SET', index=6, number=6,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_INSERT', index=7, number=7,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_UPDATE', index=8, number=8,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BL_DELETE', index=9, number=9,
options=None,
serialized_options=None,
type=None),
],
containing_type=None,
options=None,
serialized_options=None,
serialized_start=375,
serialized_end=544,
)
@ -95,28 +181,28 @@ _CHARSET = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conn', full_name='binlogdata.Charset.conn', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='server', full_name='binlogdata.Charset.server', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -140,21 +226,21 @@ _BINLOGTRANSACTION_STATEMENT = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='charset', full_name='binlogdata.BinlogTransaction.Statement.charset', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sql', full_name='binlogdata.BinlogTransaction.Statement.sql', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@ -162,7 +248,7 @@ _BINLOGTRANSACTION_STATEMENT = _descriptor.Descriptor(
enum_types=[
_BINLOGTRANSACTION_STATEMENT_CATEGORY,
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -185,21 +271,21 @@ _BINLOGTRANSACTION = _descriptor.Descriptor(
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='event_token', full_name='binlogdata.BinlogTransaction.event_token', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_BINLOGTRANSACTION_STATEMENT, ],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -223,28 +309,28 @@ _STREAMKEYRANGEREQUEST = _descriptor.Descriptor(
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key_range', full_name='binlogdata.StreamKeyRangeRequest.key_range', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='charset', full_name='binlogdata.StreamKeyRangeRequest.charset', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -268,14 +354,14 @@ _STREAMKEYRANGERESPONSE = _descriptor.Descriptor(
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -299,28 +385,28 @@ _STREAMTABLESREQUEST = _descriptor.Descriptor(
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tables', full_name='binlogdata.StreamTablesRequest.tables', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='charset', full_name='binlogdata.StreamTablesRequest.charset', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -344,14 +430,14 @@ _STREAMTABLESRESPONSE = _descriptor.Descriptor(
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -362,6 +448,75 @@ _STREAMTABLESRESPONSE = _descriptor.Descriptor(
)
# Descriptor for the Rule message (match + filter strings).
# Generated by the protocol buffer compiler; do not edit by hand.
_RULE = _descriptor.Descriptor(
  name='Rule',
  full_name='binlogdata.Rule',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='match', full_name='binlogdata.Rule.match', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='filter', full_name='binlogdata.Rule.filter', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=941,
  serialized_end=978,
)
# Descriptor for the Filter message (ordered list of Rules).
# Generated by the protocol buffer compiler; do not edit by hand.
_FILTER = _descriptor.Descriptor(
  name='Filter',
  full_name='binlogdata.Filter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='rules', full_name='binlogdata.Filter.rules', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=980,
  serialized_end=1021,
)
_BINLOGSOURCE = _descriptor.Descriptor(
name='BinlogSource',
full_name='binlogdata.BinlogSource',
@ -375,49 +530,298 @@ _BINLOGSOURCE = _descriptor.Descriptor(
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shard', full_name='binlogdata.BinlogSource.shard', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tablet_type', full_name='binlogdata.BinlogSource.tablet_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key_range', full_name='binlogdata.BinlogSource.key_range', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tables', full_name='binlogdata.BinlogSource.tables', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter', full_name='binlogdata.BinlogSource.filter', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=942,
serialized_end=1087,
serialized_start=1024,
serialized_end=1205,
)
_ROWCHANGE = _descriptor.Descriptor(
name='RowChange',
full_name='binlogdata.RowChange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='before', full_name='binlogdata.RowChange.before', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='after', full_name='binlogdata.RowChange.after', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1207,
serialized_end=1273,
)
_ROWEVENT = _descriptor.Descriptor(
name='RowEvent',
full_name='binlogdata.RowEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='binlogdata.RowEvent.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='row_changes', full_name='binlogdata.RowEvent.row_changes', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1275,
serialized_end=1349,
)
_FIELDEVENT = _descriptor.Descriptor(
name='FieldEvent',
full_name='binlogdata.FieldEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='binlogdata.FieldEvent.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fields', full_name='binlogdata.FieldEvent.fields', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1351,
serialized_end=1413,
)
_VEVENT = _descriptor.Descriptor(
name='VEvent',
full_name='binlogdata.VEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='binlogdata.VEvent.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gtid', full_name='binlogdata.VEvent.gtid', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ddl', full_name='binlogdata.VEvent.ddl', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='row_event', full_name='binlogdata.VEvent.row_event', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field_event', full_name='binlogdata.VEvent.field_event', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1416,
serialized_end=1575,
)
_VSTREAMREQUEST = _descriptor.Descriptor(
name='VStreamRequest',
full_name='binlogdata.VStreamRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='position', full_name='binlogdata.VStreamRequest.position', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter', full_name='binlogdata.VStreamRequest.filter', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1577,
serialized_end=1647,
)
_VSTREAMRESPONSE = _descriptor.Descriptor(
name='VStreamResponse',
full_name='binlogdata.VStreamResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='event', full_name='binlogdata.VStreamResponse.event', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1649,
serialized_end=1701,
)
_BINLOGTRANSACTION_STATEMENT.fields_by_name['category'].enum_type = _BINLOGTRANSACTION_STATEMENT_CATEGORY
@ -431,15 +835,35 @@ _STREAMKEYRANGEREQUEST.fields_by_name['charset'].message_type = _CHARSET
_STREAMKEYRANGERESPONSE.fields_by_name['binlog_transaction'].message_type = _BINLOGTRANSACTION
_STREAMTABLESREQUEST.fields_by_name['charset'].message_type = _CHARSET
_STREAMTABLESRESPONSE.fields_by_name['binlog_transaction'].message_type = _BINLOGTRANSACTION
_FILTER.fields_by_name['rules'].message_type = _RULE
_BINLOGSOURCE.fields_by_name['tablet_type'].enum_type = topodata__pb2._TABLETTYPE
_BINLOGSOURCE.fields_by_name['key_range'].message_type = topodata__pb2._KEYRANGE
_BINLOGSOURCE.fields_by_name['filter'].message_type = _FILTER
_ROWCHANGE.fields_by_name['before'].message_type = query__pb2._ROW
_ROWCHANGE.fields_by_name['after'].message_type = query__pb2._ROW
_ROWEVENT.fields_by_name['row_changes'].message_type = _ROWCHANGE
_FIELDEVENT.fields_by_name['fields'].message_type = query__pb2._FIELD
_VEVENT.fields_by_name['type'].enum_type = _VEVENTTYPE
_VEVENT.fields_by_name['row_event'].message_type = _ROWEVENT
_VEVENT.fields_by_name['field_event'].message_type = _FIELDEVENT
_VSTREAMREQUEST.fields_by_name['filter'].message_type = _FILTER
_VSTREAMRESPONSE.fields_by_name['event'].message_type = _VEVENT
DESCRIPTOR.message_types_by_name['Charset'] = _CHARSET
DESCRIPTOR.message_types_by_name['BinlogTransaction'] = _BINLOGTRANSACTION
DESCRIPTOR.message_types_by_name['StreamKeyRangeRequest'] = _STREAMKEYRANGEREQUEST
DESCRIPTOR.message_types_by_name['StreamKeyRangeResponse'] = _STREAMKEYRANGERESPONSE
DESCRIPTOR.message_types_by_name['StreamTablesRequest'] = _STREAMTABLESREQUEST
DESCRIPTOR.message_types_by_name['StreamTablesResponse'] = _STREAMTABLESRESPONSE
DESCRIPTOR.message_types_by_name['Rule'] = _RULE
DESCRIPTOR.message_types_by_name['Filter'] = _FILTER
DESCRIPTOR.message_types_by_name['BinlogSource'] = _BINLOGSOURCE
DESCRIPTOR.message_types_by_name['RowChange'] = _ROWCHANGE
DESCRIPTOR.message_types_by_name['RowEvent'] = _ROWEVENT
DESCRIPTOR.message_types_by_name['FieldEvent'] = _FIELDEVENT
DESCRIPTOR.message_types_by_name['VEvent'] = _VEVENT
DESCRIPTOR.message_types_by_name['VStreamRequest'] = _VSTREAMREQUEST
DESCRIPTOR.message_types_by_name['VStreamResponse'] = _VSTREAMRESPONSE
DESCRIPTOR.enum_types_by_name['VEventType'] = _VEVENTTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Charset = _reflection.GeneratedProtocolMessageType('Charset', (_message.Message,), dict(
@ -492,6 +916,20 @@ StreamTablesResponse = _reflection.GeneratedProtocolMessageType('StreamTablesRes
))
_sym_db.RegisterMessage(StreamTablesResponse)
Rule = _reflection.GeneratedProtocolMessageType('Rule', (_message.Message,), dict(
DESCRIPTOR = _RULE,
__module__ = 'binlogdata_pb2'
# @@protoc_insertion_point(class_scope:binlogdata.Rule)
))
_sym_db.RegisterMessage(Rule)
Filter = _reflection.GeneratedProtocolMessageType('Filter', (_message.Message,), dict(
DESCRIPTOR = _FILTER,
__module__ = 'binlogdata_pb2'
# @@protoc_insertion_point(class_scope:binlogdata.Filter)
))
_sym_db.RegisterMessage(Filter)
BinlogSource = _reflection.GeneratedProtocolMessageType('BinlogSource', (_message.Message,), dict(
DESCRIPTOR = _BINLOGSOURCE,
__module__ = 'binlogdata_pb2'
@ -499,7 +937,48 @@ BinlogSource = _reflection.GeneratedProtocolMessageType('BinlogSource', (_messag
))
_sym_db.RegisterMessage(BinlogSource)
RowChange = _reflection.GeneratedProtocolMessageType('RowChange', (_message.Message,), dict(
DESCRIPTOR = _ROWCHANGE,
__module__ = 'binlogdata_pb2'
# @@protoc_insertion_point(class_scope:binlogdata.RowChange)
))
_sym_db.RegisterMessage(RowChange)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\'vitess.io/vitess/go/vt/proto/binlogdata'))
RowEvent = _reflection.GeneratedProtocolMessageType('RowEvent', (_message.Message,), dict(
DESCRIPTOR = _ROWEVENT,
__module__ = 'binlogdata_pb2'
# @@protoc_insertion_point(class_scope:binlogdata.RowEvent)
))
_sym_db.RegisterMessage(RowEvent)
FieldEvent = _reflection.GeneratedProtocolMessageType('FieldEvent', (_message.Message,), dict(
DESCRIPTOR = _FIELDEVENT,
__module__ = 'binlogdata_pb2'
# @@protoc_insertion_point(class_scope:binlogdata.FieldEvent)
))
_sym_db.RegisterMessage(FieldEvent)
VEvent = _reflection.GeneratedProtocolMessageType('VEvent', (_message.Message,), dict(
DESCRIPTOR = _VEVENT,
__module__ = 'binlogdata_pb2'
# @@protoc_insertion_point(class_scope:binlogdata.VEvent)
))
_sym_db.RegisterMessage(VEvent)
VStreamRequest = _reflection.GeneratedProtocolMessageType('VStreamRequest', (_message.Message,), dict(
DESCRIPTOR = _VSTREAMREQUEST,
__module__ = 'binlogdata_pb2'
# @@protoc_insertion_point(class_scope:binlogdata.VStreamRequest)
))
_sym_db.RegisterMessage(VStreamRequest)
VStreamResponse = _reflection.GeneratedProtocolMessageType('VStreamResponse', (_message.Message,), dict(
DESCRIPTOR = _VSTREAMRESPONSE,
__module__ = 'binlogdata_pb2'
# @@protoc_insertion_point(class_scope:binlogdata.VStreamResponse)
))
_sym_db.RegisterMessage(VStreamResponse)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)