зеркало из https://github.com/github/vitess-gh.git
Support additional SHOW syntax
Signed-off-by: Saif Alharthi <saif@saifalharthi.me> Revert previous change (wrong branch!) Signed-off-by: Morgan Tocker <tocker@gmail.com> Resolve yaml conflict Signed-off-by: Saif Alharthi <saif@saifalharthi.me> vrepl: ExternalizeVindex Signed-off-by: Sugu Sougoumarane <ssougou@gmail.com> vrepl: ExternalizeVindex vtctl command Signed-off-by: Sugu Sougoumarane <ssougou@gmail.com> adds position and last token context to parse errors Signed-off-by: cmoog <moogcharlie@gmail.com> adds unit tests for PositionedErr parse error Signed-off-by: cmoog <moogcharlie@gmail.com> Add datadog plugin for tracing Signed-off-by: Karel Alfonso Sague <kalfonso@squareup.com> prepare statment test case. Signed-off-by: pradip parmar <prince.soamedia@gmail.com> prepared_statement: comments changes as per idiomatic go, removed elses. Signed-off-by: pradip parmar <prince.soamedia@gmail.com> prepared_statement: unwanted dependency removed. Signed-off-by: pradip parmar <prince.soamedia@gmail.com> prepare_statement: dummy commit. Signed-off-by: pradip parmar <prince.soamedia@gmail.com> patches error logic Signed-off-by: cmoog <moogcharlie@gmail.com> vitess-mixin: add structure & first 2 dashboards Signed-off-by: Guido Iaquinti <giaquinti@slack-corp.com> Address #5734 by forming MySQL error packet on query parse error during COM_PREPARE Signed-off-by: Jacques Grove <aquarapid@gmail.com>
This commit is contained in:
Родитель
1d60870e83
Коммит
bd5c3db97c
2
go.mod
2
go.mod
|
@ -44,7 +44,6 @@ require (
|
|||
github.com/hashicorp/memberlist v0.1.4 // indirect
|
||||
github.com/hashicorp/serf v0.0.0-20161207011743-d3a67ab21bc8 // indirect
|
||||
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428
|
||||
github.com/jinzhu/gorm v1.9.12
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 // indirect
|
||||
github.com/klauspost/crc32 v1.2.0 // indirect
|
||||
github.com/klauspost/pgzip v1.2.0
|
||||
|
@ -90,6 +89,7 @@ require (
|
|||
google.golang.org/api v0.9.0
|
||||
google.golang.org/genproto v0.0.0-20190926190326-7ee9db18f195 // indirect
|
||||
google.golang.org/grpc v1.24.0
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.17.0
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect
|
||||
gopkg.in/ldap.v2 v2.5.0
|
||||
honnef.co/go/tools v0.0.1-2019.2.3
|
||||
|
|
2
go.sum
2
go.sum
|
@ -113,7 +113,6 @@ github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTD
|
|||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
|
@ -682,6 +681,7 @@ google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
|
|||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
|
||||
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 h1:nn6Zav2sOQHCFJHEspya8KqxhFwKci30UxHy3HXPTyQ=
|
||||
|
|
|
@ -853,7 +853,12 @@ func (c *Conn) handleNextCommand(handler Handler) error {
|
|||
|
||||
statement, err := sqlparser.ParseStrictDDL(query)
|
||||
if err != nil {
|
||||
return err
|
||||
log.Errorf("Conn %v: Error parsing prepared statement: %v", c, err)
|
||||
if werr := c.writeErrorPacketFromError(err); werr != nil {
|
||||
// If we can't even write the error, we're done.
|
||||
log.Errorf("Conn %v: Error writing prepared statement error: %v", c, werr)
|
||||
return werr
|
||||
}
|
||||
}
|
||||
|
||||
paramsCount := uint16(0)
|
||||
|
|
|
@ -221,12 +221,12 @@ func (cluster *LocalProcessCluster) StartKeyspace(keyspace Keyspace, shardNames
|
|||
// Start Mysqlctl process
|
||||
log.Info(fmt.Sprintf("Starting mysqlctl for table uid %d, mysql port %d", tablet.TabletUID, tablet.MySQLPort))
|
||||
tablet.MysqlctlProcess = *MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory)
|
||||
if proc, err := tablet.MysqlctlProcess.StartProcess(); err != nil {
|
||||
proc, err := tablet.MysqlctlProcess.StartProcess()
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
return err
|
||||
} else {
|
||||
mysqlctlProcessList = append(mysqlctlProcessList, proc)
|
||||
}
|
||||
mysqlctlProcessList = append(mysqlctlProcessList, proc)
|
||||
|
||||
// start vttablet process
|
||||
tablet.VttabletProcess = VttabletProcessInstance(tablet.HTTPPort,
|
||||
|
@ -573,7 +573,7 @@ func (cluster *LocalProcessCluster) GetVttabletInstance(tabletType string, UID i
|
|||
}
|
||||
}
|
||||
|
||||
// GetVttabletInstance creates a new vttablet object
|
||||
// GetVtprocessInstanceFromVttablet creates a new vttablet object
|
||||
func (cluster *LocalProcessCluster) GetVtprocessInstanceFromVttablet(tablet *Vttablet, shardName string, ksName string) *VttabletProcess {
|
||||
return VttabletProcessInstance(tablet.HTTPPort,
|
||||
tablet.GrpcPort,
|
||||
|
|
|
@ -45,22 +45,21 @@ func GetMasterPosition(t *testing.T, vttablet Vttablet, hostname string) (string
|
|||
return pos, gtID
|
||||
}
|
||||
|
||||
// Verify total number of rows in a tablet
|
||||
// VerifyRowsInTablet Verify total number of rows in a tablet
|
||||
func VerifyRowsInTablet(t *testing.T, vttablet *Vttablet, ksName string, expectedRows int) {
|
||||
timeout := time.Now().Add(10 * time.Second)
|
||||
for time.Now().Before(timeout) {
|
||||
qr, err := vttablet.VttabletProcess.QueryTablet("select * from vt_insert_test", ksName, true)
|
||||
assert.Nil(t, err)
|
||||
if len(qr.Rows) != expectedRows {
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
} else {
|
||||
if len(qr.Rows) == expectedRows {
|
||||
return
|
||||
}
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
}
|
||||
assert.Fail(t, "expected rows not found.")
|
||||
}
|
||||
|
||||
// Verify Local Metadata of a tablet
|
||||
// VerifyLocalMetadata Verify Local Metadata of a tablet
|
||||
func VerifyLocalMetadata(t *testing.T, tablet *Vttablet, ksName string, shardName string, cell string) {
|
||||
qr, err := tablet.VttabletProcess.QueryTablet("select * from _vt.local_metadata", ksName, false)
|
||||
assert.Nil(t, err)
|
||||
|
@ -74,7 +73,7 @@ func VerifyLocalMetadata(t *testing.T, tablet *Vttablet, ksName string, shardNam
|
|||
}
|
||||
}
|
||||
|
||||
//Lists back preset in shard
|
||||
// ListBackups Lists back preset in shard
|
||||
func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) {
|
||||
output, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("ListBackups", shardKsName)
|
||||
if err != nil {
|
||||
|
|
|
@ -56,11 +56,11 @@ func (mysqlctl *MysqlctlProcess) InitDb() (err error) {
|
|||
|
||||
// Start executes mysqlctl command to start mysql instance
|
||||
func (mysqlctl *MysqlctlProcess) Start() (err error) {
|
||||
if tmpProcess, err := mysqlctl.StartProcess(); err != nil {
|
||||
tmpProcess, err := mysqlctl.StartProcess()
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
return tmpProcess.Wait()
|
||||
}
|
||||
return tmpProcess.Wait()
|
||||
}
|
||||
|
||||
// StartProcess starts the mysqlctl and returns the process reference
|
||||
|
@ -78,19 +78,20 @@ func (mysqlctl *MysqlctlProcess) StartProcess() (*exec.Cmd, error) {
|
|||
if mysqlctl.InitMysql {
|
||||
tmpProcess.Args = append(tmpProcess.Args, "init",
|
||||
"-init_db_sql_file", mysqlctl.InitDBFile)
|
||||
} else {
|
||||
tmpProcess.Args = append(tmpProcess.Args, "start")
|
||||
}
|
||||
tmpProcess.Args = append(tmpProcess.Args, "start")
|
||||
|
||||
return tmpProcess, tmpProcess.Start()
|
||||
}
|
||||
|
||||
// Stop executes mysqlctl command to stop mysql instance
|
||||
func (mysqlctl *MysqlctlProcess) Stop() (err error) {
|
||||
if tmpProcess, err := mysqlctl.StopProcess(); err != nil {
|
||||
tmpProcess, err := mysqlctl.StopProcess()
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
return tmpProcess.Wait()
|
||||
}
|
||||
return tmpProcess.Wait()
|
||||
|
||||
}
|
||||
|
||||
// StopProcess executes mysqlctl command to stop mysql instance and returns process reference
|
||||
|
|
|
@ -24,8 +24,6 @@ import (
|
|||
|
||||
"vitess.io/vitess/go/mysql"
|
||||
"vitess.io/vitess/go/test/endtoend/cluster"
|
||||
|
||||
_ "github.com/jinzhu/gorm/dialects/mysql"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -0,0 +1,270 @@
|
|||
/*
|
||||
Copyright 2019 The Vitess Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package preparestmt
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"vitess.io/vitess/go/test/endtoend/cluster"
|
||||
|
||||
"github.com/go-sql-driver/mysql"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// tableData is a temporary structure to hold selected data.
// The three fields mirror the msg, data, and text_col columns
// scanned by selectWhere.
type tableData struct {
	Msg     string // msg varchar column
	Data    string // data longblob column, scanned as a string
	TextCol string // text_col TEXT column
}
|
||||
|
||||
// DBInfo information about the database.
// Username/Password/KeyspaceName/Params are filled by init; Host and Port
// are filled by TestMain once vtgate is up.
type DBInfo struct {
	Username     string
	Password     string
	Host         string // vtgate host
	Port         uint   // vtgate MySQL protocol port
	KeyspaceName string
	Params       []string // extra DSN parameters (charset, parseTime, ...)
}
|
||||
|
||||
func init() {
|
||||
dbInfo.KeyspaceName = keyspaceName
|
||||
dbInfo.Username = "testuser1"
|
||||
dbInfo.Password = "testpassword1"
|
||||
dbInfo.Params = []string{
|
||||
"charset=utf8",
|
||||
"parseTime=True",
|
||||
"loc=Local",
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
clusterInstance *cluster.LocalProcessCluster
|
||||
dbInfo DBInfo
|
||||
hostname = "localhost"
|
||||
keyspaceName = "test_keyspace"
|
||||
testingID = 1
|
||||
tableName = "vt_prepare_stmt_test"
|
||||
cell = "zone1"
|
||||
mysqlAuthServerStatic = "mysql_auth_server_static.json"
|
||||
jsonExample = `{
|
||||
"quiz": {
|
||||
"sport": {
|
||||
"q1": {
|
||||
"question": "Which one is correct team name in NBA?",
|
||||
"options": [
|
||||
"New York Bulls",
|
||||
"Los Angeles Kings",
|
||||
"Golden State Warriors",
|
||||
"Huston Rocket"
|
||||
],
|
||||
"answer": "Huston Rocket"
|
||||
}
|
||||
},
|
||||
"maths": {
|
||||
"q1": {
|
||||
"question": "5 + 7 = ?",
|
||||
"options": [
|
||||
"10",
|
||||
"11",
|
||||
"12",
|
||||
"13"
|
||||
],
|
||||
"answer": "12"
|
||||
},
|
||||
"q2": {
|
||||
"question": "12 - 8 = ?",
|
||||
"options": [
|
||||
"1",
|
||||
"2",
|
||||
"3",
|
||||
"4"
|
||||
],
|
||||
"answer": "4"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
sqlSchema = `create table ` + tableName + ` (
|
||||
id bigint auto_increment,
|
||||
msg varchar(64),
|
||||
keyspace_id bigint(20) unsigned NOT NULL,
|
||||
tinyint_unsigned TINYINT,
|
||||
bool_signed BOOL,
|
||||
smallint_unsigned SMALLINT,
|
||||
mediumint_unsigned MEDIUMINT,
|
||||
int_unsigned INT,
|
||||
float_unsigned FLOAT(10,2),
|
||||
double_unsigned DOUBLE(16,2),
|
||||
decimal_unsigned DECIMAL,
|
||||
t_date DATE,
|
||||
t_datetime DATETIME,
|
||||
t_time TIME,
|
||||
t_timestamp TIMESTAMP,
|
||||
c8 bit(8) DEFAULT NULL,
|
||||
c16 bit(16) DEFAULT NULL,
|
||||
c24 bit(24) DEFAULT NULL,
|
||||
c32 bit(32) DEFAULT NULL,
|
||||
c40 bit(40) DEFAULT NULL,
|
||||
c48 bit(48) DEFAULT NULL,
|
||||
c56 bit(56) DEFAULT NULL,
|
||||
c63 bit(63) DEFAULT NULL,
|
||||
c64 bit(64) DEFAULT NULL,
|
||||
json_col JSON,
|
||||
text_col TEXT,
|
||||
data longblob,
|
||||
primary key (id)
|
||||
) Engine=InnoDB`
|
||||
)
|
||||
|
||||
// TestMain brings up a single-shard test cluster (topo server, unsharded
// keyspace, vtgate with static auth), runs the package tests against it, and
// tears everything down. Setup errors exit with status 1.
func TestMain(m *testing.M) {
	flag.Parse()

	exitcode, err := func() (int, error) {
		clusterInstance = cluster.NewCluster(cell, hostname)

		// Teardown runs even when a setup step below fails.
		defer clusterInstance.Teardown()

		// Start topo server
		if err := clusterInstance.StartTopo(); err != nil {
			return 1, err
		}

		// create auth server config consumed by vtgate via
		// -mysql_auth_server_static_file below.
		SQLConfig := `{
	"testuser1": {
		"Password": "testpassword1",
		"UserData": "vtgate client 1"
	}
}`
		if err := createConfig(mysqlAuthServerStatic, SQLConfig); err != nil {
			return 1, err
		}

		// add extra arguments: static auth, a 1s query timeout, and a fixed
		// advertised MySQL server version.
		clusterInstance.VtGateExtraArgs = []string{
			"-mysql_auth_server_impl", "static",
			"-mysql_server_query_timeout", "1s",
			"-mysql_auth_server_static_file", clusterInstance.TmpDirectory + "/" + mysqlAuthServerStatic,
			"-mysql_server_version", "8.0.16-7",
		}

		// Start keyspace
		keyspace := &cluster.Keyspace{
			Name:      keyspaceName,
			SchemaSQL: sqlSchema,
		}
		if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil {
			return 1, err
		}

		// Start vtgate
		if err := clusterInstance.StartVtgate(); err != nil {
			return 1, err
		}

		// Point the shared DSN at the freshly started vtgate.
		dbInfo.Host = clusterInstance.Hostname
		dbInfo.Port = uint(clusterInstance.VtgateMySQLPort)

		return m.Run(), nil
	}()
	if err != nil {
		fmt.Printf("%v\n", err)
		os.Exit(1)
	} else {
		os.Exit(exitcode)
	}

}
|
||||
|
||||
// ConnectionString generates the connection string using dbinfo.
|
||||
func (db DBInfo) ConnectionString(params ...string) string {
|
||||
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s", db.Username, db.Password, db.Host,
|
||||
db.Port, db.KeyspaceName, strings.Join(append(db.Params, params...), "&"))
|
||||
}
|
||||
|
||||
// createConfig creates a config file in TmpDir in vtdataroot and writes the given data.
|
||||
func createConfig(name, data string) error {
|
||||
// creating new file
|
||||
f, err := os.Create(clusterInstance.TmpDirectory + "/" + name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if data == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// write the given data
|
||||
_, err = fmt.Fprint(f, data)
|
||||
return err
|
||||
}
|
||||
|
||||
// Connect will connect the vtgate through mysql protocol.
|
||||
func Connect(t *testing.T, params ...string) *sql.DB {
|
||||
dbo, err := sql.Open("mysql", dbInfo.ConnectionString(params...))
|
||||
require.Nil(t, err)
|
||||
return dbo
|
||||
}
|
||||
|
||||
// execWithError executes the prepared query, and validates the error_code.
|
||||
func execWithError(t *testing.T, dbo *sql.DB, errorCodes []uint16, stmt string, params ...interface{}) {
|
||||
_, err := dbo.Exec(stmt, params...)
|
||||
require.NotNilf(t, err, "error expected, got nil")
|
||||
require.Contains(t, errorCodes, err.(*mysql.MySQLError).Number)
|
||||
}
|
||||
|
||||
// exec executes the query using the params.
|
||||
func exec(t *testing.T, dbo *sql.DB, stmt string, params ...interface{}) {
|
||||
require.Nil(t, execErr(dbo, stmt, params...))
|
||||
}
|
||||
|
||||
// execErr executes the query and returns an error if one occurs.
|
||||
func execErr(dbo *sql.DB, stmt string, params ...interface{}) *mysql.MySQLError {
|
||||
if _, err := dbo.Exec(stmt, params...); err != nil {
|
||||
return err.(*mysql.MySQLError)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// selectWhere select the row corresponding to the where condition.
|
||||
func selectWhere(t *testing.T, dbo *sql.DB, where string, params ...interface{}) []tableData {
|
||||
var out []tableData
|
||||
// prepare query
|
||||
qry := "SELECT msg, data, text_col FROM " + tableName
|
||||
if where != "" {
|
||||
qry += " WHERE (" + where + ")"
|
||||
}
|
||||
|
||||
// execute query
|
||||
r, err := dbo.Query(qry, params...)
|
||||
require.Nil(t, err)
|
||||
|
||||
// prepare result
|
||||
for r.Next() {
|
||||
var t tableData
|
||||
r.Scan(&t.Msg, &t.Data, &t.TextCol)
|
||||
out = append(out, t)
|
||||
}
|
||||
return out
|
||||
}
|
|
@ -0,0 +1,169 @@
|
|||
/*
|
||||
Copyright 2019 The Vitess Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package preparestmt
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/icrowley/fake"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestSelect runs a simple prepared SELECT with no WHERE condition.
// selectWhere itself fails the test on any query error.
func TestSelect(t *testing.T) {
	dbo := Connect(t)
	defer dbo.Close()
	selectWhere(t, dbo, "")
}
|
||||
|
||||
// TestInsertUpdateDelete validates all insert, update and
// delete method on prepared statements.
func TestInsertUpdateDelete(t *testing.T) {

	dbo := Connect(t)
	defer dbo.Close()
	// prepare insert statement covering all 27 columns of the test table
	insertStmt := `insert into ` + tableName + ` values( ?, ?, ?, ?, ?, ?, ?,
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`

	textValue := fake.FullName()
	largeComment := fake.Paragraph()

	// inserting multiple rows into test table
	for i := 1; i <= 100; i++ {
		// preparing value for the insert testing; msg becomes "<i>21" so the
		// row for testingID can be checked below
		insertValue := []interface{}{
			i, fmt.Sprint(i) + "21", i * 100,
			127, 1, 32767, 8388607, 2147483647, 2.55, 64.9, 55.5,
			time.Date(2009, 5, 5, 0, 0, 0, 0, time.UTC),
			time.Date(2009, 5, 5, 0, 0, 0, 0, time.UTC),
			time.Now(),
			time.Date(2009, 5, 5, 0, 0, 0, 0, time.UTC),
			1, 1, 1, 1, 1, 1, 1, 1, 1, jsonExample, textValue, largeComment,
		}
		exec(t, dbo, insertStmt, insertValue...)

	}
	// validate inserted data count
	testcount(t, dbo, 100)

	// select data with id 1 and validate the data accordingly
	// validate row count
	data := selectWhere(t, dbo, "id = ?", testingID)
	assert.Equal(t, 1, len(data))

	// validate value of msg column in data
	assert.Equal(t, fmt.Sprintf("%d21", testingID), data[0].Msg)

	// testing record update
	updateRecord(t, dbo)

	// testing record deletion
	deleteRecord(t, dbo)

	// testing reconnection and deleted-data validation
	reconnectAndTest(t)
}
|
||||
|
||||
// testcount validates inserted rows count with expected count.
|
||||
func testcount(t *testing.T, dbo *sql.DB, except int) {
|
||||
r, err := dbo.Query("SELECT count(1) FROM " + tableName)
|
||||
require.Nil(t, err)
|
||||
|
||||
r.Next()
|
||||
var i int
|
||||
err = r.Scan(&i)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, except, i)
|
||||
}
|
||||
|
||||
// TestAutoIncColumns test insertion of row without passing
// the value of auto increment columns (here it is id).
func TestAutoIncColumns(t *testing.T) {
	dbo := Connect(t)
	defer dbo.Close()
	// insert a row without id; every other column is named explicitly so the
	// auto_increment primary key is generated by the server
	insertStmt := "INSERT INTO " + tableName + ` (
msg,keyspace_id,tinyint_unsigned,bool_signed,smallint_unsigned,
mediumint_unsigned,int_unsigned,float_unsigned,double_unsigned,
decimal_unsigned,t_date,t_datetime,t_time,t_timestamp,c8,c16,c24,
c32,c40,c48,c56,c63,c64,json_col,text_col,data) VALUES (?, ?, ?, ?, ?, ?,
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`
	// values line up positionally with the 26 named columns above
	insertValue := []interface{}{
		"21", 0,
		127, 1, 32767, 8388607, 2147483647, 2.55, 64.9, 55.5,
		time.Date(2009, 5, 5, 0, 0, 0, 0, time.UTC),
		time.Date(2009, 5, 5, 0, 0, 0, 0, time.UTC),
		time.Now(),
		time.Date(2009, 5, 5, 0, 0, 0, 0, time.UTC),
		1, 1, 1, 1, 1, 1, 1, 1, 1, jsonExample, fake.DomainName(), fake.Paragraph(),
	}

	exec(t, dbo, insertStmt, insertValue...)
}
|
||||
|
||||
// deleteRecord test deletion operation corresponds to the testingID.
|
||||
func deleteRecord(t *testing.T, dbo *sql.DB) {
|
||||
// delete the record with id 1
|
||||
exec(t, dbo, "DELETE FROM "+tableName+" WHERE id = ?;", testingID)
|
||||
|
||||
data := selectWhere(t, dbo, "id = ?", testingID)
|
||||
assert.Equal(t, 0, len(data))
|
||||
|
||||
}
|
||||
|
||||
// updateRecord test update operation corresponds to the testingID.
|
||||
func updateRecord(t *testing.T, dbo *sql.DB) {
|
||||
// update the record with id 1
|
||||
updateData := "new data value"
|
||||
updateTextCol := "new text col value"
|
||||
updateQuery := "update " + tableName + " set data = ? , text_col = ? where id = ?;"
|
||||
|
||||
exec(t, dbo, updateQuery, updateData, updateTextCol, testingID)
|
||||
|
||||
// validate the updated value
|
||||
// validate row count
|
||||
data := selectWhere(t, dbo, "id = ?", testingID)
|
||||
assert.Equal(t, 1, len(data))
|
||||
|
||||
// validate value of msg column in data
|
||||
assert.Equal(t, updateData, data[0].Data)
|
||||
assert.Equal(t, updateTextCol, data[0].TextCol)
|
||||
|
||||
}
|
||||
|
||||
// reconnectAndTest creates new connection with database and validate.
|
||||
func reconnectAndTest(t *testing.T) {
|
||||
// reconnect and try to select the record with id 1
|
||||
dbo := Connect(t)
|
||||
defer dbo.Close()
|
||||
data := selectWhere(t, dbo, "id = ?", testingID)
|
||||
assert.Equal(t, 0, len(data))
|
||||
|
||||
}
|
||||
|
||||
// TestWrongTableName query database using invalid
// tablename and validate error.
func TestWrongTableName(t *testing.T) {
	dbo := Connect(t)
	defer dbo.Close()
	// The table name is deliberately nonexistent (do not "fix" the typo);
	// vtgate is expected to surface the failure as error code 1105.
	execWithError(t, dbo, []uint16{1105}, "select * from teseting_table;")
}
|
|
@ -0,0 +1,56 @@
|
|||
package trace
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer"
|
||||
ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
)
|
||||
|
||||
var (
|
||||
dataDogHost = flag.String("datadog-agent-host", "", "host to send spans to. if empty, no tracing will be done")
|
||||
dataDogPort = flag.String("datadog-agent-port", "", "port to send spans to. if empty, no tracing will be done")
|
||||
)
|
||||
|
||||
// newDatadogTracer builds an opentracing-compatible tracer that reports spans
// to a Datadog agent. Both -datadog-agent-host and -datadog-agent-port must be
// set; otherwise an error is returned and no tracing is configured.
func newDatadogTracer(serviceName string) (tracingService, io.Closer, error) {
	if *dataDogHost == "" || *dataDogPort == "" {
		return nil, nil, fmt.Errorf("need host and port to datadog agent to use datadog tracing")
	}

	// NOTE(review): debug mode is unconditionally enabled here — confirm this
	// is intended outside of development.
	t := opentracer.New(
		ddtracer.WithAgentAddr(*dataDogHost+":"+*dataDogPort),
		ddtracer.WithServiceName(serviceName),
		ddtracer.WithDebugMode(true),
		ddtracer.WithSampler(ddtracer.NewRateSampler(*samplingRate)),
	)

	// Install as the process-wide opentracing tracer.
	opentracing.SetGlobalTracer(t)

	return openTracingService{Tracer: &datadogTracer{actual: t}}, &ddCloser{}, nil
}
|
||||
|
||||
// Compile-time check that ddCloser satisfies io.Closer.
var _ io.Closer = (*ddCloser)(nil)

// ddCloser stops the global Datadog tracer when closed.
type ddCloser struct{}

// Close shuts down the Datadog tracer; it always returns nil.
func (ddCloser) Close() error {
	ddtracer.Stop()
	return nil
}
|
||||
|
||||
// init registers the Datadog tracer factory under the
// "opentracing-datadog" backend name so it can be selected at startup.
func init() {
	tracingBackendFactories["opentracing-datadog"] = newDatadogTracer
}
|
||||
|
||||
// Compile-time check that datadogTracer satisfies the tracer interface.
var _ tracer = (*datadogTracer)(nil)

// datadogTracer wraps the opentracing-compatible tracer produced by the
// Datadog opentracer bridge.
type datadogTracer struct {
	actual opentracing.Tracer
}

// GetOpenTracingTracer returns the underlying opentracing tracer.
func (dt *datadogTracer) GetOpenTracingTracer() opentracing.Tracer {
	return dt.actual
}
|
|
@ -1857,6 +1857,45 @@ func TestConvert(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestPositionedErr verifies that parse failures produce a PositionedErr
// carrying the error text, the byte position, and the last token seen by the
// tokenizer (Near is nil when no token context is available).
func TestPositionedErr(t *testing.T) {
	invalidSQL := []struct {
		input  string
		output PositionedErr
	}{{
		input:  "select convert('abc' as date) from t",
		output: PositionedErr{"syntax error", 24, []byte("as")},
	}, {
		input:  "select convert from t",
		output: PositionedErr{"syntax error", 20, []byte("from")},
	}, {
		input:  "select cast('foo', decimal) from t",
		output: PositionedErr{"syntax error", 19, nil},
	}, {
		input:  "select convert('abc', datetime(4+9)) from t",
		output: PositionedErr{"syntax error", 34, nil},
	}, {
		input:  "select convert('abc', decimal(4+9)) from t",
		output: PositionedErr{"syntax error", 33, nil},
	}, {
		input:  "set transaction isolation level 12345",
		output: PositionedErr{"syntax error", 38, []byte("12345")},
	}, {
		input:  "select * from a left join b",
		output: PositionedErr{"syntax error", 28, nil},
	}}

	for _, tcase := range invalidSQL {
		tkn := NewStringTokenizer(tcase.input)
		_, err := ParseNext(tkn)

		// The error must be a PositionedErr, and Pos, Near, and the rendered
		// message must all match the expectation.
		if posErr, ok := err.(PositionedErr); !ok {
			t.Errorf("%s: %v expected PositionedErr, got (%T) %v", tcase.input, err, err, tcase.output)
		} else if posErr.Pos != tcase.output.Pos || !bytes.Equal(posErr.Near, tcase.output.Near) || err.Error() != tcase.output.Error() {
			t.Errorf("%s: %v, want: %v", tcase.input, err, tcase.output)
		}
	}
}
|
||||
|
||||
func TestSubStr(t *testing.T) {
|
||||
|
||||
validSQL := []struct {
|
||||
|
|
|
@ -18,7 +18,6 @@ package sqlparser
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
|
@ -452,15 +451,23 @@ func (tkn *Tokenizer) Lex(lval *yySymType) int {
|
|||
return typ
|
||||
}
|
||||
|
||||
// PositionedErr holds context related to parser errors: the error text, the
// byte position in the input, and (when available) the last token seen.
type PositionedErr struct {
	Err  string
	Pos  int
	Near []byte
}

// Error renders the parse error, including the near-token context when the
// tokenizer captured one.
func (p PositionedErr) Error() string {
	if p.Near == nil {
		return fmt.Sprintf("%s at position %v", p.Err, p.Pos)
	}
	return fmt.Sprintf("%s at position %v near '%s'", p.Err, p.Pos, p.Near)
}
|
||||
|
||||
// Error is called by go yacc if there's a parsing error.
|
||||
func (tkn *Tokenizer) Error(err string) {
|
||||
buf := &bytes2.Buffer{}
|
||||
if tkn.lastToken != nil {
|
||||
fmt.Fprintf(buf, "%s at position %v near '%s'", err, tkn.Position, tkn.lastToken)
|
||||
} else {
|
||||
fmt.Fprintf(buf, "%s at position %v", err, tkn.Position)
|
||||
}
|
||||
tkn.LastError = errors.New(buf.String())
|
||||
tkn.LastError = PositionedErr{Err: err, Pos: tkn.Position, Near: tkn.lastToken}
|
||||
|
||||
// Try and re-sync to the next statement
|
||||
tkn.skipStatement()
|
||||
|
|
|
@ -317,6 +317,9 @@ var commands = []commandGroup{
|
|||
{"CreateLookupVindex", commandCreateLookupVindex,
|
||||
"[-cell=<cell>] [-tablet_types=<source_tablet_types>] <keyspace> <json_spec>",
|
||||
`Create and backfill a lookup vindex. the json_spec must contain the vindex and colvindex specs for the new lookup.`},
|
||||
{"ExternalizeVindex", commandExternalizeVindex,
|
||||
"<keyspace>.<vindex>",
|
||||
`Externalize a backfilled vindex.`},
|
||||
{"Materialize", commandMaterialize,
|
||||
`<json_spec>, example : '{"workflow": "aaa", "source_keyspace": "source", "target_keyspace": "target", "table_settings": [{"target_table": "customer", "source_expression": "select * from customer", "create_ddl": "copy"}]}'`,
|
||||
"Performs materialization based on the json spec."},
|
||||
|
@ -1850,6 +1853,16 @@ func commandCreateLookupVindex(ctx context.Context, wr *wrangler.Wrangler, subFl
|
|||
return wr.CreateLookupVindex(ctx, keyspace, specs, *cell, *tabletTypes)
|
||||
}
|
||||
|
||||
// commandExternalizeVindex implements the ExternalizeVindex vtctl command.
// It expects exactly one positional argument, "<keyspace>.<vindex>", and
// delegates to the wrangler.
func commandExternalizeVindex(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("one argument is required: keyspace.vindex")
	}
	return wr.ExternalizeVindex(ctx, subFlags.Arg(0))
}
|
||||
|
||||
func commandMaterialize(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
|
||||
if err := subFlags.Parse(args); err != nil {
|
||||
return err
|
||||
|
|
|
@ -863,6 +863,9 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql
|
|||
if !show.OnTable.Qualifier.IsEmpty() {
|
||||
destKeyspace = show.OnTable.Qualifier.String()
|
||||
show.OnTable.Qualifier = sqlparser.NewTableIdent("")
|
||||
} else if show.ShowTablesOpt != nil {
|
||||
destKeyspace = show.ShowTablesOpt.DbName
|
||||
show.ShowTablesOpt.DbName = ""
|
||||
} else {
|
||||
break
|
||||
}
|
||||
|
|
|
@ -809,6 +809,11 @@ func TestExecutorShow(t *testing.T) {
|
|||
t.Errorf("Got: %v. Want: %v", lastQuery, wantQuery)
|
||||
}
|
||||
|
||||
_, err = executor.Execute(context.Background(), "TestExecute", session, fmt.Sprintf("show full columns from unknown from %v", KsTestUnsharded), nil)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
for _, query := range []string{"show charset", "show charset like '%foo'", "show character set", "show character set like '%foo'"} {
|
||||
qr, err := executor.Execute(context.Background(), "TestExecute", session, query, nil)
|
||||
if err != nil {
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"golang.org/x/net/context"
|
||||
|
||||
"vitess.io/vitess/go/json2"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/binlog/binlogplayer"
|
||||
"vitess.io/vitess/go/vt/concurrency"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
|
@ -419,6 +420,110 @@ func generateColDef(lines []string, sourceVindexCol, vindexFromCol string) (stri
|
|||
return "", fmt.Errorf("column %s not found in schema %v", sourceVindexCol, lines)
|
||||
}
|
||||
|
||||
// ExternalizeVindex externalizes a lookup vindex that's finished backfilling or has caught up.
//
// qualifiedVindexName must be of the form "keyspace.vindex". The vindex's
// backing table (its "table" param) must itself be qualified as
// "keyspace.table". The backfill workflow is assumed to be named
// "<table>_vdx" — NOTE(review): presumably matching the name used by the
// vindex backfill creation path; confirm against CreateLookupVindex.
//
// Validation before externalizing: for an unowned vindex every backfill
// stream must still be Running; for an owned vindex every stream must be
// Stopped with a "Stopped after copy" message. Owned-vindex streams are
// then deleted. Finally the "write_only" param is removed from the vindex
// and the source vschema is saved.
func (wr *Wrangler) ExternalizeVindex(ctx context.Context, qualifiedVindexName string) error {
	splits := strings.Split(qualifiedVindexName, ".")
	if len(splits) != 2 {
		return fmt.Errorf("vindex name should be of the form keyspace.vindex: %s", qualifiedVindexName)
	}
	sourceKeyspace, vindexName := splits[0], splits[1]
	sourceVSchema, err := wr.ts.GetVSchema(ctx, sourceKeyspace)
	if err != nil {
		return err
	}
	sourceVindex := sourceVSchema.Vindexes[vindexName]
	if sourceVindex == nil {
		return fmt.Errorf("vindex %s not found in vschema", qualifiedVindexName)
	}
	// The lookup table backing the vindex lives in the target keyspace;
	// its name is recorded in the vindex params.
	qualifiedTableName := sourceVindex.Params["table"]
	splits = strings.Split(qualifiedTableName, ".")
	if len(splits) != 2 {
		return fmt.Errorf("table name in vindex should be of the form keyspace.table: %s", qualifiedTableName)
	}
	targetKeyspace, targetTableName := splits[0], splits[1]
	// Workflow name is derived from the lookup table name.
	workflow := targetTableName + "_vdx"
	targetShards, err := wr.ts.GetServingShards(ctx, targetKeyspace)
	if err != nil {
		return err
	}

	// Create a parallelizer function.
	forAllTargets := func(f func(*topo.ShardInfo) error) error {
		var wg sync.WaitGroup
		allErrors := &concurrency.AllErrorRecorder{}
		for _, targetShard := range targetShards {
			wg.Add(1)
			go func(targetShard *topo.ShardInfo) {
				defer wg.Done()

				if err := f(targetShard); err != nil {
					allErrors.RecordError(err)
				}
			}(targetShard)
		}
		wg.Wait()
		return allErrors.AggrError(vterrors.Aggregate)
	}

	// First pass: validate the state of every backfill stream on every
	// target master before making any changes.
	err = forAllTargets(func(targetShard *topo.ShardInfo) error {
		targetMaster, err := wr.ts.GetTablet(ctx, targetShard.MasterAlias)
		if err != nil {
			return err
		}
		p3qr, err := wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, fmt.Sprintf("select id, state, message from _vt.vreplication where workflow=%s and db_name=%s", encodeString(workflow), encodeString(targetMaster.DbName())))
		if err != nil {
			return err
		}
		qr := sqltypes.Proto3ToResult(p3qr)
		for _, row := range qr.Rows {
			id, err := sqltypes.ToInt64(row[0])
			if err != nil {
				return err
			}
			state := row[1].ToString()
			message := row[2].ToString()
			if sourceVindex.Owner == "" {
				// If there's no owner, all streams need to be running.
				if state != binlogplayer.BlpRunning {
					return fmt.Errorf("stream %d for %v.%v is not in Running state: %v", id, targetShard.Keyspace(), targetShard.ShardName(), state)
				}
			} else {
				// If there is an owner, all streams need to be stopped after copy.
				if state != binlogplayer.BlpStopped || !strings.Contains(message, "Stopped after copy") {
					return fmt.Errorf("stream %d for %v.%v is not in Stopped after copy state: %v, %v", id, targetShard.Keyspace(), targetShard.ShardName(), state, message)
				}
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	if sourceVindex.Owner != "" {
		// If there is an owner, we have to delete the streams.
		err := forAllTargets(func(targetShard *topo.ShardInfo) error {
			targetMaster, err := wr.ts.GetTablet(ctx, targetShard.MasterAlias)
			if err != nil {
				return err
			}
			query := fmt.Sprintf("delete from _vt.vreplication where db_name=%s and workflow=%s", encodeString(targetMaster.DbName()), encodeString(workflow))
			_, err = wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, query)
			if err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return err
		}
	}

	// Remove the write_only param and save the source vschema.
	delete(sourceVindex.Params, "write_only")
	return wr.ts.SaveVSchema(ctx, sourceKeyspace, sourceVSchema)
}
|
||||
|
||||
// Materialize performs the steps needed to materialize a list of tables based on the materialization specs.
|
||||
func (wr *Wrangler) Materialize(ctx context.Context, ms *vtctldatapb.MaterializeSettings) error {
|
||||
if err := wr.validateNewWorkflow(ctx, ms.TargetKeyspace, ms.Workflow); err != nil {
|
||||
|
|
|
@ -1290,6 +1290,129 @@ func TestCreateLookupVindexFailures(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestExternalizeVindex exercises Wrangler.ExternalizeVindex against a fake
// materializer environment with one source shard and two target shards
// (fake tablet UIDs 200 and 210 serve the target masters' queries).
func TestExternalizeVindex(t *testing.T) {
	ms := &vtctldatapb.MaterializeSettings{
		SourceKeyspace: "sourceks",
		TargetKeyspace: "targetks",
	}
	env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"})
	defer env.close()

	// Source vschema with three write_only lookup vindexes: "owned" (has an
	// owner table), "unowned", and "bad" whose backing table name is not
	// keyspace-qualified.
	sourceVSchema := &vschemapb.Keyspace{
		Sharded: true,
		Vindexes: map[string]*vschemapb.Vindex{
			"hash": {
				Type: "hash",
			},
			"owned": {
				Type: "lookup_unique",
				Params: map[string]string{
					"table":      "targetks.lkp",
					"from":       "c1",
					"to":         "c2",
					"write_only": "true",
				},
				Owner: "t1",
			},
			"unowned": {
				Type: "lookup_unique",
				Params: map[string]string{
					"table":      "targetks.lkp",
					"from":       "c1",
					"to":         "c2",
					"write_only": "true",
				},
			},
			"bad": {
				Type: "lookup_unique",
				Params: map[string]string{
					"table": "unqualified",
					"from":  "c1",
					"to":    "c2",
				},
			},
		},
		Tables: map[string]*vschemapb.Table{
			"t1": {
				ColumnVindexes: []*vschemapb.ColumnVindex{{
					Name:   "hash",
					Column: "col1",
				}, {
					Name:   "owned",
					Column: "col2",
				}},
			},
		},
	}
	// Canned vreplication stream states returned by the fake tablet manager
	// for the workflow validation query.
	fields := sqltypes.MakeTestFields(
		"id|state|message",
		"int64|varbinary|varbinary",
	)
	running := sqltypes.MakeTestResult(fields, "1|Running|msg")
	stopped := sqltypes.MakeTestResult(fields, "1|Stopped|Stopped after copy")
	testcases := []struct {
		input        string               // qualified vindex name passed to ExternalizeVindex
		vrResponse   *sqltypes.Result     // stream state the fake targets report; nil when validation is never reached
		expectDelete bool                 // owned + stopped case: streams must be deleted
		err          string               // substring expected in the returned error; empty means success
	}{{
		input:        "sourceks.owned",
		vrResponse:   stopped,
		expectDelete: true,
	}, {
		input:      "sourceks.unowned",
		vrResponse: running,
	}, {
		input: "unqualified",
		err:   "vindex name should be of the form keyspace.vindex: unqualified",
	}, {
		input: "sourceks.absent",
		err:   "vindex sourceks.absent not found in vschema",
	}, {
		input: "sourceks.bad",
		err:   "table name in vindex should be of the form keyspace.table: unqualified",
	}, {
		input:      "sourceks.owned",
		vrResponse: running,
		err:        "is not in Stopped after copy state",
	}, {
		input:      "sourceks.unowned",
		vrResponse: stopped,
		err:        "is not in Running state",
	}}
	for _, tcase := range testcases {
		// Resave the source schema for every iteration.
		if err := env.topoServ.SaveVSchema(context.Background(), ms.SourceKeyspace, sourceVSchema); err != nil {
			t.Fatal(err)
		}
		if tcase.vrResponse != nil {
			// Both target masters are expected to be asked for stream state.
			validationQuery := "select id, state, message from _vt.vreplication where workflow='lkp_vdx' and db_name='vt_targetks'"
			env.tmc.expectVRQuery(200, validationQuery, tcase.vrResponse)
			env.tmc.expectVRQuery(210, validationQuery, tcase.vrResponse)
		}

		if tcase.expectDelete {
			deleteQuery := "delete from _vt.vreplication where db_name='vt_targetks' and workflow='lkp_vdx'"
			env.tmc.expectVRQuery(200, deleteQuery, &sqltypes.Result{})
			env.tmc.expectVRQuery(210, deleteQuery, &sqltypes.Result{})
		}

		err := env.wr.ExternalizeVindex(context.Background(), tcase.input)
		if tcase.err != "" {
			if err == nil || !strings.Contains(err.Error(), tcase.err) {
				t.Errorf("ExternalizeVindex(%s) err: %v, must contain %v", tcase.input, err, tcase.err)
			}
			continue
		}
		require.NoError(t, err)

		// On success, the write_only param must have been removed from the
		// externalized vindex in the saved vschema.
		outvschema, err := env.topoServ.GetVSchema(context.Background(), ms.SourceKeyspace)
		require.NoError(t, err)
		vindexName := strings.Split(tcase.input, ".")[1]
		assert.NotContains(t, outvschema.Vindexes[vindexName].Params, "write_only", tcase.input)
	}
}
|
||||
|
||||
func TestMaterializerOneToOne(t *testing.T) {
|
||||
ms := &vtctldatapb.MaterializeSettings{
|
||||
Workflow: "workflow",
|
||||
|
|
|
@ -123,12 +123,21 @@
|
|||
"RetryMax": 0,
|
||||
"Tags": []
|
||||
},
|
||||
    "prepared_statement": {
      "File": "prepared_statement_test.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 5,
      "RetryMax": 0,
      "Tags": []
    },
    "mysql_server": {
      "File": "mysql_server_test.py",
      "Args": [],
      "Command": [],
      "Manual": false,
      "Shard": 4,
|
||||
"RetryMax": 0,
|
||||
"Tags": []
|
||||
},
|
||||
|
@ -270,9 +279,15 @@
|
|||
"RetryMax": 0,
|
||||
"Tags": []
|
||||
},
|
||||
    "mysql_server": {
      "File": "mysql_server_test.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/mysqlserver"],
      "Command": [],
      "Manual": false,
      "Shard": 12,
      "RetryMax": 0,
      "Tags": []
    },
    "prepare_statement": {
      "File": "stmt_methods_test.go",
      "Args": ["vitess.io/vitess/go/test/endtoend/preparestmt"],
      "Command": [],
      "Manual": false,
      "Shard": 12,
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
*.yaml
|
||||
dashboards_out
|
||||
vendor
|
||||
jsonnetfile.lock.json
|
|
@ -0,0 +1,44 @@
|
|||
# Build/lint/test harness for the Vitess monitoring mixin.
# Tooling used: jsonnet (optionally jsonnetfmt), jb (jsonnet-bundler),
# and promtool.
.PHONY: dashboards_out

# Shared formatting options for jsonnet fmt / jsonnetfmt.
JSONNET_ARGS := -n 2 --max-blank-lines 2 --string-style s --comment-style s
# Prefer the standalone jsonnetfmt binary when installed; otherwise fall
# back to the `jsonnet fmt` subcommand with the same options.
ifneq (,$(shell which jsonnetfmt))
  JSONNET_FMT_CMD := jsonnetfmt
else
  JSONNET_FMT_CMD := jsonnet
  JSONNET_FMT_ARGS := fmt $(JSONNET_ARGS)
endif
JSONNET_FMT := $(JSONNET_FMT_CMD) $(JSONNET_FMT_ARGS)

all: fmt prometheus_alerts.yaml prometheus_rules.yaml dashboards_out lint test

# Reformat all jsonnet/libsonnet sources in place (vendor/ excluded).
fmt:
	find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
		xargs -n 1 -- $(JSONNET_FMT) -i

# Install jsonnet dependencies into vendor/.
jsonnet_bundle_install:
	jb install

prometheus_alerts.yaml: mixin.libsonnet lib/alerts.jsonnet alerts/*.libsonnet
	jsonnet -S lib/alerts.jsonnet > $@

prometheus_rules.yaml: mixin.libsonnet lib/rules.jsonnet rules/*.libsonnet
	jsonnet -S lib/rules.jsonnet > $@

# Render one JSON file per generated dashboard into dashboards_out/.
dashboards_out: mixin.libsonnet lib/dashboards.jsonnet dashboards/*.libsonnet
	@mkdir -p dashboards_out
	jsonnet -J vendor -m dashboards_out lib/dashboards.jsonnet

# Fail if any source is not formatter-clean (diff exits non-zero), then
# validate the generated rule/alert files with promtool.
lint: prometheus_alerts.yaml prometheus_rules.yaml
	find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
		while read f; do \
			$(JSONNET_FMT) "$$f" | diff -u "$$f" -; \
		done

	promtool check rules prometheus_rules.yaml
	promtool check rules prometheus_alerts.yaml

clean:
	rm -rf dashboards_out prometheus_alerts.yaml prometheus_rules.yaml

test: prometheus_alerts.yaml prometheus_rules.yaml
	promtool test rules tests.yaml
|
|
@ -0,0 +1,48 @@
|
|||
# (Alpha) Prometheus Monitoring Mixin for Vitess
|
||||
|
||||
A set of Grafana dashboards and Prometheus alerts for Vitess.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Install `jsonnet-bundler`:
|
||||
* via `go`: `go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb`
|
||||
* via `brew`: `brew install jsonnet`
|
||||
|
||||
1. Install `promtool`: `go get github.com/prometheus/prometheus/cmd/promtool`
|
||||
|
||||
## Generate config files
|
||||
|
||||
You can manually generate the alerts, dashboards and rules files:
|
||||
|
||||
```
|
||||
$ make prometheus_alerts.yaml
|
||||
$ make prometheus_rules.yaml
|
||||
$ make dashboards_out
|
||||
```
|
||||
|
||||
The `prometheus_alerts.yaml` and `prometheus_rules.yaml` files then need to be passed
to your Prometheus server, and the files in `dashboards_out` need to be imported
into your Grafana server. The exact details will depend on how you deploy your
monitoring stack.
|
||||
|
||||
## Running the tests (requires Docker)
|
||||
|
||||
Build the mixins, run the tests:
|
||||
|
||||
```
|
||||
$ docker run -v $(pwd):/tmp --entrypoint "/bin/promtool" prom/prometheus:latest test rules /tmp/tests.yaml
|
||||
```
|
||||
|
||||
Generate the alerts, rules and dashboards:
|
||||
|
||||
```
|
||||
$ jsonnet -J vendor -S -e 'std.manifestYamlDoc((import "mixin.libsonnet").prometheusAlerts)' > alerts.yml
|
||||
$ jsonnet -J vendor -S -e 'std.manifestYamlDoc((import "mixin.libsonnet").prometheusRules)' >files/rules.yml
|
||||
$ jsonnet -J vendor -m files/dashboards -e '(import "mixin.libsonnet").grafanaDashboards'
|
||||
```
|
||||
|
||||
## Background
|
||||
|
||||
* For more motivation, see
|
||||
"[The RED Method: How to instrument your services](https://kccncna17.sched.com/event/CU8K/the-red-method-how-to-instrument-your-services-b-tom-wilkie-kausal?iframe=no&w=100%&sidebar=yes&bg=no)" talk from CloudNativeCon Austin.
|
||||
* For more information about monitoring mixins, see this [design doc](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/edit#).
|
|
@ -0,0 +1 @@
|
|||
{}
|
|
@ -0,0 +1 @@
|
|||
{}
|
|
@ -0,0 +1,39 @@
|
|||
// Shared configuration for the Vitess mixin. Everything under _config is
// hidden (::) and consumed by the dashboard definitions via
// '%(...)s' % $._config string interpolation.
{
  _config+:: {

    // Selectors are inserted between {} in Prometheus queries.
    regionSelector: 'region="$region"',
    vtctldSelector: 'job="vitess-vtctld"',
    vtgateSelector: 'job="vitess-vtgate"',
    vttabletSelector: 'job="vitess-vttablet"',
    vtworkerSelector: 'job="vitess-vtworker"',
    mysqlSelector: 'job="mysql"',

    // Datasource to use
    dataSource: 'Prometheus_Vitess',

    // Default config for the Grafana dashboards in the Vitess Mixin
    grafanaDashboardMetadataDefault: {
      dashboardNamePrefix: 'Vitess /',
      dashboardNameSuffix: '(auto-generated)',
      dashboardTags: ['vitess-mixin'],
    },

    // Grafana dashboard IDs are necessary for stable links for dashboards
    grafanaDashboardMetadata: {
      cluster_overview: {
        uid: '0d0778047f5a64ff2ea084ec3e',
        title: '%(dashboardNamePrefix)s Cluster Overview %(dashboardNameSuffix)s' % $._config.grafanaDashboardMetadataDefault,
        description: 'Vitess cluster overview',
        dashboardTags: $._config.grafanaDashboardMetadataDefault.dashboardTags + ['overview', 'cluster'],
      },
      keyspace_overview: {
        uid: 'ff33eceed7d2b1267dd286a099',
        title: '%(dashboardNamePrefix)s Keyspace Overview %(dashboardNameSuffix)s' % $._config.grafanaDashboardMetadataDefault,
        description: 'General keyspace overview',
        dashboardTags: $._config.grafanaDashboardMetadataDefault.dashboardTags + ['overview', 'keyspace'],
      },
    },

  },
}
|
|
@ -0,0 +1,539 @@
|
|||
local grafana = import 'grafonnet/grafana.libsonnet';
|
||||
local annotation = grafana.annotation;
|
||||
local dashboard = grafana.dashboard;
|
||||
local graphPanel = grafana.graphPanel;
|
||||
local prometheus = grafana.prometheus;
|
||||
local row = grafana.row;
|
||||
local singlestat = grafana.singlestat;
|
||||
local template = grafana.template;
|
||||
|
||||
// TODO: add description for each panel
|
||||
// TODO: extract template to common resource file so that components can be recycled across dashboards
|
||||
|
||||
{
|
||||
grafanaDashboards+:: {
|
||||
'cluster_overview.json':
|
||||
|
||||
// Dashboard metadata
|
||||
local dashboardMetadata = $._config.grafanaDashboardMetadata;
|
||||
local currentDashboardMetadata = dashboardMetadata.cluster_overview;
|
||||
|
||||
// Datalinks definition
|
||||
// TODO: use proper param when grafonnet-lib will support it
|
||||
local datalinksDefinition = {
|
||||
options: {
|
||||
dataLinks: [
|
||||
{
|
||||
title: 'Go to "Keyspace Overview"',
|
||||
url: '/d/%(dashboardID)s/?var-interval=${interval}&var-region=${region}&var-keyspace=${__series.labels.keyspace}&time=${__url_time_range}' % dashboardMetadata.keyspace_overview.uid,
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
// Template definition
|
||||
local regionTemplate =
|
||||
template.new(
|
||||
'region',
|
||||
'%(dataSource)s' % $._config,
|
||||
'label_values(vtctld_build_number{%(vtctldSelector)s}, region)' % $._config,
|
||||
label='Region',
|
||||
refresh='time',
|
||||
includeAll=false,
|
||||
sort=1,
|
||||
);
|
||||
|
||||
local intervalTemplate =
|
||||
template.new(
|
||||
name='interval',
|
||||
label='Interval',
|
||||
datasource='$datasource',
|
||||
query='1m,5m,10m,30m,1h,6h,12h',
|
||||
current='5m',
|
||||
refresh=2,
|
||||
includeAll=false,
|
||||
sort=1
|
||||
) + {
|
||||
skipUrlSync: false,
|
||||
type: 'interval',
|
||||
options: [
|
||||
{
|
||||
selected: false,
|
||||
text: '1m',
|
||||
value: '1m',
|
||||
},
|
||||
{
|
||||
selected: true,
|
||||
text: '5m',
|
||||
value: '5m',
|
||||
},
|
||||
{
|
||||
selected: false,
|
||||
text: '10m',
|
||||
value: '10m',
|
||||
},
|
||||
{
|
||||
selected: false,
|
||||
text: '30m',
|
||||
value: '30m',
|
||||
},
|
||||
{
|
||||
selected: false,
|
||||
text: '1h',
|
||||
value: '1h',
|
||||
},
|
||||
{
|
||||
selected: false,
|
||||
text: '6h',
|
||||
value: '6h',
|
||||
},
|
||||
{
|
||||
selected: false,
|
||||
text: '12h',
|
||||
value: '12h',
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
// Singlestat definitions
|
||||
local querySuccessVTGate =
|
||||
singlestat.new(
|
||||
'Query success - vtgate',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
colorBackground=true,
|
||||
decimals=4,
|
||||
format='percentunit',
|
||||
colors=[
|
||||
'#d44a3a',
|
||||
'rgba(237, 129, 40, 0.89)',
|
||||
'#299c46',
|
||||
],
|
||||
valueFontSize='70%',
|
||||
valueName='current',
|
||||
thresholds='0.99,0.999',
|
||||
)
|
||||
.addTarget(prometheus.target('1 - sum(rate(vtgate_api_error_counts{%(regionSelector)s, %(vtgateSelector)s}[$interval])) / (sum(rate(vtgate_api_error_counts{%(regionSelector)s, %(vtgateSelector)s}[$interval])) + sum(rate(vtgate_api_count{%(regionSelector)s, %(vtgateSelector)s}[$interval])))\n' % $._config, instant=true, intervalFactor=1));
|
||||
|
||||
local querySuccessVTTablet =
|
||||
singlestat.new(
|
||||
'Query success - vttablet',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
colorBackground=true,
|
||||
decimals=4,
|
||||
format='percentunit',
|
||||
colors=[
|
||||
'#d44a3a',
|
||||
'rgba(237, 129, 40, 0.89)',
|
||||
'#299c46',
|
||||
],
|
||||
valueFontSize='70%',
|
||||
valueName='current',
|
||||
thresholds='0.99,0.999',
|
||||
)
|
||||
.addTarget(prometheus.target('1 - (sum(rate(vttablet_errors{%(regionSelector)s, %(vttabletSelector)s}[$interval])) / (sum(rate(vttablet_errors{%(regionSelector)s, %(vttabletSelector)s}[$interval])) + sum(rate(vttablet_query_counts{%(regionSelector)s, %(vttabletSelector)s}[$interval]))))' % $._config, instant=true, intervalFactor=1));
|
||||
|
||||
local qpsVTGate =
|
||||
singlestat.new(
|
||||
'QPS - vtgate',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
format='none',
|
||||
valueFontSize='70%',
|
||||
valueName='current',
|
||||
sparklineFull=true,
|
||||
sparklineShow=true,
|
||||
)
|
||||
.addTarget(prometheus.target('sum(rate(vtgate_api_count{%(regionSelector)s, %(vtgateSelector)s}[$interval]))' % $._config, instant=false, intervalFactor=1));
|
||||
|
||||
local qpsVTTablet =
|
||||
singlestat.new(
|
||||
'QPS - vttablet',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
format='none',
|
||||
valueFontSize='70%',
|
||||
valueName='current',
|
||||
sparklineFull=true,
|
||||
sparklineShow=true,
|
||||
)
|
||||
.addTarget(prometheus.target('sum(rate(vttablet_query_counts{%(regionSelector)s, %(vttabletSelector)s}[$interval]))' % $._config, instant=false, intervalFactor=1));
|
||||
|
||||
local qpsMySQL =
|
||||
singlestat.new(
|
||||
'QPS - MySQL',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
format='none',
|
||||
valueFontSize='70%',
|
||||
valueName='current',
|
||||
sparklineFull=true,
|
||||
sparklineShow=true,
|
||||
)
|
||||
.addTarget(prometheus.target('sum(rate(mysql_global_status_queries{%(regionSelector)s, %(mysqlSelector)s}[$interval])) ' % $._config, instant=false, intervalFactor=1));
|
||||
|
||||
local vtctldUp =
|
||||
singlestat.new(
|
||||
'vtctld',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
valueFontSize='50%',
|
||||
)
|
||||
.addTarget(prometheus.target('sum(up{%(regionSelector)s, %(vtctldSelector)s})' % $._config, instant=true, intervalFactor=1));
|
||||
|
||||
local vtgateUp =
|
||||
singlestat.new(
|
||||
'vtgate',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
valueFontSize='50%',
|
||||
)
|
||||
.addTarget(prometheus.target('sum(up{%(regionSelector)s, %(vtgateSelector)s})' % $._config, instant=true, intervalFactor=1));
|
||||
|
||||
local vttabletup =
|
||||
singlestat.new(
|
||||
'vttablet',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
valueFontSize='50%',
|
||||
)
|
||||
.addTarget(prometheus.target('sum(up{%(regionSelector)s, %(vttabletSelector)s})' % $._config, instant=true, intervalFactor=1));
|
||||
|
||||
local vtworkerUp =
|
||||
singlestat.new(
|
||||
'vtworker',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
valueFontSize='50%',
|
||||
)
|
||||
.addTarget(prometheus.target('sum(up{%(regionSelector)s, %(vtworkerSelector)s})' % $._config, instant=true, intervalFactor=1));
|
||||
|
||||
local keyspaceCount =
|
||||
singlestat.new(
|
||||
'keyspace',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
valueFontSize='50%',
|
||||
)
|
||||
.addTarget(prometheus.target('count(count(vttablet_tablet_state{%(regionSelector)s, %(vttabletSelector)s}) by (keyspace))' % $._config, instant=true, intervalFactor=1));
|
||||
|
||||
local shardCount =
|
||||
singlestat.new(
|
||||
'shard',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
valueFontSize='50%',
|
||||
)
|
||||
.addTarget(prometheus.target('count(count(vttablet_tablet_state{%(regionSelector)s, %(vttabletSelector)s}) by (keyspace, shard))' % $._config, instant=true, intervalFactor=1));
|
||||
|
||||
|
||||
// Panel definitions
|
||||
local requests =
|
||||
graphPanel.new(
|
||||
'Requests',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='rps',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
)
|
||||
.addTarget(prometheus.target('sum(irate(vtgate_queries_processed_by_table{%(regionSelector)s, %(vtgateSelector)s}[$interval]))' % $._config, legendFormat='Requests',));
|
||||
|
||||
local requestsByKeyspace =
|
||||
graphPanel.new(
|
||||
'Requests (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='rps',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
)
|
||||
.addTarget(prometheus.target('sum(irate(vtgate_queries_processed_by_table{%(regionSelector)s, %(vtgateSelector)s}[$interval])) by (keyspace)' % $._config, legendFormat='{{keyspace}}',))
|
||||
+ datalinksDefinition;
|
||||
|
||||
local errorRate =
|
||||
graphPanel.new(
|
||||
'Error rate',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='percent',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
aliasColors={
|
||||
'Error rate': '#F2495C',
|
||||
},
|
||||
)
|
||||
.addTarget(prometheus.target('sum(rate(vtgate_api_error_counts{%(regionSelector)s, %(vtgateSelector)s}[$interval])) / sum(rate(vtgate_api_count{%(regionSelector)s, %(vtgateSelector)s}[$interval]))' % $._config, legendFormat='Error rate',));
|
||||
|
||||
local errorRateByKeyspace =
|
||||
graphPanel.new(
|
||||
'Error rate (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='percent',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
)
|
||||
.addTarget(prometheus.target('sum(rate(vtgate_api_error_counts{%(regionSelector)s, %(vtgateSelector)s}[$interval])) by (keyspace) / sum(rate(vtgate_api_count{%(regionSelector)s, %(vtgateSelector)s}[$interval])) by (keyspace)' % $._config, legendFormat='{{keyspace}}',))
|
||||
+ datalinksDefinition;
|
||||
|
||||
local duration =
|
||||
graphPanel.new(
|
||||
'Duration 99th quantile',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='s',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
aliasColors={
|
||||
Duration: '#5794F2',
|
||||
},
|
||||
)
|
||||
.addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(vtgate_api_bucket{%(regionSelector)s, %(vtgateSelector)s}[$interval])) by (le))' % $._config, legendFormat='Duration',));
|
||||
|
||||
local durationByKeyspace =
|
||||
graphPanel.new(
|
||||
'Duration 99th quantile (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='s',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
)
|
||||
.addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(vtgate_api_bucket{%(regionSelector)s, %(vtgateSelector)s}[$interval])) by (keyspace, le))' % $._config, legendFormat='{{keyspace}}',))
|
||||
+ datalinksDefinition;
|
||||
|
||||
local queryPoolAvailableConn =
|
||||
graphPanel.new(
|
||||
'Query pool: min connections available (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='short',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
)
|
||||
.addTarget(prometheus.target('min(vttablet_conn_pool_available{%(regionSelector)s, %(vttabletSelector)s}) by (keyspace)' % $._config, legendFormat='{{keyspace}}',));
|
||||
|
||||
local transactionPoolAvailableConn =
|
||||
graphPanel.new(
|
||||
'Transaction pool: min connections available (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='short',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
)
|
||||
.addTarget(prometheus.target('min(vttablet_transaction_pool_available{%(regionSelector)s, %(vttabletSelector)s}) by (keyspace)' % $._config, legendFormat='{{keyspace}}',));
|
||||
|
||||
local servingTabletPerShard =
|
||||
graphPanel.new(
|
||||
'Count of serving tablets (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='short',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
)
|
||||
.addTarget(prometheus.target('count(vttablet_tablet_server_state{%(regionSelector)s, %(vttabletSelector)s, name="SERVING"}) by (keyspace)' % $._config, legendFormat='{{keyspace}}',));
|
||||
|
||||
local slowQueries =
|
||||
graphPanel.new(
|
||||
'Slow queries > 0 (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='rps',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
nullPointMode='null as zero',
|
||||
)
|
||||
.addTarget(prometheus.target('sum(rate(mysql_global_status_slow_queries{%(regionSelector)s, %(mysqlSelector)s}[$interval])) by (keyspace) > 0' % $._config, legendFormat='{{keyspace}}',));
|
||||
|
||||
local replicationLag =
|
||||
graphPanel.new(
|
||||
'Replication lag > 0 (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='rps',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
)
|
||||
.addTarget(prometheus.target('sum(mysql_slave_status_seconds_behind_master{%(regionSelector)s, %(mysqlSelector)s}) by (keyspace) > 0' % $._config, legendFormat='{{keyspace}}',));
|
||||
|
||||
local semiSyncAvgWait =
|
||||
graphPanel.new(
|
||||
'Semi-sync replication avg wait time (by keyspace)',
|
||||
datasource='%(dataSource)s' % $._config,
|
||||
span=4,
|
||||
format='s',
|
||||
min=0,
|
||||
legend_show=true,
|
||||
legend_values=true,
|
||||
legend_alignAsTable=true,
|
||||
legend_min=true,
|
||||
legend_max=true,
|
||||
legend_current=true,
|
||||
legend_sort='current',
|
||||
legend_sortDesc=true,
|
||||
sort='decreasing',
|
||||
fill=0,
|
||||
)
|
||||
.addTarget(prometheus.target('sum(rate(mysql_global_status_rpl_semi_sync_master_tx_avg_wait_time{%(regionSelector)s, %(mysqlSelector)s}[$interval])) by (keyspace)' % $._config, legendFormat='{{keyspace}}',));
|
||||
|
||||
|
||||
// Row definitions
|
||||
local topLevel =
|
||||
row.new(
|
||||
title='Top level'
|
||||
);
|
||||
|
||||
local redRow =
|
||||
row.new(
|
||||
title='RED (Requests / Error rate / Duration)'
|
||||
);
|
||||
|
||||
local vttablet =
|
||||
row.new(
|
||||
title='vttablet'
|
||||
);
|
||||
|
||||
local mysql =
|
||||
row.new(
|
||||
title='MySQL'
|
||||
);
|
||||
|
||||
// Dashboard definition
|
||||
dashboard.new(
|
||||
title=currentDashboardMetadata.title,
|
||||
description=currentDashboardMetadata.description,
|
||||
uid=currentDashboardMetadata.uid,
|
||||
time_from='now-30m',
|
||||
tags=(currentDashboardMetadata.dashboardTags),
|
||||
editable=true,
|
||||
graphTooltip='shared_crosshair',
|
||||
)
|
||||
.addTemplate(intervalTemplate)
|
||||
.addTemplate(regionTemplate)
|
||||
|
||||
// "Top level" row
|
||||
.addPanel(topLevel, gridPos={ h: 1, w: 24, x: 0, y: 0 })
|
||||
.addPanel(querySuccessVTGate, gridPos={ h: 2, w: 4, x: 0, y: 1 })
|
||||
.addPanel(querySuccessVTTablet, gridPos={ h: 2, w: 4, x: 0, y: 3 })
|
||||
.addPanel(qpsVTGate, gridPos={ h: 2, w: 4, x: 4, y: 1 })
|
||||
.addPanel(qpsVTTablet, gridPos={ h: 2, w: 4, x: 4, y: 3 })
|
||||
.addPanel(qpsMySQL, gridPos={ h: 2, w: 4, x: 8, y: 1 })
|
||||
.addPanel(keyspaceCount, gridPos={ h: 2, w: 2, x: 8, y: 3 })
|
||||
.addPanel(shardCount, gridPos={ h: 2, w: 2, x: 10, y: 3 })
|
||||
.addPanel(vtgateUp, gridPos={ h: 2, w: 2, x: 12, y: 1 })
|
||||
.addPanel(vttabletup, gridPos={ h: 2, w: 2, x: 14, y: 1 })
|
||||
.addPanel(vtctldUp, gridPos={ h: 2, w: 2, x: 12, y: 3 })
|
||||
.addPanel(vtworkerUp, gridPos={ h: 2, w: 2, x: 14, y: 3 })
|
||||
|
||||
// RED row
|
||||
.addPanel(redRow, gridPos={ h: 1, w: 24, x: 0, y: 5 })
|
||||
.addPanel(requests, gridPos={ h: 6, w: 8, x: 0, y: 6 })
|
||||
.addPanel(errorRate, gridPos={ h: 6, w: 8, x: 8, y: 6 })
|
||||
.addPanel(duration, gridPos={ h: 6, w: 8, x: 16, y: 6 })
|
||||
.addPanel(requestsByKeyspace, gridPos={ h: 8, w: 8, x: 0, y: 12 })
|
||||
.addPanel(errorRateByKeyspace, gridPos={ h: 8, w: 8, x: 8, y: 12 })
|
||||
.addPanel(durationByKeyspace, gridPos={ h: 8, w: 8, x: 16, y: 12 })
|
||||
|
||||
// vttablet
|
||||
.addPanel(vttablet, gridPos={ h: 1, w: 24, x: 0, y: 13 })
|
||||
.addPanel(queryPoolAvailableConn, gridPos={ h: 6, w: 8, x: 0, y: 14 })
|
||||
.addPanel(transactionPoolAvailableConn, gridPos={ h: 6, w: 8, x: 8, y: 14 })
|
||||
.addPanel(servingTabletPerShard, gridPos={ h: 6, w: 8, x: 16, y: 14 })
|
||||
|
||||
// MySQL
|
||||
.addPanel(mysql, gridPos={ h: 1, w: 24, x: 0, y: 15 })
|
||||
.addPanel(slowQueries, gridPos={ h: 6, w: 8, x: 0, y: 16 })
|
||||
.addPanel(replicationLag, gridPos={ h: 6, w: 8, x: 8, y: 16 })
|
||||
.addPanel(semiSyncAvgWait, gridPos={ h: 6, w: 8, x: 16, y: 16 }),
|
||||
|
||||
},
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
// Aggregate every dashboard definition exposed by the mixin.
// Order matters: defaults.libsonnet post-processes (via `super`) the
// dashboards contributed by the files merged before it, so it stays last.
local clusterOverview = import 'cluster_overview.libsonnet';
local keyspaceOverview = import 'keyspace_overview.libsonnet';
local defaults = import 'defaults.libsonnet';

clusterOverview + keyspaceOverview + defaults
|
|
@ -0,0 +1,24 @@
|
|||
{
  local dashboards = super.grafanaDashboards,

  // Rebuild every generated dashboard so that each panel's tooltip shows
  // only the hovered series (tooltip.shared = false) instead of listing
  // every series under the crosshair.
  grafanaDashboards:: {
    [name]: dashboards[name] {
      rows: [
        r {
          panels: [
            p { tooltip+: { shared: false } }
            for p in super.panels
          ],
        }
        for r in super.rows
      ],
    }
    for name in std.objectFields(dashboards)
  },
}
|
|
@ -0,0 +1,476 @@
|
|||
local grafana = import 'grafonnet/grafana.libsonnet';
local annotation = grafana.annotation;
local dashboard = grafana.dashboard;
local graphPanel = grafana.graphPanel;
local prometheus = grafana.prometheus;
local heatmap = grafana.heatmapPanel;
local row = grafana.row;
local singlestat = grafana.singlestat;
local template = grafana.template;

// TODO: add description for each panel
// TODO: extract template to common resource file so that components can be recycled across dashboards

{
  grafanaDashboards+:: {
    'keyspace_overview.json':

      // Dashboard metadata (title/uid/tags/description come from the mixin config).
      local dashboardMetadata = $._config.grafanaDashboardMetadata;
      local currentDashboardMetadata = dashboardMetadata.keyspace_overview;

      // Template definitions.
      local regionTemplate =
        template.new(
          'region',
          '%(dataSource)s' % $._config,
          'label_values(vtctld_build_number{%(vtctldSelector)s}, region)' % $._config,
          label='Region',
          refresh='time',
          includeAll=false,
          sort=1,
        );

      local intervalTemplate =
        template.new(
          name='interval',
          label='Interval',
          datasource='$datasource',
          query='1m,5m,10m,30m,1h,6h,12h',
          current='5m',
          refresh=2,
          includeAll=false,
          sort=1
        ) + {
          skipUrlSync: false,
          // template.new builds a query template; override it into a plain
          // interval picker with '5m' preselected.
          type: 'interval',
          options: [
            { selected: interval == '5m', text: interval, value: interval }
            for interval in ['1m', '5m', '10m', '30m', '1h', '6h', '12h']
          ],
        };

      local keyspaceTemplate =
        template.new(
          'keyspace',
          '%(dataSource)s' % $._config,
          'label_values(vttablet_build_number{%(vttabletSelector)s, region="$region"}, keyspace)' % $._config,
          label='Keyspace',
          refresh='load',
          includeAll=false,
          sort=1,
        );

      local tableTemplate =
        template.new(
          'table',
          '%(dataSource)s' % $._config,
          'label_values(vtgate_queries_processed_by_table{%(vtgateSelector)s, region="$region", keyspace="$keyspace"}, table)' % $._config,
          label='Table',
          refresh='load',
          includeAll=true,
          sort=1,
          allValues='.*',
        );

      // Every graph panel on this dashboard shares the same datasource, span
      // and legend configuration; only title, unit, optional Y max and fill
      // differ. (grafonnet defaults: fill=1, max=null.)
      local keyspaceGraphPanel(title, format, max=null, fill=1) =
        graphPanel.new(
          title,
          datasource='%(dataSource)s' % $._config,
          span=4,
          format=format,
          min=0,
          max=max,
          fill=fill,
          legend_show=true,
          legend_values=true,
          legend_alignAsTable=true,
          legend_min=true,
          legend_max=true,
          legend_current=true,
          legend_sort='current',
          legend_sortDesc=true,
          sort='decreasing',
        );

      // Panel definitions.
      local vtgateQpsByTable =
        keyspaceGraphPanel('QPS (by table)', 'rps', fill=0)
        .addTarget(prometheus.target('sum(irate(vtgate_queries_processed_by_table{%(regionSelector)s, %(vtgateSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])) by (table)' % $._config, legendFormat='{{table}}',));

      local vtgateQpsByPlanType =
        keyspaceGraphPanel('QPS (by plan type)', 'rps', fill=0)
        .addTarget(prometheus.target('sum(irate(vtgate_queries_processed_by_table{%(regionSelector)s, %(vtgateSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])) by (plan)' % $._config, legendFormat='{{plan}}',));

      local vtgateQuerySuccessRate =
        keyspaceGraphPanel('Query success rate', 'percent', max=100)
        .addTarget(prometheus.target('100 - sum(rate(vtgate_api_error_counts{%(regionSelector)s, %(vtgateSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])) / (sum(rate(vtgate_api_error_counts{%(regionSelector)s, %(vtgateSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])) + sum(rate(vtgate_api_count{%(regionSelector)s, %(vtgateSelector)s, keyspace="$keyspace", table=~"$table"}[$interval]))) * 100' % $._config, legendFormat='Success rate',))
        .addTarget(prometheus.target('sum(rate(vtgate_api_error_counts{%(regionSelector)s, %(vtgateSelector)s, keyspace="$keyspace", table=~"$table"}[$interval]))' % $._config, legendFormat='Error count',));  // TODO: move error count to different Y

      local vttabletQpsByTable =
        keyspaceGraphPanel('QPS (by table)', 'rps', fill=0)
        .addTarget(prometheus.target('sum(irate(vttablet_query_counts{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])) by (table)' % $._config, legendFormat='{{table}}',));

      local vttabletQpsByPlanType =
        keyspaceGraphPanel('QPS (by plan type)', 'rps', fill=0)
        .addTarget(prometheus.target('sum(irate(vttablet_query_counts{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])) by (plan)' % $._config, legendFormat='{{plan}}',));

      local vttabletQuerySuccessRate =
        keyspaceGraphPanel('Query success rate', 'percent', max=100)
        .addTarget(prometheus.target('100 - (sum(rate(vttablet_errors{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])) / (sum(rate(vttablet_errors{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])) + sum(rate(vttablet_query_counts{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace", table=~"$table"}[$interval])))) * 100' % $._config, legendFormat='Success rate',))
        .addTarget(prometheus.target('sum(rate(vttablet_errors{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace", table=~"$table"}[$interval]))' % $._config, legendFormat='Error count',));  // TODO: move error count to different Y

      // Query/transaction timing panels. NOTE: the underlying histograms are
      // not labeled by table, so the $table filter does not apply here.
      local queryTimeAvg =
        keyspaceGraphPanel('Query time (avg)', 's', fill=0)
        .addTarget(prometheus.target('sum (rate(vttablet_queries_sum{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) /\nsum (rate(vttablet_queries_count{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval]))' % $._config, legendFormat='avg',));

      local queryTimeP50 =
        keyspaceGraphPanel('Query time (p50)', 's', fill=0)
        .addTarget(prometheus.target('histogram_quantile(0.50, sum(rate(vttablet_queries_bucket{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) by (le))' % $._config, legendFormat='p50',));

      local queryTimeP95 =
        keyspaceGraphPanel('Query time (p95)', 's', fill=0)
        .addTarget(prometheus.target('histogram_quantile(0.95, sum(rate(vttablet_queries_bucket{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) by (le))' % $._config, legendFormat='p95',));

      local queryTimeP999 =
        keyspaceGraphPanel('Query time (p999)', 's', fill=0)
        .addTarget(prometheus.target('histogram_quantile(0.999, sum(rate(vttablet_queries_bucket{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) by (le))' % $._config, legendFormat='p999',));

      local transTimeAvg =
        keyspaceGraphPanel('Transaction time (avg)', 's', fill=0)
        .addTarget(prometheus.target('sum (rate(vttablet_transactions_sum{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) /\nsum (rate(vttablet_transactions_count{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval]))' % $._config, legendFormat='avg',));

      local transTimeP50 =
        keyspaceGraphPanel('Transaction time (p50)', 's', fill=0)
        .addTarget(prometheus.target('histogram_quantile(0.50, sum(rate(vttablet_transactions_bucket{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) by (le))' % $._config, legendFormat='p50',));

      local transTimeP95 =
        keyspaceGraphPanel('Transaction time (p95)', 's', fill=0)
        .addTarget(prometheus.target('histogram_quantile(0.95, sum(rate(vttablet_transactions_bucket{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) by (le))' % $._config, legendFormat='p95',));

      local transTimeP999 =
        keyspaceGraphPanel('Transaction time (p999)', 's', fill=0)
        .addTarget(prometheus.target('histogram_quantile(0.999, sum(rate(vttablet_transactions_bucket{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) by (le))' % $._config, legendFormat='p999',));

      // Heatmap definitions.
      local queryTimeHeatmap =
        heatmap.new(
          'Query timings heatmap',
          datasource='%(dataSource)s' % $._config,
          dataFormat='tsbuckets',
          yAxis_format='s',
          color_cardColor='#FF9830',
          color_exponent=0.3,
          color_mode='opacity',
          yAxis_decimals=0,
        )
        .addTarget(prometheus.target('sum(rate(vttablet_queries_bucket{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) by (le)' % $._config, format='heatmap', legendFormat='{{le}}',));

      local transactionTimeHeatmap =
        heatmap.new(
          'Transaction timings heatmap',
          datasource='%(dataSource)s' % $._config,
          dataFormat='tsbuckets',
          yAxis_format='s',
          color_cardColor='#FF9830',
          color_exponent=0.3,
          color_mode='opacity',
          yAxis_decimals=0,
        )
        // Bug fix: this target previously queried vttablet_queries_bucket,
        // duplicating the query-timings heatmap instead of showing
        // transaction timings.
        .addTarget(prometheus.target('sum(rate(vttablet_transactions_bucket{%(regionSelector)s, %(vttabletSelector)s, keyspace="$keyspace"}[$interval])) by (le)' % $._config, format='heatmap', legendFormat='{{le}}',));

      // Row definitions.
      local vtgate =
        row.new(
          title='vtgate'
        );

      local vttablet =
        row.new(
          title='vttablet'
        );

      local queryTimings =
        row.new(
          title="Query/Transaction timings (table filter doesn't apply)"  // as we don't have timings by table (yet!)
        );

      // Dashboard definition.
      dashboard.new(
        title=currentDashboardMetadata.title,
        description=currentDashboardMetadata.description,
        uid=currentDashboardMetadata.uid,
        time_from='now-30m',
        tags=(currentDashboardMetadata.dashboardTags),
        editable=true,
        graphTooltip='shared_crosshair',
      )
      .addTemplate(intervalTemplate)
      .addTemplate(regionTemplate)
      .addTemplate(keyspaceTemplate)
      .addTemplate(tableTemplate)

      // vtgate row
      .addPanel(vtgate, gridPos={ h: 1, w: 24, x: 0, y: 0 })
      .addPanel(vtgateQpsByTable, gridPos={ h: 7, w: 8, x: 0, y: 1 })
      .addPanel(vtgateQpsByPlanType, gridPos={ h: 7, w: 8, x: 8, y: 1 })
      .addPanel(vtgateQuerySuccessRate, gridPos={ h: 7, w: 8, x: 16, y: 1 })

      // vttablet row
      .addPanel(vttablet, gridPos={ h: 1, w: 24, x: 0, y: 8 })
      .addPanel(vttabletQpsByTable, gridPos={ h: 7, w: 8, x: 0, y: 9 })
      .addPanel(vttabletQpsByPlanType, gridPos={ h: 7, w: 8, x: 8, y: 9 })
      .addPanel(vttabletQuerySuccessRate, gridPos={ h: 7, w: 8, x: 16, y: 9 })

      // queryTimings row
      .addPanel(queryTimings, gridPos={ h: 1, w: 24, x: 0, y: 15 })
      .addPanel(queryTimeAvg, gridPos={ h: 6, w: 6, x: 0, y: 16 })
      .addPanel(queryTimeP50, gridPos={ h: 6, w: 6, x: 6, y: 16 })
      .addPanel(queryTimeP95, gridPos={ h: 6, w: 6, x: 12, y: 16 })
      .addPanel(queryTimeP999, gridPos={ h: 6, w: 6, x: 18, y: 16 })
      .addPanel(transTimeAvg, gridPos={ h: 6, w: 6, x: 0, y: 23 })
      .addPanel(transTimeP50, gridPos={ h: 6, w: 6, x: 6, y: 23 })
      .addPanel(transTimeP95, gridPos={ h: 6, w: 6, x: 12, y: 23 })
      .addPanel(transTimeP999, gridPos={ h: 6, w: 6, x: 18, y: 23 })
      .addPanel(queryTimeHeatmap, gridPos={ h: 8, w: 12, x: 0, y: 29 })
      .addPanel(transactionTimeHeatmap, gridPos={ h: 8, w: 12, x: 12, y: 29 }),

  },
}
|
|
@ -0,0 +1,24 @@
|
|||
{
|
||||
"dependencies": [
|
||||
{
|
||||
"name": "grafonnet",
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/grafonnet-lib",
|
||||
"subdir": "grafonnet"
|
||||
}
|
||||
},
|
||||
"version": "master"
|
||||
},
|
||||
{
|
||||
"name": "grafana-builder",
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/jsonnet-libs",
|
||||
"subdir": "grafana-builder"
|
||||
}
|
||||
},
|
||||
"version": "master"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
{}
|
|
@ -0,0 +1,6 @@
|
|||
// Materialize every dashboard exposed by the mixin as a top-level field so
// the jsonnet CLI can render each one individually.
local dashboards = (import '../mixin.libsonnet').grafanaDashboards;

{
  [dashboardName]: dashboards[dashboardName]
  for dashboardName in std.objectFields(dashboards)
}
|
|
@ -0,0 +1 @@
|
|||
{}
|
|
@ -0,0 +1,4 @@
|
|||
// Entry point of the vitess-mixin: merge the shared config with the alerts,
// dashboards and recording rules contributed by each sub-package.
local config = import 'config.libsonnet';
local alerts = import 'alerts/alerts.libsonnet';
local dashboards = import 'dashboards/dashboards.libsonnet';
local rules = import 'rules/rules.libsonnet';

config + alerts + dashboards + rules
|
|
@ -0,0 +1 @@
|
|||
{}
|
|
@ -0,0 +1 @@
|
|||
{}
|
Загрузка…
Ссылка в новой задаче