revamped rowcache. Drop support for anything that requires nested
lookups. Use consolidated IN clauses for tables that have simple
primary keys.
Sugu Sougoumarane 2013-09-02 17:17:23 -07:00
Parent b1205c61e8
Commit ceb2052c44
14 changed files: 690 additions and 761 deletions

View file

@ -219,8 +219,7 @@ func FormatNode(buf *TrackedBuffer, node *Node) {
buf.Fprintf("%s", node.Value)
}
case VALUE_ARG:
buf.bindLocations = append(buf.bindLocations, BindLocation{buf.Len(), len(node.Value)})
buf.Fprintf("%s", node.Value)
buf.WriteArg(string(node.Value[1:]))
case STRING:
s := sqltypes.MakeString(node.Value)
s.EncodeSql(buf)
@ -286,7 +285,9 @@ func NewTrackedBuffer(nodeFormatter func(buf *TrackedBuffer, node *Node)) *Track
return buf
}
// Mimics fmt.Fprintf, but limited to Value & Node
// Fprintf mimics fmt.Fprintf, but limited to Node(%v), Node.Value(%s) and string(%s).
// It also allows a %a for a value argument, in which case it adds tracking info for
// future substitutions.
func (buf *TrackedBuffer) Fprintf(format string, values ...interface{}) {
end := len(format)
fieldnum := 0
@ -304,11 +305,19 @@ func (buf *TrackedBuffer) Fprintf(format string, values ...interface{}) {
i++ // '%'
switch format[i] {
case 's':
nodeValue := values[fieldnum].([]byte)
buf.Write(nodeValue)
switch v := values[fieldnum].(type) {
case []byte:
buf.Write(v)
case string:
buf.WriteString(v)
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
case 'v':
node := values[fieldnum].(*Node)
buf.nodeFormatter(buf, node)
case 'a':
buf.WriteArg(values[fieldnum].(string))
default:
panic("unexpected")
}
@ -317,6 +326,14 @@ func (buf *TrackedBuffer) Fprintf(format string, values ...interface{}) {
}
}
// WriteArg writes a value argument into the buffer. arg should not contain
// the ':' prefix. It also adds tracking info for future substitutions.
func (buf *TrackedBuffer) WriteArg(arg string) {
buf.bindLocations = append(buf.bindLocations, BindLocation{buf.Len(), len(arg) + 1})
buf.WriteString(":")
buf.WriteString(arg)
}
func (buf *TrackedBuffer) ParsedQuery() *ParsedQuery {
return &ParsedQuery{buf.String(), buf.bindLocations}
}
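To make the new %a verb and WriteArg concrete, here is a minimal standalone sketch of the bind-location tracking idea. It is not the real sqlparser package (type and method names here are made up for illustration, and the real Fprintf also handles %v nodes and %s values); it only shows the mechanics of recording where a ":arg" lands in the generated query.

package main

import (
	"bytes"
	"fmt"
)

// Simplified stand-in for the tracked buffer: every value argument written as
// ":name" also records its offset and length so it can be substituted later.
type bindLocation struct{ offset, length int }

type trackedBuffer struct {
	bytes.Buffer
	bindLocations []bindLocation
}

// writeArg mirrors the WriteArg logic above: record the location, then write ":arg".
func (buf *trackedBuffer) writeArg(arg string) {
	buf.bindLocations = append(buf.bindLocations, bindLocation{buf.Len(), len(arg) + 1})
	buf.WriteString(":")
	buf.WriteString(arg)
}

func main() {
	buf := &trackedBuffer{}
	buf.WriteString("select name, id from d where name in (")
	buf.writeArg("*") // "*" stands for the whole list of values, as in GenerateInOuterQuery
	buf.WriteString(")")
	fmt.Println(buf.String())      // select name, id from d where name in (:*)
	fmt.Println(buf.bindLocations) // [{38 2}]
}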

View file

@ -18,7 +18,8 @@ type PlanType int
const (
PLAN_PASS_SELECT PlanType = iota
PLAN_PASS_DML
PLAN_SELECT_PK
PLAN_PK_EQUAL
PLAN_PK_IN
PLAN_SELECT_SUBQUERY
PLAN_DML_PK
PLAN_DML_SUBQUERY
@ -33,7 +34,8 @@ const (
var planName = []string{
"PASS_SELECT",
"PASS_DML",
"SELECT_PK",
"PK_EQUAL",
"PK_IN",
"SELECT_SUBQUERY",
"DML_PK",
"DML_SUBQUERY",
@ -57,7 +59,7 @@ func PlanByName(s string) (pt PlanType, ok bool) {
}
func (pt PlanType) IsSelect() bool {
return pt == PLAN_PASS_SELECT || pt == PLAN_SELECT_PK || pt == PLAN_SELECT_SUBQUERY
return pt == PLAN_PASS_SELECT || pt == PLAN_PK_EQUAL || pt == PLAN_PK_IN || pt == PLAN_SELECT_SUBQUERY
}
func (pt PlanType) MarshalJSON() ([]byte, error) {
@ -79,6 +81,7 @@ const (
REASON_NOINDEX_MATCH
REASON_TABLE_NOINDEX
REASON_PK_CHANGE
REASON_COMPOSITE_PK
REASON_HAS_HINTS
)
@ -96,6 +99,7 @@ var reasonName = []string{
"NOINDEX_MATCH",
"TABLE_NOINDEX",
"PK_CHANGE",
"COMPOSITE_PK",
"HAS_HINTS",
}
@ -117,13 +121,15 @@ type ExecPlan struct {
Reason ReasonType
TableName string
// Query to fetch field info
// FieldQuery is used to fetch field info
FieldQuery *ParsedQuery
// PLAN_PASS_*
// FullQuery will be set for all plans.
FullQuery *ParsedQuery
// For anything that's not PLAN_PASS_*
// For PK plans, only OuterQuery is set.
// For SUBQUERY plans, Subquery is also set.
// IndexUsed is set only for PLAN_SELECT_SUBQUERY
OuterQuery *ParsedQuery
Subquery *ParsedQuery
IndexUsed string
@ -132,10 +138,9 @@ type ExecPlan struct {
// For PLAN_INSERT_SUBQUERY, columns to be inserted
ColumnNumbers []int
// PLAN_*_PK
// For select, update & delete: where clause
// For insert: values clause
// For PLAN_INSERT_SUBQUERY: Location of pk values in subquery
// PLAN_PK_EQUAL, PLAN_DML_PK: where clause values
// PLAN_PK_IN: IN clause values
// PLAN_INSERT_PK: values clause
PKValues []interface{}
// For update: set clause
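As a quick illustration of the PKValues shapes described in the comment above, drawn from the test expectations later in this commit (a hypothetical, runnable snippet, not part of the diff):

package main

import "fmt"

func main() {
	// PLAN_PK_EQUAL on the composite PK (eid, id): one value per PK column.
	pkEqual := []interface{}{1, 2}
	// PLAN_PK_IN on a single-column PK: one value per entry of the IN list.
	pkIn := []interface{}{"foo", "bar"}
	fmt.Println(pkEqual, pkIn) // [1 2] [foo bar]
}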
@ -287,28 +292,45 @@ func (node *Node) execAnalyzeSelect(getTable TableGetter) (plan *ExecPlan) {
}
// order
orders := node.At(SELECT_ORDER_OFFSET).execAnalyzeOrder()
if orders == nil {
if node.At(SELECT_ORDER_OFFSET).Len() != 0 {
plan.Reason = REASON_ORDER
return plan
}
if len(orders) == 0 { // Only do pk analysis if there's no order by clause
if pkValues := getPKValues(conditions, tableInfo.Indexes[0]); pkValues != nil {
plan.PlanId = PLAN_SELECT_PK
plan.OuterQuery = node.GenerateSelectOuterQuery(tableInfo)
// This check should never fail because we only cache tables with primary keys.
if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name != "PRIMARY" {
panic("unexpected")
}
// Attempt PK match only if there's no limit clause
if node.At(SELECT_LIMIT_OFFSET).Len() == 0 {
planId, pkValues := getSelectPKValues(conditions, tableInfo.Indexes[0])
switch planId {
case PLAN_PK_EQUAL:
plan.PlanId = PLAN_PK_EQUAL
plan.OuterQuery = node.GenerateEqualOuterQuery(tableInfo)
plan.PKValues = pkValues
return plan
case PLAN_PK_IN:
plan.PlanId = PLAN_PK_IN
plan.OuterQuery = node.GenerateInOuterQuery(tableInfo)
plan.PKValues = pkValues
return plan
}
}
if len(tableInfo.Indexes[0].Columns) != 1 {
plan.Reason = REASON_COMPOSITE_PK
return plan
}
// TODO: Analyze hints to improve plan.
if hasHints {
plan.Reason = REASON_HAS_HINTS
return plan
}
plan.IndexUsed = getIndexMatch(conditions, orders, tableInfo.Indexes)
plan.IndexUsed = getIndexMatch(conditions, tableInfo.Indexes)
if plan.IndexUsed == "" {
plan.Reason = REASON_NOINDEX_MATCH
return plan
@ -319,7 +341,7 @@ func (node *Node) execAnalyzeSelect(getTable TableGetter) (plan *ExecPlan) {
}
// TODO: We can further optimize. Change this to pass-through if select list matches all columns in index.
plan.PlanId = PLAN_SELECT_SUBQUERY
plan.OuterQuery = node.GenerateSelectOuterQuery(tableInfo)
plan.OuterQuery = node.GenerateInOuterQuery(tableInfo)
plan.Subquery = node.GenerateSelectSubquery(tableInfo, plan.IndexUsed)
return plan
}
@ -588,7 +610,14 @@ func (node *Node) execAnalyzeBoolean() (conditions []*Node) {
n.PushTwo(left, right)
return []*Node{n}
case IN:
return node.execAnalyzeIN()
left := node.At(0).execAnalyzeID()
right := node.At(1).execAnalyzeSimpleINList()
if left == nil || right == nil {
return nil
}
n := NewParseNode(node.Type, node.Value)
n.PushTwo(left, right)
return []*Node{n}
case BETWEEN:
left := node.At(0).execAnalyzeID()
right1 := node.At(1).execAnalyzeValue()
@ -601,57 +630,6 @@ func (node *Node) execAnalyzeBoolean() (conditions []*Node) {
return nil
}
func (node *Node) execAnalyzeIN() []*Node {
// simple
if node.At(0).Type != '(' { // IN->ID
left := node.At(0).execAnalyzeID()
right := node.At(1).execAnalyzeSimpleINList() // IN->'('
if left == nil || right == nil {
return nil
}
n := NewParseNode(node.Type, node.Value)
n.PushTwo(left, right)
return []*Node{n}
}
// composite
idList := node.At(0).At(0) // IN->'('->NODE_LIST
conditions := make([]*Node, idList.Len())
for i := 0; i < idList.Len(); i++ {
left := idList.At(i).execAnalyzeID()
right := node.execBuildINList(i)
if left == nil || right == nil {
return nil
}
n := NewParseNode(node.Type, node.Value)
n.PushTwo(left, right)
conditions[i] = n
}
return conditions
}
func (node *Node) execBuildINList(index int) *Node {
valuesList := node.At(1).At(0) // IN->'('->NODE_LIST
newList := NewSimpleParseNode(NODE_LIST, "node_list")
for i := 0; i < valuesList.Len(); i++ {
if valuesList.At(i).Type != '(' { // NODE_LIST->'('
return nil
}
innerList := valuesList.At(i).At(0) // NODE_LIST->'('->NODE_LIST
if innerList.Type != NODE_LIST || index >= innerList.Len() {
return nil
}
innerValue := innerList.At(index).execAnalyzeValue()
if innerValue == nil {
return nil
}
newList.Push(innerValue)
}
INList := NewSimpleParseNode('(', "(")
INList.Push(newList)
return INList
}
func (node *Node) execAnalyzeSimpleINList() *Node {
list := node.At(0) // '('->NODE_LIST
for i := 0; i < list.Len(); i++ {
@ -720,37 +698,6 @@ func (node *Node) execAnalyzeUpdateExpressions(pkIndex *schema.Index) (pkValues
return pkValues, true
}
//-----------------------------------------------
// Order
func (node *Node) execAnalyzeOrder() (orders []*Node) {
orders = make([]*Node, 0, 8)
if node.Len() == 0 {
return orders
}
orderList := node.At(0)
for i := 0; i < orderList.Len(); i++ {
if order := orderList.At(i).execAnalyzeOrderExpression(); order != nil {
orders = append(orders, order)
} else {
return nil
}
}
return orders
}
func (node *Node) execAnalyzeOrderExpression() (order *Node) {
switch node.Type {
case ID:
return node
case '.':
return node.At(1).execAnalyzeOrderExpression()
case '(', ASC, DESC:
return node.At(0).execAnalyzeOrderExpression()
}
return nil
}
//-----------------------------------------------
// Insert
@ -861,11 +808,25 @@ func NewIndexScoreList(indexes []*schema.Index) []*IndexScore {
return scoreList
}
func getPKValues(conditions []*Node, pkIndex *schema.Index) (pkValues []interface{}) {
if pkIndex.Name != "PRIMARY" {
log.Warningf("Table has no primary key")
return nil
func getSelectPKValues(conditions []*Node, pkIndex *schema.Index) (planId PlanType, pkValues []interface{}) {
pkValues = getPKValues(conditions, pkIndex)
if pkValues == nil {
return PLAN_PASS_SELECT, nil
}
for _, pkValue := range pkValues {
inList, ok := pkValue.([]interface{})
if !ok {
continue
}
if len(pkValues) == 1 {
return PLAN_PK_IN, inList
}
return PLAN_PASS_SELECT, nil
}
return PLAN_PK_EQUAL, pkValues
}
func getPKValues(conditions []*Node, pkIndex *schema.Index) (pkValues []interface{}) {
pkIndexScore := NewIndexScore(pkIndex)
pkValues = make([]interface{}, len(pkIndexScore.ColumnMatch))
for _, condition := range conditions {
@ -889,18 +850,13 @@ func getPKValues(conditions []*Node, pkIndex *schema.Index) (pkValues []interfac
return nil
}
func getIndexMatch(conditions []*Node, orders []*Node, indexes []*schema.Index) string {
func getIndexMatch(conditions []*Node, indexes []*schema.Index) string {
indexScores := NewIndexScoreList(indexes)
for _, condition := range conditions {
for _, index := range indexScores {
index.FindMatch(string(condition.At(0).Value))
}
}
for _, order := range orders {
for _, index := range indexScores {
index.FindMatch(string(order.Value))
}
}
highScore := NO_MATCH
highScorer := -1
for i, index := range indexScores {
@ -993,7 +949,7 @@ func (node *Node) GenerateDefaultQuery(tableInfo *schema.Table) *ParsedQuery {
return buf.ParsedQuery()
}
func (node *Node) GenerateSelectOuterQuery(tableInfo *schema.Table) *ParsedQuery {
func (node *Node) GenerateEqualOuterQuery(tableInfo *schema.Table) *ParsedQuery {
buf := NewTrackedBuffer(nil)
fmt.Fprintf(buf, "select ")
writeColumnList(buf, tableInfo.Columns)
@ -1002,12 +958,25 @@ func (node *Node) GenerateSelectOuterQuery(tableInfo *schema.Table) *ParsedQuery
return buf.ParsedQuery()
}
func (node *Node) GenerateInOuterQuery(tableInfo *schema.Table) *ParsedQuery {
buf := NewTrackedBuffer(nil)
fmt.Fprintf(buf, "select ")
writeColumnList(buf, tableInfo.Columns)
// We assume there is one and only one PK column.
// A '*' argument name means all variables of the list.
buf.Fprintf(" from %v where %s in (%a)", node.At(SELECT_FROM_OFFSET), tableInfo.Indexes[0].Columns[0], "*")
return buf.ParsedQuery()
}
func (node *Node) GenerateInsertOuterQuery() *ParsedQuery {
buf := NewTrackedBuffer(nil)
buf.Fprintf("insert %vinto %v%v values ",
node.At(INSERT_COMMENT_OFFSET), node.At(INSERT_TABLE_OFFSET), node.At(INSERT_COLUMN_LIST_OFFSET))
writeArg(buf, "_rowValues")
buf.Fprintf("%v", node.At(INSERT_ON_DUP_OFFSET))
buf.Fprintf("insert %vinto %v%v values %a%v",
node.At(INSERT_COMMENT_OFFSET),
node.At(INSERT_TABLE_OFFSET),
node.At(INSERT_COLUMN_LIST_OFFSET),
"_rowValues",
node.At(INSERT_ON_DUP_OFFSET),
)
return buf.ParsedQuery()
}
@ -1031,20 +1000,10 @@ func generatePKWhere(buf *TrackedBuffer, pkIndex *schema.Index) {
if i != 0 {
buf.WriteString(" and ")
}
buf.WriteString(pkIndex.Columns[i])
buf.WriteString(" = ")
writeArg(buf, strconv.FormatInt(int64(i), 10))
buf.Fprintf("%s = %a", pkIndex.Columns[i], strconv.FormatInt(int64(i), 10))
}
}
func writeArg(buf *TrackedBuffer, arg string) {
start := buf.Len()
buf.WriteString(":")
buf.WriteString(arg)
end := buf.Len()
buf.bindLocations = append(buf.bindLocations, BindLocation{start, end - start})
}
func (node *Node) GenerateSelectSubquery(tableInfo *schema.Table, index string) *ParsedQuery {
hint := NewSimpleParseNode(USE, "use")
hint.Push(NewSimpleParseNode(COLUMN_LIST, ""))

View file

@ -68,8 +68,13 @@ func initTables() {
d := schema.NewTable("d")
d.AddColumn("name", "varbinary(10)", SQLZERO, "")
d.AddColumn("id", "int", SQLZERO, "")
d.AddColumn("foo", "varchar(10)", SQLZERO, "")
d.AddColumn("bar", "varchar(10)", SQLZERO, "")
dcolumns := []string{"name"}
d.Indexes = append(d.Indexes, &schema.Index{"PRIMARY", []string{"name"}, []uint64{1}, dcolumns})
d.Indexes = append(d.Indexes, &schema.Index{"d_id", []string{"id"}, []uint64{1}, d.Indexes[0].Columns})
d.Indexes = append(d.Indexes, &schema.Index{"d_bar_never", []string{"bar", "foo"}, []uint64{2, 1}, d.Indexes[0].Columns})
d.Indexes = append(d.Indexes, &schema.Index{"d_bar", []string{"bar", "foo"}, []uint64{3, 1}, d.Indexes[0].Columns})
d.PKColumns = append(d.PKColumns, 0)
d.CacheType = schema.CACHE_RW
schem["d"] = d
@ -78,7 +83,7 @@ func initTables() {
e.AddColumn("eid", "int", SQLZERO, "")
e.AddColumn("id", "int", SQLZERO, "")
ecolumns := []string{"eid", "id"}
e.Indexes = append(a.Indexes, &schema.Index{"PRIMARY", []string{"eid", "id"}, []uint64{1, 1}, ecolumns})
e.Indexes = append(e.Indexes, &schema.Index{"PRIMARY", []string{"eid", "id"}, []uint64{1, 1}, ecolumns})
e.PKColumns = append(a.PKColumns, 0, 1)
e.CacheType = schema.CACHE_W
schem["e"] = e

View file

@ -42,6 +42,8 @@ func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]interface{}, listV
return nil, NewParserError("Index out of range: %d", index)
}
supplied = listVariables[index]
} else if varName[0] == '*' {
supplied = listVariables
} else {
var ok bool
supplied, ok = bindVariables[varName]
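A standalone sketch of what the new '*' case in GenerateQuery means at substitution time: a ":*" argument stands for the entire set of list variables, so an "in (:*)" clause expands to every supplied value. The function name and naive quoting below are illustrative only; the real code encodes values through sqltypes.

package main

import (
	"fmt"
	"strings"
)

// expandListArg illustrates the "*" case: the whole list of list variables is
// substituted for the single ":*" placeholder. Quoting here is naive on purpose.
func expandListArg(query string, listVars []string) string {
	quoted := make([]string, len(listVars))
	for i, v := range listVars {
		quoted[i] = "'" + v + "'"
	}
	return strings.Replace(query, ":*", strings.Join(quoted, ", "), 1)
}

func main() {
	q := "select name, id, foo, bar from d where name in (:*)"
	fmt.Println(expandListArg(q, []string{"foo", "bar"}))
	// select name, id, foo, bar from d where name in ('foo', 'bar')
}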

View file

@ -298,13 +298,13 @@ select c.eid from a as c
}
# (eid)
select (eid) from a as c
select (eid) from a
{
"PlanId": "PASS_SELECT",
"Reason": "WHERE",
"TableName": "a",
"FieldQuery": "select eid from a as c where 1 != 1",
"FullQuery": "select eid from a as c limit :_vtMaxResultSize",
"FieldQuery": "select eid from a where 1 != 1",
"FullQuery": "select eid from a limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "",
@ -337,17 +337,89 @@ select eid from a for update
"SetValue": null
}
# simple where
select * from a where eid=1
# composite pk supplied values
select * from a where eid = 1 and id in (1, 2)
{
"PlanId": "PASS_SELECT",
"Reason": "COMPOSITE_PK",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id in (1, 2) limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# composite pk subquery
select * from a where name = 'foo'
{
"PlanId": "PASS_SELECT",
"Reason": "COMPOSITE_PK",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where name = 'foo' limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# subquery
select * from d where id = 1
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (a_name) where eid = 1 limit :_vtMaxResultSize",
"IndexUsed": "a_name",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where id = 1 limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": "select name from d use index (d_id) where id = 1 limit :_vtMaxResultSize",
"IndexUsed": "d_id",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# subquery with limit
select * from d where id = 1 limit 1
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where id = 1 limit 1",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": "select name from d use index (d_id) where id = 1 limit 1",
"IndexUsed": "d_id",
"ColumnNumbers": [
0,
1,
@ -409,14 +481,14 @@ select * from a where eid = id
"SetValue": null
}
# and
select * from a where eid=1 and foo='b'
# inequality on pk columns
select * from d where name between 'foo' and 'bar'
{
"PlanId": "PASS_SELECT",
"Reason": "PKINDEX",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and foo = 'b' limit :_vtMaxResultSize",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where name between 'foo' and 'bar' limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "PRIMARY",
@ -434,23 +506,26 @@ select * from a where eid=1 and foo='b'
}
# (condition)
select * from a where (eid=1)
select * from a where (eid=1) and (id=2)
{
"PlanId": "SELECT_SUBQUERY",
"PlanId": "PK_EQUAL",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where (eid = 1) limit :_vtMaxResultSize",
"FullQuery": "select * from a where (eid = 1) and (id = 2) limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (a_name) where (eid = 1) limit :_vtMaxResultSize",
"IndexUsed": "a_name",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"PKValues": [
1,
2
],
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
@ -460,7 +535,7 @@ select * from a where (eid=1)
# pk match
select * from a where eid=1 and id=1
{
"PlanId": "SELECT_PK",
"PlanId": "PK_EQUAL",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
@ -487,17 +562,19 @@ select * from a where eid=1 and id=1
# string pk match
select * from d where name='foo'
{
"PlanId": "SELECT_PK",
"PlanId": "PK_EQUAL",
"Reason": "DEFAULT",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where name = 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select name, id from d where name = :0",
"OuterQuery": "select name, id, foo, bar from d where name = :0",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1
1,
2,
3
],
"PKValues": [
"Zm9v"
@ -508,15 +585,39 @@ select * from d where name='foo'
"SetValue": null
}
# pk IN
select * from a where eid=1 and id in (1, 2)
# string pk match with limit
select * from d where name='foo' limit 1
{
"PlanId": "SELECT_PK",
"PlanId": "PASS_SELECT",
"Reason": "PKINDEX",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where name = 'foo' limit 1",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# pk IN
select * from d where name in ('foo', 'bar')
{
"PlanId": "PK_IN",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id in (1, 2) limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where name in ('foo', 'bar') limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
@ -526,11 +627,8 @@ select * from a where eid=1 and id in (1, 2)
3
],
"PKValues": [
1,
[
1,
2
]
"Zm9v",
"YmFy"
],
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
@ -539,14 +637,14 @@ select * from a where eid=1 and id in (1, 2)
}
# pk IN parameter list
select * from a where eid=1 and id in (:a, :b)
select * from d where name in (:a,:b)
{
"PlanId": "SELECT_PK",
"PlanId": "PK_IN",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id in (:a, :b) limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where name in (:a, :b) limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
@ -556,11 +654,8 @@ select * from a where eid=1 and id in (:a, :b)
3
],
"PKValues": [
1,
[
":a",
":b"
]
":a",
":b"
],
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
@ -569,14 +664,14 @@ select * from a where eid=1 and id in (:a, :b)
}
# pk IN, single value list
select * from a where eid=1 and id in (1)
select * from d where name in ('foo')
{
"PlanId": "SELECT_PK",
"PlanId": "PK_IN",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id in (1) limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where name in ('foo') limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
@ -586,10 +681,33 @@ select * from a where eid=1 and id in (1)
3
],
"PKValues": [
"Zm9v"
],
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# pk IN, single value parameter list
select * from d where name in (:a)
{
"PlanId": "PK_IN",
"Reason": "DEFAULT",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where name in (:a) limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
[
1
]
2,
3
],
"PKValues": [
":a"
],
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
@ -648,124 +766,14 @@ select * from a where eid in (1, 2) and id in (1, 2)
# pk as tuple
select * from a where (eid, id) in ((1, 1), (2, 2))
{
"PlanId": "SELECT_PK",
"Reason": "DEFAULT",
"PlanId": "PASS_SELECT",
"Reason": "WHERE",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where (eid, id) in ((1, 1), (2, 2)) limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": [
[
1,
2
],
[
1,
2
]
],
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# pk IN, single value parameter list
select * from a where eid=1 and id in (:a)
{
"PlanId": "SELECT_PK",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id in (:a) limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": [
1,
[
":a"
]
],
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# inequality on pk columns
select * from a where eid=1 and id>1
{
"PlanId": "PASS_SELECT",
"Reason": "PKINDEX",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id \u003e 1 limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "PRIMARY",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# non-pk match
select * from a where eid=1 and name='foo'
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and name = 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (a_name) where eid = 1 and name = 'foo' limit :_vtMaxResultSize",
"IndexUsed": "a_name",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# non-pk match with limit
select * from a where eid=1 and name='foo' limit 10
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and name = 'foo' limit 10",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (a_name) where eid = 1 and name = 'foo' limit 10",
"IndexUsed": "a_name",
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
@ -780,17 +788,17 @@ select * from a where eid=1 and name='foo' limit 10
}
# no index match
select * from a where foo='bar'
select * from d where foo='bar'
{
"PlanId":"PASS_SELECT",
"Reason":"NOINDEX_MATCH",
"TableName":"a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery":"select * from a where foo = 'bar' limit :_vtMaxResultSize",
"TableName":"d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery":"select * from d where foo = 'bar' limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery":null,
"IndexUsed":"",
"ColumnNumbers":[
"ColumnNumbers": [
0,
1,
2,
@ -803,48 +811,26 @@ select * from a where foo='bar'
"SetValue":null
}
# covering index in where through data column
select * from a where name = 'foo' and id = 1
# table alias
select * from d as c where c.name='foo'
{
"PlanId": "SELECT_SUBQUERY",
"PlanId": "PK_EQUAL",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where name = 'foo' and id = 1 limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (b_name) where name = 'foo' and id = 1 limit :_vtMaxResultSize",
"IndexUsed": "b_name",
"TableName": "d",
"FieldQuery": "select * from d as c where 1 != 1",
"FullQuery": "select * from d as c where c.name = 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d as c where name = :0",
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# table alias & subquery
select * from a as c where c.eid=1 and name='foo'
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a as c where 1 != 1",
"FullQuery": "select * from a as c where c.eid = 1 and name = 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a as c where eid = :0 and id = :1",
"Subquery": "select eid, id from a as c use index (a_name) where c.eid = 1 and name = 'foo' limit :_vtMaxResultSize",
"IndexUsed": "a_name",
"ColumnNumbers": [
0,
1,
2,
3
"PKValues": [
"Zm9v"
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
@ -852,40 +838,16 @@ select * from a as c where c.eid=1 and name='foo'
}
# non-pk inequality match
select * from a where eid=1 and name<'foo'
select * from d where id<0
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and name \u003c 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (a_name) where eid = 1 and name \u003c 'foo' limit :_vtMaxResultSize",
"IndexUsed": "a_name",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# non-pk IN
select * from a where eid in (1, 2) and name='foo'
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid in (1, 2) and name = 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (a_name) where eid in (1, 2) and name = 'foo' limit :_vtMaxResultSize",
"IndexUsed": "a_name",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where id \u003c 0 limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": "select name from d use index (d_id) where id \u003c 0 limit :_vtMaxResultSize",
"IndexUsed": "d_id",
"ColumnNumbers": [
0,
1,
@ -900,13 +862,13 @@ select * from a where eid in (1, 2) and name='foo'
}
# non-pk IN non-value operand
select * from a where eid in (1, id) and name='foo'
select * from d where name in ('foo', id)
{
"PlanId": "PASS_SELECT",
"Reason": "WHERE",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid in (1, id) and name = 'foo' limit :_vtMaxResultSize",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where name in ('foo', id) limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "",
@ -924,40 +886,16 @@ select * from a where eid in (1, id) and name='foo'
}
# non-pk between
select * from a where eid between 1 and 2 and name='foo'
select * from d where id between 1 and 2
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid between 1 and 2 and name = 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (a_name) where eid between 1 and 2 and name = 'foo' limit :_vtMaxResultSize",
"IndexUsed": "a_name",
"ColumnNumbers": [
0,
1,
2,
3
],
"PKValues": null,
"SecondaryPKValues": null,
"SubqueryPKColumns": null,
"SetKey": "",
"SetValue": null
}
# order by (pk)
select * from a where eid=1 and id=1 order by name
{
"PlanId": "PASS_SELECT",
"Reason": "PKINDEX",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 and id = 1 order by name asc limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "PRIMARY",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where id between 1 and 2 limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": "select name from d use index (d_id) where id between 1 and 2 limit :_vtMaxResultSize",
"IndexUsed": "d_id",
"ColumnNumbers": [
0,
1,
@ -972,16 +910,16 @@ select * from a where eid=1 and id=1 order by name
}
# order by
select * from a where eid=1 order by name
select * from a where eid=1 and id=1 order by name
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"PlanId": "PASS_SELECT",
"Reason": "ORDER",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where eid = 1 order by name asc limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (a_name) where eid = 1 order by name asc limit :_vtMaxResultSize",
"IndexUsed": "a_name",
"FullQuery": "select * from a where eid = 1 and id = 1 order by name asc limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "",
"ColumnNumbers": [
0,
1,
@ -996,16 +934,16 @@ select * from a where eid=1 order by name
}
# cardinality override
select * from a where name='foo'
select * from d where bar = 'foo'
{
"PlanId": "SELECT_SUBQUERY",
"Reason": "DEFAULT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a where name = 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select eid, id, name, foo from a where eid = :0 and id = :1",
"Subquery": "select eid, id from a use index (b_name) where name = 'foo' limit :_vtMaxResultSize",
"IndexUsed": "b_name",
"TableName": "d",
"FieldQuery": "select * from d where 1 != 1",
"FullQuery": "select * from d where bar = 'foo' limit :_vtMaxResultSize",
"OuterQuery": "select name, id, foo, bar from d where name in (:*)",
"Subquery": "select name from d use index (d_bar) where bar = 'foo' limit :_vtMaxResultSize",
"IndexUsed": "d_bar",
"ColumnNumbers": [
0,
1,
@ -1020,13 +958,13 @@ select * from a where name='foo'
}
# index override
select * from a use index(a_name) where eid = 1
select * from d use index(d_bar_never) where bar = 'foo'
{
"PlanId": "PASS_SELECT",
"Reason": "HAS_HINTS",
"TableName": "a",
"FieldQuery": "select * from a use index (a_name) where 1 != 1",
"FullQuery": "select * from a use index (a_name) where eid = 1 limit :_vtMaxResultSize",
"TableName": "d",
"FieldQuery": "select * from d use index (d_bar_never) where 1 != 1",
"FullQuery": "select * from d use index (d_bar_never) where bar = 'foo' limit :_vtMaxResultSize",
"OuterQuery": null,
"Subquery": null,
"IndexUsed": "",

View file

@ -16,7 +16,7 @@ import (
)
// buildValueList builds the set of PK reference rows used to drive the next query.
// It is uses the PK values supplied in the original query and bind variables.
// It uses the PK values supplied in the original query and bind variables.
// The generated reference rows are validated for type match against the PK of the table.
func buildValueList(tableInfo *TableInfo, pkValues []interface{}, bindVars map[string]interface{}) [][]sqltypes.Value {
length := -1
@ -48,6 +48,21 @@ func buildValueList(tableInfo *TableInfo, pkValues []interface{}, bindVars map[s
return valueList
}
// buildINValueList builds the set of PK reference rows used to drive the next query
// using an IN clause. This works only for tables with no composite PK columns.
// The generated reference rows are validated for type match against the PK of the table.
func buildINValueList(tableInfo *TableInfo, pkValues []interface{}, bindVars map[string]interface{}) [][]sqltypes.Value {
if len(tableInfo.PKColumns) != 1 {
panic("unexpected")
}
valueList := make([][]sqltypes.Value, len(pkValues))
for i, pkValue := range pkValues {
valueList[i] = make([]sqltypes.Value, 1)
valueList[i][0] = resolveValue(tableInfo.GetPKColumn(0), pkValue, bindVars)
}
return valueList
}
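For clarity, a simplified, runnable sketch of what buildINValueList produces, using plain values instead of sqltypes.Value and skipping type validation; the function name below is made up for illustration. Each IN-list entry becomes a single-column reference row, with ":name" entries resolved from the bind variables.

package main

import "fmt"

// buildINValueListSketch mimics the shape of buildINValueList with plain values:
// each IN-list entry becomes a single-column reference row, and ":name" entries
// are resolved from the bind variables (the real code also validates types
// against the PK column).
func buildINValueListSketch(pkValues []interface{}, bindVars map[string]interface{}) [][]interface{} {
	valueList := make([][]interface{}, len(pkValues))
	for i, v := range pkValues {
		if s, ok := v.(string); ok && len(s) > 1 && s[0] == ':' {
			v = bindVars[s[1:]]
		}
		valueList[i] = []interface{}{v}
	}
	return valueList
}

func main() {
	rows := buildINValueListSketch(
		[]interface{}{":a", ":b"},
		map[string]interface{}{"a": "foo", "b": "bar"},
	)
	fmt.Println(rows) // [[foo] [bar]]
}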
// buildSecondaryList is used for handling ON DUPLICATE DMLs, or those that change the PK.
func buildSecondaryList(tableInfo *TableInfo, pkList [][]sqltypes.Value, secondaryList []interface{}, bindVars map[string]interface{}) [][]sqltypes.Value {
if secondaryList == nil {

View file

@ -95,7 +95,7 @@ func TestFilterByPlan(t *testing.T) {
qr2 := NewQueryRule("rule 2", "r2", QR_FAIL_QUERY)
qr2.AddPlanCond(sqlparser.PLAN_PASS_SELECT)
qr2.AddPlanCond(sqlparser.PLAN_SELECT_PK)
qr2.AddPlanCond(sqlparser.PLAN_PK_EQUAL)
qr2.AddBindVarCond("a", true, false, QR_NOOP, nil)
qr3 := NewQueryRule("rule 3", "r3", QR_FAIL_QUERY)
@ -128,7 +128,7 @@ func TestFilterByPlan(t *testing.T) {
t.Errorf("want r2, got %s", qrs1.rules[0].Name)
}
qrs1 = qrs.filterByPlan("insert", sqlparser.PLAN_SELECT_PK)
qrs1 = qrs.filterByPlan("insert", sqlparser.PLAN_PK_EQUAL)
if l := len(qrs1.rules); l != 1 {
t.Errorf("want 1, received %d", l)
}

View file

@ -281,8 +281,10 @@ func (qe *QueryEngine) Execute(logStats *sqlQueryStats, query *proto.Query) (rep
panic(NewTabletError(FAIL, "Disallowed outside transaction"))
}
reply = qe.execSelect(logStats, plan)
case sqlparser.PLAN_SELECT_PK:
reply = qe.execPK(logStats, plan)
case sqlparser.PLAN_PK_EQUAL:
reply = qe.execPKEqual(logStats, plan)
case sqlparser.PLAN_PK_IN:
reply = qe.execPKIN(logStats, plan)
case sqlparser.PLAN_SELECT_SUBQUERY:
reply = qe.execSubquery(logStats, plan)
case sqlparser.PLAN_SET:
@ -410,23 +412,71 @@ func (qe *QueryEngine) execDDL(logStats *sqlQueryStats, ddl string) *mproto.Quer
//-----------------------------------------------
// Execution
func (qe *QueryEngine) execPK(logStats *sqlQueryStats, plan *CompiledPlan) (result *mproto.QueryResult) {
func (qe *QueryEngine) execPKEqual(logStats *sqlQueryStats, plan *CompiledPlan) (result *mproto.QueryResult) {
pkRows := buildValueList(plan.TableInfo, plan.PKValues, plan.BindVars)
return qe.fetchPKRows(logStats, plan, pkRows)
if len(pkRows) != 1 || plan.Fields == nil {
panic("unexpected")
}
row := qe.fetchOne(logStats, plan, pkRows[0])
result = &mproto.QueryResult{}
result.Fields = plan.Fields
if row == nil {
return
}
result.Rows = make([][]sqltypes.Value, 1)
result.Rows[0] = applyFilter(plan.ColumnNumbers, row)
result.RowsAffected = 1
return
}
func (qe *QueryEngine) fetchOne(logStats *sqlQueryStats, plan *CompiledPlan, pk []sqltypes.Value) (row []sqltypes.Value) {
logStats.QuerySources |= QUERY_SOURCE_ROWCACHE
tableInfo := plan.TableInfo
keys := make([]string, 1)
keys[0] = buildKey(pk)
rcresults := tableInfo.Cache.Get(keys)
rcresult := rcresults[keys[0]]
if rcresult.Row != nil {
if qe.mustVerify() {
qe.spotCheck(logStats, plan, rcresult, pk)
}
logStats.CacheHits++
tableInfo.hits.Add(1)
return rcresult.Row
}
resultFromdb := qe.qFetch(logStats, plan.OuterQuery, plan.BindVars, pk)
if len(resultFromdb.Rows) == 0 {
logStats.CacheAbsent++
tableInfo.absent.Add(1)
return nil
}
row = resultFromdb.Rows[0]
tableInfo.Cache.Set(keys[0], row, rcresult.Cas)
logStats.CacheMisses++
tableInfo.misses.Add(1)
return row
}
func (qe *QueryEngine) execPKIN(logStats *sqlQueryStats, plan *CompiledPlan) (result *mproto.QueryResult) {
pkRows := buildINValueList(plan.TableInfo, plan.PKValues, plan.BindVars)
return qe.fetchMulti(logStats, plan, pkRows)
}
func (qe *QueryEngine) execSubquery(logStats *sqlQueryStats, plan *CompiledPlan) (result *mproto.QueryResult) {
innerResult := qe.qFetch(logStats, plan, plan.Subquery, nil)
return qe.fetchPKRows(logStats, plan, innerResult.Rows)
innerResult := qe.qFetch(logStats, plan.Subquery, plan.BindVars, nil)
return qe.fetchMulti(logStats, plan, innerResult.Rows)
}
func (qe *QueryEngine) fetchPKRows(logStats *sqlQueryStats, plan *CompiledPlan, pkRows [][]sqltypes.Value) (result *mproto.QueryResult) {
func (qe *QueryEngine) fetchMulti(logStats *sqlQueryStats, plan *CompiledPlan, pkRows [][]sqltypes.Value) (result *mproto.QueryResult) {
result = &mproto.QueryResult{}
tableInfo := plan.TableInfo
if plan.Fields == nil {
if len(pkRows) == 0 {
return
}
if len(pkRows[0]) != 1 || plan.Fields == nil {
panic("unexpected")
}
tableInfo := plan.TableInfo
keys := make([]string, len(pkRows))
for i, pk := range pkRows {
keys[i] = buildKey(pk)
@ -435,6 +485,7 @@ func (qe *QueryEngine) fetchPKRows(logStats *sqlQueryStats, plan *CompiledPlan,
result.Fields = plan.Fields
rows := make([][]sqltypes.Value, 0, len(pkRows))
missingRows := make([]sqltypes.Value, 0, len(pkRows))
var hits, absent, misses int64
for i, pk := range pkRows {
rcresult := rcresults[keys[i]]
@ -445,24 +496,17 @@ func (qe *QueryEngine) fetchPKRows(logStats *sqlQueryStats, plan *CompiledPlan,
rows = append(rows, applyFilter(plan.ColumnNumbers, rcresult.Row))
hits++
} else {
resultFromdb := qe.qFetch(logStats, plan, plan.OuterQuery, pk)
if len(resultFromdb.Rows) == 0 {
absent++
continue
}
row := resultFromdb.Rows[0]
if qe.mustVerify() {
spotCheckCount.Add(1)
pkRow := applyFilter(tableInfo.PKColumns, row)
newKey := buildKey(pkRow)
if newKey != keys[i] {
log.Warningf("Key mismatch for query %s. computed: %s, fetched: %s", plan.FullQuery.Query, keys[i], newKey)
errorStats.Add("Mismatch", 1)
}
}
tableInfo.Cache.Set(keys[i], row, rcresult.Cas)
missingRows = append(missingRows, pk[0])
}
}
if len(missingRows) != 0 {
resultFromdb := qe.qFetch(logStats, plan.OuterQuery, plan.BindVars, missingRows)
misses = int64(len(resultFromdb.Rows))
absent = int64(len(pkRows)) - hits - misses
for _, row := range resultFromdb.Rows {
rows = append(rows, applyFilter(plan.ColumnNumbers, row))
misses++
key := buildKey(applyFilter(plan.TableInfo.PKColumns, row))
tableInfo.Cache.Set(key, row, rcresults[key].Cas)
}
}
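The consolidation strategy in fetchMulti can be summarized with a small standalone sketch (maps and strings stand in for the rowcache, sqltypes and qFetch, and the names are illustrative): probe the cache for every PK, collect the misses, issue a single IN-clause fetch for them, and backfill the cache from the result.

package main

import "fmt"

// fetchMultiSketch shows the consolidation pattern: probe the cache for every
// PK, remember the misses, fetch all of them with one query, and backfill the
// cache from the rows that came back.
func fetchMultiSketch(cache map[string][]string, keys []string,
	dbFetch func(missing []string) map[string][]string) [][]string {

	rows := make([][]string, 0, len(keys))
	missing := make([]string, 0, len(keys))
	for _, k := range keys {
		if row, ok := cache[k]; ok {
			rows = append(rows, row) // cache hit
		} else {
			missing = append(missing, k) // defer to the consolidated fetch
		}
	}
	if len(missing) != 0 {
		// One "select ... where pk in (...)" query instead of one per miss.
		for k, row := range dbFetch(missing) {
			rows = append(rows, row)
			cache[k] = row // backfill the rowcache
		}
	}
	return rows
}

func main() {
	cache := map[string][]string{"foo": {"foo", "1", "a", "b"}}
	db := func(missing []string) map[string][]string {
		out := map[string][]string{}
		for _, k := range missing {
			out[k] = []string{k, "0", "a", "b"}
		}
		return out
	}
	fmt.Println(fetchMultiSketch(cache, []string{"foo", "bar"}, db))
	// [[foo 1 a b] [bar 0 a b]]
}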
@ -486,7 +530,7 @@ func (qe *QueryEngine) mustVerify() bool {
func (qe *QueryEngine) spotCheck(logStats *sqlQueryStats, plan *CompiledPlan, rcresult RCResult, pk []sqltypes.Value) {
spotCheckCount.Add(1)
resultFromdb := qe.qFetch(logStats, plan, plan.OuterQuery, pk)
resultFromdb := qe.qFetch(logStats, plan.OuterQuery, plan.BindVars, pk)
var dbrow []sqltypes.Value
if len(resultFromdb.Rows) != 0 {
dbrow = resultFromdb.Rows[0]
@ -534,7 +578,7 @@ func (qe *QueryEngine) execDirect(logStats *sqlQueryStats, plan *CompiledPlan, c
// reuses the result. If the plan is missing field info, it sends the query to mysql requesting full info.
func (qe *QueryEngine) execSelect(logStats *sqlQueryStats, plan *CompiledPlan) (result *mproto.QueryResult) {
if plan.Fields != nil {
result = qe.qFetch(logStats, plan, plan.FullQuery, nil)
result = qe.qFetch(logStats, plan.FullQuery, plan.BindVars, nil)
result.Fields = plan.Fields
return
}
@ -666,8 +710,8 @@ func (qe *QueryEngine) execSet(logStats *sqlQueryStats, conn PoolConnection, pla
return &mproto.QueryResult{}
}
func (qe *QueryEngine) qFetch(logStats *sqlQueryStats, plan *CompiledPlan, parsed_query *sqlparser.ParsedQuery, listVars []sqltypes.Value) (result *mproto.QueryResult) {
sql := qe.generateFinalSql(parsed_query, plan.BindVars, listVars, nil)
func (qe *QueryEngine) qFetch(logStats *sqlQueryStats, parsed_query *sqlparser.ParsedQuery, bindVars map[string]interface{}, listVars []sqltypes.Value) (result *mproto.QueryResult) {
sql := qe.generateFinalSql(parsed_query, bindVars, listVars, nil)
q, ok := qe.consolidator.Create(string(sql))
if ok {
defer q.Broadcast()

View file

@ -1,198 +0,0 @@
from cases_framework import Case, MultiCase
cases = [
"alter table vtocc_cached comment 'new'",
Case(doc="SELECT_PK (null key)",
query_plan="SELECT_PK",
sql="select * from vtocc_cached where eid = 2 and bid = %(bid)s",
bindings={"bid": None},
result=[],
rewritten=[
"select * from vtocc_cached where 1 != 1",
"select eid, bid, name, foo from vtocc_cached where eid = 2 and bid = null"],
cache_absent=1),
Case(doc="SELECT_PK (empty cache)",
query_plan="SELECT_PK",
sql="select * from vtocc_cached where eid = 2 and bid = 'foo'",
result=[(2, 'foo', 'abcd2', 'efgh')],
rewritten=[
"select * from vtocc_cached where 1 != 1",
"select eid, bid, name, foo from vtocc_cached where eid = 2 and bid = 'foo'"],
cache_misses=1),
# (2.foo) is in cache
Case(doc="SELECT_PK, use cache",
query_plan="SELECT_PK",
sql="select bid, eid, name, foo from vtocc_cached where eid = 2 and bid = 'foo'",
result=[('foo', 2, 'abcd2', 'efgh')],
rewritten=["select bid, eid, name, foo from vtocc_cached where 1 != 1"],
cache_hits=1),
# (2.foo) is in cache
Case(doc="SELECT_PK, absent",
query_plan="SELECT_PK",
sql="select bid, eid, name, foo from vtocc_cached where eid = 3 and bid = 'foo'",
result=[],
rewritten=[
"select bid, eid, name, foo from vtocc_cached where 1 != 1",
"select eid, bid, name, foo from vtocc_cached where eid = 3 and bid = 'foo'"],
cache_absent=1),
# (2.foo)
Case(doc="SELECT_SUBQUERY (2.foo)",
sql="select * from vtocc_cached where eid = 2 and name = 'abcd2'",
result=[(2L, 'bar', 'abcd2', 'efgh'), (2L, 'foo', 'abcd2', 'efgh')],
rewritten=[
"select * from vtocc_cached where 1 != 1",
"select eid, bid from vtocc_cached use index (aname) where eid = 2 and name = 'abcd2' limit 10001",
"select eid, bid, name, foo from vtocc_cached where eid = 2 and bid = 'bar'"],
cache_hits=1,
cache_misses=1),
# (2.bar, 2.foo)
Case(doc="SELECT_SUBQUERY (2.foo, 2.bar)",
sql="select * from vtocc_cached where eid = 2 and name = 'abcd2'",
result=[(2L, 'bar', 'abcd2', 'efgh'), (2L, 'foo', 'abcd2', 'efgh')],
rewritten="select eid, bid from vtocc_cached use index (aname) where eid = 2 and name = 'abcd2' limit 10001",
cache_hits=2),
# (2.bar, 2.foo)
Case(doc="out of order columns list",
sql="select bid, eid from vtocc_cached where eid = 1 and bid = 'foo'",
result=[('foo', 1)],
rewritten=[
"select bid, eid from vtocc_cached where 1 != 1",
"select eid, bid, name, foo from vtocc_cached where eid = 1 and bid = 'foo'"],
cache_misses=1),
# (1.foo, 2.bar, 2.foo)
Case(doc="out of order columns list, use cache",
sql="select bid, eid from vtocc_cached where eid = 1 and bid = 'foo'",
result=[('foo', 1)],
rewritten=[],
cache_hits=1),
# (1.foo, 2.bar, 2.foo)
MultiCase(
"PASS_SELECT", # it currently doesn't cache
['select * from vtocc_cached',
Case(query_plan="PASS_SELECT",
sql="select eid, bid, name, foo from vtocc_cached",
rewritten=[
"select eid, bid, name, foo from vtocc_cached where 1 != 1",
"select eid, bid, name, foo from vtocc_cached limit 10001"],
cache_hits=0,
cache_misses=0,
cache_absent=0,
cache_invalidations=0)]),
# (1.foo, 2.bar, 2.foo)
Case(doc="verify 1.bar is not cached",
sql="select bid, eid from vtocc_cached where eid = 1 and bid = 'bar'",
result=[('bar', 1)],
rewritten=[
"select bid, eid from vtocc_cached where 1 != 1",
"select eid, bid, name, foo from vtocc_cached where eid = 1 and bid = 'bar'"],
cache_misses=1),
# (1.foo, 1.bar, 2.foo, 2.bar)
MultiCase(
"update",
['begin',
"update vtocc_cached set foo='fghi' where bid = 'bar'",
Case(sql="commit",
cache_invalidations=2),
Case(sql="select * from vtocc_cached where eid = 2 and name = 'abcd2'",
result=[(2L, 'bar', 'abcd2', 'fghi'), (2L, 'foo', 'abcd2', 'efgh')],
rewritten=[
"select eid, bid from vtocc_cached use index (aname) where eid = 2 and name = 'abcd2' limit 10001",
"select eid, bid, name, foo from vtocc_cached where eid = 2 and bid = 'bar'"],
cache_hits=1,
cache_misses=1)]),
# (1.foo, 2.foo, 2.bar)
MultiCase(
"Verify cache",
["select sleep(0.2) from dual",
Case(sql="select * from vtocc_cached where eid = 2 and name = 'abcd2'",
result=[(2L, 'bar', 'abcd2', 'fghi'), (2L, 'foo', 'abcd2', 'efgh')],
rewritten="select eid, bid from vtocc_cached use index (aname) where eid = 2 and name = 'abcd2' limit 10001",
cache_hits=2)]),
Case(doc="this will use the cache",
sql="select * from vtocc_cached where eid = 2 and name = 'abcd2'",
result=[(2L, 'bar', 'abcd2', 'fghi'), (2L, 'foo', 'abcd2', 'efgh')],
rewritten="select eid, bid from vtocc_cached use index (aname) where eid = 2 and name = 'abcd2' limit 10001",
cache_hits=2),
# (1.foo, 2.bar, 2.foo)
MultiCase(
"this will not invalidate the cache",
['begin',
"update vtocc_cached set foo='fghi' where bid = 'bar'",
'rollback',
Case(sql="select * from vtocc_cached where eid = 2 and name = 'abcd2'",
result=[(2L, 'bar', 'abcd2', 'fghi'), (2L, 'foo', 'abcd2', 'efgh')],
rewritten="select eid, bid from vtocc_cached use index (aname) where eid = 2 and name = 'abcd2' limit 10001",
cache_hits=2)]),
# (1.foo, 2.bar, 2.foo)
MultiCase(
"delete",
['begin',
"delete from vtocc_cached where eid = 2 and bid = 'bar'",
Case(sql="commit",
cache_invalidations=1),
Case(sql="select * from vtocc_cached where eid = 2 and name = 'abcd2'",
result=[(2L, 'foo', 'abcd2', 'efgh')],
rewritten="select eid, bid from vtocc_cached use index (aname) where eid = 2 and name = 'abcd2' limit 10001",
cache_hits=1),
"begin",
"insert into vtocc_cached(eid, bid, name, foo) values (2, 'bar', 'abcd2', 'efgh')",
Case(sql="commit",
cache_invalidations=1)]),
# (1.foo, 2.foo)
MultiCase(
"insert on dup key",
['begin',
"insert into vtocc_cached(eid, bid, name, foo) values (2, 'foo', 'abcd2', 'efgh') on duplicate key update foo='fghi'",
Case(sql="commit",
cache_invalidations=1),
Case(sql="select * from vtocc_cached where eid = 2 and name = 'abcd2'",
result=[(2L, 'bar', 'abcd2', 'efgh'), (2L, 'foo', 'abcd2', 'fghi')],
rewritten=[
"select eid, bid from vtocc_cached use index (aname) where eid = 2 and name = 'abcd2' limit 10001",
"select eid, bid, name, foo from vtocc_cached where eid = 2 and bid = 'bar'",
"select eid, bid, name, foo from vtocc_cached where eid = 2 and bid = 'foo'"],
cache_misses=2)]),
# (1.foo)
Case(doc="Verify 1.foo is in cache",
sql="select * from vtocc_cached where eid = 1 and bid = 'foo'",
result=[(1, 'foo', 'abcd1', 'efgh')],
rewritten=["select * from vtocc_cached where 1 != 1"],
cache_hits=1),
# (1.foo) is in cache
# DDL
"alter table vtocc_cached comment 'test'",
Case(doc="Verify cache is empty after DDL",
sql="select * from vtocc_cached where eid = 1 and bid = 'foo'",
result=[(1, 'foo', 'abcd1', 'efgh')],
rewritten=[
"select * from vtocc_cached where 1 != 1",
"select eid, bid, name, foo from vtocc_cached where eid = 1 and bid = 'foo'"],
cache_misses=1),
# (1.foo)
Case(doc="Verify row is cached",
sql="select * from vtocc_cached where eid = 1 and bid = 'foo'",
result=[(1, 'foo', 'abcd1', 'efgh')],
rewritten=[],
cache_hits=1),
# (1.foo)
]

View file

@ -0,0 +1,169 @@
from cases_framework import Case, MultiCase
# Covers cases for vtocc_cached2
class Case2(Case):
def __init__(self, **kwargs):
Case.__init__(self, cache_table='vtocc_cached2', **kwargs)
cases = [
"alter table vtocc_cached2 comment 'new'",
Case2(doc="PK_EQUAL (null key)",
query_plan="PK_EQUAL",
sql="select * from vtocc_cached2 where eid = 2 and bid = %(bid)s",
bindings={"bid": None},
result=[],
rewritten=[
"select * from vtocc_cached2 where 1 != 1",
"select eid, bid, name, foo from vtocc_cached2 where eid = 2 and bid = null"],
cache_absent=1),
Case2(doc="PK_EQUAL (empty cache)",
query_plan="PK_EQUAL",
sql="select * from vtocc_cached2 where eid = 2 and bid = 'foo'",
result=[(2, 'foo', 'abcd2', 'efgh')],
rewritten=[
"select * from vtocc_cached2 where 1 != 1",
"select eid, bid, name, foo from vtocc_cached2 where eid = 2 and bid = 'foo'"],
cache_misses=1),
# (2.foo) is in cache
Case2(doc="PK_EQUAL, use cache",
query_plan="PK_EQUAL",
sql="select bid, eid, name, foo from vtocc_cached2 where eid = 2 and bid = 'foo'",
result=[('foo', 2, 'abcd2', 'efgh')],
rewritten=["select bid, eid, name, foo from vtocc_cached2 where 1 != 1"],
cache_hits=1),
# (2.foo) is in cache
Case2(doc="PK_EQUAL, absent",
query_plan="PK_EQUAL",
sql="select bid, eid, name, foo from vtocc_cached2 where eid = 3 and bid = 'foo'",
result=[],
rewritten=[
"select bid, eid, name, foo from vtocc_cached2 where 1 != 1",
"select eid, bid, name, foo from vtocc_cached2 where eid = 3 and bid = 'foo'"],
cache_absent=1),
# (2.foo)
Case2(doc="out of order columns list",
sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'foo'",
result=[('foo', 1)],
rewritten=[
"select bid, eid from vtocc_cached2 where 1 != 1",
"select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid = 'foo'"],
cache_misses=1),
# (1.foo, 2.foo)
Case2(doc="out of order columns list, use cache",
sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'foo'",
result=[('foo', 1)],
rewritten=[],
cache_hits=1),
# (1.foo, 2.foo)
MultiCase(
"PASS_SELECT", # it currently doesn't cache
["select * from vtocc_cached2 where eid = 1 and bid in('foo', 'bar')",
Case2(query_plan="PASS_SELECT",
sql="select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid in('foo', 'bar')",
rewritten=[
"select eid, bid, name, foo from vtocc_cached2 where 1 != 1",
"select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid in ('foo', 'bar') limit 10001"],
cache_hits=0,
cache_misses=0,
cache_absent=0,
cache_invalidations=0)]),
# (1.foo, 2.foo)
Case2(doc="verify 1.bar is not in cache",
sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'bar'",
result=[('bar', 1)],
rewritten=[
"select bid, eid from vtocc_cached2 where 1 != 1",
"select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid = 'bar'"],
cache_misses=1),
# (1.foo, 1.bar, 2.foo)
MultiCase(
"update",
['begin',
"update vtocc_cached2 set foo='fghi' where bid = 'bar'",
Case2(sql="commit",
cache_invalidations=2),
Case2(sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
result=[(1L, 'bar', 'abcd1', 'fghi')],
rewritten=[
"select * from vtocc_cached2 where 1 != 1",
"select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid = 'bar'"],
cache_misses=1)]),
# (1.foo, 1.bar, 2.foo)
MultiCase(
"this will not invalidate the cache",
['begin',
"update vtocc_cached2 set foo='fghi' where bid = 'bar'",
'rollback',
Case2(sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
result=[(1L, 'bar', 'abcd1', 'fghi')],
rewritten=[],
cache_hits=1)]),
# (1.foo, 1.bar, 2.foo)
MultiCase(
"delete",
['begin',
"delete from vtocc_cached2 where eid = 1 and bid = 'bar'",
Case2(sql="commit",
cache_invalidations=1),
Case2(sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
result=[],
rewritten="select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid = 'bar'",
cache_absent=1),
"begin",
"insert into vtocc_cached2(eid, bid, name, foo) values (1, 'bar', 'abcd1', 'efgh')",
Case2(sql="commit",
cache_invalidations=1)]),
# (1.foo, 2.foo)
MultiCase(
"insert on dup key",
['begin',
"insert into vtocc_cached2(eid, bid, name, foo) values (2, 'foo', 'abcd2', 'efgh') on duplicate key update foo='fghi'",
Case2(sql="commit",
cache_invalidations=1),
Case2(sql="select * from vtocc_cached2 where eid = 2 and bid = 'foo'",
result=[(2L, 'foo', 'abcd2', 'fghi')],
rewritten=["select eid, bid, name, foo from vtocc_cached2 where eid = 2 and bid = 'foo'"],
cache_misses=1)]),
# (1.foo)
Case2(doc="Verify 1.foo is in cache",
sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
result=[(1, 'foo', 'abcd1', 'efgh')],
rewritten=["select * from vtocc_cached2 where 1 != 1"],
cache_hits=1),
# (1.foo) is in cache
# DDL
"alter table vtocc_cached2 comment 'test'",
Case2(doc="Verify cache is empty after DDL",
sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
result=[(1, 'foo', 'abcd1', 'efgh')],
rewritten=[
"select * from vtocc_cached2 where 1 != 1",
"select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid = 'foo'"],
cache_misses=1),
# (1.foo)
Case2(doc="Verify row is cached",
sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
result=[(1, 'foo', 'abcd1', 'efgh')],
rewritten=[],
cache_hits=1),
# (1.foo)
]

View file

@ -2,7 +2,8 @@ from vtdb import dbexceptions
from vtdb import vt_occ2
import framework
import cache_cases
import cache_cases1
import cache_cases2
class TestWillNotBeCached(framework.TestCase):
@ -28,7 +29,7 @@ class TestWillNotBeCached(framework.TestCase):
class TestCache(framework.TestCase):
def test_num_str(self):
try:
self.env.execute("select bid, eid from vtocc_cached where eid = 1 and bid = 1")
self.env.execute("select bid, eid from vtocc_cached2 where eid = 1 and bid = 1")
except dbexceptions.DatabaseError as e:
self.assertContains(str(e), "error: Type")
else:
@ -37,68 +38,68 @@ class TestCache(framework.TestCase):
def test_uncache(self):
try:
# Verify row cache is working
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached"]
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached2"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached2"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
# disable
self.env.execute("alter table vtocc_cached comment 'vtocc_nocache'")
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
self.env.execute("alter table vtocc_cached2 comment 'vtocc_nocache'")
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
try:
tstart = self.env.table_stats()["vtocc_cached"]
tstart = self.env.table_stats()["vtocc_cached2"]
except KeyError:
pass
else:
self.fail("Did not receive exception")
finally:
self.env.execute("alter table vtocc_cached comment ''")
self.env.execute("alter table vtocc_cached2 comment ''")
# Verify row cache is working again
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached"]
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached2"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached2"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
def test_rename(self):
try:
# Verify row cache is working
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached"]
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
# rename
self.env.execute("alter table vtocc_cached rename to vtocc_cached2")
try:
tstart = self.env.table_stats()["vtocc_cached"]
except KeyError:
pass
else:
self.fail("Did not receive exception")
# Verify row cache is working
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached2"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached2"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
# rename
self.env.execute("alter table vtocc_cached2 rename to vtocc_renamed")
try:
tstart = self.env.table_stats()["vtocc_cached2"]
except KeyError:
pass
else:
self.fail("Did not receive exception")
# Verify row cache is working
self.env.execute("select * from vtocc_renamed where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_renamed"]
self.env.execute("select * from vtocc_renamed where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_renamed"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
finally:
# alter table so there's no hash collision when renamed
self.env.execute("alter table vtocc_cached2 comment 'renamed'")
self.env.execute("rename table vtocc_cached2 to vtocc_cached")
self.env.execute("alter table vtocc_renamed comment 'renamed'")
self.env.execute("rename table vtocc_renamed to vtocc_cached2")
# Verify row cache is working again
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached"]
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached2"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached2"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
def test_nopass(self):
try:
self.env.conn.begin()
self.env.execute("insert into vtocc_cached(eid, bid, name, foo) values(unix_time(), 'foo', 'bar', 'bar')")
self.env.execute("insert into vtocc_cached2(eid, bid, name, foo) values(unix_time(), 'foo', 'bar', 'bar')")
except dbexceptions.DatabaseError as e:
self.assertContains(str(e), "error: DML too complex")
else:
@ -168,51 +169,33 @@ class TestCache(framework.TestCase):
def test_nodata(self):
# This should not fail
cu = self.env.execute("select * from vtocc_cached where eid = 6 and name = 'bar'")
cu = self.env.execute("select * from vtocc_cached2 where eid = 6 and name = 'bar'")
self.assertEqual(cu.rowcount, 0)
def test_bigdata(self):
self.env.conn.begin()
rowcount = 10
# uncomment this line to do the actual big data test
# rowcount = 10000
for i in range(rowcount):
self.env.execute("insert into vtocc_cached(eid, bid, name, foo) values(5, %(bid)s, 'bar', 'bar')", {"bid": "%d" % i})
self.env.conn.commit()
# prime the cache
cu = self.env.execute("select * from vtocc_cached where eid = 5 and name = 'bar' limit 2000")
cu = self.env.execute("select * from vtocc_cached where eid = 5 and name = 'bar' limit 4000")
cu = self.env.execute("select * from vtocc_cached where eid = 5 and name = 'bar' limit 6000")
cu = self.env.execute("select * from vtocc_cached where eid = 5 and name = 'bar' limit 8000")
cu = self.env.execute("select * from vtocc_cached where eid = 5 and name = 'bar'")
tstart = self.env.table_stats()["vtocc_cached"]
cu = self.env.execute("select * from vtocc_cached where eid = 5 and name = 'bar'")
tend = self.env.table_stats()["vtocc_cached"]
self.assertEqual(tstart["Hits"]+rowcount, tend["Hits"])
self.env.conn.begin()
self.env.execute("delete from vtocc_cached where eid = 5")
self.env.conn.commit()
def test_types(self):
self._verify_mismatch("select * from vtocc_cached where eid = 'str' and bid = 'str'")
self._verify_mismatch("select * from vtocc_cached where eid = %(str)s and bid = %(str)s", {"str": "str"})
self._verify_mismatch("select * from vtocc_cached where eid = 1 and bid = 1")
self._verify_mismatch("select * from vtocc_cached where eid = %(id)s and bid = %(id)s", {"id": 1})
self._verify_mismatch("select * from vtocc_cached where eid = 1.2 and bid = 1.2")
self._verify_mismatch("select * from vtocc_cached where eid = %(fl)s and bid = %(fl)s", {"fl": 1.2})
self._verify_mismatch("select * from vtocc_cached2 where eid = 'str' and bid = 'str'")
self._verify_mismatch("select * from vtocc_cached2 where eid = %(str)s and bid = %(str)s", {"str": "str"})
self._verify_mismatch("select * from vtocc_cached2 where eid = 1 and bid = 1")
self._verify_mismatch("select * from vtocc_cached2 where eid = %(id)s and bid = %(id)s", {"id": 1})
self._verify_mismatch("select * from vtocc_cached2 where eid = 1.2 and bid = 1.2")
self._verify_mismatch("select * from vtocc_cached2 where eid = %(fl)s and bid = %(fl)s", {"fl": 1.2})
def test_spot_check(self):
vstart = self.env.debug_vars()
self.assertEqual(vstart["SpotCheckRatio"], 0)
self.env.execute("select * from vtocc_cached where eid = 225 and bid = 'foo'")
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
self.assertEqual(vstart["SpotCheckCount"], self.env.debug_vars()["SpotCheckCount"])
self.env.execute("set vt_spot_check_ratio=1")
self.assertEqual(self.env.debug_vars()["SpotCheckRatio"], 1)
self.env.execute("select * from vtocc_cached where eid = 2 and bid = 'foo'")
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
self.assertEqual(vstart["SpotCheckCount"]+1, self.env.debug_vars()["SpotCheckCount"])
vstart = self.env.debug_vars()
self.env.execute("select * from vtocc_cached1 where eid in (9)")
self.assertEqual(vstart["SpotCheckCount"], self.env.debug_vars()["SpotCheckCount"])
self.env.execute("select * from vtocc_cached1 where eid in (9)")
self.assertEqual(vstart["SpotCheckCount"]+1, self.env.debug_vars()["SpotCheckCount"])
self.env.execute("set vt_spot_check_ratio=0")
self.assertEqual(self.env.debug_vars()["SpotCheckRatio"], 0)
@ -224,7 +207,12 @@ class TestCache(framework.TestCase):
else:
self.fail("Did not receive exception")
def test_cache_sqls(self):
error_count = self.env.run_cases(cache_cases.cases)
def test_cache1_sqls(self):
error_count = self.env.run_cases(cache_cases1.cases)
if error_count != 0:
self.fail("test_execution errors: %d" % error_count)
self.fail("test_cache1_sqls errors: %d" % error_count)
def test_cache2_sqls(self):
error_count = self.env.run_cases(cache_cases2.cases)
if error_count != 0:
self.fail("test_cache2_sqls errors: %d" % error_count)

View file

@ -117,7 +117,7 @@ class Log(object):
class Case(object):
def __init__(self, sql, bindings=None, result=None, rewritten=None, doc='',
cache_table="vtocc_cached", query_plan=None, cache_hits=None,
cache_table=None, query_plan=None, cache_hits=None,
cache_misses=None, cache_absent=None, cache_invalidations=None,
remote_address="[::1]"):
# For all cache_* parameters, a number n means "check this value
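With the cache_table default of "vtocc_cached" dropped in favor of None, a case that wants its cache counters verified presumably has to name the table itself. An illustrative construction (the SQL, row, and counter values are copied from the cases earlier in this diff, but this exact call is an assumption, not harness code):

# Illustrative only; assumes the Case class defined above is in scope.
case = Case(sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
            result=[(1, 'foo', 'abcd1', 'efgh')],
            cache_table="vtocc_cached2",
            cache_hits=1)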

View file

@ -467,19 +467,6 @@ cases = [
"update vtocc_a set foo='fghi' where id=2",
'commit']),
MultiCase(
'tuple in',
['begin',
Case(sql="update /* pk */ vtocc_a set foo='bar' where (eid, id) in ((1, 1), (1, 2))",
rewritten="update /* pk */ vtocc_a set foo = 'bar' where (eid, id) in ((1, 1), (1, 2)) /* _stream vtocc_a (eid id ) (1 1 ) (1 2 )"),
'commit',
Case(sql='select foo from vtocc_a where id = 1',
result=[('bar',)]),
'begin',
"update vtocc_a set foo='efgh' where id=1",
"update vtocc_a set foo='fghi' where id=2",
'commit']),
MultiCase(
'pk change',
['begin',
@ -631,16 +618,6 @@ cases = [
Case(sql='select * from vtocc_a where eid=2',
result=[])]),
MultiCase(
'tuple in',
['begin',
"insert into vtocc_a(eid, id, name, foo) values (2, 1, '', '')",
Case(sql="delete /* pk */ from vtocc_a where (eid, id) in ((2, 1), (3, 2))",
rewritten="delete /* pk */ from vtocc_a where (eid, id) in ((2, 1), (3, 2)) /* _stream vtocc_a (eid id ) (2 1 ) (3 2 )"),
'commit',
Case(sql='select * from vtocc_a where eid=2',
result=[])]),
MultiCase(
'complex where',
['begin',
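The deleted 'tuple in' groups covered DMLs that matched the composite primary key (eid, id) with a tuple IN list; the IN-clause queries that remain in this diff use a single-column key (the vtocc_cached1 lookups above). Both shapes side by side, as plain strings for comparison only (nothing below is asserted by the harness):

# Sketch for comparison: the dropped tuple-IN shape vs. the single-column
# IN shape still exercised elsewhere in this diff.
dropped_tuple_in = "delete /* pk */ from vtocc_a where (eid, id) in ((2, 1), (3, 2))"
single_column_in = "select * from vtocc_cached1 where eid in (9)"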

View file

@ -20,14 +20,26 @@ insert into vtocc_b(eid, id) values(1, 1), (1, 2)
insert into vtocc_c(eid, name, foo) values(10, 'abcd', '20'), (11, 'bcde', '30')
commit
create table vtocc_cached(eid bigint, bid varbinary(16), name varchar(128), foo varbinary(128), primary key(eid, bid))
create index aname on vtocc_cached(eid, name)
create table vtocc_cached1(eid bigint, name varchar(128), foo varbinary(128), primary key(eid))
create index aname1 on vtocc_cached1(name)
begin
delete from vtocc_cached
insert into vtocc_cached values (1, 'foo', 'abcd1', 'efgh')
insert into vtocc_cached values (1, 'bar', 'abcd1', 'efgh')
insert into vtocc_cached values (2, 'foo', 'abcd2', 'efgh')
insert into vtocc_cached values (2, 'bar', 'abcd2', 'efgh')
delete from vtocc_cached1
insert into vtocc_cached1 values (1, 'a', 'abcd')
insert into vtocc_cached1 values (2, 'a', 'abcd')
insert into vtocc_cached1 values (3, 'c', 'abcd')
insert into vtocc_cached1 values (4, 'd', 'abcd')
insert into vtocc_cached1 values (5, 'e', 'efgh')
insert into vtocc_cached1 values (9, 'i', 'ijkl')
commit
create table vtocc_cached2(eid bigint, bid varbinary(16), name varchar(128), foo varbinary(128), primary key(eid, bid))
create index aname2 on vtocc_cached2(eid, name)
begin
delete from vtocc_cached2
insert into vtocc_cached2 values (1, 'foo', 'abcd1', 'efgh')
insert into vtocc_cached2 values (1, 'bar', 'abcd1', 'efgh')
insert into vtocc_cached2 values (2, 'foo', 'abcd2', 'efgh')
insert into vtocc_cached2 values (2, 'bar', 'abcd2', 'efgh')
commit
create table vtocc_big(id int, string1 varchar(128), string2 varchar(100), string3 char(1), string4 varchar(50), string5 varchar(50), date1 date, string6 varchar(16), string7 varchar(120), bigint1 bigint(20), bigint2 bigint(20), date2 date, integer1 int, tinyint1 tinyint(4), primary key(id)) comment 'vtocc_big'
@ -58,8 +70,9 @@ drop table vtocc_c
drop table vtocc_d
drop table vtocc_e
drop table vtocc_f
drop table vtocc_cached
drop table vtocc_cached1
drop table vtocc_cached2
drop table vtocc_renamed
drop table vtocc_nocache
drop table vtocc_big
drop table vtocc_ints
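The schema now seeds two cached tables: vtocc_cached1 with a single-column primary key (eid) and vtocc_cached2 with a composite key (eid, bid). A hedged convenience for replaying the seed blocks above through the same env interface the tests use; reseed and the row constants are assumptions, not part of the commit.

# Hypothetical helper: replay the vtocc_cached1/vtocc_cached2 seed blocks
# listed above inside one transaction, using bind variables.
CACHED1_ROWS = [(1, 'a', 'abcd'), (2, 'a', 'abcd'), (3, 'c', 'abcd'),
                (4, 'd', 'abcd'), (5, 'e', 'efgh'), (9, 'i', 'ijkl')]
CACHED2_ROWS = [(1, 'foo', 'abcd1', 'efgh'), (1, 'bar', 'abcd1', 'efgh'),
                (2, 'foo', 'abcd2', 'efgh'), (2, 'bar', 'abcd2', 'efgh')]

def reseed(env):
    env.conn.begin()
    env.execute("delete from vtocc_cached1")
    for eid, name, foo in CACHED1_ROWS:
        env.execute("insert into vtocc_cached1 values (%(eid)s, %(name)s, %(foo)s)",
                    {"eid": eid, "name": name, "foo": foo})
    env.execute("delete from vtocc_cached2")
    for eid, bid, name, foo in CACHED2_ROWS:
        env.execute("insert into vtocc_cached2 values (%(eid)s, %(bid)s, %(name)s, %(foo)s)",
                    {"eid": eid, "bid": bid, "name": name, "foo": foo})
    env.conn.commit()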