Mirror of https://github.com/github/vitess-gh.git
RIP queryservice_test
This commit is contained in:
Parent: 8afcb3b60d
Commit: 7403a556b8
Makefile (5 changed lines)
@@ -8,7 +8,7 @@ MAKEFLAGS = -s
# Since we are not using this Makefile for compilation, limiting parallelism will not increase build time.
.NOTPARALLEL:

-.PHONY: all build test clean unit_test unit_test_cover unit_test_race queryservice_test integration_test bson proto site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test php_test reshard_tests
+.PHONY: all build test clean unit_test unit_test_cover unit_test_race integration_test bson proto site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test php_test reshard_tests

all: build test
@@ -66,9 +66,6 @@ unit_test_race: build
unit_test_goveralls: build
	travis/goveralls.sh

-queryservice_test:
-	go run test.go -docker=false queryservice
-
.ONESHELL:
SHELL = /bin/bash
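With the dedicated Make target removed, the queryservice suite is presumably still run through the repository's Go test runner; the command the deleted target wrapped was:

	go run test.go -docker=false queryservice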
@@ -120,16 +120,6 @@
        "Shard": 4,
        "RetryMax": 0
    },
-    "queryservice": {
-        "File": "queryservice_test.py",
-        "Args": [
-            "-m"
-        ],
-        "Command": [],
-        "Manual": false,
-        "Shard": 3,
-        "RetryMax": 0
-    },
    "reparent": {
        "File": "reparent.py",
        "Args": [],
@@ -1,88 +0,0 @@
#!/usr/bin/env python

# This is a code generator that converts the python test cases
# into go.
# TODO(sougou): delete after migration.
from queryservice_tests.cases_framework import MultiCase
import queryservice_tests.cache_cases2 as source

def main():
  print "\ttestCases := []framework.Testable{"
  for case in source.cases:
    print_case(case, 2)
  print "\t}"

def print_case(case, indent):
  tabs = buildtabs(indent)
  if isinstance(case, basestring):
    print '%sframework.TestQuery("%s"),' % (tabs, case)
  elif isinstance(case, MultiCase):
    print "%s&framework.MultiCase{" % (tabs)
    if case.doc:
      print '%s\tName: "%s",' % (tabs, case.doc)
    print '%s\tCases: []framework.Testable{' % (tabs)
    for subcase in case:
      print_case(subcase, indent+2)
    print "%s\t}," % (tabs)
    print "%s}," % (tabs)
  else:
    print "%s&framework.TestCase{" % (tabs)
    print_details(case, indent+1)
    print "%s}," % (tabs)

def buildtabs(indent):
  tabs = ''
  for i in range(indent):
    tabs += '\t'
  return tabs

def print_details(case, indent):
  tabs = buildtabs(indent)
  if case.doc:
    print '%sName: "%s",' % (tabs, case.doc)
  print '%sQuery: "%s",' % (tabs, case.sql)
  if case.bindings:
    print '%sBindVars: map[string]interface{}{' % (tabs)
    print_bindings(case.bindings, indent+1)
    print "%s}," % (tabs)
  if case.result:
    print '%sResult: [][]string{' % (tabs)
    print_result(case.result, indent+1)
    print "%s}," % (tabs)
  if case.rewritten:
    print '%sRewritten: []string{' % (tabs)
    for v in case.rewritten:
      print '%s\t"%s",' % (tabs, v)
    print "%s}," % (tabs)
  if case.rowcount:
    print '%sRowsAffected: %s,' % (tabs, case.rowcount)
  if case.query_plan:
    print '%sPlan: "%s",' % (tabs, case.query_plan)
  if case.cache_table:
    print '%sTable: "%s",' % (tabs, case.cache_table)
  if case.cache_hits:
    print '%sHits: %s,' % (tabs, case.cache_hits)
  if case.cache_misses:
    print '%sMisses: %s,' % (tabs, case.cache_misses)
  if case.cache_absent:
    print '%sAbsent: %s,' % (tabs, case.cache_absent)
  if case.cache_invalidations:
    print '%sInvalidations: %s,' % (tabs, case.cache_invalidations)


def print_bindings(bindings, indent):
  tabs = buildtabs(indent)
  for (k, v) in bindings.items():
    if isinstance(v, basestring):
      print '%s"%s": "%s",' % (tabs, k, v)
    else:
      print '%s"%s": %s,' % (tabs, k, v)

def print_result(result, indent):
  tabs = buildtabs(indent)
  for row in result:
    print '%s{%s},' % (tabs, ", ".join(['"%s"' % v for v in row]))


if __name__ == '__main__':
  main()
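For reference, this is roughly the Go literal the generator above prints for the first entries of a cases list. The values are taken from the first two cache_cases1 entries further down in this diff; the field and type names come straight from the print statements above, while the exact layout of the real generated file is an assumption:

	testCases := []framework.Testable{
		framework.TestQuery("alter table vtocc_cached1 comment 'new'"),
		&framework.TestCase{
			Name:  "PK_IN (empty cache)",
			Query: "select * from vtocc_cached1 where eid = 1",
			Result: [][]string{
				{"1", "a", "abcd"},
			},
			Rewritten: []string{
				"select * from vtocc_cached1 where 1 != 1",
				"select eid, name, foo from vtocc_cached1 where eid in (1)",
			},
			RowsAffected: 1,
			Plan:         "PK_IN",
			Table:        "vtocc_cached1",
			Misses:       1,
		},
	}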
@@ -1,81 +0,0 @@
#!/usr/bin/env python

import os
import sys

import logging
import optparse
import traceback
import unittest

import utils
import framework

from queryservice_tests import cache_tests
from queryservice_tests import nocache_tests
from queryservice_tests import status_tests
from queryservice_tests import stream_tests
from queryservice_tests import test_env

from mysql_flavor import set_mysql_flavor
from protocols_flavor import set_protocols_flavor
from topo_flavor.server import set_topo_server_flavor


def main():
  parser = optparse.OptionParser(usage='usage: %prog [options] [test_names]')
  parser.add_option('-m', '--memcache', action='store_true', default=False,
                    help='starts a memcached, and tests rowcache')
  utils.add_options(parser)
  options, args = parser.parse_args()

  logging.getLogger().setLevel(logging.ERROR)
  utils.set_options(options)

  run_tests(options, args)


def run_tests(options, args):
  suite = unittest.TestSuite()
  if args:
    if args[0] == 'teardown':
      test_env.TestEnv().tearDown()
      exit(0)
    for arg in args:
      if hasattr(nocache_tests.TestNocache, arg):
        suite.addTest(nocache_tests.TestNocache(arg))
      elif hasattr(stream_tests.TestStream, arg):
        suite.addTest(stream_tests.TestStream(arg))
      elif hasattr(cache_tests.TestCache, arg) and options.memcache:
        suite.addTest(cache_tests.TestCache(arg))
      elif hasattr(cache_tests.TestWillNotBeCached, arg) and options.memcache:
        suite.addTest(cache_tests.TestWillNotBeCached(arg))
      else:
        raise Exception(arg, 'not found in tests')
  else:
    modules = [nocache_tests, stream_tests, status_tests]
    if options.memcache:
      modules.append(cache_tests)
    for m in modules:
      suite.addTests(unittest.TestLoader().loadTestsFromModule(m))

  env = test_env.TestEnv()
  try:
    env.memcache = options.memcache
    env.setUp()
    print 'Starting queryservice_test.py'
    sys.stdout.flush()
    framework.TestCase.setenv(env)
    result = unittest.TextTestRunner(
        verbosity=options.verbose, failfast=True).run(suite)
    if not result.wasSuccessful():
      raise Exception('test failures')
  finally:
    if not options.skip_teardown:
      env.tearDown()
    if options.keep_logs:
      print('Leaving temporary files behind (--keep-logs), please '
            'clean up before next run: ' + os.environ['VTDATAROOT'])


if __name__ == '__main__':
  main()
@@ -1,4 +0,0 @@
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
@@ -1,154 +0,0 @@
|
|||
from cases_framework import Case, MultiCase
|
||||
|
||||
# Covers cases for vtocc_cached1
|
||||
|
||||
|
||||
class Case1(Case):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
Case.__init__(self, cache_table='vtocc_cached1', **kwargs)
|
||||
|
||||
cases = [
|
||||
"alter table vtocc_cached1 comment 'new'",
|
||||
|
||||
Case1(
|
||||
doc='PK_IN (empty cache)',
|
||||
query_plan='PK_IN',
|
||||
sql='select * from vtocc_cached1 where eid = 1',
|
||||
result=[(1L, 'a', 'abcd')],
|
||||
rewritten=[
|
||||
'select * from vtocc_cached1 where 1 != 1',
|
||||
'select eid, name, foo from vtocc_cached1 where eid in (1)'],
|
||||
rowcount=1,
|
||||
cache_misses=1),
|
||||
# (1) is in cache
|
||||
|
||||
Case1(
|
||||
doc='PK_IN, use cache',
|
||||
query_plan='PK_IN',
|
||||
sql='select * from vtocc_cached1 where eid = 1',
|
||||
result=[(1L, 'a', 'abcd')],
|
||||
rowcount=1,
|
||||
rewritten=[],
|
||||
cache_hits=1),
|
||||
# (1)
|
||||
|
||||
Case1(
|
||||
doc='PK_IN (empty cache)',
|
||||
query_plan='PK_IN',
|
||||
sql='select * from vtocc_cached1 where eid in (1, 3, 6)',
|
||||
result=[(1L, 'a', 'abcd'), (3L, 'c', 'abcd')],
|
||||
rowcount=2,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached1 where 1 != 1',
|
||||
'select eid, name, foo from vtocc_cached1 where eid in (3, 6)'],
|
||||
cache_hits=1,
|
||||
cache_misses=1,
|
||||
cache_absent=1),
|
||||
# (1, 3)
|
||||
|
||||
Case1(
|
||||
doc='PK_IN limit 0',
|
||||
query_plan='PK_IN',
|
||||
sql='select * from vtocc_cached1 where eid in (1, 3, 6) limit 0',
|
||||
result=[],
|
||||
rowcount=0,
|
||||
rewritten=['select * from vtocc_cached1 where 1 != 1'],
|
||||
cache_hits=0,
|
||||
cache_misses=0,
|
||||
cache_absent=0),
|
||||
# (1, 3)
|
||||
|
||||
Case1(
|
||||
doc='PK_IN limit 1',
|
||||
query_plan='PK_IN',
|
||||
sql='select * from vtocc_cached1 where eid in (1, 3, 6) limit 1',
|
||||
result=[(1L, 'a', 'abcd')],
|
||||
rowcount=1,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached1 where 1 != 1',
|
||||
'select eid, name, foo from vtocc_cached1 where eid in (6)'],
|
||||
cache_hits=2,
|
||||
cache_misses=0,
|
||||
cache_absent=1),
|
||||
# (1, 3)
|
||||
|
||||
Case1(
|
||||
doc='PK_IN limit :a',
|
||||
query_plan='PK_IN',
|
||||
sql='select * from vtocc_cached1 where eid in (1, 3, 6) limit :a',
|
||||
bindings={'a': 1},
|
||||
result=[(1L, 'a', 'abcd')],
|
||||
rowcount=1,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached1 where 1 != 1',
|
||||
'select eid, name, foo from vtocc_cached1 where eid in (6)'],
|
||||
cache_hits=2,
|
||||
cache_misses=0,
|
||||
cache_absent=1),
|
||||
# (1, 3)
|
||||
|
||||
Case1(
|
||||
doc='SELECT_SUBQUERY (1, 2)',
|
||||
sql="select * from vtocc_cached1 where name = 'a'",
|
||||
result=[(1L, 'a', 'abcd'), (2L, 'a', 'abcd')],
|
||||
rowcount=2,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached1 where 1 != 1',
|
||||
'select eid from vtocc_cached1 use index (aname1) '
|
||||
"where name = 'a' limit 10001",
|
||||
'select eid, name, foo from vtocc_cached1 where eid in (2)'],
|
||||
cache_hits=1,
|
||||
cache_misses=1),
|
||||
# (1, 2, 3)
|
||||
|
||||
Case1(
|
||||
doc='covering index',
|
||||
query_plan='PASS_SELECT',
|
||||
sql="select eid, name from vtocc_cached1 where name = 'a'",
|
||||
result=[(1L, 'a'), (2L, 'a')],
|
||||
rowcount=2,
|
||||
rewritten=[
|
||||
'select eid, name from vtocc_cached1 where 1 != 1',
|
||||
'select eid, name from vtocc_cached1 '
|
||||
"where name = 'a' limit 10001"]),
|
||||
# (1, 2, 3)
|
||||
|
||||
Case1(
|
||||
doc='SELECT_SUBQUERY (1, 2)',
|
||||
sql="select * from vtocc_cached1 where name = 'a'",
|
||||
result=[(1L, 'a', 'abcd'), (2L, 'a', 'abcd')],
|
||||
rowcount=2,
|
||||
rewritten=['select eid from vtocc_cached1 use index (aname1) '
|
||||
"where name = 'a' limit 10001"],
|
||||
cache_hits=2),
|
||||
# (1, 2, 3)
|
||||
|
||||
Case1(
|
||||
doc='SELECT_SUBQUERY (4, 5)',
|
||||
query_plan='SELECT_SUBQUERY',
|
||||
sql="select * from vtocc_cached1 where name between 'd' and 'e'",
|
||||
result=[(4L, 'd', 'abcd'), (5L, 'e', 'efgh')],
|
||||
rowcount=2,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached1 where 1 != 1',
|
||||
'select eid from vtocc_cached1 use index (aname1) '
|
||||
"where name between 'd' and 'e' limit 10001",
|
||||
'select eid, name, foo from vtocc_cached1 where eid in (4, 5)'],
|
||||
cache_hits=0,
|
||||
cache_misses=2),
|
||||
# (1, 2, 3, 4, 5)
|
||||
|
||||
Case1(
|
||||
doc='PASS_SELECT',
|
||||
query_plan='PASS_SELECT',
|
||||
sql="select * from vtocc_cached1 where foo='abcd'",
|
||||
result=[(1L, 'a', 'abcd'), (2L, 'a', 'abcd'), (3L, 'c', 'abcd'),
|
||||
(4L, 'd', 'abcd')],
|
||||
rowcount=4,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached1 where 1 != 1',
|
||||
"select * from vtocc_cached1 where foo = 'abcd' limit 10001"],
|
||||
cache_hits=0, cache_misses=0, cache_absent=0),
|
||||
# (1, 2, 3, 4, 5)
|
||||
]
|
|
@@ -1,271 +0,0 @@
|
|||
from cases_framework import Case, MultiCase
|
||||
|
||||
# Covers cases for vtocc_cached2
|
||||
|
||||
|
||||
class Case2(Case):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
Case.__init__(self, cache_table='vtocc_cached2', **kwargs)
|
||||
|
||||
|
||||
cases = [
|
||||
"alter table vtocc_cached2 comment 'new'",
|
||||
|
||||
Case2(
|
||||
doc='PK_IN (null key)',
|
||||
query_plan='PK_IN',
|
||||
sql='select * from vtocc_cached2 where eid = 2 and bid = :bid',
|
||||
bindings={'bid': None},
|
||||
result=[],
|
||||
rowcount=0,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
'where (eid = 2 and bid = null)'],
|
||||
cache_absent=1),
|
||||
|
||||
Case2(doc='PK_IN (empty cache)',
|
||||
query_plan='PK_IN',
|
||||
sql="select * from vtocc_cached2 where eid = 2 and bid = 'foo'",
|
||||
result=[(2, 'foo', 'abcd2', 'efgh')],
|
||||
rowcount=1,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 2 and bid = 'foo')"],
|
||||
cache_misses=1),
|
||||
# (2.foo) is in cache
|
||||
|
||||
Case2(doc='PK_IN, use cache',
|
||||
query_plan='PK_IN',
|
||||
sql='select bid, eid, name, foo from vtocc_cached2 '
|
||||
"where eid = 2 and bid = 'foo'",
|
||||
result=[('foo', 2, 'abcd2', 'efgh')],
|
||||
rowcount=1,
|
||||
rewritten=['select bid, eid, name, foo from vtocc_cached2 '
|
||||
'where 1 != 1'],
|
||||
cache_hits=1),
|
||||
# (2.foo)
|
||||
|
||||
Case2(doc='PK_IN, absent',
|
||||
query_plan='PK_IN',
|
||||
sql='select bid, eid, name, foo from vtocc_cached2 '
|
||||
"where eid = 3 and bid = 'foo'",
|
||||
result=[],
|
||||
rowcount=0,
|
||||
rewritten=[
|
||||
'select bid, eid, name, foo from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 3 and bid = 'foo')"],
|
||||
cache_absent=1),
|
||||
# (2.foo)
|
||||
|
||||
Case2(
|
||||
doc='out of order columns list',
|
||||
sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'foo'",
|
||||
result=[('foo', 1)],
|
||||
rowcount=1,
|
||||
rewritten=[
|
||||
'select bid, eid from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 1 and bid = 'foo')"],
|
||||
cache_misses=1),
|
||||
# (1.foo, 2.foo)
|
||||
|
||||
Case2(
|
||||
doc='out of order columns list, use cache',
|
||||
sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'foo'",
|
||||
result=[('foo', 1)],
|
||||
rowcount=1,
|
||||
rewritten=[],
|
||||
cache_hits=1),
|
||||
# (1.foo, 2.foo)
|
||||
|
||||
Case2(
|
||||
doc='pk_in for composite pk table, two fetches from db (absent)',
|
||||
query_plan='PK_IN',
|
||||
sql='select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where eid = 1 and bid in('absent1', 'absent2')",
|
||||
result=[],
|
||||
rowcount=0,
|
||||
rewritten=[
|
||||
'select eid, bid, name, foo from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 1 and bid = 'absent1') "
|
||||
"or (eid = 1 and bid = 'absent2')"],
|
||||
cache_hits=0,
|
||||
cache_misses=0,
|
||||
cache_absent=2,
|
||||
cache_invalidations=0),
|
||||
# (1.foo, 1.bar, 2.foo)
|
||||
|
||||
Case2(
|
||||
doc='pk_in for composite pk table, 1 fetch from db',
|
||||
query_plan='PK_IN',
|
||||
sql='select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where eid = 1 and bid in('foo', 'bar')",
|
||||
result=[(1L, 'foo', 'abcd1', 'efgh'), (1L, 'bar', 'abcd1', 'efgh')],
|
||||
rowcount=2,
|
||||
rewritten=[
|
||||
'select eid, bid, name, foo from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 1 and bid = 'bar')"],
|
||||
cache_hits=1,
|
||||
cache_misses=1,
|
||||
cache_absent=0,
|
||||
cache_invalidations=0),
|
||||
# (1.foo, 1.bar, 2.foo)
|
||||
|
||||
Case2(
|
||||
doc='pk_in for composite pk table, 0 fetch from db',
|
||||
query_plan='PK_IN',
|
||||
sql='select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where eid = 1 and bid in('foo', 'bar')",
|
||||
result=[(1L, 'foo', 'abcd1', 'efgh'), (1L, 'bar', 'abcd1', 'efgh')],
|
||||
rowcount=2,
|
||||
rewritten=[],
|
||||
cache_hits=2,
|
||||
cache_misses=0,
|
||||
cache_absent=0,
|
||||
cache_invalidations=0),
|
||||
# (1.foo, 1.bar, 2.foo)
|
||||
|
||||
Case2(
|
||||
doc='select_subquery for composite pk table, 1 fetch from db',
|
||||
query_plan='SELECT_SUBQUERY',
|
||||
sql='select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where eid = 2 and name='abcd2'",
|
||||
result=[(2L, 'foo', 'abcd2', 'efgh'), (2L, 'bar', 'abcd2', 'efgh')],
|
||||
rowcount=2,
|
||||
rewritten=[
|
||||
'select eid, bid, name, foo from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid from vtocc_cached2 use index (aname2) '
|
||||
"where eid = 2 and name = 'abcd2' limit 10001",
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 2 and bid = 'bar')"],
|
||||
cache_hits=1,
|
||||
cache_misses=1,
|
||||
cache_absent=0,
|
||||
cache_invalidations=0),
|
||||
# (1.foo, 1.bar, 2.foo, 2.bar)
|
||||
|
||||
Case2(
|
||||
doc='verify 1.bar is in cache',
|
||||
sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'bar'",
|
||||
result=[('bar', 1)],
|
||||
rowcount=1,
|
||||
rewritten=[
|
||||
'select bid, eid from vtocc_cached2 where 1 != 1'],
|
||||
cache_hits=1),
|
||||
# (1.foo, 1.bar, 2.foo, 2.bar)
|
||||
|
||||
MultiCase(
|
||||
'update',
|
||||
['begin',
|
||||
"update vtocc_cached2 set foo='fghi' where bid = 'bar'",
|
||||
Case2(
|
||||
sql='commit', cache_invalidations=2),
|
||||
Case2(
|
||||
sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
|
||||
result=[(1L, 'bar', 'abcd1', 'fghi')],
|
||||
rowcount=1,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 1 and bid = 'bar')"],
|
||||
cache_misses=1)]),
|
||||
# (1.foo, 1.bar, 2.foo, 2.bar)
|
||||
|
||||
MultiCase(
|
||||
"this will not invalidate the cache",
|
||||
['begin',
|
||||
"update vtocc_cached2 set foo='fghi' where bid = 'bar'",
|
||||
'rollback',
|
||||
Case2(sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
|
||||
result=[(1L, 'bar', 'abcd1', 'fghi')],
|
||||
rowcount=1,
|
||||
rewritten=[],
|
||||
cache_hits=1)]),
|
||||
# (1.foo, 1.bar, 2.foo, 2.bar)
|
||||
|
||||
MultiCase(
|
||||
'upsert should invalidate rowcache',
|
||||
[Case2(sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
|
||||
result=[(1L, 'bar', 'abcd1', 'fghi')],
|
||||
rowcount=1,
|
||||
rewritten=[],
|
||||
cache_hits=1),
|
||||
'begin',
|
||||
Case2(
|
||||
sql="insert into vtocc_cached2 values(1, 'bar', 'abcd1', 'fghi') "
|
||||
"on duplicate key update foo='fghi'",
|
||||
rowcount=0,
|
||||
rewritten=[
|
||||
"insert into vtocc_cached2 values (1, 'bar', 'abcd1', 'fghi') "
|
||||
"/* _stream vtocc_cached2 (eid bid ) (1 'YmFy' )",
|
||||
"update vtocc_cached2 set foo = 'fghi' "
|
||||
"where (eid = 1 and bid = 'bar') "
|
||||
"/* _stream vtocc_cached2 (eid bid ) (1 'YmFy' )"]),
|
||||
'commit',
|
||||
Case2(
|
||||
sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
|
||||
result=[(1L, 'bar', 'abcd1', 'fghi')],
|
||||
rowcount=1,
|
||||
rewritten=['select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 1 and bid = 'bar')"],
|
||||
cache_misses=1)]),
|
||||
# (1.foo, 1.bar, 2.foo, 2.bar)
|
||||
|
||||
MultiCase(
|
||||
'delete',
|
||||
['begin',
|
||||
"delete from vtocc_cached2 where eid = 1 and bid = 'bar'",
|
||||
Case2(
|
||||
sql='commit',
|
||||
cache_invalidations=1),
|
||||
Case2(
|
||||
sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
|
||||
result=[],
|
||||
rowcount=0,
|
||||
rewritten='select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 1 and bid = 'bar')",
|
||||
cache_absent=1),
|
||||
'begin',
|
||||
'insert into vtocc_cached2(eid, bid, name, foo) '
|
||||
"values (1, 'bar', 'abcd1', 'efgh')",
|
||||
Case2(sql='commit', cache_invalidations=0)]),
|
||||
# (1.foo, 2.foo, 2.bar)
|
||||
|
||||
Case2(doc='Verify 1.foo is in cache',
|
||||
sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
|
||||
result=[(1, 'foo', 'abcd1', 'efgh')],
|
||||
rowcount=1,
|
||||
rewritten=['select * from vtocc_cached2 where 1 != 1'],
|
||||
cache_hits=1),
|
||||
# (1.foo, 2.foo, 2.bar)
|
||||
|
||||
# DDL
|
||||
"alter table vtocc_cached2 comment 'test'",
|
||||
|
||||
Case2(
|
||||
doc='Verify cache is empty after DDL',
|
||||
sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
|
||||
result=[(1, 'foo', 'abcd1', 'efgh')],
|
||||
rowcount=1,
|
||||
rewritten=[
|
||||
'select * from vtocc_cached2 where 1 != 1',
|
||||
'select eid, bid, name, foo from vtocc_cached2 '
|
||||
"where (eid = 1 and bid = 'foo')"],
|
||||
cache_misses=1),
|
||||
|
||||
# (1.foo)
|
||||
Case2(
|
||||
doc='Verify row is cached',
|
||||
sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
|
||||
result=[(1, 'foo', 'abcd1', 'efgh')],
|
||||
rowcount=1,
|
||||
rewritten=[],
|
||||
cache_hits=1),
|
||||
# (1.foo)
|
||||
]
|
|
@@ -1,89 +0,0 @@
|
|||
from vtdb import dbexceptions
|
||||
from vtdb import field_types
|
||||
|
||||
import framework
|
||||
import cache_cases1
|
||||
import cache_cases2
|
||||
|
||||
import cases_framework
|
||||
|
||||
|
||||
class TestCache(framework.TestCase):
|
||||
def test_overrides(self):
|
||||
tstart = self.env.table_stats()["vtocc_view"]
|
||||
self.env.querylog.reset()
|
||||
cu = self.env.execute("select * from vtocc_view where key2 = 1")
|
||||
self.assertEqual(cu.fetchone(), (1L, 10L, 1L, 3L))
|
||||
tend = self.env.table_stats()["vtocc_view"]
|
||||
self.assertEqual(tstart["Misses"]+1, tend["Misses"])
|
||||
log = self.env.querylog.tailer.read()
|
||||
|
||||
self.assertContains(log, "select * from vtocc_view where 1 != 1")
|
||||
self.assertContains(
|
||||
log,
|
||||
"select key2, key1, data1, data2 from vtocc_view where key2 in (1)")
|
||||
|
||||
tstart = self.env.table_stats()["vtocc_view"]
|
||||
cu = self.env.execute("select * from vtocc_view where key2 = 1")
|
||||
self.assertEqual(cu.fetchone(), (1L, 10L, 1L, 3L))
|
||||
tend = self.env.table_stats()["vtocc_view"]
|
||||
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
|
||||
|
||||
tstart = self.env.table_stats()["vtocc_view"]
|
||||
self.env.conn.begin()
|
||||
self.env.querylog.reset()
|
||||
self.env.execute("update vtocc_part1 set data1 = 2 where key2 = 1")
|
||||
log = self.env.querylog.tailer.read()
|
||||
self.env.conn.commit()
|
||||
self.assertContains(
|
||||
log,
|
||||
"update vtocc_part1 set data1 = 2 where key2 in (1) "
|
||||
"/* _stream vtocc_part1 (key2 ) (1 ); */")
|
||||
|
||||
|
||||
self.env.querylog.reset()
|
||||
cu = self.env.execute("select * from vtocc_view where key2 = 1")
|
||||
self.assertEqual(cu.fetchone(), (1L, 10L, 2L, 3L))
|
||||
tend = self.env.table_stats()["vtocc_view"]
|
||||
self.assertEqual(tstart["Misses"]+1, tend["Misses"])
|
||||
log = self.env.querylog.tailer.read()
|
||||
self.assertContains(
|
||||
log,
|
||||
"select key2, key1, data1, data2 from vtocc_view where key2 in (1)")
|
||||
|
||||
tstart = self.env.table_stats()["vtocc_view"]
|
||||
cu = self.env.execute("select * from vtocc_view where key2 = 1")
|
||||
self.assertEqual(cu.fetchone(), (1L, 10L, 2L, 3L))
|
||||
tend = self.env.table_stats()["vtocc_view"]
|
||||
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
|
||||
|
||||
tstart = self.env.table_stats()["vtocc_view"]
|
||||
self.env.conn.begin()
|
||||
self.env.execute("update vtocc_part2 set data2 = 2 where key3 = 1")
|
||||
self.env.conn.commit()
|
||||
|
||||
self.env.querylog.reset()
|
||||
cu = self.env.execute("select * from vtocc_view where key2 = 1")
|
||||
self.assertEqual(cu.fetchone(), (1L, 10L, 2L, 2L))
|
||||
tend = self.env.table_stats()["vtocc_view"]
|
||||
self.assertEqual(tstart["Misses"]+1, tend["Misses"])
|
||||
log = self.env.querylog.tailer.read()
|
||||
self.assertContains(
|
||||
log,
|
||||
"select key2, key1, data1, data2 from vtocc_view where key2 in (1)")
|
||||
|
||||
tstart = self.env.table_stats()["vtocc_view"]
|
||||
cu = self.env.execute("select * from vtocc_view where key2 = 1")
|
||||
self.assertEqual(cu.fetchone(), (1L, 10L, 2L, 2L))
|
||||
tend = self.env.table_stats()["vtocc_view"]
|
||||
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
|
||||
|
||||
def test_cache1_sqls(self):
|
||||
error_count = self.env.run_cases(cache_cases1.cases)
|
||||
if error_count != 0:
|
||||
self.fail("test_cache1_sqls errors: %d" % error_count)
|
||||
|
||||
def test_cache2_sqls(self):
|
||||
error_count = self.env.run_cases(cache_cases2.cases)
|
||||
if error_count != 0:
|
||||
self.fail("test_cache2_sqls errors: %d" % error_count)
|
|
@@ -1,250 +0,0 @@
|
|||
import ast
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import urllib2
|
||||
|
||||
import environment
|
||||
import framework
|
||||
import utils
|
||||
|
||||
|
||||
def cases_iterator(cases):
|
||||
for case in cases:
|
||||
if isinstance(case, MultiCase):
|
||||
for c in case:
|
||||
yield c
|
||||
else:
|
||||
yield case
|
||||
|
||||
|
||||
class Log(object):
|
||||
def __init__(self, line):
|
||||
self.line = line
|
||||
try:
|
||||
(self.method,
|
||||
self.remote_address,
|
||||
self.username,
|
||||
self.start_time,
|
||||
self.end_time,
|
||||
self.total_time,
|
||||
self.plan_type,
|
||||
self.original_sql,
|
||||
self.bind_variables,
|
||||
self.number_of_queries,
|
||||
self.rewritten_sql,
|
||||
self.query_sources,
|
||||
self.mysql_response_time,
|
||||
self.waiting_for_connection_time,
|
||||
self.rowcount,
|
||||
self.size_of_response,
|
||||
self.cache_hits,
|
||||
self.cache_misses,
|
||||
self.cache_absent,
|
||||
self.cache_invalidations,
|
||||
self.error) = line.strip().split('\t')
|
||||
except ValueError:
|
||||
print 'Wrong looking line: %r' % line
|
||||
raise
|
||||
|
||||
def check(self, case):
|
||||
|
||||
if isinstance(case, basestring):
|
||||
return []
|
||||
|
||||
if isinstance(case, MultiCase):
|
||||
return sum((self.check(subcase) for subcase in case.sqls_and_cases), [])
|
||||
|
||||
failures = []
|
||||
|
||||
for method in dir(self):
|
||||
if method.startswith('check_'):
|
||||
if not case.is_testing_cache and method.startswith('check_cache_'):
|
||||
continue
|
||||
fail = getattr(self, method)(case)
|
||||
if fail:
|
||||
failures.append(fail)
|
||||
return failures
|
||||
|
||||
def fail(self, reason, should, is_):
|
||||
return 'FAIL: %s: %r != %r' % (reason, should, is_)
|
||||
|
||||
def check_original_sql(self, case):
|
||||
# The following is necessary because Python and Go use different
|
||||
# notations for bindings: %(foo)s vs :foo.
|
||||
sql = re.sub(r'%\((\w+)\)s', r':\1', case.sql)
|
||||
# Eval is a cheap hack - Go always uses doublequotes, Python
|
||||
# prefers single quotes.
|
||||
if sql != eval(self.original_sql):
|
||||
return self.fail('wrong sql', case.sql, self.original_sql)
|
||||
|
||||
def check_rowcount(self, case):
|
||||
if case.rowcount is not None and int(self.rowcount) != case.rowcount:
|
||||
return self.fail('Bad rowcount', case.rowcount, self.rowcount)
|
||||
|
||||
def check_cache_hits(self, case):
|
||||
if case.cache_hits is not None and int(self.cache_hits) != case.cache_hits:
|
||||
return self.fail('Bad Cache Hits', case.cache_hits, self.cache_hits)
|
||||
|
||||
def check_cache_absent(self, case):
|
||||
if (case.cache_absent is not None and
|
||||
int(self.cache_absent) != case.cache_absent):
|
||||
return self.fail('Bad Cache Absent', case.cache_absent, self.cache_absent)
|
||||
|
||||
def check_cache_misses(self, case):
|
||||
if (case.cache_misses is not None and
|
||||
int(self.cache_misses) != case.cache_misses):
|
||||
return self.fail(
|
||||
'Bad Cache Misses', case.cache_misses, self.cache_misses)
|
||||
|
||||
def check_cache_invalidations(self, case):
|
||||
if (case.cache_invalidations is not None and
|
||||
int(self.cache_invalidations) != case.cache_invalidations):
|
||||
return self.fail(
|
||||
'Bad Cache Invalidations', case.cache_invalidations,
|
||||
self.cache_invalidations)
|
||||
|
||||
def check_query_plan(self, case):
|
||||
if case.query_plan is not None and case.query_plan != self.plan_type:
|
||||
return self.fail('Bad query plan', case.query_plan, self.plan_type)
|
||||
|
||||
def check_rewritten_sql(self, case):
|
||||
if case.rewritten is None:
|
||||
return
|
||||
queries = []
|
||||
for q in ast.literal_eval(self.rewritten_sql).split(';'):
|
||||
q = q.strip()
|
||||
if q and q != '*/':
|
||||
queries.append(q)
|
||||
if case.rewritten != queries:
|
||||
return self.fail('Bad rewritten SQL', case.rewritten, queries)
|
||||
|
||||
def check_number_of_queries(self, case):
|
||||
if (case.rewritten is not None and
|
||||
int(self.number_of_queries) != len(case.rewritten)):
|
||||
return self.fail(
|
||||
'wrong number of queries', len(case.rewritten),
|
||||
int(self.number_of_queries))
|
||||
|
||||
|
||||
class Case(object):
|
||||
|
||||
def __init__(
|
||||
self, sql, bindings=None, result=None, rewritten=None, doc='',
|
||||
rowcount=None, cache_table=None, query_plan=None, cache_hits=None,
|
||||
cache_misses=None, cache_absent=None, cache_invalidations=None,
|
||||
remote_address='[::1]'):
|
||||
# For all cache_* parameters, a number n means "check this value
|
||||
# is exactly n," while None means "I am not interested in this
|
||||
# value, leave it alone."
|
||||
self.sql = sql
|
||||
self.bindings = bindings or {}
|
||||
self.result = result
|
||||
if isinstance(rewritten, basestring):
|
||||
rewritten = [rewritten]
|
||||
self.rewritten = rewritten
|
||||
self.rowcount = rowcount
|
||||
self.doc = doc
|
||||
self.query_plan = query_plan
|
||||
self.cache_table = cache_table
|
||||
self.cache_hits = cache_hits
|
||||
self.cache_misses = cache_misses
|
||||
self.cache_absent = cache_absent
|
||||
self.cache_invalidations = cache_invalidations
|
||||
self.remote_address = remote_address
|
||||
|
||||
@property
|
||||
def is_testing_cache(self):
|
||||
return any(attr is not None for attr in [self.cache_hits,
|
||||
self.cache_misses,
|
||||
self.cache_absent,
|
||||
self.cache_invalidations])
|
||||
|
||||
def run(self, cursor, env):
|
||||
failures = []
|
||||
env.querylog.reset()
|
||||
if self.is_testing_cache:
|
||||
tstart = self.table_stats(env)
|
||||
if self.sql in ('begin', 'commit', 'rollback'):
|
||||
getattr(cursor._conn, self.sql)()
|
||||
else:
|
||||
cursor.execute(self.sql, self.bindings)
|
||||
if self.result is not None:
|
||||
result = list(cursor)
|
||||
if self.result != result:
|
||||
failures.append('%r:\n%s !=\n%s' % (self.sql, self.result, result))
|
||||
for i in range(30):
|
||||
lines = env.querylog.tailer.readLines()
|
||||
if not lines:
|
||||
time.sleep(0.1)
|
||||
continue
|
||||
break
|
||||
for line in lines:
|
||||
case_failures = Log(line).check(self)
|
||||
if case_failures:
|
||||
failures.extend(case_failures)
|
||||
|
||||
if self.is_testing_cache:
|
||||
tdelta = self.table_stats_delta(tstart, env)
|
||||
if self.cache_hits is not None and tdelta['Hits'] != self.cache_hits:
|
||||
failures.append(
|
||||
'Bad Cache Hits: %s != %s' % (self.cache_hits, tdelta['Hits']))
|
||||
|
||||
if (self.cache_absent is not None and
|
||||
tdelta['Absent'] != self.cache_absent):
|
||||
failures.append(
|
||||
'Bad Cache Absent: %s != %s' %
|
||||
(self.cache_absent, tdelta['Absent']))
|
||||
|
||||
if (self.cache_misses is not None and
|
||||
tdelta['Misses'] != self.cache_misses):
|
||||
failures.append(
|
||||
'Bad Cache Misses: %s != %s' %
|
||||
(self.cache_misses, tdelta['Misses']))
|
||||
|
||||
if (self.cache_invalidations is not None and
|
||||
tdelta['Invalidations'] != self.cache_invalidations):
|
||||
failures.append(
|
||||
'Bad Cache Invalidations: %s != %s' %
|
||||
(self.cache_invalidations, tdelta['Invalidations']))
|
||||
|
||||
return failures
|
||||
|
||||
def table_stats_delta(self, old, env):
|
||||
result = {}
|
||||
new = self.table_stats(env)
|
||||
for k, v in new.items():
|
||||
result[k] = new[k] - old[k]
|
||||
return result
|
||||
|
||||
def table_stats(self, env):
|
||||
return env.http_get('/debug/table_stats')[self.cache_table]
|
||||
|
||||
def __str__(self):
|
||||
return 'Case %r' % self.doc
|
||||
|
||||
|
||||
class MultiCase(object):
|
||||
|
||||
def __init__(self, doc, sqls_and_cases):
|
||||
self.doc = doc
|
||||
self.sqls_and_cases = sqls_and_cases
|
||||
|
||||
def run(self, cursor, env):
|
||||
failures = []
|
||||
for case in self.sqls_and_cases:
|
||||
if isinstance(case, basestring):
|
||||
if case in ('begin', 'commit', 'rollback'):
|
||||
getattr(cursor._conn, case)()
|
||||
else:
|
||||
cursor.execute(case)
|
||||
continue
|
||||
failures += case.run(cursor, env)
|
||||
return failures
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.sqls_and_cases)
|
||||
|
||||
def __str__(self):
|
||||
return 'MultiCase: %s' % self.doc
(Diff for this file not shown because of its large size.)
@@ -1,9 +0,0 @@
import framework
import nocache_cases


class TestNocache(framework.TestCase):
  def test_sqls(self):
    error_count = self.env.run_cases(nocache_cases.cases)
    if error_count != 0:
      self.fail("test_execution errors: %d" % (error_count))
@@ -1,9 +0,0 @@
import framework
import utils


class TestStatus(framework.TestCase):

  def test_status(self):
    port = self.env.port
    self.assertIn('</html>', utils.get_status(port))
@@ -1,183 +0,0 @@
|
|||
import json
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
import urllib
|
||||
|
||||
from vtdb import cursor
|
||||
from vtdb import dbexceptions
|
||||
|
||||
import environment
|
||||
import framework
|
||||
|
||||
|
||||
class TestStream(framework.TestCase):
|
||||
|
||||
def tearDown(self):
|
||||
self.env.conn.begin()
|
||||
self.env.execute('delete from vtocc_big')
|
||||
self.env.conn.commit()
|
||||
|
||||
# UNION queries like this used to crash vtocc, only straight SELECT
|
||||
# would go through. This is a unit test to show it is fixed.
|
||||
def test_union(self):
|
||||
cu = self.env.execute('select 1 from dual union select 1 from dual',
|
||||
cursorclass=cursor.StreamCursor)
|
||||
count = 0
|
||||
while True:
|
||||
row = cu.fetchone()
|
||||
if row is None:
|
||||
break
|
||||
count += 1
|
||||
self.assertEqual(count, 1)
|
||||
|
||||
def test_customrules(self):
|
||||
bv = {'asdfg': 1}
|
||||
try:
|
||||
self.env.execute('select * from vtocc_test where intval=:asdfg', bv,
|
||||
cursorclass=cursor.StreamCursor)
|
||||
self.fail('Bindvar asdfg should not be allowed by custom rule')
|
||||
except dbexceptions.DatabaseError as e:
|
||||
self.assertContains(str(e), 'error: Query disallowed')
|
||||
if environment.topo_server().flavor() == 'zookeeper':
|
||||
# Make a change to the rule
|
||||
self.env.change_customrules()
|
||||
time.sleep(3)
|
||||
try:
|
||||
self.env.execute(
|
||||
'select * from vtocc_test where intval=:asdfg', bv,
|
||||
cursorclass=cursor.StreamCursor)
|
||||
except dbexceptions.DatabaseError as e:
|
||||
self.fail(
|
||||
'Bindvar asdfg should be allowed after a change of custom rule, '
|
||||
'Err=' + str(e))
|
||||
self.env.restore_customrules()
|
||||
time.sleep(3)
|
||||
try:
|
||||
self.env.execute(
|
||||
'select * from vtocc_test where intval=:asdfg', bv,
|
||||
cursorclass=cursor.StreamCursor)
|
||||
self.fail('Bindvar asdfg should not be allowed by custom rule')
|
||||
except dbexceptions.DatabaseError as e:
|
||||
self.assertContains(str(e), 'error: Query disallowed')
|
||||
|
||||
def test_basic_stream(self):
|
||||
self._populate_vtocc_big_table(100)
|
||||
loop_count = 1
|
||||
|
||||
# select lots of data using a non-streaming query
|
||||
if True:
|
||||
for _ in xrange(loop_count):
|
||||
cu = self.env.execute('select * from vtocc_big b1, vtocc_big b2')
|
||||
rows = cu.fetchall()
|
||||
self.assertEqual(len(rows), 10000)
|
||||
self.check_row_10(rows[10])
|
||||
|
||||
# select lots of data using a streaming query
|
||||
if True:
|
||||
for _ in xrange(loop_count):
|
||||
cu = cursor.StreamCursor(self.env.conn)
|
||||
cu.execute('select * from vtocc_big b1, vtocc_big b2', {})
|
||||
count = 0
|
||||
while True:
|
||||
row = cu.fetchone()
|
||||
if row is None:
|
||||
break
|
||||
if count == 10:
|
||||
self.check_row_10(row)
|
||||
count += 1
|
||||
self.assertEqual(count, 10000)
|
||||
|
||||
def test_streaming_error(self):
|
||||
with self.assertRaises(dbexceptions.DatabaseError):
|
||||
self.env.execute('select count(abcd) from vtocc_big b1',
|
||||
cursorclass=cursor.StreamCursor)
|
||||
|
||||
def check_row_10(self, row):
|
||||
# null the dates so they match
|
||||
row = list(row)
|
||||
row[6] = None
|
||||
row[11] = None
|
||||
row[20] = None
|
||||
row[25] = None
|
||||
self.assertEqual(
|
||||
row,
|
||||
[10L, 'AAAAAAAAAAAAAAAAAA 10', 'BBBBBBBBBBBBBBBBBB 10', 'C',
|
||||
'DDDDDDDDDDDDDDDDDD 10', 'EEEEEEEEEEEEEEEEEE 10', None, 'FF 10',
|
||||
'GGGGGGGGGGGGGGGGGG 10', 10L, 10L, None, 10L, 10, 0L,
|
||||
'AAAAAAAAAAAAAAAAAA 0', 'BBBBBBBBBBBBBBBBBB 0', 'C',
|
||||
'DDDDDDDDDDDDDDDDDD 0', 'EEEEEEEEEEEEEEEEEE 0', None,
|
||||
'FF 0', 'GGGGGGGGGGGGGGGGGG 0', 0L, 0L, None, 0L, 0])
|
||||
|
||||
def test_streaming_terminate(self):
|
||||
try:
|
||||
self._populate_vtocc_big_table(100)
|
||||
query = 'select * from vtocc_big b1, vtocc_big b2, vtocc_big b3'
|
||||
cu = cursor.StreamCursor(self.env.conn)
|
||||
thd = threading.Thread(target=self._stream_exec, args=(cu, query))
|
||||
thd.start()
|
||||
tablet_addr = 'http://' + self.env.conn.addr
|
||||
connId = self._get_conn_id(tablet_addr)
|
||||
self._terminate_query(tablet_addr, connId)
|
||||
thd.join()
|
||||
with self.assertRaises(dbexceptions.DatabaseError) as cm:
|
||||
cu.fetchall()
|
||||
errMsg1 = (
|
||||
'error: the query was killed either because it timed out or was '
|
||||
'canceled: Lost connection to MySQL server during query (errno 2013)')
|
||||
errMsg2 = 'error: Query execution was interrupted (errno 1317)'
|
||||
self.assertNotIn(
|
||||
cm.exception, (errMsg1, errMsg2),
|
||||
'did not raise interruption error: %s' % str(cm.exception))
|
||||
cu.close()
|
||||
except Exception, e:
|
||||
self.fail('Failed with error %s %s' % (str(e), traceback.format_exc()))
|
||||
|
||||
def _populate_vtocc_big_table(self, num_rows):
|
||||
self.env.conn.begin()
|
||||
for i in xrange(num_rows):
|
||||
self.env.execute(
|
||||
'insert into vtocc_big values '
|
||||
'(' + str(i) + ', '
|
||||
"'AAAAAAAAAAAAAAAAAA " + str(i) + "', "
|
||||
"'BBBBBBBBBBBBBBBBBB " + str(i) + "', "
|
||||
"'C', "
|
||||
"'DDDDDDDDDDDDDDDDDD " + str(i) + "', "
|
||||
"'EEEEEEEEEEEEEEEEEE " + str(i) + "', "
|
||||
'now(),'
|
||||
"'FF " + str(i) + "', "
|
||||
"'GGGGGGGGGGGGGGGGGG " + str(i) + "', " +
|
||||
str(i) + ', ' +
|
||||
str(i) + ', '
|
||||
'now(),' +
|
||||
str(i) + ', ' +
|
||||
str(i%100) + ')')
|
||||
self.env.conn.commit()
|
||||
|
||||
# Initiate a slow stream query
|
||||
def _stream_exec(self, cu, query):
|
||||
cu.execute(query, {})
|
||||
|
||||
# Get the connection id from status page
|
||||
def _get_conn_id(self, tablet_addr):
|
||||
streamqueryz_url = tablet_addr + '/streamqueryz?format=json'
|
||||
retries = 3
|
||||
streaming_queries = []
|
||||
while not streaming_queries:
|
||||
content = urllib.urlopen(streamqueryz_url).read()
|
||||
streaming_queries = json.loads(content)
|
||||
retries -= 1
|
||||
if retries == 0:
|
||||
self.fail(
|
||||
'unable to fetch streaming queries from %s' % streamqueryz_url)
|
||||
else:
|
||||
time.sleep(1)
|
||||
connId = streaming_queries[0]['ConnID']
|
||||
return connId
|
||||
|
||||
# Terminate the query via streamqueryz admin page
|
||||
def _terminate_query(self, tablet_addr, connId):
|
||||
terminate_url = (
|
||||
tablet_addr + '/streamqueryz/terminate?format=json&connID=' +
|
||||
str(connId))
|
||||
urllib.urlopen(terminate_url).read()
|
|
@@ -1,297 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2012, Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can
|
||||
# be found in the LICENSE file.
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
import MySQLdb as mysql
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
import urllib2
|
||||
import uuid
|
||||
|
||||
from vtdb import cursor
|
||||
from vtdb import dbexceptions
|
||||
from vtdb import tablet as tablet_conn
|
||||
|
||||
import cases_framework
|
||||
import environment
|
||||
import framework
|
||||
import tablet
|
||||
import utils
|
||||
|
||||
|
||||
class EnvironmentError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class TestEnv(object):
|
||||
memcache = False
|
||||
port = 0
|
||||
querylog = None
|
||||
|
||||
txlog_file = os.path.join(environment.vtlogroot, 'txlog')
|
||||
|
||||
tablet = tablet.Tablet(62344)
|
||||
vttop = environment.vttop
|
||||
vtroot = environment.vtroot
|
||||
|
||||
@property
|
||||
def port(self):
|
||||
return self.tablet.port
|
||||
|
||||
@property
|
||||
def address(self):
|
||||
return 'localhost:%s' % self.port
|
||||
|
||||
def connect(self):
|
||||
c = tablet_conn.connect(
|
||||
self.address, '', 'test_keyspace', '0', 2, caller_id='dev')
|
||||
c.max_attempts = 1
|
||||
return c
|
||||
|
||||
def execute(self, query, binds=None, cursorclass=None):
|
||||
if binds is None:
|
||||
binds = {}
|
||||
curs = cursor.TabletCursor(self.conn)
|
||||
try:
|
||||
curs.execute(query, binds)
|
||||
except dbexceptions.OperationalError:
|
||||
self.conn = self.connect()
|
||||
raise
|
||||
return curs
|
||||
|
||||
def url(self, path):
|
||||
return 'http://localhost:%s/' % (self.port) + path
|
||||
|
||||
def http_get(self, path, use_json=True):
|
||||
data = urllib2.urlopen(self.url(path)).read()
|
||||
if use_json:
|
||||
return json.loads(data)
|
||||
return data
|
||||
|
||||
def debug_vars(self):
|
||||
return framework.MultiDict(self.http_get('/debug/vars'))
|
||||
|
||||
def table_stats(self):
|
||||
return framework.MultiDict(self.http_get('/debug/table_stats'))
|
||||
|
||||
def query_stats(self):
|
||||
return self.http_get('/debug/query_stats')
|
||||
|
||||
def health(self):
|
||||
return self.http_get('/debug/health', use_json=False)
|
||||
|
||||
def check_full_streamlog(self, fi):
|
||||
# FIXME(szopa): better test?
|
||||
for line in fi:
|
||||
if '"name":"bytes 4"' in line:
|
||||
print "FAIL: full streamlog doesn't contain all bind variables."
|
||||
return 1
|
||||
return 0
|
||||
|
||||
def create_customrules(self, filename):
|
||||
with open(filename, 'w') as f:
|
||||
f.write("""[{
|
||||
"Name": "r1",
|
||||
"Description": "disallow bindvar 'asdfg'",
|
||||
"BindVarConds":[{
|
||||
"Name": "asdfg",
|
||||
"OnAbsent": false,
|
||||
"Operator": "NOOP"
|
||||
}]
|
||||
}]""")
|
||||
if environment.topo_server().flavor() == 'zookeeper':
|
||||
utils.run(
|
||||
environment.binary_argstr('zk') +
|
||||
' touch -p /zk/test_ca/config/customrules/testrules')
|
||||
utils.run(
|
||||
environment.binary_argstr('zk') + ' cp ' + filename +
|
||||
' /zk/test_ca/config/customrules/testrules')
|
||||
|
||||
def change_customrules(self):
|
||||
customrules = os.path.join(environment.tmproot, 'customrules.json')
|
||||
with open(customrules, 'w') as f:
|
||||
f.write("""[{
|
||||
"Name": "r2",
|
||||
"Description": "disallow bindvar 'gfdsa'",
|
||||
"BindVarConds":[{
|
||||
"Name": "gfdsa",
|
||||
"OnAbsent": false,
|
||||
"Operator": "NOOP"
|
||||
}]
|
||||
}]""")
|
||||
if environment.topo_server().flavor() == 'zookeeper':
|
||||
utils.run(
|
||||
environment.binary_argstr('zk') + ' cp ' + customrules +
|
||||
' /zk/test_ca/config/customrules/testrules')
|
||||
|
||||
def restore_customrules(self):
|
||||
customrules = os.path.join(environment.tmproot, 'customrules.json')
|
||||
self.create_customrules(customrules)
|
||||
if environment.topo_server().flavor() == 'zookeeper':
|
||||
utils.run(
|
||||
environment.binary_argstr('zk') + ' cp ' + customrules +
|
||||
' /zk/test_ca/config/customrules/testrules')
|
||||
|
||||
def create_schema_override(self, filename):
|
||||
with open(filename, 'w') as f:
|
||||
f.write("""[{
|
||||
"Name": "vtocc_view",
|
||||
"PKColumns": ["key2"],
|
||||
"Cache": {
|
||||
"Type": "RW"
|
||||
}
|
||||
}, {
|
||||
"Name": "vtocc_part1",
|
||||
"PKColumns": ["key2"],
|
||||
"Cache": {
|
||||
"Type": "W",
|
||||
"Table": "vtocc_view"
|
||||
}
|
||||
}, {
|
||||
"Name": "vtocc_part2",
|
||||
"PKColumns": ["key3"],
|
||||
"Cache": {
|
||||
"Type": "W",
|
||||
"Table": "vtocc_view"
|
||||
}
|
||||
}]""")
|
||||
|
||||
def run_cases(self, cases):
|
||||
curs = cursor.TabletCursor(self.conn)
|
||||
error_count = 0
|
||||
|
||||
for case in cases:
|
||||
if isinstance(case, basestring):
|
||||
curs.execute(case)
|
||||
continue
|
||||
try:
|
||||
failures = case.run(curs, self)
|
||||
except Exception:
|
||||
print 'Exception in', case
|
||||
raise
|
||||
error_count += len(failures)
|
||||
for fail in failures:
|
||||
print 'FAIL:', case, fail
|
||||
error_count += self.check_full_streamlog(open(self.querylog.path_full, 'r'))
|
||||
return error_count
|
||||
|
||||
def setUp(self):
|
||||
utils.wait_procs([self.tablet.init_mysql()])
|
||||
self.tablet.mquery(
|
||||
'', ['create database vt_test_keyspace', 'set global read_only = off'])
|
||||
|
||||
self.mysql_conn, mcu = self.tablet.connect('vt_test_keyspace')
|
||||
with open(
|
||||
os.path.join(self.vttop, 'test', 'test_data', 'test_schema.sql')) as f:
|
||||
self.clean_sqls = []
|
||||
self.init_sqls = []
|
||||
clean_mode = False
|
||||
for line in f:
|
||||
line = line.rstrip()
|
||||
if line == '# clean':
|
||||
clean_mode = True
|
||||
if line=='' or line.startswith('#'):
|
||||
continue
|
||||
if clean_mode:
|
||||
self.clean_sqls.append(line)
|
||||
else:
|
||||
self.init_sqls.append(line)
|
||||
try:
|
||||
for line in self.init_sqls:
|
||||
mcu.execute(line, {})
|
||||
finally:
|
||||
mcu.close()
|
||||
|
||||
customrules = os.path.join(environment.tmproot, 'customrules.json')
|
||||
schema_override = os.path.join(environment.tmproot, 'schema_override.json')
|
||||
self.create_schema_override(schema_override)
|
||||
table_acl_config = os.path.join(
|
||||
environment.vttop, 'test', 'test_data', 'table_acl_config.json')
|
||||
|
||||
environment.topo_server().setup()
|
||||
self.create_customrules(customrules);
|
||||
utils.run_vtctl('CreateKeyspace -force test_keyspace')
|
||||
self.tablet.init_tablet('master', 'test_keyspace', '0')
|
||||
if environment.topo_server().flavor() == 'zookeeper':
|
||||
self.tablet.start_vttablet(
|
||||
memcache=self.memcache,
|
||||
zkcustomrules='/zk/test_ca/config/customrules/testrules',
|
||||
schema_override=schema_override,
|
||||
table_acl_config=table_acl_config,
|
||||
)
|
||||
else:
|
||||
self.tablet.start_vttablet(
|
||||
memcache=self.memcache,
|
||||
filecustomrules=customrules,
|
||||
schema_override=schema_override,
|
||||
table_acl_config=table_acl_config,
|
||||
)
|
||||
self.conn = self.connect()
|
||||
self.txlogger = utils.curl(
|
||||
self.url('/debug/txlog'), background=True,
|
||||
stdout=open(self.txlog_file, 'w'))
|
||||
self.txlog = framework.Tailer(self.txlog_file, flush=self.tablet.flush)
|
||||
self.log = framework.Tailer(
|
||||
os.path.join(environment.vtlogroot, 'vttablet.INFO'),
|
||||
flush=self.tablet.flush)
|
||||
self.querylog = Querylog(self)
|
||||
|
||||
def tearDown(self):
|
||||
if self.querylog:
|
||||
self.querylog.close()
|
||||
self.tablet.kill_vttablet()
|
||||
try:
|
||||
mcu = self.mysql_conn.cursor()
|
||||
for line in self.clean_sqls:
|
||||
try:
|
||||
mcu.execute(line, {})
|
||||
except:
|
||||
pass
|
||||
mcu.close()
|
||||
utils.wait_procs([self.tablet.teardown_mysql()])
|
||||
except:
|
||||
# FIXME: remove
|
||||
pass
|
||||
if getattr(self, 'txlogger', None):
|
||||
self.txlogger.terminate()
|
||||
environment.topo_server().teardown()
|
||||
utils.kill_sub_processes()
|
||||
utils.remove_tmp_files()
|
||||
self.tablet.remove_tree()
|
||||
|
||||
|
||||
class Querylog(object):
|
||||
|
||||
def __init__(self, env):
|
||||
self.env = env
|
||||
self.id = str(uuid.uuid4())
|
||||
self.curl = utils.curl(
|
||||
self.env.url('/debug/querylog'), background=True,
|
||||
stdout=open(self.path, 'w'))
|
||||
self.curl_full = utils.curl(
|
||||
self.env.url('/debug/querylog?full=true'), background=True,
|
||||
stdout=open(self.path_full, 'w'))
|
||||
self.tailer = framework.Tailer(self.path, sleep=0.02)
|
||||
self.tailer_full = framework.Tailer(self.path_full, sleep=0.02)
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
return os.path.join(environment.vtlogroot, 'querylog' + self.id)
|
||||
|
||||
@property
|
||||
def path_full(self):
|
||||
return os.path.join(environment.vtlogroot, 'querylog_full' + self.id)
|
||||
|
||||
def reset(self):
|
||||
self.tailer.reset()
|
||||
self.tailer_full.reset()
|
||||
|
||||
def close(self, *args, **kwargs):
|
||||
self.curl.terminate()
|
||||
self.curl_full.terminate()
|
|
@@ -1,9 +0,0 @@
{
  "charset": "utf8",
  "dbname": "<test db name>",
  "host": "localhost",
  "pass": "<password>",
  "port": 3306,
  "uname": "<user name>",
  "unix_socket": ""
}
@@ -1,80 +0,0 @@
|
|||
{
|
||||
"table_groups": [
|
||||
{
|
||||
"name": "mysql",
|
||||
"table_names_or_prefixes": [""],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"],
|
||||
"admins": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_cached",
|
||||
"table_names_or_prefixes": ["vtocc_nocache", "vtocc_cached%"],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"],
|
||||
"admins": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_renamed",
|
||||
"table_names_or_prefixes": ["vtocc_renamed%"],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"],
|
||||
"admins": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_part",
|
||||
"table_names_or_prefixes": ["vtocc_part%"],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"],
|
||||
"admins": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc",
|
||||
"table_names_or_prefixes": ["vtocc_a", "vtocc_b", "vtocc_c", "dual", "vtocc_d", "vtocc_temp", "vtocc_e", "vtocc_f", "upsert_test", "vtocc_strings", "vtocc_fracts", "vtocc_ints", "vtocc_misc", "vtocc_big", "vtocc_view"],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"],
|
||||
"admins": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_test",
|
||||
"table_names_or_prefixes": ["vtocc_test"],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"],
|
||||
"admins": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_acl_unmatched",
|
||||
"table_names_or_prefixes": ["vtocc_acl_unmatched"],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"],
|
||||
"admins": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_acl_no_access",
|
||||
"table_names_or_prefixes": ["vtocc_acl_no_access"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_acl_read_only",
|
||||
"table_names_or_prefixes": ["vtocc_acl_read_only"],
|
||||
"readers": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_acl_read_write",
|
||||
"table_names_or_prefixes": ["vtocc_acl_read_write"],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_acl_admin",
|
||||
"table_names_or_prefixes": ["vtocc_acl_admin"],
|
||||
"readers": ["dev"],
|
||||
"writers": ["dev"],
|
||||
"admins": ["dev"]
|
||||
},
|
||||
{
|
||||
"name": "vtocc_acl_all_user_read_only",
|
||||
"table_names_or_prefixes": ["vtocc_acl_all_user_read_only"],
|
||||
"readers": ["dev"]
|
||||
}
|
||||
]
|
||||
}
|
|
@@ -1,101 +0,0 @@
|
|||
# init
|
||||
set storage_engine=InnoDB
|
||||
create table vtocc_test(intval int, floatval float, charval varchar(256), binval varbinary(256), primary key(intval)) comment 'vtocc_nocache'
|
||||
begin
|
||||
delete from vtocc_test
|
||||
insert into vtocc_test values(1, 1.12345, 0xC2A2, 0x00FF), (2, null, '', null), (3, null, null, null)
|
||||
commit
|
||||
|
||||
create table vtocc_a(eid bigint default 1, id int default 1, name varchar(128), foo varbinary(128), primary key(eid, id)) comment 'vtocc_nocache'
|
||||
create table vtocc_b(eid bigint, id int, primary key(eid, id)) comment 'vtocc_nocache'
|
||||
create table vtocc_c(eid bigint, name varchar(128), foo varbinary(128), primary key(eid, name)) comment 'vtocc_nocache'
|
||||
create table vtocc_d(eid bigint, id int) comment 'vtocc_nocache'
|
||||
create table vtocc_e(eid bigint auto_increment, id int default 1, name varchar(128) default 'name', foo varchar(128), primary key(eid, id, name)) comment 'vtocc_nocache'
|
||||
create table vtocc_f(vb varbinary(16) default 'ab', id int, primary key(vb)) comment 'vtocc_nocache'
|
||||
create table upsert_test(id1 int, id2 int, primary key (id1)) comment 'vtocc_nocache'
|
||||
create unique index id2_idx on upsert_test(id2)
|
||||
begin
|
||||
delete from vtocc_a
|
||||
delete from vtocc_c
|
||||
insert into vtocc_a(eid, id, name, foo) values(1, 1, 'abcd', 'efgh'), (1, 2, 'bcde', 'fghi')
|
||||
insert into vtocc_b(eid, id) values(1, 1), (1, 2)
|
||||
insert into vtocc_c(eid, name, foo) values(10, 'abcd', '20'), (11, 'bcde', '30')
|
||||
delete from upsert_test
|
||||
commit
|
||||
|
||||
create table vtocc_cached1(eid bigint, name varchar(128), foo varbinary(128), primary key(eid))
|
||||
create index aname1 on vtocc_cached1(name)
|
||||
begin
|
||||
delete from vtocc_cached1
|
||||
insert into vtocc_cached1 values (1, 'a', 'abcd')
|
||||
insert into vtocc_cached1 values (2, 'a', 'abcd')
|
||||
insert into vtocc_cached1 values (3, 'c', 'abcd')
|
||||
insert into vtocc_cached1 values (4, 'd', 'abcd')
|
||||
insert into vtocc_cached1 values (5, 'e', 'efgh')
|
||||
insert into vtocc_cached1 values (9, 'i', 'ijkl')
|
||||
commit
|
||||
|
||||
create table vtocc_cached2(eid bigint, bid varbinary(16), name varchar(128), foo varbinary(128), primary key(eid, bid))
|
||||
create index aname2 on vtocc_cached2(eid, name)
|
||||
begin
|
||||
delete from vtocc_cached2
|
||||
insert into vtocc_cached2 values (1, 'foo', 'abcd1', 'efgh')
|
||||
insert into vtocc_cached2 values (1, 'bar', 'abcd1', 'efgh')
|
||||
insert into vtocc_cached2 values (2, 'foo', 'abcd2', 'efgh')
|
||||
insert into vtocc_cached2 values (2, 'bar', 'abcd2', 'efgh')
|
||||
commit
|
||||
|
||||
create table vtocc_big(id int, string1 varchar(128), string2 varchar(100), string3 char(1), string4 varchar(50), string5 varchar(50), date1 date, string6 varchar(16), string7 varchar(120), bigint1 bigint(20), bigint2 bigint(20), date2 date, integer1 int, tinyint1 tinyint(4), primary key(id)) comment 'vtocc_big'
|
||||
|
||||
create table vtocc_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny)) comment 'vtocc_nocache'
|
||||
create table vtocc_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id)) comment 'vtocc_nocache'
|
||||
create table vtocc_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb)) comment 'vtocc_nocache'
|
||||
create table vtocc_misc(id int, b bit(8), d date, dt datetime, t time, primary key(id)) comment 'vtocc_nocache'
|
||||
|
||||
create table vtocc_part1(key1 bigint, key2 bigint, data1 int, primary key(key1, key2))
|
||||
create unique index vtocc_key2 on vtocc_part1(key2)
|
||||
create table vtocc_part2(key3 bigint, data2 int, primary key(key3))
|
||||
create view vtocc_view as select key2, key1, data1, data2 from vtocc_part1, vtocc_part2 where key2=key3
|
||||
begin
|
||||
delete from vtocc_part1
|
||||
delete from vtocc_part2
|
||||
insert into vtocc_part1 values(10, 1, 1)
|
||||
insert into vtocc_part1 values(10, 2, 2)
|
||||
insert into vtocc_part2 values(1, 3)
|
||||
insert into vtocc_part2 values(2, 4)
|
||||
commit
|
||||
|
||||
create table vtocc_acl_no_access(key1 bigint, key2 bigint, primary key(key1))
|
||||
create table vtocc_acl_read_only(key1 bigint, key2 bigint, primary key(key1))
|
||||
create table vtocc_acl_read_write(key1 bigint, key2 bigint, primary key(key1))
|
||||
create table vtocc_acl_admin(key1 bigint, key2 bigint, primary key(key1))
|
||||
create table vtocc_acl_unmatched(key1 bigint, key2 bigint, primary key(key1))
|
||||
create table vtocc_acl_all_user_read_only(key1 bigint, key2 bigint, primary key(key1))
|
||||
|
||||
# clean
|
||||
drop table if exists vtocc_test
|
||||
drop table if exists vtocc_a
|
||||
drop table if exists vtocc_b
|
||||
drop table if exists vtocc_c
|
||||
drop table if exists vtocc_d
|
||||
drop table if exists vtocc_e
|
||||
drop table if exists vtocc_f
|
||||
drop table if exists upsert_test
|
||||
drop table if exists vtocc_cached1
|
||||
drop table if exists vtocc_cached2
|
||||
drop table if exists vtocc_renamed
|
||||
drop table if exists vtocc_nocache
|
||||
drop table if exists vtocc_big
|
||||
drop table if exists vtocc_ints
|
||||
drop table if exists vtocc_fracts
|
||||
drop table if exists vtocc_strings
|
||||
drop table if exists vtocc_misc
|
||||
drop view if exists vtocc_view
|
||||
drop table if exists vtocc_part1
|
||||
drop table if exists vtocc_part2
|
||||
drop table if exists vtocc_acl_no_access
|
||||
drop table if exists vtocc_acl_read_only
|
||||
drop table if exists vtocc_acl_read_write
|
||||
drop table if exists vtocc_acl_admin
|
||||
drop table if exists vtocc_acl_unmatched
|
||||
drop table if exists vtocc_acl_all_user_read_only
|