Merge pull request #275 from aismail/docker-black-box-tests

Docker black box tests
This commit is contained in:
Rene Cannao 2015-06-25 08:55:20 +02:00
Родитель f0e868fb1e adc7a8d367
Коммит eb692ad933
18 изменённых файлов: 869 добавлений и 0 удалений

4
.gitignore поставляемый
Просмотреть файл

@ -2,6 +2,7 @@
*.o
*.ko
*.oo
*.pyc
# Libraries
*.lib
@ -90,3 +91,6 @@ deps/libconfig/libconfig-1.4.9/
#re2
deps/re2/re2/
test/.vagrant
.DS_Store

4
requirements.txt Normal file
Просмотреть файл

@ -0,0 +1,4 @@
docker-compose==1.1.0
MySQL-python==1.2.5
nose==1.3.6
requests==2.4.3

Просмотреть файл

@ -0,0 +1,21 @@
# Scenario: one ProxySQL proxy in front of a single MySQL backend.
proxysql:
  build: ../base/proxysql
  links:
    - backend1hostgroup0
  ports:
    # ProxySQL admin port for MySQL commands
    - "6032:6032"
    # ProxySQL main port
    - "6033:6033"
    # gdbserver
    - "2345:2345"
  # privileged so gdbserver can ptrace the proxysql process.
  privileged: true

# Service name encodes metadata: backend<X>hostgroup<Y> (parsed by the
# Python test harness to derive the hostgroup id).
backend1hostgroup0:
  build: ./mysql
  environment:
    MYSQL_ROOT_PASSWORD: root
  expose:
    - "3306"
  ports:
    - "13306:3306"

Просмотреть файл

@ -0,0 +1,7 @@
# We are creating a custom Dockerfile for MySQL as there is no easy way to
# move a file from host into the container. In our case, it's schema.sql
# There is a proposed improvement to "docker cp" but it's still being
# discussed (https://github.com/docker/docker/issues/5846).
FROM mysql:latest
# Stage the schema dump and its loader into /tmp; the test harness later
# executes /tmp/import_schema.sh inside the container via `docker exec`.
ADD ./schema.sql /tmp/
ADD ./import_schema.sh /tmp/

Просмотреть файл

@ -0,0 +1 @@
# Load the schema fixture into the MySQL server running in this container.
# Input redirection replaces the original `cat file | mysql` pipeline
# (useless use of cat: same behavior, one fewer process).
mysql -h 127.0.0.1 -u root -proot < /tmp/schema.sql

Просмотреть файл

@ -0,0 +1,18 @@
-- Test fixture loaded into every MySQL backend by import_schema.sh.
DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
-- "john" is also registered in ProxySQL's mysql_users (proxysql.cnf);
-- "danny" exists only on the backend (used by the authentication tests
-- to prove that backend-only users are rejected by the proxy).
CREATE USER john@'%' IDENTIFIED BY 'doe';
CREATE USER danny@'%' IDENTIFIED BY 'white';
GRANT ALL PRIVILEGES ON test.* TO 'john'@'%';
GRANT ALL PRIVILEGES ON test.* TO 'danny'@'%';
FLUSH PRIVILEGES;
USE test;
-- Small lookup table queried by one_backend_test.py.
CREATE TABLE strings(value LONGTEXT);
INSERT INTO strings(value) VALUES('a');
INSERT INTO strings(value) VALUES('ab');
INSERT INTO strings(value) VALUES('abc');
INSERT INTO strings(value) VALUES('abcd');

Просмотреть файл

@ -0,0 +1,27 @@
# We're using Ubuntu 14:04 because ProxySQL compilation needs one of the latest
# g++ compilers. Also, it's a long term release.
FROM ubuntu:14.04
MAINTAINER Andrei Ismail <iandrei@gmail.com>

# Build prerequisites for ProxySQL and sysbench, plus gdb/gdbserver for
# remote debugging of the proxy binary.
RUN apt-get update && apt-get install -y \
    automake \
    cmake \
    make \
    g++ \
    gcc \
    gdb \
    gdbserver \
    git \
    libmysqlclient-dev \
    libssl-dev \
    libtool

# sysbench is built from source and installed into /usr/bin.
RUN cd /opt; git clone https://github.com/akopytov/sysbench.git
RUN cd /opt/sysbench; ./autogen.sh; ./configure --bindir=/usr/bin; make; make install

# Build ProxySQL itself.
RUN cd /opt; git clone https://github.com/sysown/proxysql-0.2.git proxysql
RUN cd /opt/proxysql; make clean && make
RUN mkdir -p /var/run/proxysql
ADD ./proxysql.cnf /etc/

WORKDIR /opt/proxysql/src
# Run ProxySQL under gdbserver so a gdb on the host can attach (port 2345,
# see src/gdb-commands.txt).
CMD ["gdbserver", "0.0.0.0:2345", "/opt/proxysql/src/proxysql", "--initial"]

Просмотреть файл

@ -0,0 +1,24 @@
# ProxySQL configuration used by the black-box test scenarios.
# Scratch directory for ProxySQL's runtime files.
datadir="/tmp"
admin_variables =
{
# Credentials/interface of the admin port; must match
# PROXYSQL_ADMIN_* in proxysql_base_test.py.
admin_credentials="admin:admin"
mysql_ifaces="0.0.0.0:6032"
refresh_interval=2000
debug=true
}
# Users allowed to connect through the proxy. Backends are NOT listed
# here on purpose: the test harness inserts them into `mysql_servers`
# at runtime via the admin interface.
mysql_users =
(
{
# Default read/write user used by most tests.
username = "root"
password = "root"
default_hostgroup = 0
},
{
# Extra user exercised by the authentication tests.
username = "john"
password = "doe"
default_hostgroup = 0
}
)

3
src/gdb-commands.txt Normal file
Просмотреть файл

@ -0,0 +1,3 @@
# GDB startup script for remote-debugging the ProxySQL binary that runs
# under gdbserver inside the Docker container (port 2345 is exposed by
# the scenario's docker-compose file).
set pagination off
# Attach to the gdbserver launched by the proxysql container's CMD.
target remote 0.0.0.0:2345
continue

85
test/Vagrantfile поставляемый Normal file
Просмотреть файл

@ -0,0 +1,85 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrant box used to run the ProxySQL black-box test suite in a known
# Ubuntu 14.04 environment (see test/how_to.md for usage).
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(2) do |config|
# The most common configuration options are documented and commented below.
# For a complete reference, please see the online documentation at
# https://docs.vagrantup.com.
# Every Vagrant development environment requires a box. You can search for
# boxes at https://atlas.hashicorp.com/search.
config.vm.box = "ubuntu-14.04"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
# config.vm.network "forwarded_port", guest: 80, host: 8080
# Create a private network, which allows host-only access to the machine
# using a specific IP.
# config.vm.network "private_network", ip: "192.168.33.10"
# Create a public network, which generally matched to bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
# config.vm.network "public_network"
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
# config.vm.synced_folder "../data", "/vagrant_data"
# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
# config.vm.provider "virtualbox" do |vb|
# # Display the VirtualBox GUI when booting the machine
# vb.gui = true
#
# # Customize the amount of memory on the VM:
# vb.memory = "1024"
# end
#
# View the documentation for the provider you are using for more
# information on available options.
# Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
# such as FTP and Heroku are also available. See the documentation at
# https://docs.vagrantup.com/v2/push/atlas.html for more information.
# config.push.define "atlas" do |push|
# push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
# end
# Enable provisioning with a shell script. Additional provisioners such as
# Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
# documentation for more information about their specific syntax and use.
# One-time provisioning: install the test prerequisites and clone the
# docker-black-box-tests branch of proxysql into /opt/proxysql.
config.vm.provision "shell", inline: <<-SHELL
sudo apt-get update
sudo apt-get install -y libmysqlclient-dev python python-dev wget
sudo wget -qO- https://bootstrap.pypa.io/ez_setup.py | python
sudo easy_install -U pip
cd /opt
sudo git clone https://github.com/aismail/proxysql-0.2.git proxysql
cd proxysql
sudo git checkout docker-black-box-tests
sudo pip install -r requirements.txt
SHELL
# Re-run on every "vagrant up": refresh the branch and its Python deps.
config.vm.provision "shell", run: "always", inline: <<-SHELL
cd /opt/proxysql
sudo git pull origin docker-black-box-tests
sudo pip install -r requirements.txt
SHELL
end

0
test/__init__.py Normal file
Просмотреть файл

9
test/admin_test.py Normal file
Просмотреть файл

@ -0,0 +1,9 @@
from proxysql_base_test import ProxySQLBaseTest
class AdminTest(ProxySQLBaseTest):
    """Exercises ProxySQL's admin interface (port 6032)."""

    DOCKER_COMPOSE_FILE = "./scenarios/1backend"

    def test_stop_main_thread(self):
        # This test will just assert that PROXYSQL STOP works correctly:
        # passing means the admin command did not raise.
        self.run_query_proxysql_admin("PROXYSQL STOP")

Просмотреть файл

@ -0,0 +1,43 @@
import MySQLdb
from MySQLdb import OperationalError
from nose.tools import raises
from proxysql_base_test import ProxySQLBaseTest
class AuthenticationTest(ProxySQLBaseTest):
    """Black-box checks of ProxySQL's MySQL-protocol authentication."""

    DOCKER_COMPOSE_FILE = "./scenarios/1backend"

    # Cheap probe used by every test in this class.
    _PROBE = "SELECT @@version_comment LIMIT 1"

    def test_existing_user_with_correct_password_works(self):
        # A user known to both the backend and ProxySQL must get the
        # same answer through either path.
        version1 = self.run_query_mysql(
            AuthenticationTest._PROBE, "test",
            return_result=True,
            username="john", password="doe")
        version2 = self.run_query_proxysql(
            AuthenticationTest._PROBE, "test",
            return_result=True,
            username="john", password="doe")
        self.assertEqual(version1, version2)

    @raises(OperationalError)
    def test_existing_user_with_correct_password_but_not_registerd_within_proxysql_does_not_work(self):
        # NOTE(review): "registerd" typo kept in the method name so the
        # reported test identifier stays stable for existing tooling.
        self.run_query_proxysql(
            AuthenticationTest._PROBE, "test",
            return_result=True,
            username="danny", password="white")

    @raises(OperationalError)
    def test_existing_user_with_incorrect_password_does_not_work(self):
        self.run_query_proxysql(
            AuthenticationTest._PROBE, "test",
            return_result=True,
            username="john", password="doe2")

    @raises(OperationalError)
    def test_inexisting_user_with_random_password_does_not_work(self):
        self.run_query_proxysql(
            AuthenticationTest._PROBE, "test",
            return_result=True,
            username="johnny", password="randomdoe")

98
test/how_to.md Normal file
Просмотреть файл

@ -0,0 +1,98 @@
# How are the tests built?
First off, a few words about what the test infrastructure looks like.
Tests are written in Python, and the services needed for running a test
(a ProxySQL instance and one or more MySQL instances) are specified in a
docker-compose.yml file and are started by using Docker's docker-compose.
Tests are run using nosetests (https://nose.readthedocs.org/en/latest/),
Python's de facto standard for writing and running tests. The command
to run the tests is, from the root of the repository:
```bash
nosetests --nocapture
```
The "--nocapture" flag is present in order to have detailed output on what is
going on. Otherwise, the output will be suppressed by nosetests to give you only
a high-level report of how many tests passed and failed.
# Where can I find the tests?
Tests are grouped in scenarios. A __scenario__ specifies a configuration of
ProxySQL and MySQL backends, together with initial data to populate the MySQL
instances (a text file containing SQL queries).
The folder "scenarios" found in the root folder of the repository contains
these scenarios. There is a "base" folder with common utilities, and then there
is one folder for each scenario. For example, "1backend" is the name for the
scenario of 1 ProxySQL proxy, and 1 MySQL backend.
To create such a scenario, the simplest way to go about, is to copy-paste the
"1backend" scenario and modify it. Some of the important things to modify:
- docker-compose.yml. This is where the list of services is described, and
where you actually specify how many MySQL backends there are, and which ports
they expose and how. Be careful, there is a naming convention
- mysql/schema.sql. This is where the MySQL backends get populated
# How do I write a test?
It's pretty simple. Once you have a working scenario, you write a class in
the top-level "test" folder, which inherits from ProxySQLBaseTest. One such
example is one_backend_test.py. The only thing which you should specify is
the docker-compose filename, and then start querying both the proxy and the
MySQL backends and testing assertions by using the `run_query_proxysql` and
`run_query_mysql` class methods.
# How do I run the tests locally?
1) install vagrant on the machine where you'll be running the tests
2) vagrant box add ubuntu-14.04 ubuntu-14.04.box
(The ubuntu-14.04.box file is obtained from https://github.com/jose-lpa/packer-ubuntu_14.04/releases/download/v2.0/ubuntu-14.04.box)
# This will actually install what is needed on the Vagrant box
3) cd proxysql/test; vagrant up; vagrant ssh -c "cd /opt/proxysql; nosetests --nocapture"; vagrant halt
# How do I run the tests on a machine without internet connectivity?
For that you need to prepare a Virtual box .box file with the latest state of
the code and the packages from a machine that has internet connectivity and
copy it over to the machine with no connectivity.
To prepare the .box file:
1) clone proxysql test repo locally, let's assume it's in ~/proxysql
2) cd ~/proxysql/test; vagrant up
This will update the machine with the latest master code. If you need to be
testing a different branch, you will have to do an extra step (step 3):
3) vagrant ssh -c "cd /opt/proxysql/test; git checkout my-branch; git pull origin my-branch; sudo pip install -r requirements.txt"
This will fetch the code for the given branch __and__ install the necessary
packages for running the tests on that branch (if there are any new packages).
4) Package it all in a .box file
vagrant package --output proxysql-tests.box
This will generate a big .box file, approximately 1.1GB as of the writing of
this document. This file can be run without having internet connectivity.
5) Copy the proxysql-tests.box to the machine where you want to run the tests
6) vagrant box add proxysql-tests proxysql-tests.box (from the directory where
you copied the .box file and where you are planning to run the tests)
7) vagrant init proxysql-tests; vagrant up
8) vagrant up; vagrant ssh -c "cd /opt/proxysql; nosetests --nocapture"; vagrant halt
to actually run the tests.
NB: we are assuming that the only useful output from running the tests is
stdout. As we will add more tests to the test suite, this section will be
refined on how to retrieve the results as well.

13
test/one_backend_test.py Normal file
Просмотреть файл

@ -0,0 +1,13 @@
import MySQLdb
from proxysql_base_test import ProxySQLBaseTest
class OneBackendTest(ProxySQLBaseTest):
    """Smoke test for the single-backend scenario."""

    DOCKER_COMPOSE_FILE = "./scenarios/1backend"

    def test_select_strings_returns_correct_result(self):
        # Row order is irrelevant here, so compare as sets.
        rows = self.run_query_proxysql("SELECT * FROM strings", "test")
        fetched = {row[0] for row in rows}
        self.assertEqual(fetched, {'a', 'ab', 'abc', 'abcd'})

416
test/proxysql_base_test.py Normal file
Просмотреть файл

@ -0,0 +1,416 @@
import random
import re
import subprocess
import time
from unittest import TestCase
from docker import Client
from docker.utils import kwargs_from_env
import MySQLdb
from proxysql_ping_thread import ProxySQL_Ping_Thread
class ProxySQLBaseTest(TestCase):
    """Base class for the ProxySQL black-box tests.

    Responsibilities:
      - start and stop the docker-compose scenario pointed to by
        ``DOCKER_COMPOSE_FILE`` (one ProxySQL container and one or more
        MySQL backend containers);
      - populate the MySQL backends with the scenario's SQL dump and
        register them in ProxySQL's ``mysql_servers`` admin table;
      - expose query helpers (``run_query_proxysql``,
        ``run_query_proxysql_admin``, ``run_query_mysql``,
        ``run_sysbench_proxysql``) to the concrete test classes.
    """

    # Folder containing the scenario's docker-compose.yml; subclasses
    # must override this.
    DOCKER_COMPOSE_FILE = None

    # Connection details for ProxySQL's admin interface.
    PROXYSQL_ADMIN_PORT = 6032
    PROXYSQL_ADMIN_USERNAME = "admin"
    PROXYSQL_ADMIN_PASSWORD = "admin"

    # Connection details for ProxySQL's main (read/write) interface.
    PROXYSQL_RW_PORT = 6033
    PROXYSQL_RW_USERNAME = "root"
    PROXYSQL_RW_PASSWORD = "root"

    # TODO(andrei): make it possible to set this to False, and make False
    # the default value.
    INTERACTIVE_TEST = True

    @classmethod
    def _startup_docker_services(cls):
        """Start up all the docker services necessary to start this test.

        They are specified in the docker compose file specified in the
        variable cls.DOCKER_COMPOSE_FILE.
        """
        # We have to perform docker-compose build + docker-compose up,
        # instead of just doing the latter because of a bug which will give a
        # 500 internal error for the Docker bug. When this is fixed, we
        # should remove this first extra step.
        subprocess.call(["docker-compose", "build"], cwd=cls.DOCKER_COMPOSE_FILE)
        subprocess.call(["docker-compose", "up", "-d"], cwd=cls.DOCKER_COMPOSE_FILE)

    @classmethod
    def _shutdown_docker_services(cls):
        """Shut down all the docker services necessary to start this test.

        They are specified in the docker compose file specified in the
        variable cls.DOCKER_COMPOSE_FILE.
        """
        subprocess.call(["docker-compose", "stop"], cwd=cls.DOCKER_COMPOSE_FILE)
        subprocess.call(["docker-compose", "rm", "--force"], cwd=cls.DOCKER_COMPOSE_FILE)

    @classmethod
    def _get_proxysql_container(cls):
        """Out of all the started docker containers, select the one which
        represents the proxy instance; returns None when none is found.

        Note that this only supports one proxy instance for now. This method
        relies on interrogating the Docker daemon via its REST API.
        """
        containers = Client(**kwargs_from_env()).containers()
        for container in containers:
            if 'proxysql' in container['Image']:
                return container

    @classmethod
    def _get_mysql_containers(cls):
        """Out of all the started docker containers, select the ones which
        represent the MySQL backend instances.

        This method relies on interrogating the Docker daemon via its REST
        API.
        """
        containers = Client(**kwargs_from_env()).containers()
        return [container for container in containers
                if 'proxysql' not in container['Image']]

    @classmethod
    def _populate_mysql_containers_with_dump(cls):
        """Populate the started MySQL backend containers with the scenario's
        SQL dump file.

        The reason for doing this __after__ the containers are started is
        because we want to keep them as generic as possible.
        """
        # We have already added the SQL dump to the container by using
        # the ADD mysql command in the Dockerfile for mysql -- check it
        # out. The standard agreed location is at /tmp/schema.sql.
        #
        # Unfortunately we can't do this step at runtime due to limitations
        # on how transfer between host and container is supposed to work by
        # design. See the Dockerfile for MySQL for more details.
        for mysql_container in cls._get_mysql_containers():
            # Docker reports names as "/container_name"; strip the slash.
            container_id = mysql_container['Names'][0][1:]
            subprocess.call(["docker", "exec", container_id,
                             "bash", "/tmp/import_schema.sh"])

    @classmethod
    def _extract_hostgroup_from_container_name(cls, container_name):
        """MySQL backend containers are named using a naming convention:
        backendXhostgroupY, where X and Y can be multi-digit numbers.
        This extracts the value of the hostgroup from the container name.

        I made this choice because I wasn't able to find another easy way to
        associate arbitrary metadata with a Docker container through the
        docker compose file.
        """
        # docker-compose names containers "<project>_<service>_<index>";
        # the service name is the middle token. Callers pass the name
        # upper-cased, hence the upper-case pattern.
        service_name = container_name.split('_')[1]
        return int(re.search(r'BACKEND(\d+)HOSTGROUP(\d+)', service_name).group(2))

    @classmethod
    def _extract_port_number_from_uri(cls, uri):
        """Given a Docker container URI such as "tcp://172.17.0.2:3306"
        (exposed as an environment variable by the host linking mechanism),
        extract the TCP port number from it."""
        return int(uri.split(':')[2])

    @classmethod
    def _get_environment_variables_from_container(cls, container_name):
        """Retrieve the environment variables from the given container as a
        dict.

        This is useful because the host linking mechanism will expose
        connectivity information to the linked hosts by the use of
        environment variables.
        """
        output = Client(**kwargs_from_env()).execute(container_name, 'env')
        result = {}
        for line in output.split('\n'):
            line = line.strip()
            if not line:
                continue
            # Split on the FIRST '=' only: env values (e.g. LS_COLORS)
            # may themselves contain '=' characters, which previously
            # caused an unpacking ValueError.
            k, v = line.split('=', 1)
            result[k] = v
        return result

    @classmethod
    def _populate_proxy_configuration_with_backends(cls):
        """Populate ProxySQL's admin information with the MySQL backends
        and their associated hostgroups.

        This is needed because I do not want to hardcode this into the
        ProxySQL config file of the test scenario, as it leaves more room
        for quick iteration.

        In order to configure ProxySQL with the correct backends, we are
        using the MySQL admin interface of ProxySQL, and inserting rows into
        the `mysql_servers` table, which contains a list of which servers go
        into which hostgroup.
        """
        proxysql_container = cls._get_proxysql_container()
        mysql_containers = cls._get_mysql_containers()
        environment_variables = cls._get_environment_variables_from_container(
            proxysql_container['Names'][0][1:])

        proxy_admin_connection = MySQLdb.connect("127.0.0.1",
                                                 cls.PROXYSQL_ADMIN_USERNAME,
                                                 cls.PROXYSQL_ADMIN_PASSWORD,
                                                 port=cls.PROXYSQL_ADMIN_PORT)
        cursor = proxy_admin_connection.cursor()
        for mysql_container in mysql_containers:
            container_name = mysql_container['Names'][0][1:].upper()
            port_uri = environment_variables['%s_PORT' % container_name]
            port_no = cls._extract_port_number_from_uri(port_uri)
            ip = environment_variables['%s_PORT_%d_TCP_ADDR' % (container_name, port_no)]
            hostgroup = cls._extract_hostgroup_from_container_name(container_name)
            # Values are ints and docker-assigned addresses (not untrusted
            # input), so plain interpolation is acceptable here.
            cursor.execute("INSERT INTO mysql_servers(hostgroup_id, hostname, port, status) "
                           "VALUES(%d, '%s', %d, 'ONLINE')" %
                           (hostgroup, ip, port_no))
        cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
        cursor.close()
        proxy_admin_connection.close()

    @classmethod
    def setUpClass(cls):
        """Bring up the scenario and wire everything together."""
        # Always shutdown docker services because the previous test might
        # have left them in limbo.
        cls._shutdown_docker_services()
        cls._startup_docker_services()
        if cls.INTERACTIVE_TEST:
            cls._compile_host_proxysql()
            cls._connect_gdb_to_proxysql_within_container()
        # Sleep for 30 seconds because we want to populate the MySQL
        # containers with SQL dumps, but there is a race condition because
        # we do not know when the MySQL daemons inside them have actually
        # started or not.
        # TODO(andrei): find a better solution
        time.sleep(30)
        cls._populate_mysql_containers_with_dump()
        cls._populate_proxy_configuration_with_backends()
        cls._start_proxysql_pings()

    @classmethod
    def tearDownClass(cls):
        """Tear the scenario down; in interactive mode, wait for gdb first."""
        if cls.INTERACTIVE_TEST:
            cls._gdb_process.wait()
        # It's essential that pings are stopped __after__ the gdb process
        # has finished. This allows them to keep pinging ProxySQL in the
        # background while it's stuck waiting for user interaction (user
        # interaction needed in order to debug the problem causing it to
        # crash).
        cls._stop_proxysql_pings()
        cls._shutdown_docker_services()

    def run_query_proxysql(self, query, db, return_result=True,
                           username=None, password=None, port=None):
        """Run a query against the ProxySQL proxy and optionally return its
        results as a set of rows."""
        username = username or ProxySQLBaseTest.PROXYSQL_RW_USERNAME
        password = password or ProxySQLBaseTest.PROXYSQL_RW_PASSWORD
        port = port or ProxySQLBaseTest.PROXYSQL_RW_PORT
        proxy_connection = MySQLdb.connect("127.0.0.1",
                                           username,
                                           password,
                                           port=port,
                                           db=db)
        # try/finally so the connection is released even when the query
        # raises (previously it leaked on failure).
        try:
            cursor = proxy_connection.cursor()
            cursor.execute(query)
            rows = cursor.fetchall() if return_result else None
            cursor.close()
        finally:
            proxy_connection.close()
        if return_result:
            return rows

    def run_query_proxysql_admin(self, query, return_result=True):
        """Run a query against the ProxySQL admin.

        Note: we do not need to specify a db for this query, as it's always
        against the "main" database.
        TODO(andrei): revisit db assumption once stats databases from
        ProxySQL are accessible via the MySQL interface.
        """
        return self.run_query_proxysql(
            query,
            # "main" database is hardcoded within the
            # ProxySQL admin -- it contains the SQLite3
            # tables with metadata about servers and users
            "main",
            return_result,
            username=ProxySQLBaseTest.PROXYSQL_ADMIN_USERNAME,
            password=ProxySQLBaseTest.PROXYSQL_ADMIN_PASSWORD,
            port=ProxySQLBaseTest.PROXYSQL_ADMIN_PORT
        )

    def run_query_mysql(self, query, db, return_result=True, hostgroup=0,
                        username=None, password=None):
        """Run a query against a MySQL backend and optionally return its
        results as a set of rows.

        IMPORTANT: since the queries are actually run against the MySQL
        backend, that backend needs to expose its MySQL port to the outside
        through docker compose's port mapping mechanism.

        This inspects the running containers to retrieve the available
        backends and hostgroups, and picks a random backend from the
        specified hostgroup.
        """
        # Figure out which are the containers for the specified hostgroup.
        mysql_backends = ProxySQLBaseTest._get_mysql_containers()
        mysql_backends_in_hostgroup = []
        for backend in mysql_backends:
            container_name = backend['Names'][0][1:].upper()
            backend_hostgroup = ProxySQLBaseTest._extract_hostgroup_from_container_name(container_name)

            if not backend.get('Ports'):
                continue
            mysql_port_exposed = False
            for exposed_port in backend.get('Ports', []):
                if exposed_port['PrivatePort'] == 3306:
                    mysql_port_exposed = True

            if backend_hostgroup == hostgroup and mysql_port_exposed:
                mysql_backends_in_hostgroup.append(backend)

        if len(mysql_backends_in_hostgroup) == 0:
            raise Exception('No backends with a publicly exposed port were '
                            'found in hostgroup %d' % hostgroup)

        # Pick a random container, extract its connection details. The
        # filter above guarantees a 3306 mapping exists.
        container = random.choice(mysql_backends_in_hostgroup)
        for exposed_port in container.get('Ports', []):
            if exposed_port['PrivatePort'] == 3306:
                mysql_port = exposed_port['PublicPort']

        username = username or ProxySQLBaseTest.PROXYSQL_RW_USERNAME
        password = password or ProxySQLBaseTest.PROXYSQL_RW_PASSWORD
        mysql_connection = MySQLdb.connect("127.0.0.1",
                                           username,
                                           password,
                                           port=mysql_port,
                                           db=db)
        # Same leak-proof cleanup as run_query_proxysql.
        try:
            cursor = mysql_connection.cursor()
            cursor.execute(query)
            rows = cursor.fetchall() if return_result else None
            cursor.close()
        finally:
            mysql_connection.close()
        if return_result:
            return rows

    def run_sysbench_proxysql(self, threads=4, time=60, db="test",
                              username=None, password=None, port=None):
        """Runs a sysbench test with the given parameters against the given
        ProxySQL instance.

        In this case, due to better encapsulation and reduced latency to
        ProxySQL, we are assuming that sysbench is installed on the same
        container with it.

        NOTE: the `time` parameter shadows the `time` module inside this
        method; the name is kept for backward compatibility with callers.
        """
        username = username or ProxySQLBaseTest.PROXYSQL_RW_USERNAME
        password = password or ProxySQLBaseTest.PROXYSQL_RW_PASSWORD
        port = port or ProxySQLBaseTest.PROXYSQL_RW_PORT
        params = [
            "sysbench",
            "--test=/opt/sysbench/sysbench/tests/db/oltp.lua",
            "--num-threads=%d" % threads,
            "--max-requests=0",
            "--max-time=%d" % time,
            "--mysql-user=%s" % username,
            "--mysql-password=%s" % password,
            "--mysql-db=%s" % db,
            "--db-driver=mysql",
            "--oltp-tables-count=4",
            "--oltp-read-only=on",
            "--oltp-skip-trx=on",
            "--report-interval=1",
            "--oltp-point-selects=100",
            "--oltp-table-size=400000",
            "--mysql-host=127.0.0.1",
            "--mysql-port=%s" % port
        ]
        ProxySQLBaseTest.run_bash_command_within_proxysql(params + ["prepare"])
        ProxySQLBaseTest.run_bash_command_within_proxysql(params + ["run"])
        ProxySQLBaseTest.run_bash_command_within_proxysql(params + ["cleanup"])

    @classmethod
    def run_bash_command_within_proxysql(cls, params):
        """Run a bash command given as an array of tokens within the
        ProxySQL container.

        This is useful in a lot of scenarios:
        - running sysbench against the ProxySQL instance
        - getting environment variables from the ProxySQL container
        - running various debugging commands against the ProxySQL instance
        """
        proxysql_container_id = ProxySQLBaseTest._get_proxysql_container()['Id']
        exec_params = ["docker", "exec", proxysql_container_id] + params
        subprocess.call(exec_params)

    @classmethod
    def _compile_host_proxysql(cls):
        """Compile ProxySQL on the Docker host from which we're running the
        tests.

        This is used for remote debugging, because that's how the
        gdb + gdbserver pair works:
        - local gdb with access to the binary with debug symbols
        - remote gdbserver which wraps the remote binary so that it can be
          debugged when it crashes.
        """
        subprocess.call(["make", "clean"])
        subprocess.call(["make"])

    @classmethod
    def _connect_gdb_to_proxysql_within_container(cls):
        """Connect a local gdb running on the docker host to the remote
        ProxySQL binary for remote debugging.

        This is useful in interactive mode, where we want to stop at a
        failing test and prompt the developer to debug the failing instance.

        Note: gdb is run in a separate process because otherwise it will
        block the test running process, and it will not be able to run
        queries anymore and make assertions. However, we save the process
        handle so that we can shut down the process later on.
        """
        cls._gdb_process = subprocess.Popen(["gdb", "--command=gdb-commands.txt",
                                             "./proxysql"],
                                            cwd="./src")

    @classmethod
    def _start_proxysql_pings(cls):
        """During the running of the tests, the test suite will continuously
        monitor the ProxySQL daemon in order to check that it's up.

        This special thread will do exactly that."""
        cls.ping_thread = ProxySQL_Ping_Thread(username=cls.PROXYSQL_RW_USERNAME,
                                               password=cls.PROXYSQL_RW_PASSWORD,
                                               hostname="127.0.0.1",
                                               port=cls.PROXYSQL_RW_PORT)
        cls.ping_thread.start()

    @classmethod
    def _stop_proxysql_pings(cls):
        """Stop the special thread which pings the ProxySQL daemon."""
        cls.ping_thread.stop()
        cls.ping_thread.join()

Просмотреть файл

@ -0,0 +1,88 @@
from email.mime.text import MIMEText
import smtplib
from threading import Thread
import time
import MySQLdb
class ProxySQL_Ping_Thread(Thread):
    """ProxySQL_Ping_Thread's purpose is to do a continuous health check of
    the ProxySQL daemon when tests are running against it. When it has
    crashed or it's simply not responding anymore, it will send an e-mail to
    draw the attention of the developer so that he or she will examine the
    situation.

    This is because the test suite is designed to be long running and we
    want to find out as quickly as possible when the tests ran into trouble
    without continuously keeping an eye on the tests.
    """

    # Consecutive failed probes tolerated before the alert e-mail is sent.
    FAILED_CONNECTIONS_BEFORE_ALERT = 3

    def __init__(self, username, password,
                 hostname="127.0.0.1", port=6033, db="test",
                 ping_command="SELECT @@version_comment LIMIT 1",
                 interval=60, **kwargs):
        """Store connection parameters; pinging starts on Thread.start().

        Args:
            username, password, hostname, port, db: MySQL connection details
                of the ProxySQL instance to monitor.
            ping_command: cheap query used as the health-check probe.
            interval: seconds to sleep between consecutive probes.
            **kwargs: forwarded to threading.Thread.
        """
        self.username = username
        self.password = password
        self.hostname = hostname
        self.port = port
        self.db = db
        self.ping_command = ping_command
        self.interval = interval
        self.running = False
        self.failed_connections = 0
        super(ProxySQL_Ping_Thread, self).__init__(**kwargs)

    def run(self):
        """Probe ProxySQL every `interval` seconds until stop() is called or
        too many consecutive failures trigger the alert e-mail."""
        self.running = True
        while self.running:
            time.sleep(self.interval)
            # stop() may have been called while we were sleeping.
            if not self.running:
                return
            try:
                connection = MySQLdb.connect(self.hostname,
                                             self.username,
                                             self.password,
                                             port=self.port,
                                             db=self.db,
                                             connect_timeout=30)
                cursor = connection.cursor()
                cursor.execute(self.ping_command)
                rows = cursor.fetchall()
                cursor.close()
                connection.close()
                print("ProxySQL server @ %s:%d responded to query %s with %r" %
                      (self.hostname, self.port, self.ping_command, rows))
                self.failed_connections = 0
            except Exception:
                # Narrowed from a bare `except:`, which would also have
                # swallowed KeyboardInterrupt/SystemExit and made the
                # thread impossible to interrupt.
                self.failed_connections = self.failed_connections + 1
                if self.failed_connections >= ProxySQL_Ping_Thread.FAILED_CONNECTIONS_BEFORE_ALERT:
                    self.send_error_email()
                    self.running = False
                    return

    def stop(self):
        """Ask the ping loop to terminate at its next wake-up."""
        self.running = False

    def send_error_email(self):
        """E-mail the maintainer that ProxySQL stopped responding."""
        msg = MIMEText("ProxySQL daemon stopped responding during tests.\n"
                       "Please check if it has crashed and you have been left with a gdb console on!")
        # me == the sender's email address
        # you == the recipient's email address
        msg['Subject'] = 'Daemon has stopped responding'
        msg['From'] = 'ProxySQL Tests <proxysql.tests@gmail.com>'
        msg['To'] = 'Andrei-Adnan Ismail <iandrei@gmail.com>'
        # SECURITY NOTE(review): SMTP credentials are hard-coded below; move
        # them into environment variables before reusing this outside the
        # test rig.
        # Send the message via our own SMTP server, but don't include the
        # envelope header.
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.ehlo()
        s.starttls()
        s.login('proxysql.tests', 'pr0xysql')
        s.sendmail('proxysql.tests@gmail.com', ['iandrei@gmail.com'], msg.as_string())
        s.quit()

8
test/sysbench_test.py Normal file
Просмотреть файл

@ -0,0 +1,8 @@
from proxysql_base_test import ProxySQLBaseTest
class SysBenchTest(ProxySQLBaseTest):
    """Load test: ProxySQL must survive a mild sysbench OLTP run."""

    DOCKER_COMPOSE_FILE = "./scenarios/1backend"

    def test_proxy_doesnt_crash_under_mild_sysbench_load(self):
        # Passing simply means no exception was raised while sysbench ran.
        self.run_sysbench_proxysql()