Merge remote-tracking branch 'upstream/master' into webdriver2

Joshua Thompson 2016-08-18 14:05:15 -07:00
Parents 1b9fafe9ef cf08a7061d
Commit dba9c6d992
106 changed files with 2010 additions and 1385 deletions

12
Dockerfile.percona57 Normal file
View file

@ -0,0 +1,12 @@
FROM vitess/bootstrap:percona57
# Re-copy sources from working tree
USER root
COPY . /vt/src/github.com/youtube/vitess
# Fix permissions
RUN chown -R vitess:vitess /vt
USER vitess
# Build Vitess
RUN make build

View file

@ -150,7 +150,7 @@ php_proto:
docker rm vitess_php-proto
# This rule builds the bootstrap images for all flavors.
DOCKER_IMAGES_FOR_TEST = mariadb mysql56 mysql57 percona
DOCKER_IMAGES_FOR_TEST = mariadb mysql56 mysql57 percona percona57
DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST)
docker_bootstrap:
for i in $(DOCKER_IMAGES); do echo "image: $$i"; docker/bootstrap/build.sh $$i || exit 1; done
@ -174,6 +174,10 @@ docker_base_percona:
chmod -R o=g *
docker build -f Dockerfile.percona -t vitess/base:percona .
docker_base_percona57:
chmod -R o=g *
docker build -f Dockerfile.percona57 -t vitess/base:percona57 .
docker_base_mariadb:
chmod -R o=g *
docker build -f Dockerfile.mariadb -t vitess/base:mariadb .
@ -190,6 +194,9 @@ docker_lite_mariadb: docker_base_mariadb
docker_lite_percona: docker_base_percona
cd docker/lite && ./build.sh percona
docker_lite_percona57: docker_base_percona57
cd docker/lite && ./build.sh percona57
docker_guestbook:
cd examples/kubernetes/guestbook && ./build.sh

View file

@ -20,6 +20,18 @@ DROP DATABASE IF EXISTS test;
# Vitess-internal database.
CREATE DATABASE IF NOT EXISTS _vt;
# Note that the definitions of local_metadata and shard_metadata should be the same
# as the production definitions in go/vt/mysqlctl/metadata_tables.go.
CREATE TABLE IF NOT EXISTS _vt.local_metadata (
name VARCHAR(255) NOT NULL,
value VARCHAR(255) NOT NULL,
PRIMARY KEY (name)
) ENGINE=InnoDB;
CREATE TABLE IF NOT EXISTS _vt.shard_metadata (
name VARCHAR(255) NOT NULL,
value MEDIUMBLOB NOT NULL,
PRIMARY KEY (name)
) ENGINE=InnoDB;
# Admin user with all privileges.
GRANT ALL ON *.* TO 'vt_dba'@'localhost';

View file

@ -0,0 +1,23 @@
FROM vitess/bootstrap:common
# Install Percona 5.7
RUN apt-key adv --keyserver ha.pool.sks-keyservers.net \
--recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A && \
add-apt-repository 'deb http://repo.percona.com/apt jessie main' && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections && \
apt-get update && \
apt-get install -y --no-install-recommends \
percona-server-server-5.7 libperconaserverclient18.1-dev && \
rm -rf /var/lib/apt/lists/*
# Bootstrap Vitess
WORKDIR /vt/src/github.com/youtube/vitess
USER vitess
# Required by e2e test dependencies e.g. test/environment.py.
ENV USER vitess
ENV MYSQL_FLAVOR MySQL56
RUN ./bootstrap.sh --skip_root_installs

View file

@ -0,0 +1,37 @@
# This image is only meant to be built from within the build.sh script.
FROM debian:jessie
# Install dependencies
RUN apt-key adv --keyserver ha.pool.sks-keyservers.net \
--recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A && \
echo 'deb http://repo.percona.com/apt jessie main' > /etc/apt/sources.list.d/mysql.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections && \
apt-get update && \
apt-get install -y --no-install-recommends \
percona-server-server-5.7 libperconaserverclient18.1 bzip2 && \
rm -rf /var/lib/apt/lists/*
# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTTOP /vt/src/github.com/youtube/vitess
ENV VTROOT /vt
ENV GOTOP $VTTOP/go
ENV VTDATAROOT $VTROOT/vtdataroot
ENV GOBIN $VTROOT/bin
ENV GOPATH $VTROOT
ENV PATH $VTROOT/bin:$PATH
ENV VT_MYSQL_ROOT /usr
ENV PKG_CONFIG_PATH $VTROOT/lib
# Copy binaries (placed by build.sh)
COPY lite/vt /vt
# Create vitess user
RUN groupadd -r vitess && useradd -r -g vitess vitess && \
mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt
# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot

View file

@ -29,6 +29,12 @@ func (t *SplitCloneTask) Run(parameters map[string]string) ([]*automationpb.Task
if excludeTables := parameters["exclude_tables"]; excludeTables != "" {
args = append(args, "--exclude_tables="+excludeTables)
}
if chunkCount := parameters["chunk_count"]; chunkCount != "" {
args = append(args, "--chunk_count="+chunkCount)
}
if minRowsPerChunk := parameters["min_rows_per_chunk"]; minRowsPerChunk != "" {
args = append(args, "--min_rows_per_chunk="+minRowsPerChunk)
}
if writeQueryMaxRows := parameters["write_query_max_rows"]; writeQueryMaxRows != "" {
args = append(args, "--write_query_max_rows="+writeQueryMaxRows)
}
@ -56,5 +62,5 @@ func (t *SplitCloneTask) RequiredParameters() []string {
// OptionalParameters is part of the Task interface.
func (t *SplitCloneTask) OptionalParameters() []string {
return []string{"online", "offline", "exclude_tables", "write_query_max_rows", "write_query_max_size", "min_healthy_rdonly_tablets"}
return []string{"online", "offline", "exclude_tables", "chunk_count", "min_rows_per_chunk", "write_query_max_rows", "write_query_max_size", "min_healthy_rdonly_tablets"}
}

View file

@ -17,7 +17,7 @@ func TestSplitCloneTask(t *testing.T) {
vtworkerclient.RegisterFactory("fake", fake.FakeVtworkerClientFactory)
defer vtworkerclient.UnregisterFactoryForTest("fake")
flag.Set("vtworker_client_protocol", "fake")
fake.RegisterResult([]string{"SplitClone", "--online=false", "--offline=true", "--exclude_tables=unrelated1", "--write_query_max_rows=1", "--write_query_max_size=1024", "--min_healthy_rdonly_tablets=1", "test_keyspace/0"},
fake.RegisterResult([]string{"SplitClone", "--online=false", "--offline=true", "--exclude_tables=unrelated1", "--chunk_count=2", "--min_rows_per_chunk=4", "--write_query_max_rows=1", "--write_query_max_size=1024", "--min_healthy_rdonly_tablets=1", "test_keyspace/0"},
"", // No output.
nil) // No error.
@ -29,6 +29,8 @@ func TestSplitCloneTask(t *testing.T) {
"online": "false",
"offline": "true",
"exclude_tables": "unrelated1",
"chunk_count": "2",
"min_rows_per_chunk": "4",
"write_query_max_rows": "1",
"write_query_max_size": "1024",
"min_healthy_rdonly_tablets": "1",

View file

@ -22,8 +22,23 @@ func (t *VerticalSplitCloneTask) Run(parameters map[string]string) ([]*automatio
// '--destination_writer_count', '1',
args := []string{"VerticalSplitClone"}
args = append(args, "--tables="+parameters["tables"])
if destinationPackCount := parameters["destination_pack_count"]; destinationPackCount != "" {
args = append(args, "--destination_pack_count="+destinationPackCount)
if online := parameters["online"]; online != "" {
args = append(args, "--online="+online)
}
if offline := parameters["offline"]; offline != "" {
args = append(args, "--offline="+offline)
}
if chunkCount := parameters["chunk_count"]; chunkCount != "" {
args = append(args, "--chunk_count="+chunkCount)
}
if minRowsPerChunk := parameters["min_rows_per_chunk"]; minRowsPerChunk != "" {
args = append(args, "--min_rows_per_chunk="+minRowsPerChunk)
}
if writeQueryMaxRows := parameters["write_query_max_rows"]; writeQueryMaxRows != "" {
args = append(args, "--write_query_max_rows="+writeQueryMaxRows)
}
if writeQueryMaxSize := parameters["write_query_max_size"]; writeQueryMaxSize != "" {
args = append(args, "--write_query_max_size="+writeQueryMaxSize)
}
if minHealthyRdonlyTablets := parameters["min_healthy_rdonly_tablets"]; minHealthyRdonlyTablets != "" {
args = append(args, "--min_healthy_rdonly_tablets="+minHealthyRdonlyTablets)
@ -46,5 +61,5 @@ func (t *VerticalSplitCloneTask) RequiredParameters() []string {
// OptionalParameters is part of the Task interface.
func (t *VerticalSplitCloneTask) OptionalParameters() []string {
return []string{"destination_pack_count", "min_healthy_rdonly_tablets"}
return []string{"online", "offline", "chunk_count", "min_rows_per_chunk", "write_query_max_rows", "write_query_max_size", "min_healthy_rdonly_tablets"}
}

View file

@ -0,0 +1,51 @@
// Copyright 2016, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package automation
import (
"flag"
"testing"
"github.com/youtube/vitess/go/vt/worker/fakevtworkerclient"
"github.com/youtube/vitess/go/vt/worker/vtworkerclient"
)
func TestVerticalSplitCloneTask(t *testing.T) {
fake := fakevtworkerclient.NewFakeVtworkerClient()
vtworkerclient.RegisterFactory("fake", fake.FakeVtworkerClientFactory)
defer vtworkerclient.UnregisterFactoryForTest("fake")
flag.Set("vtworker_client_protocol", "fake")
fake.RegisterResult([]string{"VerticalSplitClone", "--tables=moving1", "--online=false", "--offline=true", "--chunk_count=2", "--min_rows_per_chunk=4", "--write_query_max_rows=1", "--write_query_max_size=1024", "--min_healthy_rdonly_tablets=1", "dest_keyspace/0"},
"", // No output.
nil) // No error.
task := &VerticalSplitCloneTask{}
parameters := map[string]string{
"dest_keyspace": "dest_keyspace",
"shard": "0",
"tables": "moving1",
"vtworker_endpoint": "localhost:15001",
"online": "false",
"offline": "true",
"chunk_count": "2",
"min_rows_per_chunk": "4",
"write_query_max_rows": "1",
"write_query_max_size": "1024",
"min_healthy_rdonly_tablets": "1",
}
err := validateParameters(task, parameters)
if err != nil {
t.Fatalf("Not all required parameters were specified: %v", err)
}
newTasks, _ /* output */, err := task.Run(parameters)
if newTasks != nil {
t.Errorf("Task should not emit new tasks: %v", newTasks)
}
if err != nil {
t.Errorf("Task should not fail: %v", err)
}
}

76
go/vt/etcdtopo/convert.go Normal file
View file

@ -0,0 +1,76 @@
package etcdtopo
import (
"encoding/json"
"strings"
"github.com/golang/protobuf/proto"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
vschemapb "github.com/youtube/vitess/go/vt/proto/vschema"
)
// This file contains utility functions to maintain backward
// compatibility with old-style non-Backend etcd topologies. The old
// implementations (before 2016-08-17) used to deal with explicit data
// types. We converted them to a generic []byte and path
// interface. But the etcd implementation was not compatible with
// this.
// dataType is an enum for possible known data types, used for
// backward compatibility.
type dataType int
// Constants for type conversion
const (
// newType is used to indicate a topology object type of
// anything that is added after the topo.Backend refactor,
// i.e. anything that doesn't require conversion between old
// style topologies and the new style ones. The list of enum
// values after this contain all types that exist at the
// moment (2016-08-17) and doesn't need to be expanded when
// something new is saved in the topology because it will be
// saved in the new style, not in the old one.
newType dataType = iota
srvKeyspaceType
srvVSchemaType
)
// rawDataFromNodeValue converts the data of the given type into a []byte.
// It is mindful of backward compatibility, i.e. for newer objects
// it doesn't do anything, but for old object types that were stored in JSON
// format it converts them to proto3 binary encoding.
func rawDataFromNodeValue(valueType dataType, value string) ([]byte, error) {
var p proto.Message
switch valueType {
case srvKeyspaceType:
p = &topodatapb.SrvKeyspace{}
case srvVSchemaType:
p = &vschemapb.SrvVSchema{}
default:
return []byte(value), nil
}
if err := json.Unmarshal([]byte(value), p); err != nil {
return nil, err
}
return proto.Marshal(p)
}
// oldTypeAndFilePath returns the data type and old file path for a given path.
func oldTypeAndFilePath(filePath string) (dataType, string) {
parts := strings.Split(filePath, "/")
// SrvKeyspace: local cell, /keyspaces/<keyspace>/SrvKeyspace
if len(parts) == 4 && parts[0] == "" && parts[1] == "keyspaces" && parts[3] == "SrvKeyspace" {
return srvKeyspaceType, srvKeyspaceFilePath(parts[2])
}
// SrvVSchema: local cell, /SrvVSchema
if len(parts) == 2 && parts[1] == "SrvVSchema" {
return srvVSchemaType, srvVSchemaFilePath()
}
return newType, filePath
}
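
The conversion helpers above are unexported, so the following is only a minimal sketch of how they fit together, written as a hypothetical test inside the etcdtopo package (it is not part of this commit). The keyspace name and JSON payload are illustrative.

package etcdtopo

import (
	"encoding/json"
	"testing"

	"github.com/golang/protobuf/proto"

	topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)

// TestConvertSketch shows how an old-style JSON node value would be mapped to
// its data type and converted to proto3 binary encoding. Illustrative only.
func TestConvertSketch(t *testing.T) {
	// An old-style node stored the SrvKeyspace as JSON.
	oldValue, err := json.Marshal(&topodatapb.SrvKeyspace{ShardingColumnName: "user_id"})
	if err != nil {
		t.Fatal(err)
	}

	// The generic Backend path is mapped back to the old type and file path.
	valueType, filePath := oldTypeAndFilePath("/keyspaces/test_keyspace/SrvKeyspace")
	if valueType != srvKeyspaceType {
		t.Fatalf("unexpected type for %v", filePath)
	}

	// Convert the JSON value to proto3 binary encoding, as Watch does.
	data, err := rawDataFromNodeValue(valueType, string(oldValue))
	if err != nil {
		t.Fatal(err)
	}
	got := &topodatapb.SrvKeyspace{}
	if err := proto.Unmarshal(data, got); err != nil {
		t.Fatal(err)
	}
	if got.ShardingColumnName != "user_id" {
		t.Fatalf("round trip failed: %v", got)
	}
}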

19
go/vt/etcdtopo/version.go Normal file
View file

@ -0,0 +1,19 @@
package etcdtopo
import (
"fmt"
"github.com/youtube/vitess/go/vt/topo"
)
// EtcdVersion is etcd's idea of a version.
// It implements topo.Version.
// We use the native etcd version type, uint64.
type EtcdVersion uint64
// String is part of the topo.Version interface.
func (v EtcdVersion) String() string {
return fmt.Sprintf("%v", uint64(v))
}
var _ topo.Version = (EtcdVersion)(0) // compile-time interface check

116
go/vt/etcdtopo/watch.go Normal file
View file

@ -0,0 +1,116 @@
package etcdtopo
import (
"fmt"
"github.com/coreos/go-etcd/etcd"
"golang.org/x/net/context"
"github.com/youtube/vitess/go/vt/topo"
)
func newWatchData(valueType dataType, node *etcd.Node) *topo.WatchData {
bytes, err := rawDataFromNodeValue(valueType, node.Value)
if err != nil {
return &topo.WatchData{Err: err}
}
return &topo.WatchData{
Contents: bytes,
Version: EtcdVersion(node.ModifiedIndex),
}
}
// Watch is part of the topo.Backend interface
func (s *Server) Watch(ctx context.Context, cellName string, filePath string) (current *topo.WatchData, changes <-chan *topo.WatchData) {
cell, err := s.getCell(cellName)
if err != nil {
return &topo.WatchData{Err: fmt.Errorf("Watch cannot get cell: %v", err)}, nil
}
// Special paths where we need to be backward compatible.
var valueType dataType
valueType, filePath = oldTypeAndFilePath(filePath)
// Get the initial version of the file
initial, err := cell.Get(filePath, false /* sort */, false /* recursive */)
if err != nil {
// generic error
return &topo.WatchData{Err: convertError(err)}, nil
}
if initial.Node == nil {
// node doesn't exist
return &topo.WatchData{Err: topo.ErrNoNode}, nil
}
wd := newWatchData(valueType, initial.Node)
if wd.Err != nil {
return wd, nil
}
notifications := make(chan *topo.WatchData, 10)
// This watch go routine will stop if the 'stop' channel is closed.
// Otherwise it will try to watch everything in a loop, and send events
// to the 'watch' channel.
// In any case, the Watch call will close the 'watch' channel.
watchChannel := make(chan *etcd.Response)
stop := make(chan bool)
watchError := make(chan error)
go func() {
versionToWatch := initial.Node.ModifiedIndex + 1
if _, err := cell.Client.Watch(filePath, versionToWatch, false /* recursive */, watchChannel, stop); err != etcd.ErrWatchStoppedByUser {
// We didn't stop this watch, it errored out.
// In this case, watch was closed already, we just
// have to save the error.
// Note err can never be nil, as we only return when
// the watch is interrupted or broken.
watchError <- err
close(watchError)
}
}()
// This go routine is the main event handling routine:
// - it will stop if ctx.Done() is closed.
// - if it receives a notification from the watch, it will forward it
// to the notifications channel.
go func() {
for {
select {
case resp, ok := <-watchChannel:
if !ok {
// Watch terminated, because of an error
err := <-watchError
notifications <- &topo.WatchData{Err: err}
close(notifications)
return
}
if resp.Node == nil {
// Node doesn't exist any more, we can
// stop watching.
close(stop)
notifications <- &topo.WatchData{Err: topo.ErrNoNode}
close(notifications)
return
}
wd := newWatchData(valueType, resp.Node)
notifications <- wd
if wd.Err != nil {
// Error packing / unpacking data,
// stop the watch.
close(stop)
close(notifications)
return
}
case <-ctx.Done():
close(stop)
notifications <- &topo.WatchData{Err: ctx.Err()}
close(notifications)
return
}
}
}()
return wd, notifications
}

View file

@ -621,7 +621,7 @@ func Restore(
}
if toRestore < 0 {
logger.Errorf("No backup to restore on BackupStorage for directory %v. Starting up empty.", dir)
if err = populateLocalMetadata(mysqld, localMetadata); err == nil {
if err = populateMetadataTables(mysqld, localMetadata); err == nil {
err = ErrNoBackup
}
return replication.Position{}, err
@ -635,7 +635,7 @@ func Restore(
}
if !ok {
logger.Infof("Auto-restore is enabled, but mysqld already contains data. Assuming vttablet was just restarted.")
if err = populateLocalMetadata(mysqld, localMetadata); err == nil {
if err = populateMetadataTables(mysqld, localMetadata); err == nil {
err = ErrExistingDB
}
return replication.Position{}, err
@ -685,7 +685,7 @@ func Restore(
// Populate local_metadata before starting without --skip-networking,
// so it's there before we start announcing ourselves.
logger.Infof("Restore: populating local_metadata")
err = populateLocalMetadata(mysqld, localMetadata)
err = populateMetadataTables(mysqld, localMetadata)
if err != nil {
return replication.Position{}, err
}

View file

@ -12,18 +12,30 @@ import (
"github.com/youtube/vitess/go/sqltypes"
)
// Note that the definitions of local_metadata and shard_metadata should be the same
// as the test definitions in config/init_db.sql.
const sqlCreateLocalMetadataTable = `CREATE TABLE IF NOT EXISTS _vt.local_metadata (
name VARCHAR(255) NOT NULL,
value VARCHAR(255) NOT NULL,
PRIMARY KEY (name)
) ENGINE=InnoDB`
const sqlCreateShardMetadataTable = `CREATE TABLE IF NOT EXISTS _vt.shard_metadata (
name VARCHAR(255) NOT NULL,
value MEDIUMBLOB NOT NULL,
PRIMARY KEY (name)
) ENGINE=InnoDB`
// populateLocalMetadata creates and fills the _vt.local_metadata table,
// which is a per-tablet table that is never replicated. This allows queries
// populateMetadataTables creates and fills the _vt.local_metadata table and
// creates the _vt.shard_metadata table. The _vt.local_metadata table is
// a per-tablet table that is never replicated. This allows queries
// against local_metadata to return different values on different tablets,
// which is used for communicating between Vitess and MySQL-level tools like
// Orchestrator (http://github.com/outbrain/orchestrator).
func populateLocalMetadata(mysqld MysqlDaemon, localMetadata map[string]string) error {
// _vt.shard_metadata is a replicated table with per-shard information, but it's
// created here to make it easier to create it on databases that were running
// an old version of Vitess, or databases that are getting converted to run under
// Vitess.
func populateMetadataTables(mysqld MysqlDaemon, localMetadata map[string]string) error {
log.Infof("Populating _vt.local_metadata table...")
// Get a non-pooled DBA connection.
@ -46,7 +58,11 @@ func populateLocalMetadata(mysqld MysqlDaemon, localMetadata map[string]string)
if _, err := conn.ExecuteFetch(sqlCreateLocalMetadataTable, 0, false); err != nil {
return err
}
if _, err := conn.ExecuteFetch(sqlCreateShardMetadataTable, 0, false); err != nil {
return err
}
// Populate local_metadata from the passed list of values.
if _, err := conn.ExecuteFetch("BEGIN", 0, false); err != nil {
return err
}

102
go/vt/topo/backend.go Normal file
View file

@ -0,0 +1,102 @@
package topo
import "golang.org/x/net/context"
// Backend defines the interface that must be implemented by topology
// plug-ins to be used with Vitess.
//
// Zookeeper is a good example of an implementation, as defined in
// go/vt/zktopo.
//
// This API is very generic, and file oriented.
//
// FIXME(alainjobart) add all parts of the API, implement them all for
// all our current systems, and convert the higher levels to talk to
// this API. This is a long-term project.
type Backend interface {
// Directory support: NYI
// MkDir(ctx context.Context, cell string, path string) error
// RmDir(ctx context.Context, cell string, path string) error
// ListDir(ctx context.Context, cell string, path string) ([]string, error)
// File support: NYI
// if version == nil, then it's an unconditional update / delete.
// Create(ctx context.Context, cell string, path string, contents []byte) error
// Update(ctx context.Context, cell string, path string, contents []byte, version Version) (Version, error)
// Get(ctx context.Context, cell string, path string) ([]byte, Version, error)
// Delete(ctx context.Context, cell string, path string, version Version)
// Locks: NYI
// Lock(ctx context.Context, cell string, dirPath string) (LockDescriptor, error)
// Unlock(ctx context.Context, descriptor LockDescriptor) error
// Watch starts watching a file in the provided cell. It
// returns the current value, as well as a channel to read the
// changes from. If the initial read fails, or the file
// doesn't exist, current.Err is set, and 'changes' is nil.
// Otherwise current.Err is nil, and current.Contents /
// current.Version are accurate.
//
// The 'changes' channel may return a record with Err != nil.
// In that case, the channel will also be closed right after
// that record. In any case, 'changes' has to be drained of
// all events, even when the Context is canceled.
//
// Note the 'changes' channel may return the same
// Version/Contents twice (for instance, if the watch is interrupted
// and restarted within the Backend implementation).
// Similarly, the 'changes' channel may skip versions / changes
// (that is, if value goes [A, B, C, D, E, F], the watch may only
// receive [A, B, F]). This should only happen for rapidly
// changing values though. Usually, the initial value will come
// back right away. And a stable value (that hasn't changed for
// a while) should be seen shortly.
//
// The Watch call is not guaranteed to return exactly up to
// date data right away. For instance, if a file is created
// and saved, and then a watch is set on that file, it may
// return ErrNoNode (as the underlying configuration service
// may use asynchronous caches that are not up to date
// yet). The only guarantee is that the watch data will
// eventually converge. Vitess doesn't explicitly depend on the data
// being correct quickly, as long as it eventually gets there.
//
// To stop the watch, just cancel the context.
Watch(ctx context.Context, cell string, path string) (current *WatchData, changes <-chan *WatchData)
}
// Version is an interface that describes a file version.
type Version interface {
// String returns a text representation of the version.
String() string
}
// LockDescriptor is an interface that describes a lock.
type LockDescriptor interface {
// String returns a text representation of the lock.
String() string
}
// WatchData is the structure returned by the Watch() API.
// It can contain:
// a) an error in Err if the call failed (or if the watch was terminated).
// b) the current or new version of the data.
type WatchData struct {
// Contents has the bytes that were stored by Create
// or Update.
Contents []byte
// Version contains an opaque representation of the Version
// of that file.
Version Version
// Err is set the same way for both the 'current' value
// returned by Watch, or the values read on the 'changes'
// channel. It can be:
// - nil, then Contents and Version are set.
// - ErrNoNode if the file doesn't exist.
// - context.Err() if context.Done() is closed (then the value
// will be context.DeadlineExceeded or context.Canceled).
// - any other platform-specific error.
Err error
}
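
As a rough illustration of the contract documented above, here is a minimal consumer sketch (not part of this change). The backend value, cell, file path and the glog-style logging are assumptions made for the example.

package example

import (
	log "github.com/golang/glog"
	"golang.org/x/net/context"

	"github.com/youtube/vitess/go/vt/topo"
)

// watchFile consumes Backend.Watch as described in the interface comment.
// Illustrative only; backend, cell and path are placeholders.
func watchFile(ctx context.Context, backend topo.Backend, cell, path string) {
	// Cancel the context when we are done to stop the watch.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	current, changes := backend.Watch(ctx, cell, path)
	if current.Err != nil {
		// Initial read failed (e.g. topo.ErrNoNode); 'changes' is nil.
		log.Errorf("initial watch failed: %v", current.Err)
		return
	}
	log.Infof("initial value at version %v: %d bytes", current.Version, len(current.Contents))

	// Drain 'changes' until it is closed, as the interface requires.
	for wd := range changes {
		if wd.Err != nil {
			// Watch terminated: node deleted, context canceled, or backend error.
			log.Errorf("watch ended: %v", wd.Err)
			return
		}
		log.Infof("new value at version %v: %d bytes", wd.Version, len(wd.Contents))
	}
}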

View file

@ -89,6 +89,15 @@ func (tee *Tee) Close() {
tee.secondary.Close()
}
//
// Backend API
//
// Watch is part of the topo.Backend interface
func (tee *Tee) Watch(ctx context.Context, cell string, path string) (current *topo.WatchData, changes <-chan *topo.WatchData) {
return tee.primary.Watch(ctx, cell, path)
}
//
// Cell management, global
//

View file

@ -57,7 +57,14 @@ var (
// implementation for this using zookeeper.
//
// Inside Google, we use Chubby.
//
// FIXME(alainjobart) we are deprecating this interface, to be
// replaced with a lower level interface defined by Backend.
type Impl interface {
// Impl will eventually be entirely replaced with Backend, and
// just disappear.
Backend
// topo.Server management interface.
Close()

View file

@ -20,6 +20,13 @@ type FakeTopo struct{}
// Close is part of the topo.Server interface.
func (ft FakeTopo) Close() {}
// Watch is part of the topo.Backend interface.
func (ft FakeTopo) Watch(ctx context.Context, cell string, path string) (current *topo.WatchData, changes <-chan *topo.WatchData) {
return &topo.WatchData{
Err: errNotImplemented,
}, nil
}
// GetKnownCells is part of the topo.Server interface.
func (ft FakeTopo) GetKnownCells(ctx context.Context) ([]string, error) {
return nil, errNotImplemented

View file

@ -97,4 +97,10 @@ func TopoServerTestSuite(t *testing.T, factory func() topo.Impl) {
ts = factory()
checkElection(t, ts)
ts.Close()
t.Log("=== checkWatch")
ts = factory()
checkWatch(t, ts)
checkWatchInterrupt(t, ts)
ts.Close()
}

191
go/vt/topo/test/watch.go Normal file
View file

@ -0,0 +1,191 @@
package test
import (
"testing"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"github.com/youtube/vitess/go/vt/topo"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)
// waitForInitialValue waits for the initial value of
// /keyspaces/test_keyspace/SrvKeyspace to appear, and match the
// provided srvKeyspace.
func waitForInitialValue(ctx context.Context, t *testing.T, ts topo.Impl, cell string, srvKeyspace *topodatapb.SrvKeyspace) <-chan *topo.WatchData {
var current *topo.WatchData
var changes <-chan *topo.WatchData
start := time.Now()
for {
current, changes = ts.Watch(ctx, cell, "/keyspaces/test_keyspace/SrvKeyspace")
if current.Err == topo.ErrNoNode {
// hasn't appeared yet
if time.Now().Sub(start) > 10*time.Second {
t.Fatalf("time out waiting for file to appear")
}
time.Sleep(10 * time.Millisecond)
continue
}
if current.Err != nil {
t.Fatalf("watch failed: %v", current.Err)
}
// we got a valid result
break
}
got := &topodatapb.SrvKeyspace{}
if err := proto.Unmarshal(current.Contents, got); err != nil {
t.Fatalf("cannot proto-unmarshal data: %v", err)
}
if !proto.Equal(got, srvKeyspace) {
t.Fatalf("got bad data: %v expected: %v", got, srvKeyspace)
}
return changes
}
// checkWatch runs the tests on the Watch part of the Backend API.
// We can't just use the full API yet, so use SrvKeyspace for now.
func checkWatch(t *testing.T, ts topo.Impl) {
ctx := context.Background()
cell := getLocalCell(ctx, t, ts)
// start watching something that doesn't exist -> error
current, changes := ts.Watch(ctx, cell, "/keyspaces/test_keyspace/SrvKeyspace")
if current.Err != topo.ErrNoNode {
t.Errorf("watch on missing node didn't return ErrNoNode: %v %v", current, changes)
}
// create some data
srvKeyspace := &topodatapb.SrvKeyspace{
ShardingColumnName: "user_id",
}
if err := ts.UpdateSrvKeyspace(ctx, cell, "test_keyspace", srvKeyspace); err != nil {
t.Fatalf("UpdateSrvKeyspace(1): %v", err)
}
// start watching again, it should work
changes = waitForInitialValue(ctx, t, ts, cell, srvKeyspace)
// change the data
srvKeyspace.ShardingColumnName = "new_user_id"
if err := ts.UpdateSrvKeyspace(ctx, cell, "test_keyspace", srvKeyspace); err != nil {
t.Fatalf("UpdateSrvKeyspace(2): %v", err)
}
// Make sure we get the watch data, maybe not as first notice,
// but eventually. The API specifies it is possible to get duplicate
// notifications.
for {
wd, ok := <-changes
if !ok {
t.Fatalf("watch channel unexpectedly closed")
}
if wd.Err != nil {
t.Fatalf("watch interrupted: %v", wd.Err)
}
got := &topodatapb.SrvKeyspace{}
if err := proto.Unmarshal(wd.Contents, got); err != nil {
t.Fatalf("cannot proto-unmarshal data: %v", err)
}
if got.ShardingColumnName == "user_id" {
// extra first value, still good
continue
}
if got.ShardingColumnName == "new_user_id" {
// watch worked, good
break
}
t.Fatalf("got unknown SrvKeyspace: %v", got)
}
// remove the SrvKeyspace
if err := ts.DeleteSrvKeyspace(ctx, cell, "test_keyspace"); err != nil {
t.Fatalf("DeleteSrvKeyspace: %v", err)
}
// Make sure we get the ErrNoNode notification eventually.
// The API specifies it is possible to get duplicate
// notifications.
for {
wd, ok := <-changes
if !ok {
t.Fatalf("watch channel unexpectedly closed")
}
if wd.Err == topo.ErrNoNode {
// good
break
}
if wd.Err != nil {
t.Fatalf("bad error returned for deletion: %v", wd.Err)
}
// we got something, better be the right value
got := &topodatapb.SrvKeyspace{}
if err := proto.Unmarshal(wd.Contents, got); err != nil {
t.Fatalf("cannot proto-unmarshal data: %v", err)
}
if got.ShardingColumnName == "new_user_id" {
// good value
continue
}
t.Fatalf("got unknown SrvKeyspace waiting for deletion: %v", got)
}
// now the channel should be closed
if wd, ok := <-changes; ok {
t.Fatalf("got unexpected event after error: %v", wd)
}
}
// checkWatchInterrupt tests we can interrupt a watch.
func checkWatchInterrupt(t *testing.T, ts topo.Impl) {
ctx, cancel := context.WithCancel(context.Background())
cell := getLocalCell(ctx, t, ts)
// create some data
srvKeyspace := &topodatapb.SrvKeyspace{
ShardingColumnName: "user_id",
}
if err := ts.UpdateSrvKeyspace(ctx, cell, "test_keyspace", srvKeyspace); err != nil {
t.Fatalf("UpdateSrvKeyspace(1): %v", err)
}
// start watching, it should work
changes := waitForInitialValue(ctx, t, ts, cell, srvKeyspace)
// Now close the context, it should close the watch.
cancel()
// Make sure we get the context.Canceled notification eventually.
for {
wd, ok := <-changes
if !ok {
t.Fatalf("watch channel unexpectedly closed")
}
if wd.Err == context.Canceled {
// good
break
}
if wd.Err != nil {
t.Fatalf("bad error returned for deletion: %v", wd.Err)
}
// we got something, better be the right value
got := &topodatapb.SrvKeyspace{}
if err := proto.Unmarshal(wd.Contents, got); err != nil {
t.Fatalf("cannot proto-unmarshal data: %v", err)
}
if got.ShardingColumnName == "user_id" {
// good value
continue
}
t.Fatalf("got unknown SrvKeyspace waiting for deletion: %v", got)
}
// now the channel should be closed
if wd, ok := <-changes; ok {
t.Fatalf("got unexpected event after error: %v", wd)
}
}

View file

@ -15,12 +15,14 @@ import (
"golang.org/x/net/context"
"github.com/youtube/vitess/go/vt/logutil"
logutilpb "github.com/youtube/vitess/go/vt/proto/logutil"
"github.com/youtube/vitess/go/vt/schemamanager"
"github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/wrangler"
logutilpb "github.com/youtube/vitess/go/vt/proto/logutil"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)
var (
@ -283,7 +285,7 @@ func initAPI(ctx context.Context, ts topo.Server, actions *ActionRepository, rea
return ts.GetTablet(ctx, tabletAlias)
})
// Healthcheck real time status per (cell, keyspace, shard, tablet type).
// Healthcheck real time status per (cell, keyspace, tablet type, metric).
handleCollection("tablet_statuses", func(r *http.Request) (interface{}, error) {
targetPath := getItemPath(r.URL.Path)
@ -309,13 +311,43 @@ func initAPI(ctx context.Context, ts topo.Server, actions *ActionRepository, rea
if err != nil {
return nil, fmt.Errorf("couldn't get heatmap data: %v", err)
}
return heatmap, nil
}
return nil, fmt.Errorf("invalid target path: %q expected path: ?keyspace=<keyspace>&cell=<cell>&type=<type>&metric=<metric>", targetPath)
})
handleCollection("tablet_health", func(r *http.Request) (interface{}, error) {
tabletPath := getItemPath(r.URL.Path)
parts := strings.SplitN(tabletPath, "/", 2)
// Request was incorrectly formatted.
if len(parts) != 2 {
return nil, fmt.Errorf("invalid tablet_health path: %q expected path: /tablet_health/<cell>/<uid>", tabletPath)
}
if realtimeStats == nil {
return nil, fmt.Errorf("realtimeStats not initialized")
}
cell := parts[0]
uidStr := parts[1]
uid, err := topoproto.ParseUID(uidStr)
if err != nil {
return nil, fmt.Errorf("incorrect uid: %v", err)
}
tabletAlias := topodatapb.TabletAlias{
Cell: cell,
Uid: uid,
}
tabletStat, err := realtimeStats.tabletStats(&tabletAlias)
if err != nil {
return nil, fmt.Errorf("could not get tabletStats: %v", err)
}
return tabletStat, nil
})
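
For reference, the new endpoint can be exercised with a plain HTTP GET. The sketch below is illustrative only and not part of this change; the vtctld address, the /api/ prefix and the tablet alias are assumptions.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// The handler above expects /tablet_health/<cell>/<uid>.
	resp, err := http.Get("http://localhost:15000/api/tablet_health/cell1/100")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The body is the JSON-encoded discovery.TabletStats for that tablet.
	fmt.Printf("%s\n", body)
}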
// Schema Change
http.HandleFunc(apiPrefix+"schema/apply", func(w http.ResponseWriter, r *http.Request) {
req := struct {

View file

@ -146,7 +146,7 @@ func TestAPI(t *testing.T) {
"Error": false
}`},
//Tablet Updates
// Tablet Updates
{"GET", "tablet_statuses/?keyspace=ks1&cell=cell1&type=REPLICA&metric=lag", `
{"Labels":[{"Label":{"Name":"cell1","Rowspan":2},"NestedLabels":[{"Name":"REPLICA","Rowspan":1},{"Name":"RDONLY","Rowspan":1}]},
{"Label":{"Name":"cell2","Rowspan":2},"NestedLabels":[{"Name":"REPLICA","Rowspan":1},{"Name":"RDONLY","Rowspan":1}]}],
@ -155,8 +155,14 @@ func TestAPI(t *testing.T) {
`},
{"GET", "tablet_statuses/cell1/REPLICA/lag", "can't get tablet_statuses: invalid target path: \"cell1/REPLICA/lag\" expected path: ?keyspace=<keyspace>&cell=<cell>&type=<type>&metric=<metric>"},
{"GET", "tablet_statuses/?keyspace=ks1&cell=cell1&type=hello&metric=lag", "can't get tablet_statuses: invalid tablet type: hello"},
}
// Tablet Health
{"GET", "tablet_health/cell1/100", `{ "Key": "", "Tablet": { "alias": { "cell": "cell1", "uid": 100 },"port_map": { "vt": 100 }, "keyspace": "ks1", "shard": "-80", "type": 2},
"Name": "", "Target": { "keyspace": "ks1", "shard": "-80", "tablet_type": 2 }, "Up": true, "Serving": true, "TabletExternallyReparentedTimestamp": 0,
"Stats": { "seconds_behind_master": 100 }, "LastError": null }`},
{"GET", "tablet_health/cell1", "can't get tablet_health: invalid tablet_health path: \"cell1\" expected path: /tablet_health/<cell>/<uid>"},
{"GET", "tablet_health/cell1/gh", "can't get tablet_health: incorrect uid: bad tablet uid strconv.ParseUint: parsing \"gh\": invalid syntax"},
}
for _, in := range table {
var resp *http.Response
var err error
@ -187,7 +193,7 @@ func TestAPI(t *testing.T) {
got := compactJSON(body)
want := compactJSON([]byte(in.want))
if want == "" {
// want is no valid JSON. Fallback to a string comparison.
// want is not valid JSON. Fallback to a string comparison.
want = in.want
// For unknown reasons errors have a trailing "\n\t\t". Remove it.
got = strings.TrimSpace(string(body))

View file

@ -104,7 +104,10 @@ func TestRealtimeStatsWithQueryService(t *testing.T) {
func checkStats(realtimeStats *realtimeStats, tablet *testlib.FakeTablet, want *querypb.RealtimeStats) error {
deadline := time.Now().Add(time.Second * 5)
for time.Now().Before(deadline) {
result := realtimeStats.tabletStatsByAlias(tablet.Tablet.Alias)
result, err := realtimeStats.tabletStats(tablet.Tablet.Alias)
if err != nil {
continue
}
if reflect.DeepEqual(result, discovery.TabletStats{}) {
continue
}

View file

@ -254,15 +254,15 @@ func (c *tabletStatsCache) heatmapData(keyspace, cell, tabletType, metric string
}, nil
}
func (c *tabletStatsCache) tabletStatsByAlias(tabletAlias *topodatapb.TabletAlias) discovery.TabletStats {
func (c *tabletStatsCache) tabletStats(tabletAlias *topodatapb.TabletAlias) (discovery.TabletStats, error) {
c.mu.Lock()
defer c.mu.Unlock()
ts, ok := c.statusesByAlias[tabletAlias.String()]
if !ok {
return discovery.TabletStats{}
return discovery.TabletStats{}, fmt.Errorf("could not find tablet: %v", tabletAlias)
}
return *ts
return *ts, nil
}
func replicationLag(stat *discovery.TabletStats) float64 {

View file

@ -168,6 +168,37 @@ func TestHeatmapData(t *testing.T) {
}
}
func TestTabletStats(t *testing.T) {
// Creating tabletStats.
ts1 := tabletStats("cell1", "ks1", "-80", topodatapb.TabletType_MASTER, 200)
ts2 := tabletStats("cell1", "ks1", "-80", topodatapb.TabletType_REPLICA, 100)
ts3 := tabletStats("cell1", "ks1", "-80", topodatapb.TabletType_REPLICA, 300)
tabletStatsCache := newTabletStatsCache()
tabletStatsCache.StatsUpdate(ts1)
tabletStatsCache.StatsUpdate(ts2)
// Test 1: tablet1 and tablet2 are updated with the stats received by the HealthCheck module.
got1, err := tabletStatsCache.tabletStats(ts1.Tablet.Alias)
want1 := *ts1
if err != nil || !reflect.DeepEqual(got1, want1) {
t.Errorf("got: %v, want: %v", got1, want1)
}
got2, err := tabletStatsCache.tabletStats(ts2.Tablet.Alias)
want2 := *ts2
if err != nil || !reflect.DeepEqual(got2, want2) {
t.Errorf("got: %v, want: %v", got2, want2)
}
// Test 2: tablet3 isn't found in the map since no update was received for it.
_, gotErr := tabletStatsCache.tabletStats(ts3.Tablet.Alias)
wantErr := "could not find tablet: cell:\"cell1\" uid:300 "
if gotErr.Error() != wantErr {
t.Errorf("got: %v, want: %v", gotErr.Error(), wantErr)
}
}
// tabletStats will create a discovery.TabletStats object.
func tabletStats(cell, keyspace, shard string, tabletType topodatapb.TabletType, uid uint32) *discovery.TabletStats {
target := &querypb.Target{

View file

@ -184,8 +184,15 @@ func InitVtctld(ts topo.Server) {
rest = "index.html"
}
filePath := path.Join(*webDir2, rest)
// If the requested file doesn't exist, serve index.html.
if _, err := os.Stat(filePath); err != nil {
// The requested file doesn't exist.
if strings.ContainsAny(rest, "/.") {
// This looks like a real file path, so return Not Found.
http.NotFound(w, r)
return
}
// It looks like a virtual route path (for pages within the app).
// For these, we must serve index.html to initialize the app.
filePath = path.Join(*webDir2, "index.html")
}
http.ServeFile(w, r, filePath)
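
To make the fallback rule concrete, here is a small self-contained sketch of the same classification (not taken from the commit; the sample paths are made up): when the requested file is missing, anything containing '/' or '.' is treated as a real file path and gets Not Found, while bare route names fall back to index.html.

package main

import (
	"fmt"
	"strings"
)

// classify mirrors the heuristic above for a path whose file does not exist.
func classify(rest string) string {
	if strings.ContainsAny(rest, "/.") {
		return "404 Not Found"
	}
	return "serve index.html"
}

func main() {
	for _, rest := range []string{"app.bundle.js", "img/logo.png", "dashboard", "keyspaces"} {
		fmt.Printf("%-20s -> %s\n", rest, classify(rest))
	}
	// Output:
	// app.bundle.js        -> 404 Not Found
	// img/logo.png         -> 404 Not Found
	// dashboard            -> serve index.html
	// keyspaces            -> serve index.html
}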

View file

@ -14,10 +14,11 @@ import (
"strconv"
)
// Stores the mapping of keys
// NumericLookupTable stores the mapping of keys.
type NumericLookupTable map[uint64]uint64
// Similar to vindex Numeric but first attempts a lookup via a json file
// NumericStaticMap is similar to vindex Numeric but first attempts a lookup via
// a JSON file.
type NumericStaticMap struct {
name string
lookup NumericLookupTable

View file

@ -5,27 +5,36 @@
package vindexes
import (
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/testfiles"
"reflect"
"testing"
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/testfiles"
)
var numericStaticMap Vindex
func init() {
func createVindex() (Vindex, error) {
m := make(map[string]string)
m["json_path"] = testfiles.Locate("vtgate/numeric_static_map_test.json")
numericStaticMap, _ = CreateVindex("numeric_static_map", "numericStaticMap", m)
return CreateVindex("numeric_static_map", "numericStaticMap", m)
}
func TestNumericStaticMapCost(t *testing.T) {
numericStaticMap, err := createVindex()
if err != nil {
t.Fatalf("failed to create vindex: %v", err)
}
if numericStaticMap.Cost() != 1 {
t.Errorf("Cost(): %d, want 1", numericStaticMap.Cost())
}
}
func TestNumericStaticMapMap(t *testing.T) {
numericStaticMap, err := createVindex()
if err != nil {
t.Fatalf("failed to create vindex: %v", err)
}
sqlVal, _ := sqltypes.BuildIntegral("8")
got, err := numericStaticMap.(Unique).Map(nil, []interface{}{
1,
@ -59,7 +68,12 @@ func TestNumericStaticMapMap(t *testing.T) {
}
func TestNumericStaticMapMapBadData(t *testing.T) {
_, err := numericStaticMap.(Unique).Map(nil, []interface{}{1.1})
numericStaticMap, err := createVindex()
if err != nil {
t.Fatalf("failed to create vindex: %v", err)
}
_, err = numericStaticMap.(Unique).Map(nil, []interface{}{1.1})
want := `NumericStaticMap.Map: unexpected type for 1.1: float64`
if err == nil || err.Error() != want {
t.Errorf("NumericStaticMap.Map: %v, want %v", err, want)
@ -67,6 +81,11 @@ func TestNumericStaticMapMapBadData(t *testing.T) {
}
func TestNumericStaticMapVerify(t *testing.T) {
numericStaticMap, err := createVindex()
if err != nil {
t.Fatalf("failed to create vindex: %v", err)
}
success, err := numericStaticMap.Verify(nil, 1, []byte("\x00\x00\x00\x00\x00\x00\x00\x01"))
if err != nil {
t.Error(err)
@ -77,7 +96,12 @@ func TestNumericStaticMapVerify(t *testing.T) {
}
func TestNumericStaticMapVerifyBadData(t *testing.T) {
_, err := numericStaticMap.Verify(nil, 1.1, []byte("\x00\x00\x00\x00\x00\x00\x00\x01"))
numericStaticMap, err := createVindex()
if err != nil {
t.Fatalf("failed to create vindex: %v", err)
}
_, err = numericStaticMap.Verify(nil, 1.1, []byte("\x00\x00\x00\x00\x00\x00\x00\x01"))
want := `NumericStaticMap.Verify: unexpected type for 1.1: float64`
if err == nil || err.Error() != want {
t.Errorf("numericStaticMap.Map: %v, want %v", err, want)

View file

@ -14,7 +14,7 @@ import (
)
var (
completeChunk = chunk{sqltypes.NULL, sqltypes.NULL}
completeChunk = chunk{sqltypes.NULL, sqltypes.NULL, 1, 1}
singleCompleteChunk = []chunk{completeChunk}
)
@ -25,23 +25,47 @@ var (
type chunk struct {
start sqltypes.Value
end sqltypes.Value
// number records the position of this chunk among all "total" chunks.
// The lowest value is 1.
number int
// total is the total number of chunks this chunk belongs to.
total int
}
// String returns a human-readable presentation of the chunk range.
func (c chunk) String() string {
return fmt.Sprintf("[%v,%v)", c.start, c.end)
// Pad the chunk number such that all log messages align nicely.
digits := digits(c.total)
return fmt.Sprintf("%*d/%d", digits, c.number, c.total)
}
func digits(i int) int {
digits := 1
for {
i /= 10
if i == 0 {
break
}
digits++
}
return digits
}
// generateChunks returns an array of chunks to use for splitting up a table
// into multiple data chunks. It only works for tables with a primary key
// whose first column is a numeric type.
func generateChunks(ctx context.Context, wr *wrangler.Wrangler, tablet *topodatapb.Tablet, td *tabletmanagerdatapb.TableDefinition, minTableSizeForSplit uint64, chunkCount int) ([]chunk, error) {
func generateChunks(ctx context.Context, wr *wrangler.Wrangler, tablet *topodatapb.Tablet, td *tabletmanagerdatapb.TableDefinition, chunkCount, minRowsPerChunk int) ([]chunk, error) {
if len(td.PrimaryKeyColumns) == 0 {
// No explicit primary key. Cannot chunk the rows then.
wr.Logger().Infof("Not splitting table %v into multiple chunks because it has no primary key columns. This will reduce the performance of the clone.", td.Name)
return singleCompleteChunk, nil
}
if td.DataLength < minTableSizeForSplit {
// Table is too small to split up.
if td.RowCount < 2*uint64(minRowsPerChunk) {
// The automatic adjustment of "chunkCount" based on "minRowsPerChunk"
// below would set "chunkCount" to less than 2 i.e. 1 or 0 chunks.
// In practice in this case there should be exactly one chunk.
// Return early in this case and notify the user about this.
wr.Logger().Infof("Not splitting table %v into multiple chunks because it has only %d rows.", td.Name, td.RowCount)
return singleCompleteChunk, nil
}
if chunkCount == 1 {
@ -69,68 +93,57 @@ func generateChunks(ctx context.Context, wr *wrangler.Wrangler, tablet *topodata
return singleCompleteChunk, nil
}
// Determine the average number of rows per chunk for the given chunkCount.
avgRowsPerChunk := td.RowCount / uint64(chunkCount)
if avgRowsPerChunk < uint64(minRowsPerChunk) {
// Reduce the chunkCount to fulfill minRowsPerChunk.
newChunkCount := td.RowCount / uint64(minRowsPerChunk)
wr.Logger().Infof("Reducing the number of chunks for table %v from the default %d to %d to make sure that each chunk has at least %d rows.", td.Name, chunkCount, newChunkCount, minRowsPerChunk)
chunkCount = int(newChunkCount)
}
// TODO(mberlin): Write a unit test for this part of the function.
var interval interface{}
chunks := make([]chunk, chunkCount)
switch min := min.(type) {
case int64:
max := max.(int64)
interval := (max - min) / int64(chunkCount)
interval = (max - min) / int64(chunkCount)
if interval == 0 {
wr.Logger().Infof("Not splitting table %v into multiple chunks, interval=0: %v to %v", td.Name, min, max)
return singleCompleteChunk, nil
}
start := min
for i := 0; i < chunkCount; i++ {
end := start + interval
chunk, err := toChunk(start, end)
if err != nil {
return nil, err
}
chunks[i] = chunk
start = end
}
case uint64:
max := max.(uint64)
interval := (max - min) / uint64(chunkCount)
interval = (max - min) / uint64(chunkCount)
if interval == 0 {
wr.Logger().Infof("Not splitting table %v into multiple chunks, interval=0: %v to %v", td.Name, min, max)
return singleCompleteChunk, nil
}
start := min
for i := 0; i < chunkCount; i++ {
end := start + interval
chunk, err := toChunk(start, end)
if err != nil {
return nil, err
}
chunks[i] = chunk
start = end
}
case float64:
max := max.(float64)
interval := (max - min) / float64(chunkCount)
interval = (max - min) / float64(chunkCount)
if interval == 0 {
wr.Logger().Infof("Not splitting table %v into multiple chunks, interval=0: %v to %v", td.Name, min, max)
return singleCompleteChunk, nil
}
start := min
for i := 0; i < chunkCount; i++ {
end := start + interval
chunk, err := toChunk(start, end)
if err != nil {
return nil, err
}
chunks[i] = chunk
start = end
}
default:
wr.Logger().Infof("Not splitting table %v into multiple chunks, primary key not numeric.", td.Name)
return singleCompleteChunk, nil
}
// Create chunks.
start := min
for i := 0; i < chunkCount; i++ {
end := add(start, interval)
chunk, err := toChunk(start, end, i+1, chunkCount)
if err != nil {
return nil, err
}
chunks[i] = chunk
start = end
}
// Clear out the MIN and MAX on the first and last chunk respectively
// because other shards might have smaller or higher values than the one we
// looked at.
@ -139,7 +152,20 @@ func generateChunks(ctx context.Context, wr *wrangler.Wrangler, tablet *topodata
return chunks, nil
}
func toChunk(start, end interface{}) (chunk, error) {
func add(start, interval interface{}) interface{} {
switch start := start.(type) {
case int64:
return start + interval.(int64)
case uint64:
return start + interval.(uint64)
case float64:
return start + interval.(float64)
default:
panic(fmt.Sprintf("unsupported type %T for interval start: %v", start, start))
}
}
func toChunk(start, end interface{}, number, total int) (chunk, error) {
startValue, err := sqltypes.BuildValue(start)
if err != nil {
return chunk{}, fmt.Errorf("Failed to convert calculated start value (%v) into internal sqltypes.Value: %v", start, err)
@ -148,5 +174,5 @@ func toChunk(start, end interface{}) (chunk, error) {
if err != nil {
return chunk{}, fmt.Errorf("Failed to convert calculated end value (%v) into internal sqltypes.Value: %v", end, err)
}
return chunk{startValue, endValue}, nil
return chunk{startValue, endValue, number, total}, nil
}
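
To illustrate the new chunk accounting, the following stand-alone sketch mirrors the arithmetic above (it does not call the worker code; the row count and key range are made-up example figures): the chunk count is first reduced to honor minRowsPerChunk, then evenly spaced, numbered chunks are generated over the primary key range.

package main

import "fmt"

func main() {
	// Example figures: a 2,500-row table, a chunkCount of 1000 and
	// a minRowsPerChunk of 10.
	rowCount, chunkCount, minRowsPerChunk := uint64(2500), 1000, 10

	// Mirror of the reduction above: 2500/1000 = 2 rows per chunk on average,
	// below minRowsPerChunk, so chunkCount drops to 2500/10 = 250.
	if rowCount/uint64(chunkCount) < uint64(minRowsPerChunk) {
		chunkCount = int(rowCount / uint64(minRowsPerChunk))
	}
	fmt.Println("effective chunkCount:", chunkCount) // 250

	// Mirror of the boundary generation for an int64 primary key range
	// [min, max) = [0, 1000): interval = (max-min)/chunkCount = 4.
	min, max := int64(0), int64(1000)
	interval := (max - min) / int64(chunkCount)
	start := min
	for i := 0; i < 3; i++ { // print only the first few chunks
		end := start + interval
		// chunk.String() would render these as padded "  1/250", "  2/250", ...
		fmt.Printf("%*d/%d: [%v,%v)\n", len(fmt.Sprint(chunkCount)), i+1, chunkCount, start, end)
		start = end
	}
	// (The real generateChunks additionally clears the outermost start/end
	// bounds so other shards' values are not excluded.)
}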

View file

@ -7,8 +7,16 @@ package worker
import "github.com/youtube/vitess/go/vt/throttler"
const (
defaultOnline = true
defaultOffline = true
defaultOnline = true
defaultOffline = true
// defaultChunkCount is the number of chunks in which each table should be
// divided. One chunk is processed by one chunk pipeline at a time.
// -source_reader_count defines the number of concurrent chunk pipelines.
defaultChunkCount = 1000
// defaultMinRowsPerChunk is the minimum number of rows a chunk should have
// on average. If this is not guaranteed, --chunk_count will be reduced
// automatically.
defaultMinRowsPerChunk = 10 * 1000
defaultSourceReaderCount = 10
// defaultWriteQueryMaxRows aggregates up to 100 rows per INSERT or DELETE
// query. Higher values are not recommended to avoid overloading MySQL.

View file

@ -46,7 +46,6 @@ type LegacySplitCloneWorker struct {
strategy *splitStrategy
sourceReaderCount int
destinationPackCount int
minTableSizeForSplit uint64
destinationWriterCount int
minHealthyRdonlyTablets int
maxTPS int64
@ -89,7 +88,7 @@ type LegacySplitCloneWorker struct {
}
// NewLegacySplitCloneWorker returns a new LegacySplitCloneWorker object.
func NewLegacySplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string, strategyStr string, sourceReaderCount, destinationPackCount int, minTableSizeForSplit uint64, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS int64) (Worker, error) {
func NewLegacySplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string, strategyStr string, sourceReaderCount, destinationPackCount, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS int64) (Worker, error) {
strategy, err := newSplitStrategy(wr.Logger(), strategyStr)
if err != nil {
return nil, err
@ -110,7 +109,6 @@ func NewLegacySplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard stri
strategy: strategy,
sourceReaderCount: sourceReaderCount,
destinationPackCount: destinationPackCount,
minTableSizeForSplit: minTableSizeForSplit,
destinationWriterCount: destinationWriterCount,
minHealthyRdonlyTablets: minHealthyRdonlyTablets,
maxTPS: maxTPS,
@ -549,7 +547,7 @@ func (scw *LegacySplitCloneWorker) copy(ctx context.Context) error {
}
rowSplitter := NewRowSplitter(scw.destinationShards, keyResolver)
chunks, err := generateChunks(ctx, scw.wr, scw.sourceTablets[shardIndex], td, scw.minTableSizeForSplit, scw.sourceReaderCount)
chunks, err := generateChunks(ctx, scw.wr, scw.sourceTablets[shardIndex], td, scw.sourceReaderCount, defaultMinRowsPerChunk)
if err != nil {
return err
}

View file

@ -59,8 +59,6 @@ const legacySplitCloneHTML2 = `
<INPUT type="text" id="sourceReaderCount" name="sourceReaderCount" value="{{.DefaultSourceReaderCount}}"></BR>
<LABEL for="destinationPackCount">Destination Pack Count: </LABEL>
<INPUT type="text" id="destinationPackCount" name="destinationPackCount" value="{{.DefaultDestinationPackCount}}"></BR>
<LABEL for="minTableSizeForSplit">Minimun Table Size For Split: </LABEL>
<INPUT type="text" id="minTableSizeForSplit" name="minTableSizeForSplit" value="{{.DefaultMinTableSizeForSplit}}"></BR>
<LABEL for="destinationWriterCount">Destination Writer Count: </LABEL>
<INPUT type="text" id="destinationWriterCount" name="destinationWriterCount" value="{{.DefaultDestinationWriterCount}}"></BR>
<LABEL for="minHealthyRdonlyTablets">Minimum Number of required healthy RDONLY tablets: </LABEL>
@ -90,7 +88,6 @@ func commandLegacySplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag
strategy := subFlags.String("strategy", "", "which strategy to use for restore, use 'vtworker LegacySplitClone --strategy=-help k/s' for more info")
sourceReaderCount := subFlags.Int("source_reader_count", defaultSourceReaderCount, "number of concurrent streaming queries to use on the source")
destinationPackCount := subFlags.Int("destination_pack_count", defaultDestinationPackCount, "number of packets to pack in one destination insert")
minTableSizeForSplit := subFlags.Int("min_table_size_for_split", defaultMinTableSizeForSplit, "tables bigger than this size on disk in bytes will be split into source_reader_count chunks if possible")
destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination")
minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets before taking out one")
maxTPS := subFlags.Int64("max_tps", defaultMaxTPS, "if non-zero, limit copy to maximum number of (write) transactions/second on the destination (unlimited by default)")
@ -110,7 +107,7 @@ func commandLegacySplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag
if *excludeTables != "" {
excludeTableArray = strings.Split(*excludeTables, ",")
}
worker, err := NewLegacySplitCloneWorker(wr, wi.cell, keyspace, shard, excludeTableArray, *strategy, *sourceReaderCount, *destinationPackCount, uint64(*minTableSizeForSplit), *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS)
worker, err := NewLegacySplitCloneWorker(wr, wi.cell, keyspace, shard, excludeTableArray, *strategy, *sourceReaderCount, *destinationPackCount, *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS)
if err != nil {
return nil, fmt.Errorf("cannot create split clone worker: %v", err)
}
@ -145,7 +142,6 @@ func interactiveLegacySplitClone(ctx context.Context, wi *Instance, wr *wrangler
result["Shard"] = shard
result["DefaultSourceReaderCount"] = fmt.Sprintf("%v", defaultSourceReaderCount)
result["DefaultDestinationPackCount"] = fmt.Sprintf("%v", defaultDestinationPackCount)
result["DefaultMinTableSizeForSplit"] = fmt.Sprintf("%v", defaultMinTableSizeForSplit)
result["DefaultDestinationWriterCount"] = fmt.Sprintf("%v", defaultDestinationWriterCount)
result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets)
result["DefaultMaxTPS"] = fmt.Sprintf("%v", defaultMaxTPS)
@ -168,11 +164,6 @@ func interactiveLegacySplitClone(ctx context.Context, wi *Instance, wr *wrangler
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse destinationPackCount: %s", err)
}
minTableSizeForSplitStr := r.FormValue("minTableSizeForSplit")
minTableSizeForSplit, err := strconv.ParseInt(minTableSizeForSplitStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse minTableSizeForSplit: %s", err)
}
destinationWriterCountStr := r.FormValue("destinationWriterCount")
destinationWriterCount, err := strconv.ParseInt(destinationWriterCountStr, 0, 64)
if err != nil {
@ -190,7 +181,7 @@ func interactiveLegacySplitClone(ctx context.Context, wi *Instance, wr *wrangler
}
// start the clone job
wrk, err := NewLegacySplitCloneWorker(wr, wi.cell, keyspace, shard, excludeTableArray, strategy, int(sourceReaderCount), int(destinationPackCount), uint64(minTableSizeForSplit), int(destinationWriterCount), int(minHealthyRdonlyTablets), maxTPS)
wrk, err := NewLegacySplitCloneWorker(wr, wi.cell, keyspace, shard, excludeTableArray, strategy, int(sourceReaderCount), int(destinationPackCount), int(destinationWriterCount), int(minHealthyRdonlyTablets), maxTPS)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot create worker: %v", err)
}

View file

@ -149,8 +149,10 @@ func (tc *legacySplitCloneTestCase) setUp(v3 bool) {
Columns: []string{"id", "msg", "keyspace_id"},
PrimaryKeyColumns: []string{"id"},
Type: tmutils.TableBaseTable,
// This informs how many rows we can pack into a single insert
DataLength: 2048,
// Note that LegacySplitClone does not support the flag --min_rows_per_chunk.
// Therefore, we use the default value in our calculation.
// * 10 because --source_reader_count is set to 10 i.e. there are 10 chunks.
RowCount: defaultMinRowsPerChunk * 10,
},
},
}
@ -209,7 +211,6 @@ func (tc *legacySplitCloneTestCase) setUp(v3 bool) {
"LegacySplitClone",
"-source_reader_count", "10",
"-destination_pack_count", "4",
"-min_table_size_for_split", "1",
"-destination_writer_count", "10",
"ks/-80"}
}

View file

@ -138,7 +138,7 @@ func TestGenerateQuery(t *testing.T) {
for _, tc := range testcases {
r := RestartableResultReader{
chunk: chunk{tc.start, tc.end},
chunk: chunk{tc.start, tc.end, 1, 1},
td: &tabletmanagerdatapb.TableDefinition{
Name: tc.table,
Columns: tc.columns,

View file

@ -314,6 +314,11 @@ func NewRowRouter(shardInfos []*topo.ShardInfo, keyResolver keyspaceIDResolver)
// Route returns which shard (specified by the index of the list of shards
// passed in NewRowRouter) contains the given row.
func (rr *RowRouter) Route(row []sqltypes.Value) (int, error) {
if len(rr.keyRanges) == 1 {
// Fast path when there is only one destination shard.
return 0, nil
}
k, err := rr.keyResolver.keyspaceID(row)
if err != nil {
return -1, err

View file

@ -32,26 +32,43 @@ import (
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)
// cloneType specifies whether it is a horizontal resharding or a vertical split.
// TODO(mberlin): Remove this once we merged both into one command.
type cloneType int
const (
horizontalResharding cloneType = iota
verticalSplit
)
// servingTypes is the list of tabletTypes which the source keyspace must be serving.
var servingTypes = []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}
// SplitCloneWorker will clone the data within a keyspace from a
// source set of shards to a destination set of shards.
type SplitCloneWorker struct {
StatusWorker
wr *wrangler.Wrangler
cell string
keyspace string
shard string
online bool
offline bool
wr *wrangler.Wrangler
cloneType cloneType
cell string
destinationKeyspace string
shard string
online bool
offline bool
// verticalSplit only: List of tables which should be split out.
tables []string
// horizontalResharding only: List of tables which will be skipped.
excludeTables []string
strategy *splitStrategy
chunkCount int
minRowsPerChunk int
sourceReaderCount int
writeQueryMaxRows int
writeQueryMaxSize int
// TODO(mberlin): Delete this once our testing has shown that an extra flag
// for this is not necessary.
writeQueryMaxRowsDelete int
minTableSizeForSplit uint64
destinationWriterCount int
minHealthyRdonlyTablets int
maxTPS int64
@ -59,10 +76,10 @@ type SplitCloneWorker struct {
tabletTracker *TabletTracker
// populated during WorkerStateInit, read-only after that
keyspaceInfo *topo.KeyspaceInfo
sourceShards []*topo.ShardInfo
destinationShards []*topo.ShardInfo
keyspaceSchema *vindexes.KeyspaceSchema
destinationKeyspaceInfo *topo.KeyspaceInfo
sourceShards []*topo.ShardInfo
destinationShards []*topo.ShardInfo
keyspaceSchema *vindexes.KeyspaceSchema
// healthCheck is used for the destination shards to a) find out the current
// MASTER tablet, b) get the list of healthy RDONLY tablets and c) track the
// replication lag of all REPLICA tablets.
@ -112,11 +129,30 @@ type SplitCloneWorker struct {
refreshAliases [][]*topodatapb.TabletAlias
refreshTablets []map[topodatapb.TabletAlias]*topo.TabletInfo
ev *events.SplitClone
ev event.Updater
}
// NewSplitCloneWorker returns a new SplitCloneWorker object.
func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, online, offline bool, excludeTables []string, strategyStr string, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete int, minTableSizeForSplit uint64, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS int64) (Worker, error) {
// newSplitCloneWorker returns a new worker object for the SplitClone command.
func newSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, online, offline bool, excludeTables []string, strategyStr string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS int64) (Worker, error) {
return newCloneWorker(wr, horizontalResharding, cell, keyspace, shard, online, offline, nil /* tables */, excludeTables, strategyStr, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete, destinationWriterCount, minHealthyRdonlyTablets, maxTPS)
}
// newVerticalSplitCloneWorker returns a new worker object for the
// VerticalSplitClone command.
func newVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, online, offline bool, tables []string, strategyStr string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS int64) (Worker, error) {
return newCloneWorker(wr, verticalSplit, cell, keyspace, shard, online, offline, tables, nil /* excludeTables */, strategyStr, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete, destinationWriterCount, minHealthyRdonlyTablets, maxTPS)
}
// newCloneWorker returns a new SplitCloneWorker object which is used both by
// the SplitClone and VerticalSplitClone command.
// TODO(mberlin): Rename SplitCloneWorker to cloneWorker.
func newCloneWorker(wr *wrangler.Wrangler, cloneType cloneType, cell, keyspace, shard string, online, offline bool, tables, excludeTables []string, strategyStr string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS int64) (Worker, error) {
if cloneType != horizontalResharding && cloneType != verticalSplit {
return nil, fmt.Errorf("unknown cloneType: %v This is a bug. Please report", cloneType)
}
if tables != nil && len(tables) == 0 {
return nil, errors.New("list of tablets to be split out must not be empty")
}
strategy, err := newSplitStrategy(wr.Logger(), strategyStr)
if err != nil {
return nil, err
@ -130,21 +166,24 @@ func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, on
if !online && !offline {
return nil, errors.New("at least one clone phase (-online, -offline) must be enabled (and not set to false)")
}
return &SplitCloneWorker{
scw := &SplitCloneWorker{
StatusWorker: NewStatusWorker(),
wr: wr,
cloneType: cloneType,
cell: cell,
keyspace: keyspace,
destinationKeyspace: keyspace,
shard: shard,
online: online,
offline: offline,
tables: tables,
excludeTables: excludeTables,
strategy: strategy,
chunkCount: chunkCount,
minRowsPerChunk: minRowsPerChunk,
sourceReaderCount: sourceReaderCount,
writeQueryMaxRows: writeQueryMaxRows,
writeQueryMaxSize: writeQueryMaxSize,
writeQueryMaxRowsDelete: writeQueryMaxRowsDelete,
minTableSizeForSplit: minTableSizeForSplit,
destinationWriterCount: destinationWriterCount,
minHealthyRdonlyTablets: minHealthyRdonlyTablets,
maxTPS: maxTPS,
@ -156,15 +195,30 @@ func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, on
tableStatusListOnline: &tableStatusList{},
tableStatusListOffline: &tableStatusList{},
}
scw.initializeEventDescriptor()
return scw, nil
}
ev: &events.SplitClone{
Cell: cell,
Keyspace: keyspace,
Shard: shard,
ExcludeTables: excludeTables,
Strategy: strategy.String(),
},
}, nil
func (scw *SplitCloneWorker) initializeEventDescriptor() {
switch scw.cloneType {
case horizontalResharding:
scw.ev = &events.SplitClone{
Cell: scw.cell,
Keyspace: scw.destinationKeyspace,
Shard: scw.shard,
ExcludeTables: scw.excludeTables,
Strategy: scw.strategy.String(),
}
case verticalSplit:
scw.ev = &events.VerticalSplitClone{
Cell: scw.cell,
Keyspace: scw.destinationKeyspace,
Shard: scw.shard,
Tables: scw.tables,
Strategy: scw.strategy.String(),
}
}
}
func (scw *SplitCloneWorker) setState(state StatusWorkerState) {
@ -212,7 +266,7 @@ func (scw *SplitCloneWorker) FormattedOfflineSources() string {
func (scw *SplitCloneWorker) StatusAsHTML() template.HTML {
state := scw.State()
result := "<b>Working on:</b> " + scw.keyspace + "/" + scw.shard + "</br>\n"
result := "<b>Working on:</b> " + scw.destinationKeyspace + "/" + scw.shard + "</br>\n"
result += "<b>State:</b> " + state.String() + "</br>\n"
switch state {
case WorkerStateCloneOnline:
@ -256,7 +310,7 @@ func (scw *SplitCloneWorker) StatusAsHTML() template.HTML {
func (scw *SplitCloneWorker) StatusAsText() string {
state := scw.State()
result := "Working on: " + scw.keyspace + "/" + scw.shard + "\n"
result := "Working on: " + scw.destinationKeyspace + "/" + scw.shard + "\n"
result += "State: " + state.String() + "\n"
switch state {
case WorkerStateCloneOnline:
@ -430,81 +484,36 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error {
// - read the destination keyspace, make sure it has 'servedFrom' values
func (scw *SplitCloneWorker) init(ctx context.Context) error {
scw.setState(WorkerStateInit)
var err error
// read the keyspace and validate it
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
scw.keyspaceInfo, err = scw.wr.TopoServer().GetKeyspace(shortCtx, scw.keyspace)
var err error
scw.destinationKeyspaceInfo, err = scw.wr.TopoServer().GetKeyspace(shortCtx, scw.destinationKeyspace)
cancel()
if err != nil {
return fmt.Errorf("cannot read keyspace %v: %v", scw.keyspace, err)
return fmt.Errorf("cannot read (destination) keyspace %v: %v", scw.destinationKeyspace, err)
}
// find the OverlappingShards in the keyspace
shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout)
osList, err := topotools.FindOverlappingShards(shortCtx, scw.wr.TopoServer(), scw.keyspace)
cancel()
if err != nil {
return fmt.Errorf("cannot FindOverlappingShards in %v: %v", scw.keyspace, err)
}
// find the shard we mentioned in there, if any
os := topotools.OverlappingShardsForShard(osList, scw.shard)
if os == nil {
return fmt.Errorf("the specified shard %v/%v is not in any overlapping shard", scw.keyspace, scw.shard)
}
scw.wr.Logger().Infof("Found overlapping shards: %+v\n", os)
// one side should have served types, the other one none,
// figure out which is which, then double check them all
if len(os.Left[0].ServedTypes) > 0 {
scw.sourceShards = os.Left
scw.destinationShards = os.Right
} else {
scw.sourceShards = os.Right
scw.destinationShards = os.Left
}
// Verify that filtered replication is not already enabled.
for _, si := range scw.destinationShards {
if len(si.SourceShards) > 0 {
return fmt.Errorf("destination shard %v/%v has filtered replication already enabled from a previous resharding (ShardInfo is set)."+
" This requires manual intervention e.g. use vtctl SourceShardDelete to remove it",
si.Keyspace(), si.ShardName())
// Set source and destination shard infos.
switch scw.cloneType {
case horizontalResharding:
if err := scw.initShardsForHorizontalResharding(ctx); err != nil {
return err
}
case verticalSplit:
if err := scw.initShardsForVerticalSplit(ctx); err != nil {
return err
}
}
// validate all serving types
servingTypes := []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}
for _, st := range servingTypes {
for _, si := range scw.sourceShards {
if si.GetServedType(st) == nil {
return fmt.Errorf("source shard %v/%v is not serving type %v", si.Keyspace(), si.ShardName(), st)
}
}
}
for _, si := range scw.destinationShards {
if len(si.ServedTypes) > 0 {
return fmt.Errorf("destination shard %v/%v is serving some types", si.Keyspace(), si.ShardName())
}
if err := scw.sanityCheckShardInfos(); err != nil {
return err
}
// read the vschema if needed
var keyspaceSchema *vindexes.KeyspaceSchema
if *useV3ReshardingMode {
kschema, err := scw.wr.TopoServer().GetVSchema(ctx, scw.keyspace)
if err != nil {
return fmt.Errorf("cannot load VSchema for keyspace %v: %v", scw.keyspace, err)
if scw.cloneType == horizontalResharding {
if err := scw.loadVSchema(ctx); err != nil {
return err
}
if kschema == nil {
return fmt.Errorf("no VSchema for keyspace %v", scw.keyspace)
}
keyspaceSchema, err = vindexes.BuildKeyspaceSchema(kschema, scw.keyspace)
if err != nil {
return fmt.Errorf("cannot build vschema for keyspace %v: %v", scw.keyspace, err)
}
scw.keyspaceSchema = keyspaceSchema
}
// Initialize healthcheck and add destination shards to it.
@ -525,6 +534,132 @@ func (scw *SplitCloneWorker) init(ctx context.Context) error {
return nil
}
func (scw *SplitCloneWorker) initShardsForHorizontalResharding(ctx context.Context) error {
// find the OverlappingShards in the keyspace
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
osList, err := topotools.FindOverlappingShards(shortCtx, scw.wr.TopoServer(), scw.destinationKeyspace)
cancel()
if err != nil {
return fmt.Errorf("cannot FindOverlappingShards in %v: %v", scw.destinationKeyspace, err)
}
// find the shard we mentioned in there, if any
os := topotools.OverlappingShardsForShard(osList, scw.shard)
if os == nil {
return fmt.Errorf("the specified shard %v/%v is not in any overlapping shard", scw.destinationKeyspace, scw.shard)
}
scw.wr.Logger().Infof("Found overlapping shards: %+v\n", os)
// one side should have served types, the other one none,
// figure out which is which, then double check them all
if len(os.Left[0].ServedTypes) > 0 {
scw.sourceShards = os.Left
scw.destinationShards = os.Right
} else {
scw.sourceShards = os.Right
scw.destinationShards = os.Left
}
return nil
}
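// Illustrative example (not from this diff): when splitting ks/-80 into
// ks/-40 and ks/40-80, FindOverlappingShards pairs the two sides roughly as
//   os.Left  = [-80]         (still has ServedTypes -> becomes sourceShards)
//   os.Right = [-40, 40-80]  (no ServedTypes yet   -> becomes destinationShards)
// Which side ends up in Left or Right depends on the topology; the
// ServedTypes check above is what actually decides source vs. destination.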
func (scw *SplitCloneWorker) initShardsForVerticalSplit(ctx context.Context) error {
if len(scw.destinationKeyspaceInfo.ServedFroms) == 0 {
return fmt.Errorf("destination keyspace %v has no KeyspaceServedFrom", scw.destinationKeyspace)
}
// Determine the source keyspace.
servedFrom := ""
for _, st := range servingTypes {
sf := scw.destinationKeyspaceInfo.GetServedFrom(st)
if sf == nil {
return fmt.Errorf("destination keyspace %v is serving type %v", scw.destinationKeyspace, st)
}
if servedFrom == "" {
servedFrom = sf.Keyspace
} else {
if servedFrom != sf.Keyspace {
return fmt.Errorf("destination keyspace %v is serving from multiple source keyspaces %v and %v", scw.destinationKeyspace, servedFrom, sf.Keyspace)
}
}
}
sourceKeyspace := servedFrom
// Init the source and destination shard info.
sourceShardInfo, err := scw.wr.TopoServer().GetShard(ctx, sourceKeyspace, scw.shard)
if err != nil {
return err
}
scw.sourceShards = []*topo.ShardInfo{sourceShardInfo}
destShardInfo, err := scw.wr.TopoServer().GetShard(ctx, scw.destinationKeyspace, scw.shard)
if err != nil {
return err
}
scw.destinationShards = []*topo.ShardInfo{destShardInfo}
return nil
}
func (scw *SplitCloneWorker) sanityCheckShardInfos() error {
// Verify that filtered replication is not already enabled.
for _, si := range scw.destinationShards {
if len(si.SourceShards) > 0 {
return fmt.Errorf("destination shard %v/%v has filtered replication already enabled from a previous resharding (ShardInfo is set)."+
" This requires manual intervention e.g. use vtctl SourceShardDelete to remove it",
si.Keyspace(), si.ShardName())
}
}
// Verify that the source is serving all serving types.
for _, st := range servingTypes {
for _, si := range scw.sourceShards {
if si.GetServedType(st) == nil {
return fmt.Errorf("source shard %v/%v is not serving type %v", si.Keyspace(), si.ShardName(), st)
}
}
}
switch scw.cloneType {
case horizontalResharding:
// Verify that the destination is not serving yet.
for _, si := range scw.destinationShards {
if len(si.ServedTypes) > 0 {
return fmt.Errorf("destination shard %v/%v is serving some types", si.Keyspace(), si.ShardName())
}
}
case verticalSplit:
// Verify that the destination is serving all types.
for _, st := range servingTypes {
for _, si := range scw.destinationShards {
if si.GetServedType(st) == nil {
return fmt.Errorf("source shard %v/%v is not serving type %v", si.Keyspace(), si.ShardName(), st)
}
}
}
}
return nil
}
func (scw *SplitCloneWorker) loadVSchema(ctx context.Context) error {
var keyspaceSchema *vindexes.KeyspaceSchema
if *useV3ReshardingMode {
kschema, err := scw.wr.TopoServer().GetVSchema(ctx, scw.destinationKeyspace)
if err != nil {
return fmt.Errorf("cannot load VSchema for keyspace %v: %v", scw.destinationKeyspace, err)
}
if kschema == nil {
return fmt.Errorf("no VSchema for keyspace %v", scw.destinationKeyspace)
}
keyspaceSchema, err = vindexes.BuildKeyspaceSchema(kschema, scw.destinationKeyspace)
if err != nil {
return fmt.Errorf("cannot build vschema for keyspace %v: %v", scw.destinationKeyspace, err)
}
scw.keyspaceSchema = keyspaceSchema
}
return nil
}
// findOfflineSourceTablets phase:
// - find one rdonly in the source shard
// - mark it as 'worker' pointing back to us
@ -739,22 +874,14 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState)
for tableIndex, td := range sourceSchemaDefinition.TableDefinitions {
td = reorderColumnsPrimaryKeyFirst(td)
var keyResolver keyspaceIDResolver
if *useV3ReshardingMode {
keyResolver, err = newV3ResolverFromTableDefinition(scw.keyspaceSchema, td)
if err != nil {
return fmt.Errorf("cannot resolve v3 sharding keys for keyspace %v: %v", scw.keyspace, err)
}
} else {
keyResolver, err = newV2Resolver(scw.keyspaceInfo, td)
if err != nil {
return fmt.Errorf("cannot resolve sharding keys for keyspace %v: %v", scw.keyspace, err)
}
keyResolver, err := scw.createKeyResolver(td)
if err != nil {
return fmt.Errorf("cannot resolve sharding keys for keyspace %v: %v", scw.destinationKeyspace, err)
}
// TODO(mberlin): We're going to chunk *all* source shards based on the MIN
// and MAX values of the *first* source shard. Is this going to be a problem?
chunks, err := generateChunks(ctx, scw.wr, firstSourceTablet, td, scw.minTableSizeForSplit, scw.sourceReaderCount)
chunks, err := generateChunks(ctx, scw.wr, firstSourceTablet, td, scw.chunkCount, scw.minRowsPerChunk)
if err != nil {
return err
}
@ -829,7 +956,7 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState)
destResultReader, err := NewRestartableResultReader(ctx, scw.wr.Logger(), scw.wr.TopoServer(), destAlias, td, chunk)
if err != nil {
processError("NewQueryResultReaderForTablet for dest tablet: %v failed: %v", destAlias, err)
processError("NewQueryResultReaderForTablet for destination tablet: %v failed: %v", destAlias, err)
return
}
defer destResultReader.Close()
@ -841,7 +968,7 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState)
if len(sourceReaders) >= 2 {
sourceReader, err = NewResultMerger(sourceReaders, len(td.PrimaryKeyColumns))
if err != nil {
processError("NewResultMerger for source tablets failed: %v", err)
processError("NewResultMerger for table: %v for source tablets failed: %v", td.Name, err)
return
}
} else {
@ -850,7 +977,7 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState)
if len(destReaders) >= 2 {
destReader, err = NewResultMerger(destReaders, len(td.PrimaryKeyColumns))
if err != nil {
processError("NewResultMerger for dest tablets failed: %v", err)
processError("NewResultMerger for table: %v for destination tablets failed: %v", td.Name, err)
return
}
} else {
@ -943,9 +1070,9 @@ func (scw *SplitCloneWorker) clone(ctx context.Context, state StatusWorkerState)
scw.wr.Logger().Infof("Skipping setting SourceShard on destination shards.")
} else {
for _, si := range scw.destinationShards {
scw.wr.Logger().Infof("Setting SourceShard on shard %v/%v", si.Keyspace(), si.ShardName())
scw.wr.Logger().Infof("Setting SourceShard on shard %v/%v (tables: %v)", si.Keyspace(), si.ShardName(), scw.tables)
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
err := scw.wr.SetSourceShards(shortCtx, si.Keyspace(), si.ShardName(), scw.offlineSourceAliases, nil)
err := scw.wr.SetSourceShards(shortCtx, si.Keyspace(), si.ShardName(), scw.offlineSourceAliases, scw.tables)
cancel()
if err != nil {
return fmt.Errorf("failed to set source shards: %v", err)
@ -989,7 +1116,7 @@ func (scw *SplitCloneWorker) getSourceSchema(ctx context.Context, tablet *topoda
// in each source shard for each table to be about the same
// (rowCount is used to estimate an ETA)
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
sourceSchemaDefinition, err := scw.wr.GetSchema(shortCtx, tablet.Alias, nil, scw.excludeTables, false /* includeViews */)
sourceSchemaDefinition, err := scw.wr.GetSchema(shortCtx, tablet.Alias, scw.tables, scw.excludeTables, false /* includeViews */)
cancel()
if err != nil {
return nil, fmt.Errorf("cannot get schema from source %v: %v", topoproto.TabletAliasString(tablet.Alias), err)
@ -1005,6 +1132,23 @@ func (scw *SplitCloneWorker) getSourceSchema(ctx context.Context, tablet *topoda
return sourceSchemaDefinition, nil
}
// createKeyResolver is called at the start of each chunk pipeline.
// It creates a keyspaceIDResolver which translates a given row to a
// keyspace ID. This is necessary to route each row that gets copied to the
// correct destination shard.
func (scw *SplitCloneWorker) createKeyResolver(td *tabletmanagerdatapb.TableDefinition) (keyspaceIDResolver, error) {
if scw.cloneType == verticalSplit {
// VerticalSplitClone currently always has exactly one destination shard
// and therefore does not require routing between multiple shards.
return nil, nil
}
if *useV3ReshardingMode {
return newV3ResolverFromTableDefinition(scw.keyspaceSchema, td)
}
return newV2Resolver(scw.destinationKeyspaceInfo, td)
}
// StatsUpdate receives replication lag updates for each destination master
// and forwards them to the respective throttler instance.
// It is part of the discovery.HealthCheckStatsListener interface.

View file

@ -58,6 +58,10 @@ const splitCloneHTML2 = `
<INPUT type="text" id="excludeTables" name="excludeTables" value="moving.*"></BR>
<LABEL for="strategy">Strategy: </LABEL>
<INPUT type="text" id="strategy" name="strategy" value=""></BR>
<LABEL for="chunkCount">Chunk Count: </LABEL>
<INPUT type="text" id="chunkCount" name="chunkCount" value="{{.DefaultChunkCount}}"></BR>
<LABEL for="minRowsPerChunk">Minimun Number of Rows per Chunk (may reduce the Chunk Count): </LABEL>
<INPUT type="text" id="minRowsPerChunk" name="minRowsPerChunk" value="{{.DefaultMinRowsPerChunk}}"></BR>
<LABEL for="sourceReaderCount">Source Reader Count: </LABEL>
<INPUT type="text" id="sourceReaderCount" name="sourceReaderCount" value="{{.DefaultSourceReaderCount}}"></BR>
<LABEL for="writeQueryMaxRows">Maximum Number of Rows per Write Query: </LABEL>
@ -66,8 +70,6 @@ const splitCloneHTML2 = `
<INPUT type="text" id="writeQueryMaxSize" name="writeQueryMaxSize" value="{{.DefaultWriteQueryMaxSize}}"></BR>
<LABEL for="writeQueryMaxRowsDelete">Maximum Number of Rows per DELETE FROM Write Query: </LABEL>
<INPUT type="text" id="writeQueryMaxRowsDelete" name="writeQueryMaxRowsDelete" value="{{.DefaultWriteQueryMaxRowsDelete}}"></BR>
<LABEL for="minTableSizeForSplit">Minimun Table Size For Split: </LABEL>
<INPUT type="text" id="minTableSizeForSplit" name="minTableSizeForSplit" value="{{.DefaultMinTableSizeForSplit}}"></BR>
<LABEL for="destinationWriterCount">Destination Writer Count: </LABEL>
<INPUT type="text" id="destinationWriterCount" name="destinationWriterCount" value="{{.DefaultDestinationWriterCount}}"></BR>
<LABEL for="minHealthyRdonlyTablets">Minimum Number of required healthy RDONLY tablets in the source and destination shard at start: </LABEL>
@ -97,11 +99,12 @@ func commandSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagS
offline := subFlags.Bool("offline", defaultOffline, "do offline copy (exact copy at a specific GTID, required before shard migration, source and destination tablets will be put out of serving during copy)")
excludeTables := subFlags.String("exclude_tables", "", "comma separated list of tables to exclude")
strategy := subFlags.String("strategy", "", "which strategy to use for restore, use 'vtworker SplitClone --strategy=-help k/s' for more info")
chunkCount := subFlags.Int("chunk_count", defaultChunkCount, "number of chunks per table")
minRowsPerChunk := subFlags.Int("min_rows_per_chunk", defaultMinRowsPerChunk, "minimum number of rows per chunk (may reduce --chunk_count)")
sourceReaderCount := subFlags.Int("source_reader_count", defaultSourceReaderCount, "number of concurrent streaming queries to use on the source")
writeQueryMaxRows := subFlags.Int("write_query_max_rows", defaultWriteQueryMaxRows, "maximum number of rows per write query")
writeQueryMaxSize := subFlags.Int("write_query_max_size", defaultWriteQueryMaxSize, "maximum size (in bytes) per write query")
writeQueryMaxRowsDelete := subFlags.Int("write_query_max_rows_delete", defaultWriteQueryMaxRows, "maximum number of rows per DELETE FROM write query")
minTableSizeForSplit := subFlags.Int("min_table_size_for_split", defaultMinTableSizeForSplit, "tables bigger than this size on disk in bytes will be split into source_reader_count chunks if possible")
destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination")
minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets in the source and destination shard at start")
maxTPS := subFlags.Int64("max_tps", defaultMaxTPS, "if non-zero, limit copy to maximum number of (write) transactions/second on the destination (unlimited by default)")
@ -121,7 +124,7 @@ func commandSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagS
if *excludeTables != "" {
excludeTableArray = strings.Split(*excludeTables, ",")
}
worker, err := NewSplitCloneWorker(wr, wi.cell, keyspace, shard, *online, *offline, excludeTableArray, *strategy, *sourceReaderCount, *writeQueryMaxRows, *writeQueryMaxSize, *writeQueryMaxRowsDelete, uint64(*minTableSizeForSplit), *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS)
worker, err := newSplitCloneWorker(wr, wi.cell, keyspace, shard, *online, *offline, excludeTableArray, *strategy, *chunkCount, *minRowsPerChunk, *sourceReaderCount, *writeQueryMaxRows, *writeQueryMaxSize, *writeQueryMaxRowsDelete, *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS)
if err != nil {
return nil, fmt.Errorf("cannot create split clone worker: %v", err)
}
@ -200,11 +203,12 @@ func interactiveSplitClone(ctx context.Context, wi *Instance, wr *wrangler.Wrang
result["Shard"] = shard
result["DefaultOnline"] = defaultOnline
result["DefaultOffline"] = defaultOffline
result["DefaultChunkCount"] = fmt.Sprintf("%v", defaultChunkCount)
result["DefaultMinRowsPerChunk"] = fmt.Sprintf("%v", defaultMinRowsPerChunk)
result["DefaultSourceReaderCount"] = fmt.Sprintf("%v", defaultSourceReaderCount)
result["DefaultWriteQueryMaxRows"] = fmt.Sprintf("%v", defaultWriteQueryMaxRows)
result["DefaultWriteQueryMaxSize"] = fmt.Sprintf("%v", defaultWriteQueryMaxSize)
result["DefaultWriteQueryMaxRowsDelete"] = fmt.Sprintf("%v", defaultWriteQueryMaxRows)
result["DefaultMinTableSizeForSplit"] = fmt.Sprintf("%v", defaultMinTableSizeForSplit)
result["DefaultDestinationWriterCount"] = fmt.Sprintf("%v", defaultDestinationWriterCount)
result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets)
result["DefaultMaxTPS"] = fmt.Sprintf("%v", defaultMaxTPS)
@ -222,6 +226,16 @@ func interactiveSplitClone(ctx context.Context, wi *Instance, wr *wrangler.Wrang
excludeTableArray = strings.Split(excludeTables, ",")
}
strategy := r.FormValue("strategy")
chunkCountStr := r.FormValue("chunkCount")
chunkCount, err := strconv.ParseInt(chunkCountStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse chunkCount: %s", err)
}
minRowsPerChunkStr := r.FormValue("minRowsPerChunk")
minRowsPerChunk, err := strconv.ParseInt(minRowsPerChunkStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse minRowsPerChunk: %s", err)
}
sourceReaderCount, err := strconv.ParseInt(sourceReaderCountStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse sourceReaderCount: %s", err)
@ -241,11 +255,6 @@ func interactiveSplitClone(ctx context.Context, wi *Instance, wr *wrangler.Wrang
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse writeQueryMaxRowsDelete: %s", err)
}
minTableSizeForSplitStr := r.FormValue("minTableSizeForSplit")
minTableSizeForSplit, err := strconv.ParseInt(minTableSizeForSplitStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse minTableSizeForSplit: %s", err)
}
destinationWriterCountStr := r.FormValue("destinationWriterCount")
destinationWriterCount, err := strconv.ParseInt(destinationWriterCountStr, 0, 64)
if err != nil {
@ -263,7 +272,7 @@ func interactiveSplitClone(ctx context.Context, wi *Instance, wr *wrangler.Wrang
}
// start the clone job
wrk, err := NewSplitCloneWorker(wr, wi.cell, keyspace, shard, online, offline, excludeTableArray, strategy, int(sourceReaderCount), int(writeQueryMaxRows), int(writeQueryMaxSize), int(writeQueryMaxRowsDelete), uint64(minTableSizeForSplit), int(destinationWriterCount), int(minHealthyRdonlyTablets), maxTPS)
wrk, err := newSplitCloneWorker(wr, wi.cell, keyspace, shard, online, offline, excludeTableArray, strategy, int(chunkCount), int(minRowsPerChunk), int(sourceReaderCount), int(writeQueryMaxRows), int(writeQueryMaxSize), int(writeQueryMaxRowsDelete), int(destinationWriterCount), int(minHealthyRdonlyTablets), maxTPS)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot create worker: %v", err)
}
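
Since -min_table_size_for_split is gone, chunking is now driven entirely by -chunk_count and -min_rows_per_chunk. As a hedged sketch (the flag names come from the definitions above; the concrete values and the exact argument order are examples only), a SplitClone invocation as the tests would build it looks roughly like this:

args := []string{
	"SplitClone",
	"-online=true",
	"-offline=true",
	"-chunk_count", "10",
	"-min_rows_per_chunk", "10000",
	"-source_reader_count", "10",
	"-write_query_max_rows", "100",
	"-destination_writer_count", "10",
	"ks/-80",
}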

View file

@ -37,6 +37,8 @@ const (
splitCloneTestMin int = 100
// splitCloneTestMax is the maximum value of the primary key.
splitCloneTestMax int = 200
// In the default test case there are 100 rows on the source.
splitCloneTestRowsCount = splitCloneTestMax - splitCloneTestMin
)
var (
@ -78,13 +80,10 @@ type splitCloneTestCase struct {
}
func (tc *splitCloneTestCase) setUp(v3 bool) {
tc.setUpWithConcurreny(v3, 10, 2)
tc.setUpWithConcurreny(v3, 10, 2, splitCloneTestRowsCount)
}
func (tc *splitCloneTestCase) setUpWithConcurreny(v3 bool, concurrency, writeQueryMaxRows int) {
// In the default test case there are 100 rows on the source.
rowsTotal := 100
func (tc *splitCloneTestCase) setUpWithConcurreny(v3 bool, concurrency, writeQueryMaxRows, rowsCount int) {
*useV3ReshardingMode = v3
db := fakesqldb.Register()
tc.ts = zktestserver.New(tc.t, []string{"cell1", "cell2"})
@ -182,8 +181,9 @@ func (tc *splitCloneTestCase) setUpWithConcurreny(v3 bool, concurrency, writeQue
Columns: []string{"msg", "keyspace_id", "id"},
PrimaryKeyColumns: []string{"id"},
Type: tmutils.TableBaseTable,
// Set the table size to a value higher than --min_table_size_for_split.
DataLength: 2048,
// Set the row count to avoid that --min_rows_per_chunk reduces the
// number of chunks.
RowCount: uint64(rowsCount),
},
},
}
@ -198,8 +198,8 @@ func (tc *splitCloneTestCase) setUpWithConcurreny(v3 bool, concurrency, writeQue
}
shqs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
shqs.AddDefaultHealthResponse()
qs := newTestQueryService(tc.t, sourceRdonly.Target(), shqs, 0, 1, sourceRdonly.Tablet.Alias.Uid)
qs.addGeneratedRows(100, 100+rowsTotal)
qs := newTestQueryService(tc.t, sourceRdonly.Target(), shqs, 0, 1, sourceRdonly.Tablet.Alias.Uid, false /* omitKeyspaceID */)
qs.addGeneratedRows(100, 100+rowsCount)
grpcqueryservice.Register(sourceRdonly.RPCServer, qs)
tc.sourceRdonlyQs = append(tc.sourceRdonlyQs, qs)
}
@ -207,7 +207,7 @@ func (tc *splitCloneTestCase) setUpWithConcurreny(v3 bool, concurrency, writeQue
for i, destRdonly := range []*testlib.FakeTablet{leftRdonly1, rightRdonly1, leftRdonly2, rightRdonly2} {
shqs := fakes.NewStreamHealthQueryService(destRdonly.Target())
shqs.AddDefaultHealthResponse()
qs := newTestQueryService(tc.t, destRdonly.Target(), shqs, i%2, 2, destRdonly.Tablet.Alias.Uid)
qs := newTestQueryService(tc.t, destRdonly.Target(), shqs, i%2, 2, destRdonly.Tablet.Alias.Uid, false /* omitKeyspaceID */)
grpcqueryservice.Register(destRdonly.RPCServer, qs)
if i%2 == 0 {
tc.leftRdonlyQs = append(tc.leftRdonlyQs, qs)
@ -222,9 +222,9 @@ func (tc *splitCloneTestCase) setUpWithConcurreny(v3 bool, concurrency, writeQue
// In the default test case there will be 30 inserts per destination shard
// because 10 writer threads will insert 5 rows on each destination shard.
// (100 source rows / 10 writers / 2 shards = 5 rows.)
// (rowsCount (100 by default) / 10 writers / 2 shards = 5 rows.)
// Due to --write_query_max_rows=2 there will be 3 inserts for 5 rows.
rowsPerDestinationShard := rowsTotal / 2
rowsPerDestinationShard := rowsCount / 2
rowsPerThread := rowsPerDestinationShard / concurrency
insertsPerThread := math.Ceil(float64(rowsPerThread) / float64(writeQueryMaxRows))
insertsTotal := int(insertsPerThread) * concurrency
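// Worked example for the default setUp values (illustrative restatement of
// the calculation above): rowsCount=100, concurrency=10, writeQueryMaxRows=2.
//   rowsPerDestinationShard = 100 / 2     = 50
//   rowsPerThread           = 50 / 10     = 5
//   insertsPerThread        = ceil(5 / 2) = 3
//   insertsTotal            = 3 * 10      = 30 inserts per destination shard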
@ -259,8 +259,9 @@ func (tc *splitCloneTestCase) setUpWithConcurreny(v3 bool, concurrency, writeQue
// the rate limit is set very high.
"-max_tps", "9999",
"-write_query_max_rows", strconv.Itoa(writeQueryMaxRows),
"-chunk_count", strconv.Itoa(concurrency),
"-min_rows_per_chunk", strconv.Itoa(rowsPerThread),
"-source_reader_count", strconv.Itoa(concurrency),
"-min_table_size_for_split", "1",
"-destination_writer_count", strconv.Itoa(concurrency),
"ks/-80"}
}
@ -284,8 +285,11 @@ type testQueryService struct {
shardIndex int
shardCount int
tabletUID uint32
fields []*querypb.Field
rows [][]sqltypes.Value
// omitKeyspaceID is true when the returned rows should not contain the
// "keyspace_id" column.
omitKeyspaceID bool
fields []*querypb.Field
rows [][]sqltypes.Value
// mu guards the fields in this group.
mu sync.Mutex
@ -294,7 +298,11 @@ type testQueryService struct {
forceError map[int64]bool
}
func newTestQueryService(t *testing.T, target querypb.Target, shqs *fakes.StreamHealthQueryService, shardIndex, shardCount int, tabletUID uint32) *testQueryService {
func newTestQueryService(t *testing.T, target querypb.Target, shqs *fakes.StreamHealthQueryService, shardIndex, shardCount int, tabletUID uint32, omitKeyspaceID bool) *testQueryService {
fields := v2Fields
if omitKeyspaceID {
fields = v3Fields
}
return &testQueryService{
t: t,
target: target,
@ -302,7 +310,8 @@ func newTestQueryService(t *testing.T, target querypb.Target, shqs *fakes.Stream
shardIndex: shardIndex,
shardCount: shardCount,
tabletUID: tabletUID,
fields: v2Fields,
omitKeyspaceID: omitKeyspaceID,
fields: fields,
forceError: make(map[int64]bool),
}
}
@ -384,11 +393,15 @@ func (sq *testQueryService) addGeneratedRows(from, to int) {
shardIndex := id % 2
if sq.shardCount == 1 || shardIndex == sq.shardIndex {
idValue, _ := sqltypes.BuildValue(int64(id))
rows = append(rows, []sqltypes.Value{
row := []sqltypes.Value{
idValue,
sqltypes.MakeString([]byte(fmt.Sprintf("Text for %v", id))),
sqltypes.MakeString([]byte(fmt.Sprintf("%v", ksids[shardIndex]))),
})
}
if !sq.omitKeyspaceID {
row = append(row, sqltypes.MakeString([]byte(fmt.Sprintf("%v", ksids[shardIndex]))))
}
rows = append(rows, row)
}
}
@ -439,13 +452,24 @@ var v2Fields = []*querypb.Field{
Name: "msg",
Type: sqltypes.VarChar,
},
// TODO(mberlin): Omit keyspace_id in the v3 test.
{
Name: "keyspace_id",
Type: sqltypes.Int64,
},
}
// v3Fields is identical to v2Fields but lacks the "keyspace_id" column.
var v3Fields = []*querypb.Field{
{
Name: "id",
Type: sqltypes.Int64,
},
{
Name: "msg",
Type: sqltypes.VarChar,
},
}
// TestSplitCloneV2_Offline tests the offline phase with an empty destination.
func TestSplitCloneV2_Offline(t *testing.T) {
tc := &splitCloneTestCase{t: t}
@ -458,6 +482,30 @@ func TestSplitCloneV2_Offline(t *testing.T) {
}
}
// TestSplitCloneV2_Offline_HighChunkCount is identical to
// TestSplitCloneV2_Offline but sets the --chunk_count to 1000. Given
// --source_reader_count=10, at most 10 out of the 1000 chunk pipelines will
// get processed concurrently while the other pending ones are blocked.
func TestSplitCloneV2_Offline_HighChunkCount(t *testing.T) {
tc := &splitCloneTestCase{t: t}
tc.setUpWithConcurreny(false /* v3 */, 10, 5 /* writeQueryMaxRows */, 1000 /* rowsCount */)
defer tc.tearDown()
args := make([]string, len(tc.defaultWorkerArgs))
copy(args, tc.defaultWorkerArgs)
// Set -write_query_max_rows to 5.
args[5] = "5"
// Set -chunk_count to 1000.
args[7] = "1000"
// Set -min_rows_per_chunk to 5.
args[9] = "5"
// Run the vtworker command.
if err := runCommand(t, tc.wi, tc.wi.wr, args); err != nil {
t.Fatal(err)
}
}
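// Hedged sketch of the limiting pattern the test comment above refers to (a
// condensed paraphrase, not a verbatim excerpt of the worker code): a
// semaphore sized to sourceReaderCount gates how many per-chunk goroutines
// may run their pipeline at once; the remaining chunk pipelines block in
// Acquire(). runChunksWithLimit and processChunk are illustrative names.
func runChunksWithLimit(chunks []chunk, sourceReaderCount int, processChunk func(chunk)) {
	sema := sync2.NewSemaphore(sourceReaderCount, 0)
	var wg sync.WaitGroup
	for _, c := range chunks {
		wg.Add(1)
		go func(c chunk) {
			defer wg.Done()
			sema.Acquire()
			defer sema.Release()
			processChunk(c)
		}(c)
	}
	wg.Wait()
}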
// TestSplitCloneV2_Offline_RestartStreamingQuery is identical to
// TestSplitCloneV2_Offline but forces SplitClone to restart the streaming
// query on the source before reading the last row.
@ -566,7 +614,7 @@ func TestSplitCloneV2_Reconciliation(t *testing.T) {
tc := &splitCloneTestCase{t: t}
// We reduce the parallelism to 1 to test the order of expected
// insert/update/delete statements on the destination master.
tc.setUpWithConcurreny(false /* v3 */, 1, 10)
tc.setUpWithConcurreny(false /* v3 */, 1, 10, splitCloneTestRowsCount)
defer tc.tearDown()
// We assume that an Online Clone ran before which copied the rows 100-199

View file

@ -1,607 +0,0 @@
// Copyright 2014, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package worker
import (
"errors"
"fmt"
"html/template"
"io"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/youtube/vitess/go/event"
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/sync2"
"github.com/youtube/vitess/go/vt/binlog/binlogplayer"
"github.com/youtube/vitess/go/vt/discovery"
"github.com/youtube/vitess/go/vt/throttler"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/worker/events"
"github.com/youtube/vitess/go/vt/wrangler"
tabletmanagerdatapb "github.com/youtube/vitess/go/vt/proto/tabletmanagerdata"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)
// VerticalSplitCloneWorker will clone the data from a source keyspace/shard
// to a destination keyspace/shard.
type VerticalSplitCloneWorker struct {
StatusWorker
wr *wrangler.Wrangler
cell string
destinationKeyspace string
destinationShard string
tables []string
strategy *splitStrategy
sourceReaderCount int
destinationPackCount int
minTableSizeForSplit uint64
destinationWriterCount int
minHealthyRdonlyTablets int
maxTPS int64
cleaner *wrangler.Cleaner
// populated during WorkerStateInit, read-only after that
sourceKeyspace string
// populated during WorkerStateFindTargets, read-only after that
sourceAlias *topodatapb.TabletAlias
sourceTablet *topodatapb.Tablet
// healthCheck tracks the health of all MASTER and REPLICA tablets.
// It must be closed at the end of the command.
healthCheck discovery.HealthCheck
tsc *discovery.TabletStatsCache
// destinationShardWatchers contains a TopologyWatcher for each destination
// shard. It updates the list of tablets in the healthcheck if replicas are
// added/removed.
// Each watcher must be stopped at the end of the command.
destinationShardWatchers []*discovery.TopologyWatcher
// destinationDbNames stores for each destination keyspace/shard the MySQL
// database name.
// Example Map Entry: test_keyspace/-80 => vt_test_keyspace
destinationDbNames map[string]string
// populated during WorkerStateClone
// tableStatusList holds the status for each table.
tableStatusList tableStatusList
// aliases of tablets that need to have their state refreshed.
// Only populated once, read-only after that.
refreshAliases []*topodatapb.TabletAlias
refreshTablets map[topodatapb.TabletAlias]*topo.TabletInfo
ev *events.VerticalSplitClone
}
// NewVerticalSplitCloneWorker returns a new VerticalSplitCloneWorker object.
func NewVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, destinationKeyspace, destinationShard string, tables []string, strategyStr string, sourceReaderCount, destinationPackCount int, minTableSizeForSplit uint64, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS int64) (Worker, error) {
if len(tables) == 0 {
return nil, errors.New("list of tablets to be split out must not be empty")
}
strategy, err := newSplitStrategy(wr.Logger(), strategyStr)
if err != nil {
return nil, err
}
if maxTPS != throttler.MaxRateModuleDisabled {
wr.Logger().Infof("throttling enabled and set to a max of %v transactions/second", maxTPS)
}
if maxTPS != throttler.MaxRateModuleDisabled && maxTPS < int64(destinationWriterCount) {
return nil, fmt.Errorf("-max_tps must be >= -destination_writer_count: %v >= %v", maxTPS, destinationWriterCount)
}
return &VerticalSplitCloneWorker{
StatusWorker: NewStatusWorker(),
wr: wr,
cell: cell,
destinationKeyspace: destinationKeyspace,
destinationShard: destinationShard,
tables: tables,
strategy: strategy,
sourceReaderCount: sourceReaderCount,
destinationPackCount: destinationPackCount,
minTableSizeForSplit: minTableSizeForSplit,
destinationWriterCount: destinationWriterCount,
minHealthyRdonlyTablets: minHealthyRdonlyTablets,
maxTPS: maxTPS,
cleaner: &wrangler.Cleaner{},
destinationDbNames: make(map[string]string),
ev: &events.VerticalSplitClone{
Cell: cell,
Keyspace: destinationKeyspace,
Shard: destinationShard,
Tables: tables,
Strategy: strategy.String(),
},
}, nil
}
func (vscw *VerticalSplitCloneWorker) setState(state StatusWorkerState) {
vscw.SetState(state)
event.DispatchUpdate(vscw.ev, state.String())
}
func (vscw *VerticalSplitCloneWorker) setErrorState(err error) {
vscw.SetState(WorkerStateError)
event.DispatchUpdate(vscw.ev, "error: "+err.Error())
}
// StatusAsHTML implements the Worker interface
func (vscw *VerticalSplitCloneWorker) StatusAsHTML() template.HTML {
state := vscw.State()
result := "<b>Working on:</b> " + vscw.destinationKeyspace + "/" + vscw.destinationShard + "</br>\n"
result += "<b>State:</b> " + state.String() + "</br>\n"
switch state {
case WorkerStateCloneOffline:
result += "<b>Running</b>:</br>\n"
result += "<b>Copying from</b>: " + topoproto.TabletAliasString(vscw.sourceAlias) + "</br>\n"
statuses, eta := vscw.tableStatusList.format()
result += "<b>ETA</b>: " + eta.String() + "</br>\n"
result += strings.Join(statuses, "</br>\n")
case WorkerStateDone:
result += "<b>Success</b>:</br>\n"
statuses, _ := vscw.tableStatusList.format()
result += strings.Join(statuses, "</br>\n")
}
return template.HTML(result)
}
// StatusAsText implements the Worker interface
func (vscw *VerticalSplitCloneWorker) StatusAsText() string {
state := vscw.State()
result := "Working on: " + vscw.destinationKeyspace + "/" + vscw.destinationShard + "\n"
result += "State: " + state.String() + "\n"
switch state {
case WorkerStateCloneOffline:
result += "Running:\n"
result += "Copying from: " + topoproto.TabletAliasString(vscw.sourceAlias) + "\n"
statuses, eta := vscw.tableStatusList.format()
result += "ETA: " + eta.String() + "\n"
result += strings.Join(statuses, "\n")
case WorkerStateDone:
result += "Success:\n"
statuses, _ := vscw.tableStatusList.format()
result += strings.Join(statuses, "\n")
}
return result
}
// Run implements the Worker interface
func (vscw *VerticalSplitCloneWorker) Run(ctx context.Context) error {
resetVars()
// Run the command.
err := vscw.run(ctx)
// Cleanup.
vscw.setState(WorkerStateCleanUp)
// Reverse any changes e.g. setting the tablet type of a source RDONLY tablet.
cerr := vscw.cleaner.CleanUp(vscw.wr)
if cerr != nil {
if err != nil {
vscw.wr.Logger().Errorf("CleanUp failed in addition to job error: %v", cerr)
} else {
err = cerr
}
}
// Stop healthcheck.
for _, watcher := range vscw.destinationShardWatchers {
watcher.Stop()
}
if vscw.healthCheck != nil {
if err := vscw.healthCheck.Close(); err != nil {
vscw.wr.Logger().Errorf("HealthCheck.Close() failed: %v", err)
}
}
if err != nil {
vscw.setErrorState(err)
return err
}
vscw.setState(WorkerStateDone)
return nil
}
func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error {
// first state: read what we need to do
if err := vscw.init(ctx); err != nil {
return fmt.Errorf("init() failed: %v", err)
}
if err := checkDone(ctx); err != nil {
return err
}
// second state: find targets
if err := vscw.findTargets(ctx); err != nil {
return fmt.Errorf("findTargets() failed: %v", err)
}
if err := checkDone(ctx); err != nil {
return err
}
// third state: copy data
if err := vscw.clone(ctx); err != nil {
return fmt.Errorf("copy() failed: %v", err)
}
if err := checkDone(ctx); err != nil {
return err
}
return nil
}
// init phase:
// - read the destination keyspace, make sure it has 'servedFrom' values
func (vscw *VerticalSplitCloneWorker) init(ctx context.Context) error {
vscw.setState(WorkerStateInit)
// read the keyspace and validate it
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
destinationKeyspaceInfo, err := vscw.wr.TopoServer().GetKeyspace(shortCtx, vscw.destinationKeyspace)
cancel()
if err != nil {
return fmt.Errorf("cannot read destination keyspace %v: %v", vscw.destinationKeyspace, err)
}
if len(destinationKeyspaceInfo.ServedFroms) == 0 {
return fmt.Errorf("destination keyspace %v has no KeyspaceServedFrom", vscw.destinationKeyspace)
}
// validate all serving types, find sourceKeyspace
servingTypes := []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}
servedFrom := ""
for _, st := range servingTypes {
sf := destinationKeyspaceInfo.GetServedFrom(st)
if sf == nil {
return fmt.Errorf("destination keyspace %v is serving type %v", vscw.destinationKeyspace, st)
}
if servedFrom == "" {
servedFrom = sf.Keyspace
} else {
if servedFrom != sf.Keyspace {
return fmt.Errorf("destination keyspace %v is serving from multiple source keyspaces %v and %v", vscw.destinationKeyspace, servedFrom, sf.Keyspace)
}
}
}
vscw.sourceKeyspace = servedFrom
// Verify that filtered replication is not already enabled.
destShardInfo, err := vscw.wr.TopoServer().GetShard(ctx, vscw.destinationKeyspace, vscw.destinationShard)
if len(destShardInfo.SourceShards) > 0 {
return fmt.Errorf("destination shard %v/%v has filtered replication already enabled from a previous resharding (ShardInfo is set)."+
" This requires manual intervention e.g. use vtctl SourceShardDelete to remove it",
vscw.destinationKeyspace, vscw.destinationShard)
}
return nil
}
// findTargets phase:
// - find one rdonly in the source shard
// - mark it as 'worker' pointing back to us
// - get the aliases of all the targets
func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error {
vscw.setState(WorkerStateFindTargets)
// find an appropriate tablet in the source shard
var err error
vscw.sourceAlias, err = FindWorkerTablet(ctx, vscw.wr, vscw.cleaner, nil /* tsc */, vscw.cell, vscw.sourceKeyspace, "0", vscw.minHealthyRdonlyTablets)
if err != nil {
return fmt.Errorf("FindWorkerTablet() failed for %v/%v/0: %v", vscw.cell, vscw.sourceKeyspace, err)
}
vscw.wr.Logger().Infof("Using tablet %v as the source", topoproto.TabletAliasString(vscw.sourceAlias))
// get the tablet info for it
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
ti, err := vscw.wr.TopoServer().GetTablet(shortCtx, vscw.sourceAlias)
cancel()
if err != nil {
return fmt.Errorf("cannot read tablet %v: %v", topoproto.TabletAliasString(vscw.sourceAlias), err)
}
vscw.sourceTablet = ti.Tablet
// stop replication on it
shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout)
err = vscw.wr.TabletManagerClient().StopSlave(shortCtx, vscw.sourceTablet)
cancel()
if err != nil {
return fmt.Errorf("cannot stop replication on tablet %v", topoproto.TabletAliasString(vscw.sourceAlias))
}
wrangler.RecordStartSlaveAction(vscw.cleaner, vscw.sourceTablet)
// Initialize healthcheck and add destination shards to it.
vscw.healthCheck = discovery.NewHealthCheck(*remoteActionsTimeout, *healthcheckRetryDelay, *healthCheckTimeout)
vscw.tsc = discovery.NewTabletStatsCache(vscw.healthCheck, vscw.cell)
watcher := discovery.NewShardReplicationWatcher(vscw.wr.TopoServer(), vscw.healthCheck,
vscw.cell, vscw.destinationKeyspace, vscw.destinationShard,
*healthCheckTopologyRefresh, discovery.DefaultTopoReadConcurrency)
vscw.destinationShardWatchers = append(vscw.destinationShardWatchers, watcher)
// Make sure we find a master for each destination shard and log it.
vscw.wr.Logger().Infof("Finding a MASTER tablet for each destination shard...")
waitCtx, waitCancel := context.WithTimeout(ctx, *waitForHealthyTabletsTimeout)
defer waitCancel()
if err := vscw.tsc.WaitForTablets(waitCtx, vscw.cell, vscw.destinationKeyspace, vscw.destinationShard, []topodatapb.TabletType{topodatapb.TabletType_MASTER}); err != nil {
return fmt.Errorf("cannot find MASTER tablet for destination shard for %v/%v (in cell: %v): %v", vscw.destinationKeyspace, vscw.destinationShard, vscw.cell, err)
}
masters := vscw.tsc.GetHealthyTabletStats(vscw.destinationKeyspace, vscw.destinationShard, topodatapb.TabletType_MASTER)
if len(masters) == 0 {
return fmt.Errorf("cannot find MASTER tablet for destination shard for %v/%v (in cell: %v) in HealthCheck: empty TabletStats list", vscw.destinationKeyspace, vscw.destinationShard, vscw.cell)
}
master := masters[0]
// Get the MySQL database name of the tablet.
keyspaceAndShard := topoproto.KeyspaceShardString(vscw.destinationKeyspace, vscw.destinationShard)
vscw.destinationDbNames[keyspaceAndShard] = topoproto.TabletDbName(master.Tablet)
// TODO(mberlin): Verify on the destination master that the
// _vt.blp_checkpoint table has the latest schema.
vscw.wr.Logger().Infof("Using tablet %v as destination master for %v/%v", topoproto.TabletAliasString(master.Tablet.Alias), vscw.destinationKeyspace, vscw.destinationShard)
vscw.wr.Logger().Infof("NOTE: The used master of a destination shard might change over the course of the copy e.g. due to a reparent. The HealthCheck module will track and log master changes and any error message will always refer the actually used master address.")
return nil
}
// Find all tablets on the destination shard. This should be done immediately before refreshing
// the state on these tablets, to minimize the chances of the topo changing in between.
func (vscw *VerticalSplitCloneWorker) findRefreshTargets(ctx context.Context) error {
refreshAliases, refreshTablets, err := resolveRefreshTabletsForShard(ctx, vscw.destinationKeyspace, vscw.destinationShard, vscw.wr)
if err != nil {
return err
}
vscw.refreshAliases, vscw.refreshTablets = refreshAliases, refreshTablets
return nil
}
// clone phase:
// - copy the data from source tablets to destination masters (with replication on)
// Assumes that the schema has already been created on each destination tablet
// (probably from vtctl's CopySchemaShard)
func (vscw *VerticalSplitCloneWorker) clone(ctx context.Context) error {
vscw.setState(WorkerStateCloneOffline)
start := time.Now()
defer func() {
statsStateDurationsNs.Set(string(WorkerStateCloneOffline), time.Now().Sub(start).Nanoseconds())
}()
// get source schema
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
sourceSchemaDefinition, err := vscw.wr.GetSchema(shortCtx, vscw.sourceAlias, vscw.tables, nil, false /* includeViews */)
cancel()
if err != nil {
return fmt.Errorf("cannot get schema from source %v: %v", topoproto.TabletAliasString(vscw.sourceAlias), err)
}
if len(sourceSchemaDefinition.TableDefinitions) == 0 {
return fmt.Errorf("no tables matching the table filter")
}
for _, td := range sourceSchemaDefinition.TableDefinitions {
if len(td.Columns) == 0 {
return fmt.Errorf("schema for table %v has no columns", td.Name)
}
}
vscw.wr.Logger().Infof("Source tablet has %v tables to copy", len(sourceSchemaDefinition.TableDefinitions))
vscw.tableStatusList.initialize(sourceSchemaDefinition)
// In parallel, setup the channels to send SQL data chunks to
// for each destination tablet.
//
// mu protects firstError
mu := sync.Mutex{}
var firstError error
ctx, cancelCopy := context.WithCancel(ctx)
processError := func(format string, args ...interface{}) {
vscw.wr.Logger().Errorf(format, args...)
mu.Lock()
if firstError == nil {
firstError = fmt.Errorf(format, args...)
cancelCopy()
}
mu.Unlock()
}
destinationWaitGroup := sync.WaitGroup{}
// we create one channel for the destination tablet. It
// is sized to have a buffer of a maximum of
// destinationWriterCount * 2 items, to hopefully
// always have data. We then have
// destinationWriterCount go routines reading from it.
insertChannel := make(chan string, vscw.destinationWriterCount*2)
// Set up the throttler for the destination shard.
keyspaceAndShard := topoproto.KeyspaceShardString(vscw.destinationKeyspace, vscw.destinationShard)
destinationThrottler, err := throttler.NewThrottler(
keyspaceAndShard, "transactions", vscw.destinationWriterCount, vscw.maxTPS, throttler.ReplicationLagModuleDisabled)
if err != nil {
return fmt.Errorf("cannot instantiate throttler: %v", err)
}
for j := 0; j < vscw.destinationWriterCount; j++ {
destinationWaitGroup.Add(1)
go func(threadID int) {
defer destinationWaitGroup.Done()
defer destinationThrottler.ThreadFinished(threadID)
executor := newExecutor(vscw.wr, vscw.tsc, destinationThrottler, vscw.destinationKeyspace, vscw.destinationShard, threadID)
if err := executor.fetchLoop(ctx, insertChannel); err != nil {
processError("executer.FetchLoop failed: %v", err)
}
}(j)
}
// Now for each table, read data chunks and send them to insertChannel
sourceWaitGroup := sync.WaitGroup{}
sema := sync2.NewSemaphore(vscw.sourceReaderCount, 0)
dbName := vscw.destinationDbNames[topoproto.KeyspaceShardString(vscw.destinationKeyspace, vscw.destinationShard)]
for tableIndex, td := range sourceSchemaDefinition.TableDefinitions {
chunks, err := generateChunks(ctx, vscw.wr, vscw.sourceTablet, td, vscw.minTableSizeForSplit, vscw.sourceReaderCount)
if err != nil {
return err
}
vscw.tableStatusList.setThreadCount(tableIndex, len(chunks)-1)
for _, c := range chunks {
sourceWaitGroup.Add(1)
go func(td *tabletmanagerdatapb.TableDefinition, tableIndex int, chunk chunk) {
defer sourceWaitGroup.Done()
sema.Acquire()
defer sema.Release()
vscw.tableStatusList.threadStarted(tableIndex)
// Start streaming from the source tablet.
rr, err := NewRestartableResultReader(ctx, vscw.wr.Logger(), vscw.wr.TopoServer(), vscw.sourceAlias, td, chunk)
if err != nil {
processError("NewRestartableResultReader failed: %v", err)
return
}
defer rr.Close()
// process the data
if err := vscw.processData(ctx, dbName, td, tableIndex, rr, insertChannel, vscw.destinationPackCount); err != nil {
processError("ResultReader failed: %v", err)
}
vscw.tableStatusList.threadDone(tableIndex)
}(td, tableIndex, c)
}
}
sourceWaitGroup.Wait()
close(insertChannel)
destinationWaitGroup.Wait()
// Stop Throttler.
destinationThrottler.Close()
if firstError != nil {
return firstError
}
// then create and populate the blp_checkpoint table
if vscw.strategy.skipPopulateBlpCheckpoint {
vscw.wr.Logger().Infof("Skipping populating the blp_checkpoint table")
} else {
// get the current position from the source
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
status, err := vscw.wr.TabletManagerClient().SlaveStatus(shortCtx, vscw.sourceTablet)
cancel()
if err != nil {
return err
}
queries := make([]string, 0, 4)
queries = append(queries, binlogplayer.CreateBlpCheckpoint()...)
flags := ""
if vscw.strategy.dontStartBinlogPlayer {
flags = binlogplayer.BlpFlagDontStart
}
queries = append(queries, binlogplayer.PopulateBlpCheckpoint(0, status.Position, vscw.maxTPS, throttler.ReplicationLagModuleDisabled, time.Now().Unix(), flags))
vscw.wr.Logger().Infof("Making and populating blp_checkpoint table")
if err := runSQLCommands(ctx, vscw.wr, vscw.tsc, vscw.destinationKeyspace, vscw.destinationShard, dbName, queries); err != nil {
processError("blp_checkpoint queries failed: %v", err)
}
if firstError != nil {
return firstError
}
}
// Now we're done with data copy, update the shard's source info.
if vscw.strategy.skipSetSourceShards {
vscw.wr.Logger().Infof("Skipping setting SourceShard on destination shard.")
} else {
vscw.wr.Logger().Infof("Setting SourceShard on shard %v/%v", vscw.destinationKeyspace, vscw.destinationShard)
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
err := vscw.wr.SetSourceShards(shortCtx, vscw.destinationKeyspace, vscw.destinationShard, []*topodatapb.TabletAlias{vscw.sourceAlias}, vscw.tables)
cancel()
if err != nil {
return fmt.Errorf("Failed to set source shards: %v", err)
}
}
err = vscw.findRefreshTargets(ctx)
if err != nil {
return fmt.Errorf("failed before refreshing state on destination tablets: %v", err)
}
// And force a state refresh (re-read topo) on all destination tablets.
// The master tablet will end up starting filtered replication
// at this point.
for _, tabletAlias := range vscw.refreshAliases {
destinationWaitGroup.Add(1)
go func(ti *topo.TabletInfo) {
defer destinationWaitGroup.Done()
vscw.wr.Logger().Infof("Refreshing state on tablet %v", ti.AliasString())
shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
err := vscw.wr.TabletManagerClient().RefreshState(shortCtx, ti.Tablet)
cancel()
if err != nil {
processError("RefreshState failed on tablet %v: %v", ti.AliasString(), err)
}
}(vscw.refreshTablets[*tabletAlias])
}
destinationWaitGroup.Wait()
return firstError
}
// processData pumps the data out of the provided QueryResultReader.
// It returns any error the source encounters.
func (vscw *VerticalSplitCloneWorker) processData(ctx context.Context, dbName string, td *tabletmanagerdatapb.TableDefinition, tableIndex int, rr ResultReader, insertChannel chan string, destinationPackCount int) error {
// process the data
baseCmd := "INSERT INTO " + escape(dbName) + "." + escape(td.Name) + "(" + strings.Join(escapeAll(td.Columns), ", ") + ") VALUES "
var rows [][]sqltypes.Value
packCount := 0
fields := rr.Fields()
for {
r, err := rr.Next()
if err != nil {
// we are done, see if there was an error
if err != io.EOF {
return err
}
// send the remainder if any
if packCount > 0 {
cmd := baseCmd + makeValueString(fields, rows)
select {
case insertChannel <- cmd:
case <-ctx.Done():
return nil
}
}
return nil
}
// add the rows to our current result
rows = append(rows, r.Rows...)
vscw.tableStatusList.addCopiedRows(tableIndex, len(r.Rows))
// see if we reach the destination pack count
packCount++
if packCount < destinationPackCount {
continue
}
// send the rows to be inserted
cmd := baseCmd + makeValueString(fields, rows)
select {
case insertChannel <- cmd:
case <-ctx.Done():
return nil
}
// and reset our row buffer
rows = nil
packCount = 0
}
}

View file

@ -51,14 +51,24 @@ const verticalSplitCloneHTML2 = `
<form action="/Clones/VerticalSplitClone" method="post">
<LABEL for="tables">Tables: </LABEL>
<INPUT type="text" id="tables" name="tables" value="moving.*"></BR>
<LABEL for="online">Do Online Copy: (optional approximate copy, source and destination tablets will not be put out of serving, minimizes downtime during offline copy)</LABEL>
<INPUT type="checkbox" id="online" name="online" value="true"{{if .DefaultOnline}} checked{{end}}></BR>
<LABEL for="offline">Do Offline Copy: (exact copy at a specific GTID, required before shard migration, source and destination tablets will be put out of serving during copy)</LABEL>
<INPUT type="checkbox" id="offline" name="offline" value="true"{{if .DefaultOnline}} checked{{end}}></BR>
<LABEL for="strategy">Strategy: </LABEL>
<INPUT type="text" id="strategy" name="strategy" value=""></BR>
<LABEL for="chunkCount">Chunk Count: </LABEL>
<INPUT type="text" id="chunkCount" name="chunkCount" value="{{.DefaultChunkCount}}"></BR>
<LABEL for="minRowsPerChunk">Minimun Number of Rows per Chunk (may reduce the Chunk Count): </LABEL>
<INPUT type="text" id="minRowsPerChunk" name="minRowsPerChunk" value="{{.DefaultMinRowsPerChunk}}"></BR>
<LABEL for="sourceReaderCount">Source Reader Count: </LABEL>
<INPUT type="text" id="sourceReaderCount" name="sourceReaderCount" value="{{.DefaultSourceReaderCount}}"></BR>
<LABEL for="destinationPackCount">Destination Pack Count: </LABEL>
<INPUT type="text" id="destinationPackCount" name="destinationPackCount" value="{{.DefaultDestinationPackCount}}"></BR>
<LABEL for="minTableSizeForSplit">Minimun Table Size For Split: </LABEL>
<INPUT type="text" id="minTableSizeForSplit" name="minTableSizeForSplit" value="{{.DefaultMinTableSizeForSplit}}"></BR>
<LABEL for="writeQueryMaxRows">Maximum Number of Rows per Write Query: </LABEL>
<INPUT type="text" id="writeQueryMaxRows" name="writeQueryMaxRows" value="{{.DefaultWriteQueryMaxRows}}"></BR>
<LABEL for="writeQueryMaxSize">Maximum Size (in bytes) per Write Query: </LABEL>
<INPUT type="text" id="writeQueryMaxSize" name="writeQueryMaxSize" value="{{.DefaultWriteQueryMaxSize}}"></BR>
<LABEL for="writeQueryMaxRowsDelete">Maximum Number of Rows per DELETE FROM Write Query: </LABEL>
<INPUT type="text" id="writeQueryMaxRowsDelete" name="writeQueryMaxRowsDelete" value="{{.DefaultWriteQueryMaxRowsDelete}}"></BR>
<LABEL for="destinationWriterCount">Destination Writer Count: </LABEL>
<INPUT type="text" id="destinationWriterCount" name="destinationWriterCount" value="{{.DefaultDestinationWriterCount}}"></BR>
<LABEL for="minHealthyRdonlyTablets">Minimum Number of required healthy RDONLY tablets: </LABEL>
@ -83,11 +93,16 @@ var verticalSplitCloneTemplate = mustParseTemplate("verticalSplitClone", vertica
var verticalSplitCloneTemplate2 = mustParseTemplate("verticalSplitClone2", verticalSplitCloneHTML2)
func commandVerticalSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (Worker, error) {
online := subFlags.Bool("online", defaultOnline, "do online copy (optional approximate copy, source and destination tablets will not be put out of serving, minimizes downtime during offline copy)")
offline := subFlags.Bool("offline", defaultOffline, "do offline copy (exact copy at a specific GTID, required before shard migration, source and destination tablets will be put out of serving during copy)")
tables := subFlags.String("tables", "", "comma separated list of tables to replicate (used for vertical split)")
strategy := subFlags.String("strategy", "", "which strategy to use for restore, use 'vtworker VerticalSplitClone --strategy=-help k/s' for more info")
chunkCount := subFlags.Int("chunk_count", defaultChunkCount, "number of chunks per table")
minRowsPerChunk := subFlags.Int("min_rows_per_chunk", defaultMinRowsPerChunk, "minimum number of rows per chunk (may reduce --chunk_count)")
sourceReaderCount := subFlags.Int("source_reader_count", defaultSourceReaderCount, "number of concurrent streaming queries to use on the source")
destinationPackCount := subFlags.Int("destination_pack_count", defaultDestinationPackCount, "number of packets to pack in one destination insert")
minTableSizeForSplit := subFlags.Int("min_table_size_for_split", defaultMinTableSizeForSplit, "tables bigger than this size on disk in bytes will be split into source_reader_count chunks if possible")
writeQueryMaxRows := subFlags.Int("write_query_max_rows", defaultWriteQueryMaxRows, "maximum number of rows per write query")
writeQueryMaxSize := subFlags.Int("write_query_max_size", defaultWriteQueryMaxSize, "maximum size (in bytes) per write query")
writeQueryMaxRowsDelete := subFlags.Int("write_query_max_rows_delete", defaultWriteQueryMaxRows, "maximum number of rows per DELETE FROM write query")
destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination")
minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets before taking out one")
maxTPS := subFlags.Int64("max_tps", defaultMaxTPS, "if non-zero, limit copy to maximum number of (write) transactions/second on the destination (unlimited by default)")
@ -107,7 +122,7 @@ func commandVerticalSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *fl
if *tables != "" {
tableArray = strings.Split(*tables, ",")
}
worker, err := NewVerticalSplitCloneWorker(wr, wi.cell, keyspace, shard, tableArray, *strategy, *sourceReaderCount, *destinationPackCount, uint64(*minTableSizeForSplit), *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS)
worker, err := newVerticalSplitCloneWorker(wr, wi.cell, keyspace, shard, *online, *offline, tableArray, *strategy, *chunkCount, *minRowsPerChunk, *sourceReaderCount, *writeQueryMaxRows, *writeQueryMaxSize, *writeQueryMaxRowsDelete, *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS)
if err != nil {
return nil, fmt.Errorf("cannot create worker: %v", err)
}
@ -180,9 +195,14 @@ func interactiveVerticalSplitClone(ctx context.Context, wi *Instance, wr *wrangl
// display the input form
result := make(map[string]interface{})
result["Keyspace"] = keyspace
result["DefaultOnline"] = defaultOnline
result["DefaultOffline"] = defaultOffline
result["DefaultChunkCount"] = fmt.Sprintf("%v", defaultChunkCount)
result["DefaultMinRowsPerChunk"] = fmt.Sprintf("%v", defaultMinRowsPerChunk)
result["DefaultSourceReaderCount"] = fmt.Sprintf("%v", defaultSourceReaderCount)
result["DefaultDestinationPackCount"] = fmt.Sprintf("%v", defaultDestinationPackCount)
result["DefaultMinTableSizeForSplit"] = fmt.Sprintf("%v", defaultMinTableSizeForSplit)
result["DefaultWriteQueryMaxRows"] = fmt.Sprintf("%v", defaultWriteQueryMaxRows)
result["DefaultWriteQueryMaxSize"] = fmt.Sprintf("%v", defaultWriteQueryMaxSize)
result["DefaultWriteQueryMaxRowsDelete"] = fmt.Sprintf("%v", defaultWriteQueryMaxRows)
result["DefaultDestinationWriterCount"] = fmt.Sprintf("%v", defaultDestinationWriterCount)
result["DefaultMinHealthyRdonlyTablets"] = fmt.Sprintf("%v", defaultMinHealthyRdonlyTablets)
result["DefaultMaxTPS"] = fmt.Sprintf("%v", defaultMaxTPS)
@ -191,21 +211,40 @@ func interactiveVerticalSplitClone(ctx context.Context, wi *Instance, wr *wrangl
tableArray := strings.Split(tables, ",")
// get other parameters
onlineStr := r.FormValue("online")
online := onlineStr == "true"
offlineStr := r.FormValue("offline")
offline := offlineStr == "true"
strategy := r.FormValue("strategy")
chunkCountStr := r.FormValue("chunkCount")
chunkCount, err := strconv.ParseInt(chunkCountStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse chunkCount: %s", err)
}
minRowsPerChunkStr := r.FormValue("minRowsPerChunk")
minRowsPerChunk, err := strconv.ParseInt(minRowsPerChunkStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse minRowsPerChunk: %s", err)
}
sourceReaderCountStr := r.FormValue("sourceReaderCount")
sourceReaderCount, err := strconv.ParseInt(sourceReaderCountStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse sourceReaderCount: %s", err)
}
destinationPackCountStr := r.FormValue("destinationPackCount")
destinationPackCount, err := strconv.ParseInt(destinationPackCountStr, 0, 64)
writeQueryMaxRowsStr := r.FormValue("writeQueryMaxRows")
writeQueryMaxRows, err := strconv.ParseInt(writeQueryMaxRowsStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse destinationPackCount: %s", err)
return nil, nil, nil, fmt.Errorf("cannot parse writeQueryMaxRows: %s", err)
}
minTableSizeForSplitStr := r.FormValue("minTableSizeForSplit")
minTableSizeForSplit, err := strconv.ParseInt(minTableSizeForSplitStr, 0, 64)
writeQueryMaxSizeStr := r.FormValue("writeQueryMaxSize")
writeQueryMaxSize, err := strconv.ParseInt(writeQueryMaxSizeStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse minTableSizeForSplit: %s", err)
return nil, nil, nil, fmt.Errorf("cannot parse writeQueryMaxSize: %s", err)
}
writeQueryMaxRowsDeleteStr := r.FormValue("writeQueryMaxRowsDelete")
writeQueryMaxRowsDelete, err := strconv.ParseInt(writeQueryMaxRowsDeleteStr, 0, 64)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot parse writeQueryMaxRowsDelete: %s", err)
}
destinationWriterCountStr := r.FormValue("destinationWriterCount")
destinationWriterCount, err := strconv.ParseInt(destinationWriterCountStr, 0, 64)
@ -224,7 +263,7 @@ func interactiveVerticalSplitClone(ctx context.Context, wi *Instance, wr *wrangl
}
// start the clone job
wrk, err := NewVerticalSplitCloneWorker(wr, wi.cell, keyspace, "0", tableArray, strategy, int(sourceReaderCount), int(destinationPackCount), uint64(minTableSizeForSplit), int(destinationWriterCount), int(minHealthyRdonlyTablets), maxTPS)
wrk, err := newVerticalSplitCloneWorker(wr, wi.cell, keyspace, "0", online, offline, tableArray, strategy, int(chunkCount), int(minRowsPerChunk), int(sourceReaderCount), int(writeQueryMaxRows), int(writeQueryMaxSize), int(writeQueryMaxRowsDelete), int(destinationWriterCount), int(minHealthyRdonlyTablets), maxTPS)
if err != nil {
return nil, nil, nil, fmt.Errorf("cannot create worker: %v", err)
}


@ -5,13 +5,9 @@
package worker
import (
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/vt/mysqlctl/replication"
"github.com/youtube/vitess/go/vt/mysqlctl/tmutils"
"github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice"
@ -21,7 +17,6 @@ import (
"github.com/youtube/vitess/go/vt/zktopo/zktestserver"
"golang.org/x/net/context"
querypb "github.com/youtube/vitess/go/vt/proto/query"
tabletmanagerdatapb "github.com/youtube/vitess/go/vt/proto/tabletmanagerdata"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)
@ -33,71 +28,14 @@ const (
verticalSplitCloneTestMax int = 200
)
// verticalTabletServer is a local QueryService implementation to support the tests.
type verticalTabletServer struct {
t *testing.T
*fakes.StreamHealthQueryService
}
func (sq *verticalTabletServer) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]interface{}, sendReply func(reply *sqltypes.Result) error) error {
// Custom parsing of the query we expect
min := verticalSplitCloneTestMin
max := verticalSplitCloneTestMax
var err error
parts := strings.Split(sql, " ")
for _, part := range parts {
if strings.HasPrefix(part, "`id`>=") {
min, err = strconv.Atoi(part[6:])
if err != nil {
return err
}
} else if strings.HasPrefix(part, "`id`<") {
max, err = strconv.Atoi(part[5:])
}
}
sq.t.Logf("verticalTabletServer: got query: %v with min %v max %v", sql, min, max)
// Send the headers
if err := sendReply(&sqltypes.Result{
Fields: []*querypb.Field{
{
Name: "id",
Type: sqltypes.Int64,
},
{
Name: "msg",
Type: sqltypes.VarChar,
},
},
}); err != nil {
return err
}
// Send the values
for i := min; i < max; i++ {
if err := sendReply(&sqltypes.Result{
Rows: [][]sqltypes.Value{
{
sqltypes.MakeString([]byte(fmt.Sprintf("%v", i))),
sqltypes.MakeString([]byte(fmt.Sprintf("Text for %v", i))),
},
},
}); err != nil {
return err
}
}
return nil
}
func createVerticalSplitCloneDestinationFakeDb(t *testing.T, name string, insertCount int) *FakePoolConnection {
f := NewFakePoolConnectionQuery(t, name)
// Provoke a retry to test the error handling. (Let the first write fail.)
f.addExpectedQuery("INSERT INTO `vt_destination_ks`.`moving1`(`id`, `msg`) VALUES (*", errReadOnly)
f.addExpectedQuery("INSERT INTO `vt_destination_ks`.`moving1` (`id`, `msg`) VALUES (*", errReadOnly)
for i := 1; i <= insertCount; i++ {
f.addExpectedQuery("INSERT INTO `vt_destination_ks`.`moving1`(`id`, `msg`) VALUES (*", nil)
f.addExpectedQuery("INSERT INTO `vt_destination_ks`.`moving1` (`id`, `msg`) VALUES (*", nil)
}
expectBlpCheckpointCreationQueries(f)
@ -105,6 +43,10 @@ func createVerticalSplitCloneDestinationFakeDb(t *testing.T, name string, insert
return f
}
// TestVerticalSplitClone will run VerticalSplitClone in the combined
// online and offline mode. The online phase will copy 100 rows from the source
// to the destination and the offline phase won't copy any rows as the source
// has not changed in the meantime.
func TestVerticalSplitClone(t *testing.T) {
db := fakesqldb.Register()
ts := zktestserver.New(t, []string{"cell1", "cell2"})
@ -113,9 +55,7 @@ func TestVerticalSplitClone(t *testing.T) {
sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
sourceRdonly := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
// Create the destination keyspace with the appropriate ServedFromMap
@ -142,7 +82,7 @@ func TestVerticalSplitClone(t *testing.T) {
destRdonly := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, destMaster, destRdonly} {
for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly, destMaster, destRdonly} {
ft.StartActionLoop(t, wi.wr)
defer ft.StopActionLoop(t)
}
@ -155,40 +95,46 @@ func TestVerticalSplitClone(t *testing.T) {
t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
}
for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
sourceRdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
DatabaseSchema: "",
TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
{
Name: "moving1",
Columns: []string{"id", "msg"},
PrimaryKeyColumns: []string{"id"},
Type: tmutils.TableBaseTable,
// Set the table size to a value higher than --min_table_size_for_split.
DataLength: 2048,
},
{
Name: "view1",
Type: tmutils.TableView,
},
// Set up source rdonly which will be used as input for the diff during the clone.
sourceRdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
DatabaseSchema: "",
TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
{
Name: "moving1",
Columns: []string{"id", "msg"},
PrimaryKeyColumns: []string{"id"},
Type: tmutils.TableBaseTable,
// Set the row count so that --min_rows_per_chunk does not reduce the
// number of chunks.
RowCount: 100,
},
}
sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = sourceRdonlyFactory(
t, "vt_source_ks", "moving1", verticalSplitCloneTestMin, verticalSplitCloneTestMax)
sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
GTIDSet: replication.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
}
sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
"START SLAVE",
}
qs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
qs.AddDefaultHealthResponse()
grpcqueryservice.Register(sourceRdonly.RPCServer, &verticalTabletServer{
t: t,
StreamHealthQueryService: qs,
})
{
Name: "view1",
Type: tmutils.TableView,
},
},
}
sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = sourceRdonlyFactory(
t, "vt_source_ks", "moving1", verticalSplitCloneTestMin, verticalSplitCloneTestMax)
sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
GTIDSet: replication.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
}
sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
"START SLAVE",
}
sourceRdonlyShqs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
sourceRdonlyShqs.AddDefaultHealthResponse()
sourceRdonlyQs := newTestQueryService(t, sourceRdonly.Target(), sourceRdonlyShqs, 0, 1, sourceRdonly.Tablet.Alias.Uid, true /* omitKeyspaceID */)
sourceRdonlyQs.addGeneratedRows(verticalSplitCloneTestMin, verticalSplitCloneTestMax)
grpcqueryservice.Register(sourceRdonly.RPCServer, sourceRdonlyQs)
// Set up destination rdonly which will be used as input for the diff during the clone.
destRdonlyShqs := fakes.NewStreamHealthQueryService(destRdonly.Target())
destRdonlyShqs.AddDefaultHealthResponse()
destRdonlyQs := newTestQueryService(t, destRdonly.Target(), destRdonlyShqs, 0, 1, destRdonly.Tablet.Alias.Uid, true /* omitKeyspaceID */)
// This tablet is empty and does not return any rows.
grpcqueryservice.Register(destRdonly.RPCServer, destRdonlyQs)
// We read 100 source rows. sourceReaderCount is set to 10, so
// we'll have 100/10=10 rows per table chunk.
@ -207,6 +153,12 @@ func TestVerticalSplitClone(t *testing.T) {
// Only wait 1 ms between retries, so that the test passes faster
*executeFetchRetryTime = (1 * time.Millisecond)
// Once the online clone has inserted the last rows, modify the destination
// test query service so that it returns them as well.
destMasterFakeDb.getEntry(29).AfterFunc = func() {
destRdonlyQs.addGeneratedRows(verticalSplitCloneTestMin, verticalSplitCloneTestMax)
}
// Run the vtworker command.
args := []string{
"VerticalSplitClone",
@ -216,14 +168,36 @@ func TestVerticalSplitClone(t *testing.T) {
"-max_tps", "9999",
"-tables", "moving.*,view1",
"-source_reader_count", "10",
"-destination_pack_count", "4",
"-min_table_size_for_split", "1",
// Each chunk pipeline will process 10 rows. To spread them out across 3
// write queries, set the max row count per query to 4. (10 = 4+4+2)
"-write_query_max_rows", "4",
"-min_rows_per_chunk", "10",
"-destination_writer_count", "10",
// This test uses only one healthy RDONLY tablet.
"-min_healthy_rdonly_tablets", "1",
"destination_ks/0",
}
if err := runCommand(t, wi, wi.wr, args); err != nil {
t.Fatal(err)
}
if inserts := statsOnlineInsertsCounters.Counts()["moving1"]; inserts != 100 {
t.Errorf("wrong number of rows inserted: got = %v, want = %v", inserts, 100)
}
if updates := statsOnlineUpdatesCounters.Counts()["moving1"]; updates != 0 {
t.Errorf("wrong number of rows updated: got = %v, want = %v", updates, 0)
}
if deletes := statsOnlineDeletesCounters.Counts()["moving1"]; deletes != 0 {
t.Errorf("wrong number of rows deleted: got = %v, want = %v", deletes, 0)
}
if inserts := statsOfflineInsertsCounters.Counts()["moving1"]; inserts != 0 {
t.Errorf("no stats for the offline clone phase should have been modified. got inserts = %v", inserts)
}
if updates := statsOfflineUpdatesCounters.Counts()["moving1"]; updates != 0 {
t.Errorf("no stats for the offline clone phase should have been modified. got updates = %v", updates)
}
if deletes := statsOfflineDeletesCounters.Counts()["moving1"]; deletes != 0 {
t.Errorf("no stats for the offline clone phase should have been modified. got deletes = %v", deletes)
}
wantRetryCount := int64(1)
if got := statsRetryCount.Get(); got != wantRetryCount {


@ -15,6 +15,7 @@ import (
"golang.org/x/net/context"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/vt/concurrency"
"github.com/youtube/vitess/go/vt/mysqlctl/tmutils"
"github.com/youtube/vitess/go/vt/topo"
@ -254,6 +255,11 @@ func (wr *Wrangler) CopySchemaShard(ctx context.Context, sourceTabletAlias *topo
return err
}
err = wr.copyShardMetadata(ctx, sourceTabletAlias, destShardInfo.MasterAlias)
if err != nil {
return err
}
diffs, err := wr.compareSchemas(ctx, sourceTabletAlias, destShardInfo.MasterAlias, tables, excludeTables, includeViews)
if err != nil {
return fmt.Errorf("CopySchemaShard failed because schemas could not be compared initially: %v", err)
@ -306,6 +312,43 @@ func (wr *Wrangler) CopySchemaShard(ctx context.Context, sourceTabletAlias *topo
return nil
}
// copyShardMetadata copies the contents of the _vt.shard_metadata table from
// the source tablet to the destination tablet. It is assumed that the
// destination tablet is a master and that binary logging is not turned off
// when the INSERT statements are executed.
func (wr *Wrangler) copyShardMetadata(ctx context.Context, srcTabletAlias *topodatapb.TabletAlias, destTabletAlias *topodatapb.TabletAlias) error {
presenceResult, err := wr.ExecuteFetchAsDba(ctx, srcTabletAlias, "SELECT 1 FROM information_schema.tables WHERE table_schema = '_vt' AND table_name = 'shard_metadata'", 1, false, false)
if err != nil {
return err
}
if len(presenceResult.Rows) == 0 {
log.Infof("_vt.shard_metadata doesn't exist on the source tablet %v, skipping its copy.", topoproto.TabletAliasString(srcTabletAlias))
return nil
}
dataProto, err := wr.ExecuteFetchAsDba(ctx, srcTabletAlias, "SELECT name, value FROM _vt.shard_metadata", 100, false, false)
if err != nil {
return err
}
data := sqltypes.Proto3ToResult(dataProto)
for _, row := range data.Rows {
name := row[0]
value := row[1]
queryBuf := bytes.Buffer{}
queryBuf.WriteString("INSERT INTO _vt.shard_metadata (name, value) VALUES (")
name.EncodeSQL(&queryBuf)
queryBuf.WriteByte(',')
value.EncodeSQL(&queryBuf)
queryBuf.WriteString(") ON DUPLICATE KEY UPDATE value = ")
value.EncodeSQL(&queryBuf)
_, err := wr.ExecuteFetchAsDba(ctx, destTabletAlias, queryBuf.String(), 0, false, false)
if err != nil {
return err
}
}
return nil
}
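// shardMetadataUpsert is an illustrative sketch, not part of the wrangler:
// it shows the shape of the query copyShardMetadata builds for a single row,
// with %q used only as a rough stand-in for sqltypes.Value.EncodeSQL. For a
// hypothetical row (name "past_schema_change"), re-running the copy simply
// overwrites the destination value thanks to the upsert.
func shardMetadataUpsert(name, value string) string {
	return fmt.Sprintf(
		"INSERT INTO _vt.shard_metadata (name, value) VALUES (%q, %q)"+
			" ON DUPLICATE KEY UPDATE value = %q",
		name, value, value)
}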
// compareSchemas returns nil if the schema of the two tablets referenced by
// "sourceAlias" and "destAlias" are identical. Otherwise, the difference is
// returned as []string.


@ -41,6 +41,7 @@ func TestBackupRestore(t *testing.T) {
db.AddQuery("BEGIN", &sqltypes.Result{})
db.AddQuery("COMMIT", &sqltypes.Result{})
db.AddQueryPattern(`SET @@session\.sql_log_bin = .*`, &sqltypes.Result{})
db.AddQueryPattern(`CREATE TABLE IF NOT EXISTS _vt\.shard_metadata .*`, &sqltypes.Result{})
db.AddQueryPattern(`CREATE TABLE IF NOT EXISTS _vt\.local_metadata .*`, &sqltypes.Result{})
db.AddQueryPattern(`INSERT INTO _vt\.local_metadata .*`, &sqltypes.Result{})


@ -95,10 +95,15 @@ func copySchema(t *testing.T, useShardAsSource bool) {
" PRIMARY KEY (`id`),\n" +
" KEY `by_msg` (`msg`)\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8"
selectInformationSchema := "SELECT 1 FROM information_schema.tables WHERE table_schema = '_vt' AND table_name = 'shard_metadata'"
selectShardMetadata := "SELECT name, value FROM _vt.shard_metadata"
db.AddQuery(changeToDb, &sqltypes.Result{})
db.AddQuery(createDb, &sqltypes.Result{})
db.AddQuery(createTable, &sqltypes.Result{})
db.AddQuery(createTableView, &sqltypes.Result{})
db.AddQuery(selectInformationSchema, &sqltypes.Result{Rows: make([][]sqltypes.Value, 1)})
db.AddQuery(selectShardMetadata, &sqltypes.Result{})
destinationMaster.FakeMysqlDaemon.SchemaFunc = func() (*tabletmanagerdatapb.SchemaDefinition, error) {
if db.GetQueryCalledNum(createTableView) == 1 {
@ -114,8 +119,8 @@ func copySchema(t *testing.T, useShardAsSource bool) {
if err := vp.Run([]string{"CopySchemaShard", "-include-views", source, "ks/-40"}); err != nil {
t.Fatalf("CopySchemaShard failed: %v", err)
}
if count := db.GetQueryCalledNum(changeToDb); count != 3 {
t.Fatalf("CopySchemaShard did not change to the db exactly once. Query count: %v", count)
if count := db.GetQueryCalledNum(changeToDb); count != 5 {
t.Fatalf("CopySchemaShard did not change to the db 5 times. Query count: %v", count)
}
if count := db.GetQueryCalledNum(createDb); count != 1 {
t.Fatalf("CopySchemaShard did not create the db exactly once. Query count: %v", count)
@ -126,4 +131,10 @@ func copySchema(t *testing.T, useShardAsSource bool) {
if count := db.GetQueryCalledNum(createTableView); count != 1 {
t.Fatalf("CopySchemaShard did not create the table view exactly once. Query count: %v", count)
}
if count := db.GetQueryCalledNum(selectInformationSchema); count != 1 {
t.Fatalf("CopySchemaShard did not select data from information_schema.tables exactly once. Query count: %v", count)
}
if count := db.GetQueryCalledNum(selectShardMetadata); count != 1 {
t.Fatalf("CopySchemaShard did not select data from _vt.shard_metadata exactly once. Query count: %v", count)
}
}

78
go/vt/zktopo/convert.go Normal file

@ -0,0 +1,78 @@
package zktopo
import (
"encoding/json"
"path"
"strings"
"github.com/golang/protobuf/proto"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
vschemapb "github.com/youtube/vitess/go/vt/proto/vschema"
)
// This file contains utility functions to maintain backward compatibility
// with old-style non-Backend Zookeeper topologies. The old
// implementations (before 2016-08-17) used to deal with explicit data
// types. We converted them to a generic []byte and path
// interface. But the zookeeper implementation was not compatible with
// this.
// dataType is an enum for possible data types, used for backward
// compatibility.
type dataType int
// Constants for type conversion
const (
// newType is used to indicate a topology object type of
// anything that is added after the topo.Backend refactor,
// i.e. anything that doesn't require conversion between old
// style topologies and the new style ones. The enum values
// after this one cover all types that exist at the moment
// (2016-08-17); the list doesn't need to be expanded when
// something new is saved in the topology, because new data is
// saved in the new style, not in the old one.
newType dataType = iota
srvKeyspaceType
srvVSchemaType
)
// rawDataFromNodeValue converts the data of the given type into a []byte.
// It is mindful of backward compatibility: for newer objects it doesn't do
// anything, but old object types that were stored in JSON format are
// converted to proto3 binary encoding.
func rawDataFromNodeValue(what dataType, data string) ([]byte, error) {
var p proto.Message
switch what {
case srvKeyspaceType:
p = &topodatapb.SrvKeyspace{}
case srvVSchemaType:
p = &vschemapb.SrvVSchema{}
default:
return []byte(data), nil
}
if err := json.Unmarshal([]byte(data), p); err != nil {
return nil, err
}
return proto.Marshal(p)
}
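// exampleConvert is an illustrative sketch, not part of the conversion code:
// it feeds a hypothetical old-style JSON SrvKeyspace payload through
// rawDataFromNodeValue and gets back the proto3 binary encoding that the new
// Backend interface expects.
func exampleConvert() ([]byte, error) {
	oldJSON := `{"sharding_column_name": "keyspace_id"}`
	return rawDataFromNodeValue(srvKeyspaceType, oldJSON)
}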
// oldTypeAndFilePath returns the data type and old file path for a given path.
func oldTypeAndFilePath(cell, filePath string) (dataType, string) {
parts := strings.Split(filePath, "/")
// SrvKeyspace: local cell, /keyspaces/<keyspace>/SrvKeyspace
if len(parts) == 4 && parts[0] == "" && parts[1] == "keyspaces" && parts[3] == "SrvKeyspace" {
return srvKeyspaceType, zkPathForSrvKeyspace(cell, parts[2])
}
// SrvVSchema: local cell, /SrvVSchema
if len(parts) == 2 && parts[1] == "SrvVSchema" {
return srvVSchemaType, zkPathForSrvVSchema(cell)
}
// General case.
return newType, path.Join("/zk", cell, "vt", filePath)
}


@ -115,3 +115,5 @@ func (zkts *Server) PruneActionLogs(zkActionLogPath string, keepCount int) (prun
}
return prunedCount, nil
}
var _ topo.Impl = (*Server)(nil) // compile-time interface check

19
go/vt/zktopo/version.go Normal file

@ -0,0 +1,19 @@
package zktopo
import (
"fmt"
"github.com/youtube/vitess/go/vt/topo"
)
// ZKVersion is zookeeper's idea of a version.
// It implements topo.Version.
// We use the native zookeeper.Stat.Version type, int32.
type ZKVersion int32
// String is part of the topo.Version interface.
func (v ZKVersion) String() string {
return fmt.Sprintf("%v", int32(v))
}
var _ topo.Version = (ZKVersion)(0) // compile-time interface check

91
go/vt/zktopo/watch.go Normal file

@ -0,0 +1,91 @@
package zktopo
import (
"fmt"
zookeeper "github.com/samuel/go-zookeeper/zk"
"golang.org/x/net/context"
"github.com/youtube/vitess/go/vt/topo"
)
func newWatchData(valueType dataType, data string, stats *zookeeper.Stat) *topo.WatchData {
bytes, err := rawDataFromNodeValue(valueType, data)
if err != nil {
return &topo.WatchData{Err: err}
}
return &topo.WatchData{
Contents: bytes,
Version: ZKVersion(stats.Version),
}
}
// Watch is part of the topo.Backend interface
func (zkts *Server) Watch(ctx context.Context, cell string, filePath string) (*topo.WatchData, <-chan *topo.WatchData) {
// Special paths where we need to be backward compatible.
var valueType dataType
valueType, filePath = oldTypeAndFilePath(cell, filePath)
// Get the initial value, set the initial watch
data, stats, watch, err := zkts.zconn.GetW(filePath)
if err != nil {
return &topo.WatchData{Err: convertError(err)}, nil
}
if stats == nil {
// No stats --> node doesn't exist.
return &topo.WatchData{Err: topo.ErrNoNode}, nil
}
wd := newWatchData(valueType, data, stats)
if wd.Err != nil {
return wd, nil
}
c := make(chan *topo.WatchData, 10)
go func() {
for {
// Act on the watch, or on context close.
select {
case event, ok := <-watch:
if !ok {
c <- &topo.WatchData{Err: fmt.Errorf("watch on %v was closed", filePath)}
close(c)
return
}
if event.Err != nil {
c <- &topo.WatchData{Err: fmt.Errorf("received a non-OK event for %v: %v", filePath, event.Err)}
close(c)
return
}
case <-ctx.Done():
// user is not interested any more
c <- &topo.WatchData{Err: ctx.Err()}
close(c)
return
}
// Get the value again, and send it, or error.
data, stats, watch, err = zkts.zconn.GetW(filePath)
if err != nil {
c <- &topo.WatchData{Err: convertError(err)}
close(c)
return
}
if stats == nil {
// No data --> node doesn't exist
c <- &topo.WatchData{Err: topo.ErrNoNode}
close(c)
return
}
wd := newWatchData(valueType, data, stats)
c <- wd
if wd.Err != nil {
close(c)
return
}
}
}()
return wd, c
}
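// watchSrvKeyspace is an illustrative sketch, not part of the zktopo
// package: it shows how a caller might consume the Watch API above. The
// first WatchData carries the current value; the channel then delivers one
// WatchData per change until an error (node deleted, context canceled, or a
// Zookeeper error) closes it. The handler body is a placeholder.
func watchSrvKeyspace(ctx context.Context, zkts *Server, cell, keyspace string) {
	handle := func(contents []byte) {
		// process the new serialized SrvKeyspace here
		_ = contents
	}
	current, changes := zkts.Watch(ctx, cell, "/keyspaces/"+keyspace+"/SrvKeyspace")
	if current.Err != nil {
		return
	}
	handle(current.Contents)
	for wd := range changes {
		if wd.Err != nil {
			return
		}
		handle(wd.Contents)
	}
}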


@ -101,7 +101,8 @@ def setUpModule():
logging.debug('Running the clone worker to start binlog stream...')
utils.run_vtworker(['--cell', 'test_nj',
'SplitClone',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/0'],
auto_log=True)


@ -464,7 +464,8 @@ index by_msg (msg)
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/0'],
worker_rpc_port)
@ -494,7 +495,8 @@ index by_msg (msg)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--exclude_tables', 'unrelated',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/0'],
worker_rpc_port)


@ -385,10 +385,6 @@ primary key (name)
shard_1_rdonly1.tablet_alias, keyspace_shard],
auto_log=True)
# the worker will do everything. We test with source_reader_count=10
# (down from default=20) as connection pool is not big enough for 20.
# min_table_size_for_split is set to 1 as to force a split even on the
# small table we have.
# --max_tps is only specified to enable the throttler and ensure that the
# code is executed. But the intent here is not to throttle the test, hence
# the rate limit is set very high.
@ -396,8 +392,6 @@ primary key (name)
'--command_display_interval', '10ms',
'LegacySplitClone',
'--exclude_tables', 'unrelated',
'--source_reader_count', '10',
'--min_table_size_for_split', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],


@ -269,7 +269,8 @@ index by_msg (msg)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
worker_rpc_port)
@ -295,7 +296,8 @@ index by_msg (msg)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
worker_rpc_port)


@ -464,8 +464,6 @@ primary key (name)
auto_log=True)
# Copy the data from the source to the destination shards.
# min_table_size_for_split is set to 1 as to force a split even on the
# small table we have.
# --max_tps is only specified to enable the throttler and ensure that the
# code is executed. But the intent here is not to throttle the test, hence
# the rate limit is set very high.
@ -475,7 +473,8 @@ primary key (name)
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
@ -499,7 +498,8 @@ primary key (name)
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
@ -525,7 +525,8 @@ primary key (name)
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
@ -559,7 +560,8 @@ primary key (name)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--exclude_tables', 'unrelated',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],


@ -308,7 +308,7 @@ class Tablet(object):
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname in ['information_schema', 'performance_schema', 'mysql', 'sys']:
if dbname in ['information_schema', 'performance_schema', 'mysql', 'sys', '_vt']:
continue
self.drop_db(dbname)


@ -390,8 +390,6 @@ index by_msg (msg)
'unexpected errors for VtgateApiErrorCounts inside %s' % str(v))
def test_vertical_split(self):
# min_table_size_for_split is set to 1 as to force a split even on the
# small table we have.
utils.run_vtctl(['CopySchemaShard', '--tables', 'moving.*,view1',
source_rdonly1.tablet_alias, 'destination_keyspace/0'],
auto_log=True)
@ -400,7 +398,8 @@ index by_msg (msg)
'--command_display_interval', '10ms',
'VerticalSplitClone',
'--tables', 'moving.*,view1',
'--min_table_size_for_split', '1',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'destination_keyspace/0'],
auto_log=True)


@ -384,6 +384,9 @@ class TestBaseSplitClone(unittest.TestCase, base_sharding.BaseShardingTest):
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
# _vt.blp_checkpoint should be dropped to avoid interference between
# test cases
t.mquery('', 'drop table if exists _vt.blp_checkpoint')
t.kill_vttablet()
# we allow failures here as some tablets will be gone sometimes
# (the master tablets after an emergency reparent)


@ -1,6 +1,6 @@
# Vitess Control Panel
This project was generated with [angular-cli](https://github.com/angular/angular-cli) version 1.0.0-beta.8.
This project was generated with [angular-cli](https://github.com/angular/angular-cli) version 1.0.0-beta.11-webpack.2.
## Installation
Once downloaded, run `npm install` and `bower install` to retrieve dependencies. The angular-cli-build.js file controls which node_modules are copied into the dist/vendor folder, and all bower components in the public/bower_components folder are copied into the dist folder at build time. To add more Polymer elements, install them with bower and import them in elements.html.
@ -10,7 +10,7 @@ Run `ng serve` for a dev server. Navigate to `http://localhost:4200/`. The app w
## Code scaffolding
Run `ng generate component component-name` to generate a new component. You can also use `ng generate directive/pipe/service/route/class`.
Run `ng generate component component-name` to generate a new component. You can also use `ng generate directive/pipe/service/class`.
## Build

26
web/vtctld2/angular-cli-build.js vendored

@ -1,26 +0,0 @@
// Angular-CLI build configuration
// This file lists all the node_modules files that will be used in a build
// Also see https://github.com/angular/angular-cli/wiki/3rd-party-libs
/* global require, module */
var Angular2App = require('angular-cli/lib/broccoli/angular2-app');
module.exports = function(defaults) {
return new Angular2App(defaults, {
vendorNpmFiles: [
'systemjs/dist/system-polyfills.js',
'systemjs/dist/system.src.js',
'zone.js/dist/**/*.+(js|js.map)',
'es6-shim/es6-shim.js',
'reflect-metadata/**/*.+(ts|js|js.map)',
'rxjs/**/*.+(js|js.map)',
'@angular/**/*.+(js|js.map)',
'@angular2-material/**/*',
'primeui/**/*',
'primeng/**/*',
'@vaadin/**/*.+(js|js.map)',
'font-awesome/**/*',
]
});
};


@ -1,6 +1,6 @@
{
"project": {
"version": "1.0.0-beta.8",
"version": "1.0.0-beta.11-webpack.2",
"name": "vtctld2"
},
"apps": [


@ -1,13 +0,0 @@
// Angular-CLI server configuration
// Unrelated to environment.dev|prod.ts
/* jshint node: true */
module.exports = function(environment) {
return {
environment: environment,
baseURL: '/',
locationType: 'auto'
};
};


@ -1,56 +0,0 @@
// Test shim for Karma, needed to load files via SystemJS
/*global jasmine, __karma__, window*/
Error.stackTraceLimit = Infinity;
jasmine.DEFAULT_TIMEOUT_INTERVAL = 1000;
__karma__.loaded = function () {
};
var distPath = '/base/dist/';
var appPaths = ['app']; //Add all valid source code folders here
function isJsFile(path) {
return path.slice(-3) == '.js';
}
function isSpecFile(path) {
return path.slice(-8) == '.spec.js';
}
function isAppFile(path) {
return isJsFile(path) && appPaths.some(function(appPath) {
var fullAppPath = distPath + appPath + '/';
return path.substr(0, fullAppPath.length) == fullAppPath;
});
}
var allSpecFiles = Object.keys(window.__karma__.files)
.filter(isSpecFile)
.filter(isAppFile);
// Load our SystemJS configuration.
System.config({
baseURL: distPath
});
System.import('system-config.js').then(function() {
// Load and configure the TestComponentBuilder.
return Promise.all([
System.import('@angular/core/testing'),
System.import('@angular/platform-browser-dynamic/testing')
]).then(function (providers) {
var testing = providers[0];
var testingBrowser = providers[1];
testing.setBaseTestProviders(testingBrowser.TEST_BROWSER_DYNAMIC_PLATFORM_PROVIDERS,
testingBrowser.TEST_BROWSER_DYNAMIC_APPLICATION_PROVIDERS);
});
}).then(function() {
// Finally, load all spec files.
// This will run the tests directly.
return Promise.all(
allSpecFiles.map(function (moduleName) {
return System.import(moduleName);
}));
}).then(__karma__.start, __karma__.error);


@ -4,10 +4,12 @@
module.exports = function (config) {
config.set({
basePath: '..',
frameworks: ['jasmine'],
frameworks: ['jasmine', 'angular-cli'],
plugins: [
require('karma-jasmine'),
require('karma-chrome-launcher')
require('karma-chrome-launcher'),
require('karma-remap-istanbul'),
require('angular-cli/plugins/karma')
],
customLaunchers: {
// chrome setup for travis CI using chromium
@ -17,25 +19,18 @@ module.exports = function (config) {
}
},
files: [
{ pattern: 'dist/vendor/es6-shim/es6-shim.js', included: true, watched: false },
{ pattern: 'dist/vendor/zone.js/dist/zone.js', included: true, watched: false },
{ pattern: 'dist/vendor/reflect-metadata/Reflect.js', included: true, watched: false },
{ pattern: 'dist/vendor/systemjs/dist/system-polyfills.js', included: true, watched: false },
{ pattern: 'dist/vendor/systemjs/dist/system.src.js', included: true, watched: false },
{ pattern: 'dist/vendor/zone.js/dist/async-test.js', included: true, watched: false },
{ pattern: 'dist/vendor/zone.js/dist/fake-async-test.js', included: true, watched: false },
{ pattern: 'config/karma-test-shim.js', included: true, watched: true },
// Distribution folder.
{ pattern: 'dist/**/*', included: false, watched: true }
{ pattern: './src/test.ts', watched: false }
],
exclude: [
// Vendor packages might include spec files. We don't want to use those.
'dist/vendor/**/*.spec.js'
],
preprocessors: {},
reporters: ['progress'],
preprocessors: {
'./src/test.ts': ['angular-cli']
},
remapIstanbulReporter: {
reports: {
html: 'coverage'
}
},
angularCliConfig: './angular-cli.json',
reporters: ['progress', 'karma-remap-istanbul'],
port: 9876,
colors: true,
logLevel: config.LOG_INFO,


@ -4,14 +4,13 @@
"declaration": false,
"emitDecoratorMetadata": true,
"experimentalDecorators": true,
"mapRoot": "",
"module": "commonjs",
"moduleResolution": "node",
"noEmitOnError": true,
"noImplicitAny": false,
"rootDir": ".",
"outDir": "../dist/out-tsc-e2e",
"sourceMap": true,
"sourceRoot": "/",
"target": "es5"
"target": "es5",
"typeRoots": [
"../node_modules/@types"
]
}
}


@ -5,7 +5,6 @@
"angular-cli": {},
"scripts": {
"start": "ng serve",
"postinstall": "typings install",
"lint": "tslint \"src/**/*.ts\"",
"test": "ng test",
"pree2e": "webdriver-manager update",
@ -21,9 +20,6 @@
"@angular/platform-browser": "2.0.0-rc.5",
"@angular/platform-browser-dynamic": "2.0.0-rc.5",
"@angular/router": "3.0.0-rc.1",
"@angular/router-deprecated": "2.0.0-rc.2",
"@angular/upgrade": "2.0.0-rc.5",
"@angular2-material/button": "^2.0.0-alpha.7-2",
"@angular2-material/card": "^2.0.0-alpha.7-2",
"@angular2-material/checkbox": "^2.0.0-alpha.7-2",
@ -35,35 +31,30 @@
"@angular2-material/sidenav": "^2.0.0-alpha.7-2",
"@angular2-material/tabs": "^2.0.0-alpha.7-2",
"@angular2-material/toolbar": "^2.0.0-alpha.7-2",
"@vaadin/angular2-polymer": "^1.0.0-beta2",
"core-js": "^2.4.0",
"primeng": "^1.0.0-beta.9",
"primeui": "^4.1.12",
"es6-shim": "0.35.1",
"systemjs": "0.19.27",
"core-js": "^2.4.0",
"reflect-metadata": "^0.1.3",
"reflect-metadata": "0.1.3",
"rxjs": "5.0.0-beta.6",
"zone.js": "^0.6.12",
"bootstrap": "^3.3.6"
"systemjs": "^0.19.36",
"ts-helpers": "^1.1.1",
"zone.js": "0.6.12"
},
"devDependencies": {
"angular-cli": "1.0.0-beta.8",
"codelyzer": "0.0.20",
"ember-cli-inject-live-reload": "1.4.0",
"@types/jasmine": "^2.2.30",
"@types/protractor": "^1.5.16",
"angular-cli": "1.0.0-beta.11-webpack.2",
"codelyzer": "0.0.26",
"jasmine-core": "2.4.1",
"jasmine-spec-reporter": "2.5.0",
"karma": "0.13.22",
"karma-chrome-launcher": "0.2.3",
"karma-jasmine": "0.3.8",
"karma-remap-istanbul": "^0.2.1",
"protractor": "3.3.0",
"ts-node": "0.5.5",
"tslint": "3.11.0",
"typescript": "1.8.10",
"typings": "0.8.1"
"ts-node": "1.2.1",
"tslint": "3.13.0",
"typescript": "^2.0.0"
}
}

0
web/vtctld2/public/.gitignore vendored Normal file

@ -7,8 +7,8 @@ html, body {
}
.flex-column {
display: flex;
flex-direction: column;
display: flex !important;
flex-direction: column !important;
}
.flex-grow {


@ -0,0 +1,28 @@
import { Http, URLSearchParams } from '@angular/http';
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import 'rxjs/add/observable/interval';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/startWith';
import 'rxjs/add/operator/switchMap';
@Injectable()
export class TabletStatusService {
constructor (private http: Http) {}
getTabletStats(keyspace, cell, tabletType, metric) {
// params stores the key-value pairs to build the query parameter URL.
let params: URLSearchParams = new URLSearchParams();
params.set('metric', metric);
params.set('keyspace', keyspace);
params.set('cell', cell);
params.set('type', tabletType);
return Observable.interval(1000).startWith(0)
.switchMap(() => this.http.get('../api/tablet_statuses/', { search: params })
.map(resp => resp.json()));
}
getTabletHealth(cell: string, uid: number) {
return this.http.get('../api/tablet_health/' + cell + '/' + uid)
.map(resp => resp.json());
}
}


@ -0,0 +1,3 @@
>>> md-sidenav-layout > md-content {
flex-grow: 1;
}


@ -2,7 +2,7 @@
<span>{{title}}</span>
</md-toolbar>
<md-sidenav-layout class="flex-grow">
<md-sidenav-layout class="flex-column flex-grow">
<md-sidenav #sidenav mode="side" opened="true">
<md-nav-list>
<a md-list-item [routerLink]="['/dashboard']"><md-icon>dashboard</md-icon> Dashboard</a>
@ -19,5 +19,5 @@
</a>
</md-nav-list>
</md-sidenav>
<div class="content"><router-outlet></router-outlet></div>
<router-outlet></router-outlet>
</md-sidenav-layout>


@ -0,0 +1,20 @@
/* tslint:disable:no-unused-variable */
import { addProviders, async, inject } from '@angular/core/testing';
import { AppComponent } from './app.component';
describe('App: Vtctld2', () => {
beforeEach(() => {
addProviders([AppComponent]);
});
it('should create the app',
inject([AppComponent], (app: AppComponent) => {
expect(app).toBeTruthy();
}));
it('should have as title \'app works!\'',
inject([AppComponent], (app: AppComponent) => {
expect(app.title).toEqual('app works!');
}));
});


@ -11,7 +11,6 @@ import { TasksComponent } from './tasks/tasks.component';
import './rxjs-operators';
@Component({
moduleId: module.id,
selector: 'app-root',
templateUrl: 'app.component.html',
styleUrls: ['app.component.css'],


@ -1,8 +1,9 @@
import { NgModule, CUSTOM_ELEMENTS_SCHEMA } from '@angular/core';
import { BrowserModule } from '@angular/platform-browser';
import { HTTP_PROVIDERS } from '@angular/http';
import { NgModule, CUSTOM_ELEMENTS_SCHEMA } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { HttpModule } from '@angular/http';
import { AppComponent } from './app.component';
import { AppComponent } from './app.component';
import { DashboardComponent } from './dashboard/dashboard.component';
import { KeyspaceComponent } from './dashboard/keyspace.component';
import { SchemaComponent } from './schema/schema.component';
@ -12,7 +13,6 @@ import { TopoBrowserComponent } from './topo/topo-browser.component';
import { TasksComponent } from './tasks/tasks.component';
import { APP_ROUTER_PROVIDERS, routing } from './app.routes';
import { FormsModule } from '@angular/forms';
import { MdButtonModule } from '@angular2-material/button';
import { MdRippleModule } from '@angular2-material/core/ripple/ripple';
@ -26,7 +26,6 @@ const PolymerComponents = [
];
@NgModule({
imports: [ BrowserModule, routing, FormsModule, MdButtonModule, MdRippleModule],
declarations: [
AppComponent,
DashboardComponent,
@ -38,8 +37,19 @@ const PolymerComponents = [
TopoBrowserComponent,
TasksComponent,
],
providers: [ APP_ROUTER_PROVIDERS, HTTP_PROVIDERS ],
bootstrap: [ AppComponent ],
schemas: [ CUSTOM_ELEMENTS_SCHEMA ],
imports: [
BrowserModule,
FormsModule,
HttpModule,
MdButtonModule,
MdRippleModule,
routing,
],
providers: [APP_ROUTER_PROVIDERS],
entryComponents: [AppComponent],
bootstrap: [AppComponent],
schemas: [CUSTOM_ELEMENTS_SCHEMA],
})
export class AppModule { }
export class AppModule {
}


@ -18,7 +18,6 @@ const routes: Routes = [
{ path: 'tasks', component: TasksComponent},
{ path: 'keyspace', component: KeyspaceComponent},
{ path: 'shard', component: ShardComponent},
];
export const routing = RouterModule.forRoot(routes);


@ -1,46 +1,45 @@
<div class="vt-row">
<div *ngFor="let keyspace of keyspaces" class="vt-card">
<md-card>
<md-card-title>{{keyspace.name}}</md-card-title>
<a class="vt-darkLink" [routerLink]="['/keyspace']" [queryParams]="{keyspace: keyspace.name}">
<md-card-content >
<div class="stats-container">
<md-list>
<md-list-item>
<span class="vt-stat-value-container">
<span class="vt-stat-value">
{{keyspace.servingShards.length}}
</span>
<span class="vt-stat-category">
Serving Shards
</span>
<div *ngFor="let keyspace of keyspaces" class="vt-card">
<md-card>
<md-card-title>{{keyspace.name}}</md-card-title>
<a class="vt-darkLink" [routerLink]="['/keyspace']" [queryParams]="{keyspace: keyspace.name}">
<md-card-content >
<div class="stats-container">
<md-list>
<md-list-item>
<span class="vt-stat-value-container">
<span class="vt-stat-value">
{{keyspace.servingShards.length}}
</span>
</md-list-item>
<md-list-item>
<span class="vt-stat-value-container">
<span class="vt-stat-value">
{{keyspace.nonservingShards.length}}
</span>
<span class="vt-stat-category">
Nonserving Shards
</span>
<span class="vt-stat-category">
Serving Shards
</span>
</md-list-item>
</md-list>
</div>
</md-card-content>
</a>
<md-card-actions>
<div class="vt-actions-container">
<button md-button (click)="blockClicks($event);prepareDelete(keyspace);dialog.open();toggleModal();">Delete</button>
<button md-button (click)="blockClicks($event);prepareEdit(keyspace);dialog.open();toggleModal();">Edit</button>
</span>
</md-list-item>
<md-list-item>
<span class="vt-stat-value-container">
<span class="vt-stat-value">
{{keyspace.nonservingShards.length}}
</span>
<span class="vt-stat-category">
Nonserving Shards
</span>
</span>
</md-list-item>
</md-list>
</div>
</md-card-actions>
</md-card>
</div>
</md-card-content>
</a>
<md-card-actions>
<div class="vt-actions-container">
<button md-button (click)="blockClicks($event);prepareDelete(keyspace);dialog.open();toggleModal();">Delete</button>
<button md-button (click)="blockClicks($event);prepareEdit(keyspace);dialog.open();toggleModal();">Edit</button>
</div>
</md-card-actions>
</md-card>
</div>
<paper-dialog #dialog no-cancel-on-outside-click="true" no-cancel-on-esc-key="true">
<vt-dialog [(dialogContent)]="dialogContent" [(dialogSettings)]="dialogSettings" (close)="dialog.close();"></vt-dialog>
</paper-dialog>


@ -21,7 +21,6 @@ import { Proto } from '../shared/proto';
import { ShardService } from '../api/shard.service';
@Component({
moduleId: module.id,
selector: 'vt-dashboard',
templateUrl: './dashboard.component.html',
styleUrls: ['../styles/vt.style.css'],


@ -3,7 +3,6 @@ import { Component } from '@angular/core';
import { DialogContent } from '../shared/dialog/dialog-content';
@Component({
moduleId: module.id,
selector: 'vt-keyspace-extra',
template:
`


@ -17,7 +17,6 @@ import { PrepareResponse } from '../shared/prepare-response';
import { ShardService } from '../api/shard.service';
@Component({
moduleId: module.id,
selector: 'vt-keyspace-view',
templateUrl: './keyspace.component.html',
styleUrls: ['../styles/vt.style.css'],


@ -17,7 +17,6 @@ import { ShardService } from '../api/shard.service';
import { TabletService } from '../api/tablet.service';
@Component({
moduleId: module.id,
selector: 'vt-shard-view',
templateUrl: './shard.component.html',
styleUrls: ['../styles/vt.style.css'],


@ -1,7 +0,0 @@
// The file for the current environment will overwrite this one during build
// Different environments can be found in config/environment.{dev|prod}.ts
// The build system defaults to the dev environment
export const environment = {
production: false
};


@ -0,0 +1,8 @@
// The file for the current environment will overwrite this one during build.
// Different environments can be found in ./environment.{dev|prod}.ts, and
// you can create your own and use it with the --env flag.
// The build system defaults to the dev environment.
export const environment = {
production: false
};


@ -1,2 +1,3 @@
export * from './environment';
export * from './environments/environment';
export * from './app.component';
export * from './app.module';


@ -2,7 +2,6 @@ import { Component } from '@angular/core';
@Component({
moduleId: module.id,
selector: 'vt-schema',
templateUrl: './schema.component.html',
styleUrls: ['./schema.component.css'],


@ -1,15 +1,10 @@
.add-button-container{
.add-button {
position: fixed;
right: 25px;
bottom: 25px;
}
button {
background-color: rgb(0,150,136);
}
md-icon {
color: white;
padding: 0 !important;
}
}


@ -1,5 +1,3 @@
<div class="add-button-container">
<button md-fab>
<md-icon class="md-48">add</md-icon>
</button>
</div>
<button md-fab class="add-button">
<md-icon class="md-24">add</md-icon>
</button>


@ -4,7 +4,6 @@ import { MdButton } from '@angular2-material/button/button';
import { MdIcon } from '@angular2-material/icon/icon';
@Component({
moduleId: module.id,
selector: 'vt-add-button',
templateUrl: './add-button.component.html',
styleUrls: ['./add-button.component.css'],


@ -11,7 +11,6 @@ class Breadcrumb {
}
@Component({
moduleId: module.id,
selector: 'vt-breadcrumbs',
templateUrl: './breadcrumbs.component.html',
styleUrls: ['./breadcrumbs.component.css'],


@ -13,7 +13,6 @@ import { KeyspaceService } from '../../api/keyspace.service';
import { TabletService } from '../../api/tablet.service';
@Component({
moduleId: module.id,
selector: 'vt-dialog',
templateUrl: './dialog.component.html',
styleUrls: ['./dialog.component.css', '../../styles/vt.style.css'],


@ -4,7 +4,6 @@ import { CORE_DIRECTIVES } from '@angular/common';
import { HeatmapComponent } from './heatmap.component';
@Component({
moduleId: module.id,
selector: 'template-view',
templateUrl: './templateView.component.html',
styleUrls: [],


@ -12,9 +12,9 @@
<!-- The labels, and nested labels, of the map are drawn -->
<div *ngFor="let label of yLabels">
<tr *ngFor="let type of label.tabletTypes; let i = index" [attr.height]="getRowHeight()">
<td *ngIf="i==0" class="bordered" [attr.rowspan]="label.tabletTypes.length">{{label.cell}}</td>
<td class="bordered">{{type}}</td>
<tr *ngFor="let nestedLabel of label.NestedLabels; let isFirst=first" [attr.height]="getRowHeight()">
<td *ngIf="isFirst" [attr.rowspan]="label.Label.Rowspan" class="bordered">{{label.Label.Name}}</td>
<td class="bordered" [attr.rowspan]="nestedLabel.Rowspan">{{nestedLabel.Name}}</td>
</tr>
</div>
</div>
</table>


@ -1,58 +1,66 @@
import { Component, Input, AfterViewInit} from '@angular/core';
import { Component, Input, AfterViewInit, OnInit } from '@angular/core';
import { CORE_DIRECTIVES } from '@angular/common';
import { MD_BUTTON_DIRECTIVES } from '@angular2-material/button';
declare var Plotly: any;
@Component({
moduleId: module.id,
selector: 'vt-heatmap',
templateUrl: './heatmap.component.html',
styleUrls: ['./heatmap.component.css'],
directives: [
CORE_DIRECTIVES,
MD_BUTTON_DIRECTIVES,
]
})
export class HeatmapComponent implements AfterViewInit {
export class HeatmapComponent implements AfterViewInit, OnInit {
@Input() data: number[][];
@Input() aliases: any[][];
// yLabels is an array of objects with 2 properties: the cell and array of tabletTypes.
@Input() yLabels: Array<any>;
@Input() xLabels: Array<string>;
@Input() name: string;
name: string;
plotlyMap: any;
// colorscaleValue defines the gradient for the heatmap.
private colorscaleValue = [
[0.0, '#17A234'],
[0.5, '#A22417'],
[1.0, '#424141'],
[0.0, '#424141'],
[0.5, '#17A234'],
[1.0, '#A22417'],
];
private getRowHeight() { return 50; }
private getXLabelsRowHeight() { return 25; }
static rowHeight = 50;
constructor() {}
// getTotalRows returns the number of rows the entire heatmap should span.
getTotalRows() {
let height = 0;
for (let yLabel of this.yLabels) {
height += yLabel.tabletTypes.length;
if (this.yLabels == null) {
// TODO(pkulshre): fix this when backend is generalized.
return 1;
}
return height;
return this.yLabels.reduce((total, label) => total + label.Label.Rowspan, 0);
}
ngOnInit() {
this.name = 'heatmap';
}
ngAfterViewInit() {
this.drawHeatmap();
let elem = <any>(document.getElementById(this.name));
elem.on('plotly_click', function(data){
alert('clicked');
});
// TODO(pkulshre): get tabletInfo from service.
}.bind(this));
}
drawHeatmap() {
// Settings for the Plotly heatmap.
let chartInfo = [{
z: this.data,
zmin: -10,
zmax: 10,
x: this.xLabels,
colorscale: this.colorscaleValue,
type: 'heatmap',
@ -85,6 +93,6 @@ export class HeatmapComponent implements AfterViewInit {
showlegend: false,
};
Plotly.newPlot(this.name, chartInfo, chartLayout, {scrollZoom: true, displayModeBar: false});
this.plotlyMap = Plotly.newPlot(this.name, chartInfo, chartLayout, {scrollZoom: true, displayModeBar: false});
}
}


@ -1,30 +1,40 @@
<!-- Keyspace selector -->
<paper-dropdown-menu label="Keyspace">
<paper-listbox class="dropdown-content" *ngFor="let k of keyspaces">
<paper-listbox class="dropdown-content" *ngFor="let k of keyspaces; let isLast = last">
<paper-item>{{k}}</paper-item>
<paper-item>all</paper-item>
<paper-item *ngIf="isLast">all</paper-item>
</paper-listbox>
</paper-dropdown-menu>
<!-- Cell selector -->
<paper-dropdown-menu label="Cell">
<paper-listbox class="dropdown-content" *ngFor="let c of cells" >
<paper-listbox class="dropdown-content" *ngFor="let c of cells; let isLast = last" >
<paper-item>{{c}}</paper-item>
<paper-item *ngIf="isLast">all</paper-item>
</paper-listbox>
</paper-dropdown-menu>
<!-- TabletType selector -->
<paper-dropdown-menu label="TabletType">
<paper-listbox class="dropdown-content" *ngFor="let t of tabletType">
<paper-listbox class="dropdown-content" *ngFor="let t of tabletTypes; let isLast = last">
<paper-item>{{t}}</paper-item>
<paper-item *ngIf="isLast">all</paper-item>
</paper-listbox>
</paper-dropdown-menu>
<vt-heatmap
<!-- Metric selector -->
<paper-dropdown-menu label="Metric">
<paper-listbox class="dropdown-content" *ngFor="let m of metrics; let isLast = last">
<paper-item>{{m}}</paper-item>
<paper-item *ngIf="isLast">all</paper-item>
</paper-listbox>
</paper-dropdown-menu>
<!-- Wait until the heatmap data has been obtained from the service -->
<vt-heatmap *ngIf="heatmapDataReady"
[data]="data"
[aliases]="aliases"
[xLabels]="xLabels"
[yLabels]="yLabels"
[name]="name"
>
</vt-heatmap>
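Two details in this template are easy to miss: the "last" local variable exported by *ngFor, which is what lets each listbox append a single extra "all" entry after the final item, and the *ngIf on vt-heatmap, which keeps the child component out of the DOM until its inputs have been populated. A minimal sketch of the same pattern follows; the selector, field names and sample values are invented, and it assumes the era-appropriate CORE_DIRECTIVES setup used elsewhere in this change.

import { Component } from '@angular/core';
import { CORE_DIRECTIVES } from '@angular/common';

@Component({
  selector: 'demo-dropdown',
  directives: [CORE_DIRECTIVES],
  template: `
    <ul>
      <li *ngFor="let k of keyspaces; let isLast = last">
        {{k}}<span *ngIf="isLast"> / all</span>
      </li>
    </ul>
    <div *ngIf="dataReady">Rendered only once dataReady flips to true.</div>
  `,
})
export class DemoDropdownComponent {
  // Invented sample values; the real lists come from the vtctld backend.
  keyspaces = ['test_keyspace', 'lookup'];
  dataReady = false;
}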


@ -1,30 +1,59 @@
import { Component, OnInit, ViewChild } from '@angular/core';
import { CORE_DIRECTIVES } from '@angular/common';
import { HeatmapComponent } from './heatmap.component';
import { TabletStatusService } from '../api/tablet-status.service';
@Component({
moduleId: module.id,
selector: 'vt-status',
templateUrl: './status.component.html',
styleUrls: [],
directives: [
CORE_DIRECTIVES,
HeatmapComponent
],
providers: [
TabletStatusService
]
})
export class StatusComponent implements OnInit {
@ViewChild(HeatmapComponent) heatmap: HeatmapComponent;
// Used for the heatmap component.
private data: number[][];
private aliases: any[][];
// yLabels is an array of structs with the cell and an array of tabletTypes.
private yLabels: Array<any>;
private xLabels: Array<string>;
private name: string;
private heatmapDataReady: boolean = false;
constructor(private tabletService: TabletStatusService) {}
ngOnInit() {
// TODO(pkulshre): Get data and labels from appropriate services.
this.getHeatmapData();
}
getHeatmapData() {
// Subscribe to get updates every second.
this.tabletService.getTabletStats('lag', 'test', 'test_keyspace', 'REPLICA').subscribe(stats => {
this.data = stats.Data;
this.aliases = stats.Aliases;
this.yLabels = stats.Labels;
this.xLabels = [];
for (let i = 0; i < stats.Data[0].length; i++) {
this.xLabels.push('' + i);
}
this.heatmapDataReady = true;
// The heatmap has already been instantiated so it needs to be redrawn.
if (this.heatmap != null) {
this.heatmap.data = this.data;
this.heatmap.aliases = this.aliases;
this.heatmap.yLabels = this.yLabels;
this.heatmap.xLabels = this.xLabels;
this.heatmap.drawHeatmap();
}
});
}
}
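The subscribe call in getHeatmapData only shows the consuming side. TabletStatusService itself is not part of this excerpt, so the class below is a purely hypothetical stand-in that emits an object of the same shape (Data, Aliases, Labels) once a second; the real service, its endpoint and its exact field types may differ.

import { Injectable } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import 'rxjs/add/observable/interval';
import 'rxjs/add/operator/map';

@Injectable()
export class FakeTabletStatusService {
  // Same call shape as the component uses: metric, cell, keyspace, tablet type.
  getTabletStats(metric: string, cell: string, keyspace: string, tabletType: string):
      Observable<{Data: number[][], Aliases: any[][], Labels: any[]}> {
    // Emit a new (fabricated) snapshot every second, mimicking a polling backend.
    return Observable.interval(1000).map(() => ({
      Data: [[1, 2, 3], [4, 5, 6]],
      Aliases: [[null, null, null], [null, null, null]],
      Labels: [{Label: {Name: cell, Rowspan: 2}}],
    }));
  }
}

A component can inject this the way StatusComponent injects the real service above, re-assigning its heatmap inputs and calling drawHeatmap on each emission.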


@ -4,7 +4,6 @@ import { MD_CARD_DIRECTIVES } from '@angular2-material/card';
import { MD_PROGRESS_BAR_DIRECTIVES } from '@angular2-material/progress-bar';
@Component({
moduleId: module.id,
selector: 'vt-tasks',
templateUrl: './tasks.component.html',
styleUrls: ['./tasks.component.css'],


@ -1,3 +1,5 @@
<div class="content">
<h1>{{title}}</h1>
<vt-breadcrumbs [route]="['/topo']" [crumbs]="breadcrumbs"></vt-breadcrumbs>
@ -19,3 +21,5 @@
<md-card-title>Error</md-card-title>
<md-card-content><pre>{{node.Error}}</pre></md-card-content>
</md-card>
</div>


@ -9,7 +9,6 @@ import { TopoDataService } from '../api/topo-data.service';
import { BreadcrumbsComponent } from '../shared/breadcrumbs.component';
@Component({
moduleId: module.id,
selector: 'vt-topo',
templateUrl: './topo-browser.component.html',
styleUrls: ['./topo-browser.component.css'],


@ -3,6 +3,7 @@
<head>
<meta charset="utf-8">
<title>Vitess</title>
<base href=".">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="icon" type="image/x-icon" href="favicon.ico">
@ -38,23 +39,10 @@
<link rel="stylesheet" type="text/css" href="vendor/primeui/primeui-ng-all.min.css"/>
<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
<base href=".">
<script src="bower_components/webcomponentsjs/webcomponents-lite.min.js"></script>
<link rel="import" href="elements.html">
</head>
<body class="flex-column">
<app-root class="flex-column flex-grow">Loading...</app-root>
{{#each scripts.polyfills}}
<script src="{{.}}"></script>
{{/each}}
<script>
document.addEventListener('WebComponentsReady', function() {
System.import('system-config.js').then(function () {
System.import('main');
}).catch(console.error.bind(console));
});
</script>
</body>
</html>


@ -1,13 +1,9 @@
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic';
import { enableProdMode } from '@angular/core';
import { AppModule, environment } from './app/';
if (environment.production) {
enableProdMode();
}
platformBrowserDynamic().bootstrapModule(AppModule);
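The environment.production check above is normally driven by the build rather than set by hand. In a typical angular-cli setup of this era the flag comes from a small per-configuration file roughly like the one below; the path and contents are illustrative, and whether this app wires it the same way is not shown in this excerpt.

// Development-mode environment file; the production build substitutes a copy with
// production set to true. The exact location depends on the angular-cli version in use.
export const environment = {
  production: false,
};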
