From 2e15cb0b70127cdadb408aa177f2b9fa98b71aa3 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Thu, 23 Apr 2015 15:16:01 -0700 Subject: [PATCH 001/128] java/vtgate-client: Process new "Flags" MySQL field in result set. This change breaks the previous behavior: Before, BIGINT values were always mapped to an UnsignedLong by default, unless they were negative. With this change, they will be a UnsignedLong or Long depending on the SQL schema (UNSIGNED flag present or not). If an old VtGate does not send the Flags field, such values will be mapped to a Long instead of an UnsignedLong before. Refactored "FieldType" enum and included it in the "Field" class. --- .../java/com/youtube/vitess/vtgate/Field.java | 209 +++++++++++++++++- .../com/youtube/vitess/vtgate/FieldType.java | 142 ------------ .../vtgate/rpcclient/gorpc/Bsonify.java | 13 +- .../youtube/vitess/vtgate/BsonifyTest.java | 86 ++++--- .../youtube/vitess/vtgate/DateTypesTest.java | 6 +- .../{FieldTypeTest.java => FieldTest.java} | 53 +++-- .../vitess/vtgate/integration/VtGateIT.java | 8 +- 7 files changed, 310 insertions(+), 207 deletions(-) delete mode 100644 java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/FieldType.java rename java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/{FieldTypeTest.java => FieldTest.java} (58%) diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Field.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Field.java index 8e0507f104..3276fcc6ed 100644 --- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Field.java +++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Field.java @@ -1,27 +1,212 @@ package com.youtube.vitess.vtgate; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.primitives.UnsignedLong; + +import com.youtube.vitess.vtgate.Field.Flag; +import com.youtube.vitess.vtgate.Row.Cell; + +import org.apache.commons.lang.CharEncoding; +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.UnsupportedEncodingException; +import java.math.BigDecimal; + public class Field { + /** + * MySQL field flags bitset values e.g. to distinguish between signed and unsigned integer. + * Comments are taken from the original source code. + * These numbers should exactly match values defined in dist/mysql-5.1.52/include/mysql_com.h + */ + public enum Flag { + // VT_ZEROVALUE_FLAG is not part of the MySQL specification and only used in unit tests. 
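+  // Flags combine as a bitmask: e.g. a BIGINT UNSIGNED NOT NULL column carries
+  // VT_NOT_NULL_FLAG | VT_UNSIGNED_FLAG = 1 | 32 = 33, and a single flag is
+  // tested with a bitwise AND, as convertValueToCell() does below.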
+ VT_ZEROVALUE_FLAG(0), + VT_NOT_NULL_FLAG (1), /* Field can't be NULL */ + VT_PRI_KEY_FLAG(2), /* Field is part of a primary key */ + VT_UNIQUE_KEY_FLAG(4), /* Field is part of a unique key */ + VT_MULTIPLE_KEY_FLAG(8), /* Field is part of a key */ + VT_BLOB_FLAG(16), /* Field is a blob */ + VT_UNSIGNED_FLAG(32), /* Field is unsigned */ + VT_ZEROFILL_FLAG(64), /* Field is zerofill */ + VT_BINARY_FLAG(128), /* Field is binary */ + /* The following are only sent to new clients */ + VT_ENUM_FLAG(256), /* field is an enum */ + VT_AUTO_INCREMENT_FLAG(512), /* field is a autoincrement field */ + VT_TIMESTAMP_FLAG(1024), /* Field is a timestamp */ + VT_SET_FLAG(2048), /* field is a set */ + VT_NO_DEFAULT_VALUE_FLAG(4096), /* Field doesn't have default value */ + VT_ON_UPDATE_NOW_FLAG(8192), /* Field is set to NOW on UPDATE */ + VT_NUM_FLAG(32768); /* Field is num (for clients) */ + + public int mysqlFlag; + + Flag(int mysqlFlag) { + this.mysqlFlag = mysqlFlag; + } + } + + /** + * Represents all field types supported by Vitess and their corresponding types in Java. mysqlType + * numbers should exactly match values defined in dist/mysql-5.1.52/include/mysql/mysql_com.h + * + */ + enum FieldType { + VT_DECIMAL(0, BigDecimal.class), + VT_TINY(1, Integer.class), + VT_SHORT(2, Integer.class), + VT_LONG(3, Long.class), + VT_FLOAT(4, Float.class), + VT_DOUBLE(5, Double.class), + VT_NULL(6, null), + VT_TIMESTAMP(7, DateTime.class), + VT_LONGLONG(8, Long.class, UnsignedLong.class), + VT_INT24(9, Integer.class), + VT_DATE(10, DateTime.class), + VT_TIME(11, DateTime.class), + VT_DATETIME(12, DateTime.class), + VT_YEAR(13, Short.class), + VT_NEWDATE(14, DateTime.class), + VT_VARCHAR(15, byte[].class), + VT_BIT(16, byte[].class), + VT_NEWDECIMAL(246, BigDecimal.class), + VT_ENUM(247, String.class), + VT_SET(248, String.class), + VT_TINY_BLOB(249, byte[].class), + VT_MEDIUM_BLOB(250, byte[].class), + VT_LONG_BLOB(251, byte[].class), + VT_BLOB(252, byte[].class), + VT_VAR_STRING(253, byte[].class), + VT_STRING(254, byte[].class), + VT_GEOMETRY(255, byte[].class); + + public final int mysqlType; + public final Class javaType; + public final Class unsignedJavaType; + + FieldType(int mysqlType, Class javaType) { + this.mysqlType = mysqlType; + this.javaType = javaType; + this.unsignedJavaType = javaType; + } + + FieldType(int mysqlType, Class javaType, Class unsignedJavaType) { + this.mysqlType = mysqlType; + this.javaType = javaType; + this.unsignedJavaType = unsignedJavaType; + } + } + private String name; private FieldType type; + private int mysqlFlags; + + public static Field newFieldFromMysql(String name, int mysqlTypeId, int mysqlFlags) { + for (FieldType ft : FieldType.values()) { + if (ft.mysqlType == mysqlTypeId) { + return new Field(name, ft, mysqlFlags); + } + } + + throw new RuntimeException("Unknown MySQL type: " + mysqlTypeId); + } - public Field(String name, FieldType type) { + @VisibleForTesting + static Field newFieldForTest(FieldType fieldType, Flag flag) { + return new Field("dummyField", fieldType, flag.mysqlFlag); + } + + private Field(String name, FieldType type, int mysqlFlags) { this.name = name; this.type = type; + this.mysqlFlags = mysqlFlags; } - public String getName() { - return name; + public Cell convertValueToCell(byte[] bytes) { + if ((mysqlFlags & Flag.VT_UNSIGNED_FLAG.mysqlFlag) != 0) { + return new Cell(name, convert(bytes), type.unsignedJavaType); + } else { + return new Cell(name, convert(bytes), type.javaType); + } } - public void setName(String name) { - this.name = name; - 
} + Object convert(byte[] bytes) { + if (bytes == null || bytes.length == 0) { + return null; + } + String s = null; + try { + s = new String(bytes, CharEncoding.ISO_8859_1); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } - public FieldType getType() { - return type; - } - - public void setType(FieldType type) { - this.type = type; + switch (type) { + case VT_DECIMAL: + return new BigDecimal(s); + case VT_TINY: + return Integer.valueOf(s); + case VT_SHORT: + return Integer.valueOf(s); + case VT_LONG: + return Long.valueOf(s); + case VT_FLOAT: + return Float.valueOf(s); + case VT_DOUBLE: + return Double.valueOf(s); + case VT_NULL: + return null; + case VT_TIMESTAMP: + s = s.replace(' ', 'T'); + return DateTime.parse(s); + case VT_LONGLONG: + // This can be an unsigned or a signed long + if ((mysqlFlags & Flag.VT_UNSIGNED_FLAG.mysqlFlag) != 0) { + return UnsignedLong.valueOf(s); + } else { + return Long.valueOf(s); + } + case VT_INT24: + return Integer.valueOf(s); + case VT_DATE: + return DateTime.parse(s, ISODateTimeFormat.date()); + case VT_TIME: + DateTime d = DateTime.parse(s, DateTimeFormat.forPattern("HH:mm:ss")); + return d; + case VT_DATETIME: + s = s.replace(' ', 'T'); + return DateTime.parse(s); + case VT_YEAR: + return Short.valueOf(s); + case VT_NEWDATE: + return DateTime.parse(s, ISODateTimeFormat.date()); + case VT_VARCHAR: + return bytes; + case VT_BIT: + return bytes; + case VT_NEWDECIMAL: + return new BigDecimal(s); + case VT_ENUM: + return s; + case VT_SET: + return s; + case VT_TINY_BLOB: + return bytes; + case VT_MEDIUM_BLOB: + return bytes; + case VT_LONG_BLOB: + return bytes; + case VT_BLOB: + return bytes; + case VT_VAR_STRING: + return bytes; + case VT_STRING: + return bytes; + case VT_GEOMETRY: + return bytes; + default: + throw new RuntimeException("invalid field type " + this); + } } } diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/FieldType.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/FieldType.java deleted file mode 100644 index 4c09d80bac..0000000000 --- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/FieldType.java +++ /dev/null @@ -1,142 +0,0 @@ -package com.youtube.vitess.vtgate; - -import com.google.common.primitives.UnsignedLong; - -import org.apache.commons.lang.CharEncoding; -import org.joda.time.DateTime; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.ISODateTimeFormat; - -import java.io.UnsupportedEncodingException; -import java.math.BigDecimal; - -/** - * Represents all field types supported by Vitess and their corresponding types in Java. 
mysqlType - * numbers should exactly match values defined in dist/mysql-5.1.52/include/mysql/mysql_com.h - * - */ -public enum FieldType { - VT_DECIMAL(0, BigDecimal.class), - VT_TINY(1, Integer.class), - VT_SHORT(2, Integer.class), - VT_LONG(3, Long.class), - VT_FLOAT(4, Float.class), - VT_DOUBLE(5, Double.class), - VT_NULL(6, null), - VT_TIMESTAMP(7, DateTime.class), - VT_LONGLONG(8, UnsignedLong.class), - VT_INT24(9, Integer.class), - VT_DATE(10, DateTime.class), - VT_TIME(11, DateTime.class), - VT_DATETIME(12, DateTime.class), - VT_YEAR(13, Short.class), - VT_NEWDATE(14, DateTime.class), - VT_VARCHAR(15, byte[].class), - VT_BIT(16, byte[].class), - VT_NEWDECIMAL(246, BigDecimal.class), - VT_ENUM(247, String.class), - VT_SET(248, String.class), - VT_TINY_BLOB(249, byte[].class), - VT_MEDIUM_BLOB(250, byte[].class), - VT_LONG_BLOB(251, byte[].class), - VT_BLOB(252, byte[].class), - VT_VAR_STRING(253, byte[].class), - VT_STRING(254, byte[].class), - VT_GEOMETRY(255, byte[].class); - - public int mysqlType; - public Class javaType; - - FieldType(int mysqlType, Class javaType) { - this.mysqlType = mysqlType; - this.javaType = javaType; - } - - public static FieldType get(int mysqlTypeId) { - for (FieldType ft : FieldType.values()) { - if (ft.mysqlType == mysqlTypeId) { - return ft; - } - } - return null; - } - - public Object convert(byte[] bytes) { - if (bytes == null || bytes.length == 0) { - return null; - } - String s = null; - try { - s = new String(bytes, CharEncoding.ISO_8859_1); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - - switch (this) { - case VT_DECIMAL: - return new BigDecimal(s); - case VT_TINY: - return Integer.valueOf(s); - case VT_SHORT: - return Integer.valueOf(s); - case VT_LONG: - return Long.valueOf(s); - case VT_FLOAT: - return Float.valueOf(s); - case VT_DOUBLE: - return Double.valueOf(s); - case VT_NULL: - return null; - case VT_TIMESTAMP: - s = s.replace(' ', 'T'); - return DateTime.parse(s); - case VT_LONGLONG: - // This can be an unsigned or a signed long - try { - return UnsignedLong.valueOf(s); - } catch (NumberFormatException e) { - return Long.valueOf(s); - } - case VT_INT24: - return Integer.valueOf(s); - case VT_DATE: - return DateTime.parse(s, ISODateTimeFormat.date()); - case VT_TIME: - DateTime d = DateTime.parse(s, DateTimeFormat.forPattern("HH:mm:ss")); - return d; - case VT_DATETIME: - s = s.replace(' ', 'T'); - return DateTime.parse(s); - case VT_YEAR: - return Short.valueOf(s); - case VT_NEWDATE: - return DateTime.parse(s, ISODateTimeFormat.date()); - case VT_VARCHAR: - return bytes; - case VT_BIT: - return bytes; - case VT_NEWDECIMAL: - return new BigDecimal(s); - case VT_ENUM: - return s; - case VT_SET: - return s; - case VT_TINY_BLOB: - return bytes; - case VT_MEDIUM_BLOB: - return bytes; - case VT_LONG_BLOB: - return bytes; - case VT_BLOB: - return bytes; - case VT_VAR_STRING: - return bytes; - case VT_STRING: - return bytes; - case VT_GEOMETRY: - return bytes; - default: - throw new RuntimeException("invalid field type " + this); - } - } -} diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java index c4e60dfe9c..e306778b80 100644 --- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java +++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java @@ -7,7 +7,7 @@ import 
com.youtube.vitess.vtgate.BatchQuery; import com.youtube.vitess.vtgate.BatchQueryResponse; import com.youtube.vitess.vtgate.BindVariable; import com.youtube.vitess.vtgate.Field; -import com.youtube.vitess.vtgate.FieldType; +import com.youtube.vitess.vtgate.Field.Flag; import com.youtube.vitess.vtgate.KeyRange; import com.youtube.vitess.vtgate.KeyspaceId; import com.youtube.vitess.vtgate.Query; @@ -156,8 +156,12 @@ public class Bsonify { BSONObject fieldBson = (BSONObject) field; String fieldName = new String((byte[]) fieldBson.get("Name")); int mysqlType = Ints.checkedCast((Long) fieldBson.get("Type")); - FieldType fieldType = FieldType.get(mysqlType); - fieldList.add(new Field(fieldName, fieldType)); + int mysqlFlags = Flag.VT_ZEROVALUE_FLAG.mysqlFlag; + Object flags = fieldBson.get("Flags"); + if (flags != null) { + mysqlFlags = Ints.checkedCast((Long) flags); + } + fieldList.add(Field.newFieldFromMysql(fieldName, mysqlType, mysqlFlags)); } return fieldList; } @@ -172,8 +176,7 @@ public class Bsonify { for (Object col : cols) { byte[] val = col != null ? (byte[]) col : null; Field field = fieldsIter.next(); - FieldType ft = field.getType(); - cells.add(new Cell(field.getName(), ft.convert(val), ft.javaType)); + cells.add(field.convertValueToCell(val)); } rowList.add(new Row(cells)); } diff --git a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/BsonifyTest.java b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/BsonifyTest.java index 0e83a7d532..ebb827d49b 100644 --- a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/BsonifyTest.java +++ b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/BsonifyTest.java @@ -3,6 +3,8 @@ package com.youtube.vitess.vtgate; import com.google.common.primitives.UnsignedLong; import com.youtube.vitess.vtgate.Exceptions.InvalidFieldException; +import com.youtube.vitess.vtgate.Field.FieldType; +import com.youtube.vitess.vtgate.Field.Flag; import com.youtube.vitess.vtgate.Row.Cell; import com.youtube.vitess.vtgate.cursor.Cursor; import com.youtube.vitess.vtgate.cursor.CursorImpl; @@ -20,27 +22,34 @@ import java.math.BigDecimal; @RunWith(JUnit4.class) public class BsonifyTest { - + @Test public void testResultParse() throws InvalidFieldException { BSONObject result = new BasicBSONObject(); result.put("RowsAffected", UnsignedLong.valueOf("12")); result.put("InsertId", UnsignedLong.valueOf("12345")); + BasicBSONList fields = new BasicBSONList(); - for (long l = 0; l < 4; l++) { - BSONObject field = new BasicBSONObject(); - field.put("Name", ("col_" + l).getBytes()); - field.put("Type", l); - fields.add(field); - } + fields.add(newField("col_0", FieldType.VT_DECIMAL, Flag.VT_ZEROVALUE_FLAG)); + fields.add(newField("col_1", FieldType.VT_TINY, Flag.VT_ZEROVALUE_FLAG)); + fields.add(newField("col_2", FieldType.VT_SHORT, Flag.VT_ZEROVALUE_FLAG)); + fields.add(newField("col_3", FieldType.VT_LONG, Flag.VT_ZEROVALUE_FLAG)); + fields.add(newField("col_4", FieldType.VT_LONGLONG, Flag.VT_ZEROVALUE_FLAG)); + fields.add(newField("col_5", FieldType.VT_LONGLONG, Flag.VT_UNSIGNED_FLAG)); result.put("Fields", fields); + + // Fill each column with the following different values: 0, 1, 2 BasicBSONList rows = new BasicBSONList(); - for (int i = 0; i < 3; i++) { + int rowCount = 2; + for (int i = 0; i <= rowCount; i++) { BasicBSONList row = new BasicBSONList(); row.add(new Double(i).toString().getBytes()); row.add(String.valueOf(i).getBytes()); row.add(String.valueOf(i).getBytes()); row.add(new Long(i).toString().getBytes()); + row.add(new 
Long(i).toString().getBytes()); + row.add(new Long(i).toString().getBytes()); + Assert.assertEquals(fields.size(), row.size()); rows.add(row); } result.put("Rows", rows); @@ -50,25 +59,48 @@ public class BsonifyTest { Assert.assertEquals(12L, cursor.getRowsAffected()); Assert.assertEquals(12345L, cursor.getLastRowId()); - Row firstRow = cursor.next(); - Cell cell0 = firstRow.next(); - Assert.assertEquals("col_0", cell0.getName()); - Assert.assertEquals(BigDecimal.class, cell0.getType()); - Assert.assertEquals(new BigDecimal("0.0"), firstRow.getBigDecimal(cell0.getName())); + for (int i = 0; i <= rowCount; i++) { + Row row = cursor.next(); + Cell cell0 = row.next(); + Assert.assertEquals("col_0", cell0.getName()); + Assert.assertEquals(BigDecimal.class, cell0.getType()); + Assert.assertEquals(new BigDecimal(String.format("%d.0", i)), row.getBigDecimal(cell0.getName())); + + Cell cell1 = row.next(); + Assert.assertEquals("col_1", cell1.getName()); + Assert.assertEquals(Integer.class, cell1.getType()); + Assert.assertEquals(new Integer(i), row.getInt(cell1.getName())); + + Cell cell2 = row.next(); + Assert.assertEquals("col_2", cell2.getName()); + Assert.assertEquals(Integer.class, cell2.getType()); + Assert.assertEquals(new Integer(i), row.getInt(cell2.getName())); + + Cell cell3 = row.next(); + Assert.assertEquals("col_3", cell3.getName()); + Assert.assertEquals(Long.class, cell3.getType()); + Assert.assertEquals(new Long(i), row.getLong(cell3.getName())); + + Cell cell4 = row.next(); + Assert.assertEquals("col_4", cell4.getName()); + Assert.assertEquals(Long.class, cell4.getType()); + Assert.assertEquals(new Long(i), row.getLong(cell4.getName())); + + Cell cell5 = row.next(); + Assert.assertEquals("col_5", cell5.getName()); + Assert.assertEquals(UnsignedLong.class, cell5.getType()); + Assert.assertEquals(UnsignedLong.valueOf(String.format("%d", i)), row.getULong(cell5.getName())); + } + // No more rows left. 
+ Assert.assertFalse(cursor.hasNext()); +} - Cell cell1 = firstRow.next(); - Assert.assertEquals("col_1", cell1.getName()); - Assert.assertEquals(Integer.class, cell1.getType()); - Assert.assertEquals(new Integer(0), firstRow.getInt(cell1.getName())); - - Cell cell2 = firstRow.next(); - Assert.assertEquals("col_2", cell2.getName()); - Assert.assertEquals(Integer.class, cell2.getType()); - Assert.assertEquals(new Integer(0), firstRow.getInt(cell2.getName())); - - Cell cell3 = firstRow.next(); - Assert.assertEquals("col_3", cell3.getName()); - Assert.assertEquals(Long.class, cell3.getType()); - Assert.assertEquals(new Long(0), firstRow.getLong(cell3.getName())); + private BSONObject newField(String name, FieldType fieldType, Flag flag) { + BSONObject field = new BasicBSONObject(); + field.put("Name", name.getBytes()); + field.put("Type", (long) fieldType.mysqlType); + field.put("Flags", (long) flag.mysqlFlag); + + return field; } } diff --git a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/DateTypesTest.java b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/DateTypesTest.java index f2e7efcc3a..2b64034621 100644 --- a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/DateTypesTest.java +++ b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/DateTypesTest.java @@ -1,5 +1,8 @@ package com.youtube.vitess.vtgate; +import com.youtube.vitess.vtgate.Field.FieldType; +import com.youtube.vitess.vtgate.Field.Flag; + import org.joda.time.DateTime; import org.junit.Assert; import org.junit.Test; @@ -37,7 +40,8 @@ public class DateTypesTest { } private void check(FieldType typeUnderTest, DateTime dt, byte[] bytes) throws ParseException { - Object o = typeUnderTest.convert(bytes); + Field f = Field.newFieldForTest(typeUnderTest, Flag.VT_ZEROVALUE_FLAG); + Object o = f.convert(bytes); Assert.assertEquals(DateTime.class, o.getClass()); Assert.assertEquals(dt, (DateTime) o); } diff --git a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/FieldTypeTest.java b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/FieldTest.java similarity index 58% rename from java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/FieldTypeTest.java rename to java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/FieldTest.java index 683fd163db..6fc5f26802 100644 --- a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/FieldTypeTest.java +++ b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/FieldTest.java @@ -3,6 +3,9 @@ package com.youtube.vitess.vtgate; import com.google.common.collect.Lists; import com.google.common.primitives.UnsignedLong; +import com.youtube.vitess.vtgate.Field.FieldType; +import com.youtube.vitess.vtgate.Field.Flag; + import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -11,14 +14,17 @@ import org.junit.runners.JUnit4; import java.math.BigDecimal; import java.util.List; + + @RunWith(JUnit4.class) -public class FieldTypeTest { +public class FieldTest { @Test public void testDouble() { List typesToTest = Lists.newArrayList(FieldType.VT_DOUBLE); String val = "1000.01"; for (FieldType type : typesToTest) { - Object o = type.convert(val.getBytes()); + Field f = Field.newFieldForTest(type, Flag.VT_ZEROVALUE_FLAG); + Object o = f.convert(val.getBytes()); Assert.assertEquals(Double.class, o.getClass()); Assert.assertEquals(1000.01, ((Double) o).doubleValue(), 0.01); } @@ -29,7 +35,8 @@ public class FieldTypeTest { List typesToTest = Lists.newArrayList(FieldType.VT_DECIMAL, FieldType.VT_NEWDECIMAL); 
String val = "1000.01"; for (FieldType type : typesToTest) { - Object o = type.convert(val.getBytes()); + Field f = Field.newFieldForTest(type, Flag.VT_ZEROVALUE_FLAG); + Object o = f.convert(val.getBytes()); Assert.assertEquals(BigDecimal.class, o.getClass()); Assert.assertEquals(1000.01, ((BigDecimal) o).doubleValue(), 0.01); } @@ -41,39 +48,53 @@ public class FieldTypeTest { Lists.newArrayList(FieldType.VT_TINY, FieldType.VT_SHORT, FieldType.VT_INT24); String val = "1000"; for (FieldType type : typesToTest) { - Object o = type.convert(val.getBytes()); + Field f = Field.newFieldForTest(type, Flag.VT_ZEROVALUE_FLAG); + Object o = f.convert(val.getBytes()); Assert.assertEquals(Integer.class, o.getClass()); Assert.assertEquals(1000, ((Integer) o).intValue()); } } @Test - public void testLong() { + public void testLong_LONG() { String val = "1000"; - Object o = FieldType.VT_LONG.convert(val.getBytes()); + Field f = Field.newFieldForTest(FieldType.VT_LONG, Flag.VT_ZEROVALUE_FLAG); + Object o = f.convert(val.getBytes()); Assert.assertEquals(Long.class, o.getClass()); Assert.assertEquals(1000L, ((Long) o).longValue()); } + @Test + public void testLong_LONGLONG() { + String val = "10000000000000"; + Field f = Field.newFieldForTest(FieldType.VT_LONGLONG, Flag.VT_ZEROVALUE_FLAG); + Object o = f.convert(val.getBytes()); + Assert.assertEquals(Long.class, o.getClass()); + Assert.assertEquals(10000000000000L, ((Long) o).longValue()); + } + + @Test + public void testLong_LONGLONG_UNSIGNED() { + String val = "10000000000000"; + Field f = Field.newFieldForTest(FieldType.VT_LONGLONG, Flag.VT_UNSIGNED_FLAG); + Object o = f.convert(val.getBytes()); + Assert.assertEquals(UnsignedLong.class, o.getClass()); + Assert.assertEquals(10000000000000L, ((UnsignedLong) o).longValue()); + } + @Test public void testNull() { - Object o = FieldType.VT_NULL.convert(null); + Field f = Field.newFieldForTest(FieldType.VT_NULL, Flag.VT_ZEROVALUE_FLAG); + Object o = f.convert(null); Assert.assertEquals(null, o); } @Test public void testFloat() { String val = "1000.01"; - Object o = FieldType.VT_FLOAT.convert(val.getBytes()); + Field f = Field.newFieldForTest(FieldType.VT_FLOAT, Flag.VT_ZEROVALUE_FLAG); + Object o = f.convert(val.getBytes()); Assert.assertEquals(Float.class, o.getClass()); Assert.assertEquals(1000.01, ((Float) o).floatValue(), 0.1); } - - @Test - public void testULong() { - String val = "10000000000000"; - Object o = FieldType.VT_LONGLONG.convert(val.getBytes()); - Assert.assertEquals(UnsignedLong.class, o.getClass()); - Assert.assertEquals(10000000000000L, ((UnsignedLong) o).longValue()); - } } diff --git a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java index 0562261732..4908dd8ebb 100644 --- a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java +++ b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java @@ -1,4 +1,4 @@ -package com.youtube.vitess.vtgate.integration; + package com.youtube.vitess.vtgate.integration; import com.google.common.collect.Lists; import com.google.common.primitives.UnsignedLong; @@ -116,8 +116,8 @@ public class VtGateIT { Row row = cursor.next(); Cell idCell = row.next(); Assert.assertEquals("id", idCell.getName()); - Assert.assertEquals(UnsignedLong.class, idCell.getType()); - UnsignedLong id = row.getULong(idCell.getName()); + Assert.assertEquals(Long.class, idCell.getType()); + Long id = 
row.getLong(idCell.getName()); Cell nameCell = row.next(); Assert.assertEquals("name", nameCell.getName()); @@ -307,7 +307,7 @@ public class VtGateIT { List actual = new ArrayList<>(); for (Cursor cursor : cursors) { for (Row row : cursor) { - actual.add(row.getULong("id").longValue()); + actual.add(row.getLong("id").longValue()); } } Assert.assertTrue(expected.equals(actual)); From 54115af88c49fd565adbd8ed7eda415f3606f731 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Mon, 11 May 2015 14:45:41 -0700 Subject: [PATCH 002/128] Check for unconverted Markdown lists after running Jekyll. If the writer doesn't put a newline before the list, it might look fine in GitHub, but get passed through as plain text in Jekyll. It's hard to detect this case in the raw Markdown without actually parsing Markdown, because we'd need to distinguish between the first item of a list, and subsequent items. However, after running Jekyll, all Markdown lists should have been converted, so we look for any unconverted lists before publishing. --- publish-site.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/publish-site.sh b/publish-site.sh index 44e8826dcf..db5f27e88e 100755 --- a/publish-site.sh +++ b/publish-site.sh @@ -55,6 +55,19 @@ git mv vitess.io/LICENSE . rm -rf vitess.io +# pre-commit checks +set +e +list=$(find . -name '*.html' | xargs grep -lE '^\s*([\-\*]|\d\.) ') +if [ -n "$list" ]; then + echo + echo "ERROR: The following pages appear to contain bulleted lists that weren't properly converted." + echo "Make sure all bulleted lists have a blank line before them." + echo + echo "$list" + exit 1 +fi +set -e + git add -u git commit -m "publish site `date`" From da20fe2a4eaee5eb85afaf99214eb3ec1535a244 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Mon, 11 May 2015 15:26:10 -0700 Subject: [PATCH 003/128] add dbname_override to java_vtgate_test_helper By adding dbname_override flag, it makes caller be able to specify a different dbname other than the default: vt_ + keyspace --- test/java_vtgate_test_helper.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/test/java_vtgate_test_helper.py b/test/java_vtgate_test_helper.py index d6c6b0a633..511e70e68d 100644 --- a/test/java_vtgate_test_helper.py +++ b/test/java_vtgate_test_helper.py @@ -44,6 +44,7 @@ class TestEnv(object): self.schema = options.schema self.vschema = options.vschema self.vtgate_port = options.vtgate_port + self.dbname_override = options.dbname_override self.tablets = [] tablet_config = json.loads(options.tablet_config) for shard in options.shards.split(','): @@ -62,10 +63,16 @@ class TestEnv(object): t.init_tablet(t.type, keyspace=self.keyspace, shard=t.shard) utils.run_vtctl(['RebuildKeyspaceGraph', self.keyspace], auto_log=True) for t in self.tablets: - t.create_db('vt_' + self.keyspace) + dbname = 'vt_' + self.keyspace + + if self.dbname_override: + dbname = self.dbname_override + + t.create_db(dbname) t.start_vttablet( wait_for_state=None, - extra_args=['-queryserver-config-schema-reload-time', '1'], + extra_args=['-queryserver-config-schema-reload-time', '1', + '-init_db_name_override', dbname], ) for t in self.tablets: t.wait_for_vttablet_state('SERVING') @@ -107,6 +114,7 @@ def parse_args(): parser.add_option("--tablet-config", action="store", type="string", help="json config for for non-master tablets. 
e.g {'replica':2, 'rdonly':1}") parser.add_option("--keyspace", action="store", type="string") + parser.add_option("--dbname-override", action="store", type="string") parser.add_option("--schema", action="store", type="string") parser.add_option("--vschema", action="store", type="string") parser.add_option("--vtgate-port", action="store", type="int") From 5716f02e81de29429b1b64bf46af7d9183f90449 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Mon, 11 May 2015 15:48:12 -0700 Subject: [PATCH 004/128] remove VtGateExecutor and add TabletExecutor 1. Stop applying schema changes through VtGate, using tabletmanager instead. 2. Update Executor api to not accept a list of shards as input. Schema changes need to apply to all shards. 3. Fix topo in SimpleDataSourcer. --- go/cmd/vtctld/vtctld.go | 17 ++- go/vt/schemamanager/schemamanager.go | 7 +- go/vt/schemamanager/schemamanager_test.go | 73 ++++++---- go/vt/schemamanager/simple_data_sourcer.go | 8 +- go/vt/schemamanager/tablet_executor.go | 147 +++++++++++++++++++ go/vt/schemamanager/vtgate_executor.go | 150 -------------------- go/vt/schemamanager/vtgate_executor_test.go | 140 ------------------ 7 files changed, 211 insertions(+), 331 deletions(-) create mode 100644 go/vt/schemamanager/tablet_executor.go delete mode 100644 go/vt/schemamanager/vtgate_executor.go delete mode 100644 go/vt/schemamanager/vtgate_executor_test.go diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go index 8b5fb7a7b6..b7472acb12 100644 --- a/go/cmd/vtctld/vtctld.go +++ b/go/cmd/vtctld/vtctld.go @@ -5,7 +5,6 @@ import ( "fmt" "net/http" "strings" - "time" "golang.org/x/net/context" @@ -485,16 +484,16 @@ func main() { } sqlStr := r.FormValue("data") keyspace := r.FormValue("keyspace") - shards, err := ts.GetShardNames(keyspace) - if err != nil { - httpError(w, "error getting shards for keyspace: <"+keyspace+">, error: %v", err) - } + executor := schmgr.NewTabletExecutor( + tmclient.NewTabletManagerClient(), + ts, + keyspace) + schmgr.Run( - schmgr.NewSimepleDataSourcer(sqlStr), - schmgr.NewVtGateExecutor( - keyspace, nil, 1*time.Second), + schmgr.NewSimpleDataSourcer(sqlStr), + executor, uihandler.NewUIEventHandler(w), - shards) + ) }) servenv.RunDefault() } diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index f7fd7fabf4..dd1081538e 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -29,7 +29,7 @@ type EventHandler interface { type Executor interface { Open() error Validate(sqls []string) error - Execute(sqls []string, shards []string) *ExecuteResult + Execute(sqls []string) *ExecuteResult Close() error } @@ -57,8 +57,7 @@ type ShardResult struct { // Run schema changes on Vitess through VtGate func Run(sourcer DataSourcer, exec Executor, - handler EventHandler, - shards []string) error { + handler EventHandler) error { if err := sourcer.Open(); err != nil { log.Errorf("failed to open data sourcer: %v", err) return err @@ -80,7 +79,7 @@ func Run(sourcer DataSourcer, return handler.OnValidationFail(err) } handler.OnValidationSuccess(sqls) - result := exec.Execute(sqls, shards) + result := exec.Execute(sqls) handler.OnExecutorComplete(result) return nil } diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 485b92182c..432874e73c 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -7,10 +7,12 @@ package schemamanager import ( "errors" "testing" - "time" - fakevtgateconn 
"github.com/youtube/vitess/go/vt/vtgate/fakerpcvtgateconn" - "golang.org/x/net/context" + "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" + "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/topo/test/faketopo" + + _ "github.com/youtube/vitess/go/vt/tabletmanager/gorpctmclient" ) var ( @@ -22,9 +24,8 @@ var ( func TestRunSchemaChangesDataSourcerOpenFail(t *testing.T) { dataSourcer := newFakeDataSourcer([]string{"select * from test_db"}, true, false, false) handler := newFakeHandler() - fakeConn := newFakeVtGateConn() - exec := newFakeVtGateExecutor(fakeConn) - err := Run(dataSourcer, exec, handler, []string{"0", "1", "2"}) + exec := newFakeExecutor() + err := Run(dataSourcer, exec, handler) if err != errDataSourcerOpen { t.Fatalf("data sourcer open fail, shoud get error: %v, but get error: %v", errDataSourcerOpen, err) @@ -34,9 +35,8 @@ func TestRunSchemaChangesDataSourcerOpenFail(t *testing.T) { func TestRunSchemaChangesDataSourcerReadFail(t *testing.T) { dataSourcer := newFakeDataSourcer([]string{"select * from test_db"}, false, true, false) handler := newFakeHandler() - fakeConn := newFakeVtGateConn() - exec := newFakeVtGateExecutor(fakeConn) - err := Run(dataSourcer, exec, handler, []string{"0", "1", "2"}) + exec := newFakeExecutor() + err := Run(dataSourcer, exec, handler) if err != errDataSourcerRead { t.Fatalf("data sourcer read fail, shoud get error: %v, but get error: %v", errDataSourcerRead, err) @@ -47,22 +47,20 @@ func TestRunSchemaChangesDataSourcerReadFail(t *testing.T) { } func TestRunSchemaChangesValidationFail(t *testing.T) { - dataSourcer := newFakeDataSourcer([]string{"invalid sql"}, false, false, false) + dataSourcer := newFakeDataSourcer([]string{"invalid sql"}, true, false, false) handler := newFakeHandler() - fakeConn := newFakeVtGateConn() - exec := newFakeVtGateExecutor(fakeConn) - err := Run(dataSourcer, exec, handler, []string{"0", "1", "2"}) + exec := newFakeExecutor() + err := Run(dataSourcer, exec, handler) if err == nil { t.Fatalf("run schema change should fail due to executor.Open fail") } } func TestRunSchemaChanges(t *testing.T) { - dataSourcer := NewSimepleDataSourcer("select * from test_db;") + dataSourcer := NewSimpleDataSourcer("create table test_table (pk int);") handler := newFakeHandler() - fakeConn := newFakeVtGateConn() - exec := newFakeVtGateExecutor(fakeConn) - err := Run(dataSourcer, exec, handler, []string{"0", "1", "2"}) + exec := newFakeExecutor() + err := Run(dataSourcer, exec, handler) if err != nil { t.Fatalf("schema change should success but get error: %v", err) } @@ -83,15 +81,42 @@ func TestRunSchemaChanges(t *testing.T) { } } -func newFakeVtGateConn() *fakevtgateconn.FakeVTGateConn { - return fakevtgateconn.NewFakeVTGateConn(context.Background(), "", 1*time.Second) +func newFakeExecutor() *TabletExecutor { + return NewTabletExecutor( + faketmclient.NewFakeTabletManagerClient(), + newFakeTopo(), + "test_keyspace") } -func newFakeVtGateExecutor(conn *fakevtgateconn.FakeVTGateConn) *VtGateExecutor { - return NewVtGateExecutor( - "test_keyspace", - conn, - 1*time.Second) +type fakeTopo struct { + faketopo.FakeTopo +} + +func newFakeTopo() *fakeTopo { + return &fakeTopo{} +} + +func (topoServer *fakeTopo) GetShardNames(keyspace string) ([]string, error) { + return []string{"0", "1", "2"}, nil +} + +func (topoServer *fakeTopo) GetShard(keyspace string, shard string) (*topo.ShardInfo, error) { + value := &topo.Shard{ + MasterAlias: topo.TabletAlias{ + Cell: "test_cell", + Uid: 0, + }, + } + return 
topo.NewShardInfo(keyspace, shard, value, 0), nil +} + +func (topoServer *fakeTopo) GetTablet(tabletAlias topo.TabletAlias) (*topo.TabletInfo, error) { + return &topo.TabletInfo{ + Tablet: &topo.Tablet{ + Alias: tabletAlias, + Keyspace: "test_keyspace", + }, + }, nil } type fakeDataSourcer struct { diff --git a/go/vt/schemamanager/simple_data_sourcer.go b/go/vt/schemamanager/simple_data_sourcer.go index a12394ffa9..51665f75f8 100644 --- a/go/vt/schemamanager/simple_data_sourcer.go +++ b/go/vt/schemamanager/simple_data_sourcer.go @@ -11,16 +11,16 @@ type SimpleDataSourcer struct { sqls []string } -// NewSimepleDataSourcer creates a new SimpleDataSourcer instance -func NewSimepleDataSourcer(sqlStr string) *SimpleDataSourcer { - result := SimpleDataSourcer{} +// NewSimpleDataSourcer creates a new SimpleDataSourcer instance +func NewSimpleDataSourcer(sqlStr string) *SimpleDataSourcer { + result := &SimpleDataSourcer{sqls: make([]string, 0, 32)} for _, sql := range strings.Split(sqlStr, ";") { s := strings.TrimSpace(sql) if s != "" { result.sqls = append(result.sqls, s) } } - return &result + return result } // Open is a no-op diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go new file mode 100644 index 0000000000..9ab672b500 --- /dev/null +++ b/go/vt/schemamanager/tablet_executor.go @@ -0,0 +1,147 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "fmt" + "sync" + + log "github.com/golang/glog" + "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" +) + +// TabletExecutor applies schema changes to all tablets. 
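+// A minimal usage sketch, using only names defined in this patch:
+//
+//   exec := NewTabletExecutor(tmClient, topoServer, "test_keyspace")
+//   if err := exec.Open(); err != nil { /* shard or master lookup failed */ }
+//   defer exec.Close()
+//   if err := exec.Validate(sqls); err != nil { /* non-DDL statement */ }
+//   result := exec.Execute(sqls)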
+type TabletExecutor struct { + keyspace string + tmClient tmclient.TabletManagerClient + topoServer topo.Server + tabletInfos []*topo.TabletInfo + isClosed bool +} + +// NewTabletExecutor creates a new TabletExecutor instance +func NewTabletExecutor( + tmClient tmclient.TabletManagerClient, + topoServer topo.Server, + keyspace string) *TabletExecutor { + return &TabletExecutor{ + keyspace: keyspace, + tmClient: tmClient, + topoServer: topoServer, + isClosed: true, + } +} + +// Open opens a connection to the master for every shard +func (exec *TabletExecutor) Open() error { + if !exec.isClosed { + return nil + } + shardNames, err := exec.topoServer.GetShardNames(exec.keyspace) + if err != nil { + return fmt.Errorf("unable to get shard names for keyspace: %s, error: %v", exec.keyspace, err) + } + log.Infof("Keyspace: %v, Shards: %v\n", exec.keyspace, shardNames) + exec.tabletInfos = make([]*topo.TabletInfo, len(shardNames)) + for i, shardName := range shardNames { + shardInfo, err := exec.topoServer.GetShard(exec.keyspace, shardName) + log.Infof("\tShard: %s, ShardInfo: %v\n", shardName, shardInfo) + if err != nil { + return fmt.Errorf("unable to get shard info, keyspace: %s, shard: %s, error: %v", exec.keyspace, shardName, err) + } + tabletInfo, err := exec.topoServer.GetTablet(shardInfo.MasterAlias) + if err != nil { + return fmt.Errorf("unable to get master tablet info, keyspace: %s, shard: %s, error: %v", exec.keyspace, shardName, err) + } + exec.tabletInfos[i] = tabletInfo + log.Infof("\t\tTabletInfo: %+v\n", tabletInfo) + } + exec.isClosed = false + return nil +} + +// Validate validates a list of sql statements +func (exec *TabletExecutor) Validate(sqls []string) error { + for _, sql := range sqls { + stat, err := sqlparser.Parse(sql) + if err != nil { + return nil + } + if _, ok := stat.(*sqlparser.DDL); !ok { + return fmt.Errorf("schema change works for DDLs only, but get non DDL statement: %s", sql) + } + } + return nil +} + +// Execute applies schema changes +func (exec *TabletExecutor) Execute(sqls []string) *ExecuteResult { + execResult := ExecuteResult{} + execResult.Sqls = sqls + if exec.isClosed { + execResult.ExecutorErr = "executor is closed" + return &execResult + } + for index, sql := range sqls { + execResult.CurSqlIndex = index + exec.executeOnAllTablets(&execResult, sql) + if len(execResult.FailedShards) > 0 { + break + } + } + return &execResult +} + +func (exec *TabletExecutor) executeOnAllTablets(execResult *ExecuteResult, sql string) { + var wg sync.WaitGroup + numOfMasterTablets := len(exec.tabletInfos) + wg.Add(numOfMasterTablets) + errChan := make(chan ShardWithError, numOfMasterTablets) + successChan := make(chan ShardResult, numOfMasterTablets) + for i := range exec.tabletInfos { + go exec.executeOneTablet(&wg, exec.tabletInfos[i], sql, errChan, successChan) + } + wg.Wait() + close(errChan) + close(successChan) + execResult.FailedShards = make([]ShardWithError, 0, len(errChan)) + execResult.SuccessShards = make([]ShardResult, 0, len(successChan)) + for e := range errChan { + execResult.FailedShards = append(execResult.FailedShards, e) + } + for r := range successChan { + execResult.SuccessShards = append(execResult.SuccessShards, r) + } +} + +func (exec *TabletExecutor) executeOneTablet( + wg *sync.WaitGroup, + tabletInfo *topo.TabletInfo, + sql string, + errChan chan ShardWithError, + successChan chan ShardResult) { + defer wg.Done() + ctx := context.Background() + result, err := exec.tmClient.ExecuteFetchAsDba(ctx, tabletInfo, sql, 10, false, false, true) + 
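+  // The trailing arguments are presumably maxRows and the wantFields,
+  // disableBinlogs and reloadSchema flags: the result is capped at 10 rows,
+  // binlogging stays enabled, and the tablet reloads its schema afterwards.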
if err != nil { + errChan <- ShardWithError{Shard: tabletInfo.Shard, Err: err.Error()} + } else { + successChan <- ShardResult{Shard: tabletInfo.Shard, Result: result} + } +} + +// Close clears tablet executor states +func (exec *TabletExecutor) Close() error { + if !exec.isClosed { + exec.tabletInfos = nil + exec.isClosed = true + } + return nil +} + +var _ Executor = (*TabletExecutor)(nil) diff --git a/go/vt/schemamanager/vtgate_executor.go b/go/vt/schemamanager/vtgate_executor.go deleted file mode 100644 index 2a5cbcb4d5..0000000000 --- a/go/vt/schemamanager/vtgate_executor.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2015, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package schemamanager - -import ( - "sync" - "time" - - log "github.com/golang/glog" - - "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" - - "golang.org/x/net/context" -) - -// VtGateExecutor applies schema changes via VtGate -type VtGateExecutor struct { - keyspace string - conn vtgateconn.VTGateConn - timeout time.Duration - isClosed bool -} - -// NewVtGateExecutor creates a new VtGateExecutor instance -func NewVtGateExecutor( - keyspace string, - conn vtgateconn.VTGateConn, - timeout time.Duration) *VtGateExecutor { - return &VtGateExecutor{ - keyspace: keyspace, - conn: conn, - timeout: timeout, - isClosed: true, - } -} - -// Open opens a connection to VtGate -func (exec *VtGateExecutor) Open() error { - exec.isClosed = false - return nil -} - -// Validate validates a list of sql statements -func (exec *VtGateExecutor) Validate(sqls []string) error { - for _, sql := range sqls { - if _, err := sqlparser.Parse(sql); err != nil { - return err - } - } - return nil -} - -// Execute applies schema changes through VtGate -func (exec *VtGateExecutor) Execute(sqls []string, shards []string) *ExecuteResult { - execResult := ExecuteResult{} - execResult.Sqls = sqls - if exec.isClosed { - execResult.ExecutorErr = "executor is closed" - return &execResult - } - for index, sql := range sqls { - execResult.CurSqlIndex = index - stat, err := sqlparser.Parse(sql) - if err != nil { - execResult.ExecutorErr = err.Error() - return &execResult - } - _, ok := stat.(*sqlparser.DDL) - if !ok { - exec.execute(&execResult, sql, shards, true) - } else { - exec.execute(&execResult, sql, shards, false) - } - if len(execResult.FailedShards) > 0 { - break - } - } - return &execResult -} - -func (exec *VtGateExecutor) execute(execResult *ExecuteResult, sql string, shards []string, enableTx bool) { - var wg sync.WaitGroup - wg.Add(len(shards)) - errChan := make(chan ShardWithError, len(shards)) - resultChan := make(chan ShardResult, len(shards)) - for _, s := range shards { - go func(keyspace string, shard string) { - defer wg.Done() - ctx := context.Background() - if enableTx { - exec.executeTx(ctx, sql, keyspace, shard, errChan, resultChan) - } else { - queryResult, err := exec.conn.ExecuteShard(ctx, sql, keyspace, []string{shard}, nil, topo.TYPE_MASTER) - if err != nil { - errChan <- ShardWithError{Shard: shard, Err: err.Error()} - } else { - resultChan <- ShardResult{Shard: shard, Result: queryResult} - } - } - }(exec.keyspace, s) - } - wg.Wait() - close(errChan) - close(resultChan) - execResult.FailedShards = make([]ShardWithError, 0, len(errChan)) - execResult.SuccessShards = make([]ShardResult, 0, len(resultChan)) - for e := range errChan { - 
execResult.FailedShards = append(execResult.FailedShards, e) - } - for r := range resultChan { - execResult.SuccessShards = append(execResult.SuccessShards, r) - } -} - -func (exec *VtGateExecutor) executeTx(ctx context.Context, sql string, keyspace string, shard string, errChan chan ShardWithError, resultChan chan ShardResult) { - tx, err := exec.conn.Begin(ctx) - if err != nil { - errChan <- ShardWithError{Shard: shard, Err: err.Error()} - return - } - queryResult, err := tx.ExecuteShard(ctx, sql, keyspace, []string{shard}, nil, topo.TYPE_MASTER) - if err == nil { - err = tx.Commit(ctx) - } - if err != nil { - errChan <- ShardWithError{Shard: shard, Err: err.Error()} - err = tx.Rollback(ctx) - if err != nil { - log.Errorf("failed to rollback transaction: %s on shard %s, error: %v", sql, shard, err) - } - } else { - resultChan <- ShardResult{Shard: shard, Result: queryResult} - } - -} - -// Close closes VtGate connection -func (exec *VtGateExecutor) Close() error { - if !exec.isClosed { - exec.conn.Close() - exec.isClosed = true - } - return nil -} - -var _ Executor = (*VtGateExecutor)(nil) diff --git a/go/vt/schemamanager/vtgate_executor_test.go b/go/vt/schemamanager/vtgate_executor_test.go deleted file mode 100644 index 58186b4209..0000000000 --- a/go/vt/schemamanager/vtgate_executor_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package schemamanager - -import ( - "testing" - - mproto "github.com/youtube/vitess/go/mysql/proto" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/vtgate/proto" -) - -func TestOpenVtGateExecutor(t *testing.T) { - fakeConn := newFakeVtGateConn() - exec := newFakeVtGateExecutor(fakeConn) - if err := exec.Open(); err != nil { - t.Fatalf("failed to call executor.Open: %v", err) - } - exec.Close() -} - -func TestValidate(t *testing.T) { - fakeConn := newFakeVtGateConn() - exec := newFakeVtGateExecutor(fakeConn) - defer exec.Close() - - invalidSelect := []string{"select from test_table"} - if err := exec.Validate(invalidSelect); err == nil { - t.Fatalf("exec.Validate should fail due to given invalid select statement") - } - invalidUpdate := []string{"update from test_table"} - if err := exec.Validate(invalidUpdate); err == nil { - t.Fatalf("exec.Validate should fail due to given invalid update statement") - } - invalidDelete := []string{"delete * from test_table"} - if err := exec.Validate(invalidDelete); err == nil { - t.Fatalf("exec.Validate should fail due to given invalid delete statement") - } - validSelect := []string{"select * from test_table"} - if err := exec.Validate(validSelect); err != nil { - t.Fatalf("exec.Validate should success but get error: %v", err) - } - validUpdate := []string{"update test_table set col1=1"} - if err := exec.Validate(validUpdate); err != nil { - t.Fatalf("exec.Validate should success but get error: %v", err) - } - validDelete := []string{"delete from test_table where col1=1"} - if err := exec.Validate(validDelete); err != nil { - t.Fatalf("exec.Validate should success but get error: %v", err) - } -} - -func TestExecuteWithoutOpen(t *testing.T) { - shards := []string{"0", "1"} - sqls := []string{"insert into test_table values (1, 2)"} - fakeConn := newFakeVtGateConn() - exec := newFakeVtGateExecutor(fakeConn) - result := exec.Execute(sqls, shards) - if result.ExecutorErr == "" { - t.Fatalf("execute should fail because Execute() is being called before 
Open()") - } -} - -func TestExecuteDML(t *testing.T) { - shards := []string{"0", "1"} - validSqls := []string{"insert into test_table values (1, 2)"} - invalidSqls := []string{"insert into test_table ..."} - fakeConn := newFakeVtGateConn() - - for _, sql := range validSqls { - for _, shard := range shards { - fakeConn.AddShardQuery( - &proto.QueryShard{ - Sql: sql, - BindVariables: nil, - Keyspace: "test_keyspace", - Shards: []string{shard}, - TabletType: topo.TYPE_MASTER, - Session: &proto.Session{ - InTransaction: true, - }, - }, - &mproto.QueryResult{}) - } - } - - exec := newFakeVtGateExecutor(fakeConn) - exec.Open() - defer exec.Close() - result := exec.Execute(invalidSqls, shards) - if len(result.FailedShards) == 0 && result.ExecutorErr == "" { - t.Fatalf("execute should fail due to an invalid sql") - } - result = exec.Execute(validSqls, shards) - if len(result.FailedShards) > 0 { - t.Fatalf("execute failed, error: %v", result.FailedShards) - } - if result.ExecutorErr != "" { - t.Fatalf("execute failed, sqls: %v, error: %s", validSqls, result.ExecutorErr) - } -} - -func TestExecuteDDL(t *testing.T) { - fakeConn := newFakeVtGateConn() - shards := []string{"0", "1"} - - validSqls := []string{"alter table test_table add column_01 int"} - for _, sql := range validSqls { - for _, shard := range shards { - fakeConn.AddShardQuery( - &proto.QueryShard{ - Sql: sql, - BindVariables: nil, - Keyspace: "test_keyspace", - Shards: []string{shard}, - TabletType: topo.TYPE_MASTER, - Session: nil, - }, - &mproto.QueryResult{}) - } - } - exec := newFakeVtGateExecutor(fakeConn) - exec.Open() - defer exec.Close() - result := exec.Execute(validSqls, shards) - if len(result.FailedShards) > 0 { - t.Fatalf("execute failed, error: %v", result.FailedShards) - } - if result.ExecutorErr != "" { - t.Fatalf("execute failed, sqls: %v, error: %s", validSqls, result.ExecutorErr) - } - // alter a non exist table - invalidSqls := []string{"alter table table_not_exist add column_01 int"} - result = exec.Execute(invalidSqls, shards) - if len(result.FailedShards) == 0 { - t.Fatalf("execute should fail") - } -} From 5d2cb62933b677f96acaf5c43bf189cf55fae4d2 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Mon, 11 May 2015 16:15:14 -0700 Subject: [PATCH 005/128] TabletExecutor.Validate should return err when sql syntax is invalid Add a test case that schema change should fail if executor.Open returns error. 
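Before this fix, Validate() swallowed the sqlparser error and returned nil, so
an unparseable statement such as "invalid sql" passed validation and only
surfaced as a failure later, at execution time.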
--- go/vt/schemamanager/schemamanager_test.go | 20 +++++++++++++++++++- go/vt/schemamanager/tablet_executor.go | 2 +- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 432874e73c..bdf3248252 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -6,6 +6,7 @@ package schemamanager import ( "errors" + "fmt" "testing" "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" @@ -47,10 +48,23 @@ func TestRunSchemaChangesDataSourcerReadFail(t *testing.T) { } func TestRunSchemaChangesValidationFail(t *testing.T) { - dataSourcer := newFakeDataSourcer([]string{"invalid sql"}, true, false, false) + dataSourcer := newFakeDataSourcer([]string{"invalid sql"}, false, false, false) handler := newFakeHandler() exec := newFakeExecutor() err := Run(dataSourcer, exec, handler) + if err == nil { + t.Fatalf("run schema change should fail due to executor.Validate fail") + } +} + +func TestRunSchemaChangesExecutorOpenFail(t *testing.T) { + dataSourcer := newFakeDataSourcer([]string{"create table test_table (pk int);"}, false, false, false) + handler := newFakeHandler() + exec := NewTabletExecutor( + faketmclient.NewFakeTabletManagerClient(), + newFakeTopo(), + "unknown_keyspace") + err := Run(dataSourcer, exec, handler) if err == nil { t.Fatalf("run schema change should fail due to executor.Open fail") } @@ -97,6 +111,10 @@ func newFakeTopo() *fakeTopo { } func (topoServer *fakeTopo) GetShardNames(keyspace string) ([]string, error) { + if keyspace != "test_keyspace" { + return nil, fmt.Errorf("expect to get keyspace: test_keyspace, but got: %s", + keyspace) + } return []string{"0", "1", "2"}, nil } diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 9ab672b500..1f310f8616 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -70,7 +70,7 @@ func (exec *TabletExecutor) Validate(sqls []string) error { for _, sql := range sqls { stat, err := sqlparser.Parse(sql) if err != nil { - return nil + return err } if _, ok := stat.(*sqlparser.DDL); !ok { return fmt.Errorf("schema change works for DDLs only, but get non DDL statement: %s", sql) From c13f568bcd72bf8ab83f065e28ce7705169a6e3a Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 12 May 2015 08:49:43 -0700 Subject: [PATCH 006/128] Backport of fix in google tree. 
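With this change, findChunks() returns the ExecuteFetchAsApp error to its
caller instead of logging it and silently falling back to a single chunk.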
--- go/vt/worker/clone_utils.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/go/vt/worker/clone_utils.go b/go/vt/worker/clone_utils.go index 89813e2bbd..19e5379b9c 100644 --- a/go/vt/worker/clone_utils.go +++ b/go/vt/worker/clone_utils.go @@ -284,8 +284,7 @@ func findChunks(ctx context.Context, wr *wrangler.Wrangler, ti *topo.TabletInfo, qr, err := wr.TabletManagerClient().ExecuteFetchAsApp(ctx, ti, query, 1, true) cancel() if err != nil { - wr.Logger().Infof("Not splitting table %v into multiple chunks: %v", td.Name, err) - return result, nil + return nil, fmt.Errorf("ExecuteFetchAsApp: %v", err) } if len(qr.Rows) != 1 { wr.Logger().Infof("Not splitting table %v into multiple chunks, cannot get min and max", td.Name) From 100e7db414e448cdc76c0e010af8c8a70c2e54d6 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 12 May 2015 13:51:04 -0700 Subject: [PATCH 007/128] add console event handler for schema manager --- go/vt/schemamanager/console_event_handler.go | 52 ++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 go/vt/schemamanager/console_event_handler.go diff --git a/go/vt/schemamanager/console_event_handler.go b/go/vt/schemamanager/console_event_handler.go new file mode 100644 index 0000000000..010d4fbb69 --- /dev/null +++ b/go/vt/schemamanager/console_event_handler.go @@ -0,0 +1,52 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "encoding/json" + "fmt" +) + +// ConsoleEventHandler prints various schema manager events to the stdout +type ConsoleEventHandler struct{} + +// NewConsoleEventHandler creates a new ConsoleEventHandler instance. +func NewConsoleEventHandler() *ConsoleEventHandler { + return &ConsoleEventHandler{} +} + +// OnDataSourcerReadSuccess is called when schemamanager successfully reads all sql statements. +func (handler *ConsoleEventHandler) OnDataSourcerReadSuccess(sql []string) error { + fmt.Println("Successfully read all schema changes.") + return nil +} + +// OnDataSourcerReadFail is called when schemamanager fails to read all sql statements. +func (handler *ConsoleEventHandler) OnDataSourcerReadFail(err error) error { + fmt.Printf("Failed to read schema changes, error: %v\n", err) + return nil +} + +// OnValidationSuccess is called when schemamanager successfully validates all sql statements. +func (handler *ConsoleEventHandler) OnValidationSuccess([]string) error { + fmt.Println("Successfully validate all sqls.") + return nil +} + +// OnValidationFail is called when schemamanager fails to validate sql statements. +func (handler *ConsoleEventHandler) OnValidationFail(err error) error { + fmt.Printf("Failed to validate sqls, error: %v\n", err) + return nil +} + +// OnExecutorComplete is called when schemamanager finishes applying schema changes. 
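+// It prints the JSON-encoded ExecuteResult, roughly of the form
+// (field values illustrative only):
+//
+//   Executor finished, result: {
+//     "Sqls": ["create table test_table (pk int)"],
+//     "CurSqlIndex": 0,
+//     "FailedShards": [],
+//     "SuccessShards": [{"Shard": "0", "Result": {...}}],
+//     "ExecutorErr": ""
+//   }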
+func (handler *ConsoleEventHandler) OnExecutorComplete(result *ExecuteResult) error { + out, _ := json.MarshalIndent(result, "", " ") + fmt.Printf("Executor finished, result: %s\n", string(out)) + return nil +} + +// ConsoleEventHandler have to implement EventHandler interface +var _ EventHandler = (*ConsoleEventHandler)(nil) From 6a2257ed51d437b476140abea32f987e8a0b1072 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 12 May 2015 18:20:16 -0700 Subject: [PATCH 008/128] remove several schema commands out of vtctl Remove ReloadSchema, ValidateSchemaShard, ValidateSchemaKeyspace, PreflightSchema, ApplySchemaShard and ApplySchemaKeyspace commands out of vtctl. Those endpoints are still presented in wrangler package and will be removed once autoschema is mature. --- go/vt/vtctl/vtctl.go | 186 +------------------------------------------ 1 file changed, 2 insertions(+), 184 deletions(-) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index e942e3f2bd..ad592abd20 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -237,27 +237,9 @@ var commands = []commandGroup{ command{"GetSchema", commandGetSchema, "[-tables=,,...] [-exclude_tables=,,...] [-include-views] ", "Display the full schema for a tablet, or just the schema for the provided tables."}, - command{"ReloadSchema", commandReloadSchema, - "", - "Asks a remote tablet to reload its schema."}, - command{"ValidateSchemaShard", commandValidateSchemaShard, - "[-exclude_tables=''] [-include-views] ", - "Validate the master schema matches all the slaves."}, - command{"ValidateSchemaKeyspace", commandValidateSchemaKeyspace, - "[-exclude_tables=''] [-include-views] ", - "Validate the master schema from shard 0 matches all the other tablets in the keyspace."}, - command{"PreflightSchema", commandPreflightSchema, - "{-sql= || -sql-file=} ", - "Apply the schema change to a temporary database to gather before and after schema and validate the change. The sql can be inlined or read from a file."}, command{"ApplySchema", commandApplySchema, - "[-force] {-sql= || -sql-file=} [-skip-preflight] [-stop-replication] ", - "Apply the schema change to the specified tablet (allowing replication by default). The sql can be inlined or read from a file. Note this doesn't change any tablet state (doesn't go into 'schema' type)."}, - command{"ApplySchemaShard", commandApplySchemaShard, - "[-force] {-sql= || -sql-file=} [-simple] [-new-parent=] ", - "Apply the schema change to the specified shard. If simple is specified, we just apply on the live master. Otherwise we will need to do the shell game. So we will apply the schema change to every single slave. if new_parent is set, we will also reparent (otherwise the master won't be touched at all). Using the force flag will cause a bunch of checks to be ignored, use with care."}, - command{"ApplySchemaKeyspace", commandApplySchemaKeyspace, - "[-force] {-sql= || -sql-file=} [-simple] ", - "Apply the schema change to the specified keyspace. If simple is specified, we just apply on the live masters. Otherwise we will need to do the shell game on each shard. So we will apply the schema change to every single slave (running in parallel on all shards, but on one host at a time in a given shard). We will not reparent at the end, so the masters won't be touched at all. 
Using the force flag will cause a bunch of checks to be ignored, use with care."}, + "[-force] {-sql= || -sql-file=} ", + "Apply the schema change to the specified keyspace."}, command{"CopySchemaShard", commandCopySchemaShard, "[-tables=,,...] [-exclude_tables=,,...] [-include-views] ", "Copy the schema from a source tablet to the specified shard. The schema is applied directly on the master of the destination shard, and is propagated to the replicas through binlogs."}, @@ -1770,171 +1752,7 @@ func commandGetSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag return err } -func commandReloadSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 1 { - return fmt.Errorf("action ReloadSchema requires ") - } - tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) - if err != nil { - return err - } - return wr.ReloadSchema(ctx, tabletAlias) -} - -func commandValidateSchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - excludeTables := subFlags.String("exclude_tables", "", "comma separated list of regexps for tables to exclude") - includeViews := subFlags.Bool("include-views", false, "include views in the validation") - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidateSchemaShard requires ") - } - - keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) - if err != nil { - return err - } - var excludeTableArray []string - if *excludeTables != "" { - excludeTableArray = strings.Split(*excludeTables, ",") - } - return wr.ValidateSchemaShard(ctx, keyspace, shard, excludeTableArray, *includeViews) -} - -func commandValidateSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - excludeTables := subFlags.String("exclude_tables", "", "comma separated list of regexps for tables to exclude") - includeViews := subFlags.Bool("include-views", false, "include views in the validation") - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidateSchemaKeyspace requires ") - } - - keyspace := subFlags.Arg(0) - var excludeTableArray []string - if *excludeTables != "" { - excludeTableArray = strings.Split(*excludeTables, ",") - } - return wr.ValidateSchemaKeyspace(ctx, keyspace, excludeTableArray, *includeViews) -} - -func commandPreflightSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - sql := subFlags.String("sql", "", "sql command") - sqlFile := subFlags.String("sql-file", "", "file containing the sql commands") - if err := subFlags.Parse(args); err != nil { - return err - } - - if subFlags.NArg() != 1 { - return fmt.Errorf("action PreflightSchema requires ") - } - tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) - if err != nil { - return err - } - change, err := getFileParam(*sql, *sqlFile, "sql") - if err != nil { - return err - } - scr, err := wr.PreflightSchema(ctx, tabletAlias, change) - if err == nil { - log.Infof(scr.String()) - } - return err -} - func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "will apply the schema even if preflight schema doesn't match") - sql := subFlags.String("sql", "", "sql command") -
sqlFile := subFlags.String("sql-file", "", "file containing the sql commands") - skipPreflight := subFlags.Bool("skip-preflight", false, "do not preflight the schema (use with care)") - stopReplication := subFlags.Bool("stop-replication", false, "stop replication before applying schema") - if err := subFlags.Parse(args); err != nil { - return err - } - - if subFlags.NArg() != 1 { - return fmt.Errorf("action ApplySchema requires ") - } - tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) - if err != nil { - return err - } - change, err := getFileParam(*sql, *sqlFile, "sql") - if err != nil { - return err - } - - sc := &myproto.SchemaChange{} - sc.Sql = change - sc.AllowReplication = !(*stopReplication) - - // do the preflight to get before and after schema - if !(*skipPreflight) { - scr, err := wr.PreflightSchema(ctx, tabletAlias, sc.Sql) - if err != nil { - return fmt.Errorf("preflight failed: %v", err) - } - log.Infof("Preflight: " + scr.String()) - sc.BeforeSchema = scr.BeforeSchema - sc.AfterSchema = scr.AfterSchema - sc.Force = *force - } - - scr, err := wr.ApplySchema(ctx, tabletAlias, sc) - if err == nil { - log.Infof(scr.String()) - } - return err -} - -func commandApplySchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "will apply the schema even if preflight schema doesn't match") - sql := subFlags.String("sql", "", "sql command") - sqlFile := subFlags.String("sql-file", "", "file containing the sql commands") - simple := subFlags.Bool("simple", false, "just apply change on master and let replication do the rest") - newParent := subFlags.String("new-parent", "", "will reparent to this tablet after the change") - waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time to wait for slaves to catch up in reparenting") - if err := subFlags.Parse(args); err != nil { - return err - } - - if subFlags.NArg() != 1 { - return fmt.Errorf("action ApplySchemaShard requires ") - } - keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) - if err != nil { - return err - } - change, err := getFileParam(*sql, *sqlFile, "sql") - if err != nil { - return err - } - var newParentAlias topo.TabletAlias - if *newParent != "" { - newParentAlias, err = topo.ParseTabletAliasString(*newParent) - if err != nil { - return err - } - } - - if (*simple) && (*newParent != "") { - return fmt.Errorf("new_parent for action ApplySchemaShard can only be specified for complex schema upgrades") - } - - scr, err := wr.ApplySchemaShard(ctx, keyspace, shard, change, newParentAlias, *simple, *force, *waitSlaveTimeout) - if err == nil { - log.Infof(scr.String()) - } - return err -} - -func commandApplySchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { force := subFlags.Bool("force", false, "will apply the schema even if preflight schema doesn't match") sql := subFlags.String("sql", "", "sql command") sqlFile := subFlags.String("sql-file", "", "file containing the sql commands") From 8d5f43bb1c226a6b2b19b4c0694bb69c70f80ac6 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 12 May 2015 19:24:36 -0700 Subject: [PATCH 009/128] change wrangler.ApplySchemaKeyspace to use schemamanager --- go/vt/wrangler/schema.go | 106 +++------------------------------------ 1 file changed, 7 insertions(+), 99 deletions(-) diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index 086c922cf2..7215f6d5f7 100644 --- 
a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -18,6 +18,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/concurrency" myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" + "github.com/youtube/vitess/go/vt/schemamanager" "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topotools/events" @@ -407,106 +408,13 @@ func (wr *Wrangler) ApplySchemaKeyspace(ctx context.Context, keyspace string, ch return nil, err } - scr, err := wr.applySchemaKeyspace(ctx, keyspace, change, simple, force, waitSlaveTimeout) - return scr, wr.unlockKeyspace(ctx, keyspace, actionNode, lockPath, err) -} + err = schemamanager.Run( + schemamanager.NewSimpleDataSourcer(change), + schemamanager.NewTabletExecutor(wr.tmc, wr.ts, keyspace), + schemamanager.NewConsoleEventHandler(), + ) -func (wr *Wrangler) applySchemaKeyspace(ctx context.Context, keyspace string, change string, simple, force bool, waitSlaveTimeout time.Duration) (*myproto.SchemaChangeResult, error) { - shards, err := wr.ts.GetShardNames(keyspace) - if err != nil { - return nil, err - } - - // corner cases - if len(shards) == 0 { - return nil, fmt.Errorf("No shards in keyspace %v", keyspace) - } - if len(shards) == 1 { - log.Infof("Only one shard in keyspace %v, using ApplySchemaShard", keyspace) - return wr.ApplySchemaShard(ctx, keyspace, shards[0], change, topo.TabletAlias{}, simple, force, waitSlaveTimeout) - } - - // Get schema on all shard masters in parallel - log.Infof("Getting schema on all shards") - beforeSchemas := make([]*myproto.SchemaDefinition, len(shards)) - shardInfos := make([]*topo.ShardInfo, len(shards)) - wg := sync.WaitGroup{} - mu := sync.Mutex{} - getErrs := make([]string, 0, 5) - for i, shard := range shards { - wg.Add(1) - go func(i int, shard string) { - var err error - defer func() { - if err != nil { - mu.Lock() - getErrs = append(getErrs, err.Error()) - mu.Unlock() - } - wg.Done() - }() - - shardInfos[i], err = wr.ts.GetShard(keyspace, shard) - if err != nil { - return - } - - beforeSchemas[i], err = wr.GetSchema(ctx, shardInfos[i].MasterAlias, nil, nil, false) - }(i, shard) - } - wg.Wait() - if len(getErrs) > 0 { - return nil, fmt.Errorf("Error(s) getting schema: %v", strings.Join(getErrs, ", ")) - } - - // check they all match, or use the force flag - log.Infof("Checking starting schemas match on all shards") - for i, beforeSchema := range beforeSchemas { - if i == 0 { - continue - } - diffs := myproto.DiffSchemaToArray("shard 0", beforeSchemas[0], fmt.Sprintf("shard %v", i), beforeSchema) - if len(diffs) > 0 { - if force { - log.Warningf("Shard %v has inconsistent schema, ignoring: %v", i, strings.Join(diffs, "\n")) - } else { - return nil, fmt.Errorf("Shard %v has inconsistent schema: %v", i, strings.Join(diffs, "\n")) - } - } - } - - // preflight on shard 0 master, to get baseline - // this assumes shard 0 master doesn't have the schema upgrade applied - // if it does, we'll have to fix the slaves and other shards manually. 
- log.Infof("Running Preflight on Shard 0 Master") - preflight, err := wr.PreflightSchema(ctx, shardInfos[0].MasterAlias, change) - if err != nil { - return nil, err - } - - // for each shard, apply the change - log.Infof("Applying change on all shards") - var applyErr error - for i, shard := range shards { - wg.Add(1) - go func(i int, shard string) { - defer wg.Done() - - _, err := wr.lockAndApplySchemaShard(ctx, shardInfos[i], preflight, keyspace, shard, shardInfos[i].MasterAlias, change, topo.TabletAlias{}, simple, force, waitSlaveTimeout) - if err != nil { - mu.Lock() - applyErr = err - mu.Unlock() - return - } - }(i, shard) - } - wg.Wait() - if applyErr != nil { - return nil, applyErr - } - - return &myproto.SchemaChangeResult{BeforeSchema: preflight.BeforeSchema, AfterSchema: preflight.AfterSchema}, nil + return nil, wr.unlockKeyspace(ctx, keyspace, actionNode, lockPath, err) } // CopySchemaShard copies the schema from a source tablet to the From ee4b3c97c4f09cc6bce75820d68f2630c8670e4d Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 12 May 2015 19:36:30 -0700 Subject: [PATCH 010/128] add returncode as one of return values from utils.run method This change allows caller to know whether the vtctl command fails or not. --- test/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils.py b/test/utils.py index 274b7a062b..995824134e 100644 --- a/test/utils.py +++ b/test/utils.py @@ -204,7 +204,7 @@ def run(cmd, trap_output=False, raise_on_error=True, **kargs): raise TestError('cmd fail:', args, stdout, stderr) else: logging.debug('cmd fail: %s %s %s', str(args), stdout, stderr) - return stdout, stderr + return stdout, stderr, proc.returncode # run sub-process, expects failure def run_fail(cmd, **kargs): From 85fae2e9be4fb7593c1db4fb7ec4fa2e4d698fc2 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 12 May 2015 19:54:22 -0700 Subject: [PATCH 011/128] make vtctl.ApplySchema accept a list sqls separated by semicolon --- go/vt/vtctl/vtctl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index ad592abd20..ad3877bd63 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -1754,7 +1754,7 @@ func commandGetSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { force := subFlags.Bool("force", false, "will apply the schema even if preflight schema doesn't match") - sql := subFlags.String("sql", "", "sql command") + sql := subFlags.String("sql", "", "a list of sql commands separated by semicolon") sqlFile := subFlags.String("sql-file", "", "file containing the sql commands") simple := subFlags.Bool("simple", false, "just apply change on master and let replication do the rest") waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time to wait for slaves to catch up in reparenting") From 9a5e04f5714d1254b60ff4dcb125f802281edc17 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 12 May 2015 19:54:52 -0700 Subject: [PATCH 012/128] rework schema integration test --- test/schema.py | 279 ++++++++++++++++++------------------------------- 1 file changed, 103 insertions(+), 176 deletions(-) diff --git a/test/schema.py b/test/schema.py index 6f4f846b89..5dbb4d433d 100755 --- a/test/schema.py +++ b/test/schema.py @@ -16,7 +16,8 @@ shard_1_master = tablet.Tablet() shard_1_replica1 = tablet.Tablet() shard_2_master = tablet.Tablet() shard_2_replica1 
From 9a5e04f5714d1254b60ff4dcb125f802281edc17 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 12 May 2015 19:54:52 -0700 Subject: [PATCH 012/128] rework schema integration test --- test/schema.py | 279 ++++++++++++++++++------------------------------- 1 file changed, 103 insertions(+), 176 deletions(-) diff --git a/test/schema.py b/test/schema.py index 6f4f846b89..5dbb4d433d 100755 --- a/test/schema.py +++ b/test/schema.py @@ -16,7 +16,8 @@ shard_1_master = tablet.Tablet() shard_1_replica1 = tablet.Tablet() shard_2_master = tablet.Tablet() shard_2_replica1 = tablet.Tablet() - +test_keyspace = 'test_keyspace' +db_name = 'vt_' + test_keyspace def setUpModule(): try: @@ -38,6 +39,8 @@ def setUpModule(): tearDownModule() raise + utils.run_vtctl(['CreateKeyspace', test_keyspace]) + def tearDownModule(): if utils.options.skip_teardown: return @@ -69,58 +72,30 @@ def tearDownModule(): shard_2_master.remove_tree() shard_2_replica1.remove_tree() -# statements to create the table -create_vt_select_test = [ - ('''create table vt_select_test%d ( -id bigint not null, -msg varchar(64), -primary key (id) -) Engine=InnoDB''' % x).replace("\n", "") - for x in xrange(4)] - class TestSchema(unittest.TestCase): - def _check_tables(self, tablet, expectedCount): - tables = tablet.mquery('vt_test_keyspace', 'show tables') - self.assertEqual(len(tables), expectedCount, - 'Unexpected table count on %s (not %u): %s' % - (tablet.tablet_alias, expectedCount, str(tables))) + def setUp(self): + shard_0_master.init_tablet( 'master', test_keyspace, '0') + shard_0_replica1.init_tablet('replica', test_keyspace, '0') + shard_0_replica2.init_tablet('replica', test_keyspace, '0') + shard_0_rdonly.init_tablet( 'rdonly', test_keyspace, '0') + shard_0_backup.init_tablet( 'backup', test_keyspace, '0') + shard_1_master.init_tablet( 'master', test_keyspace, '1') + shard_1_replica1.init_tablet('replica', test_keyspace, '1') + shard_2_master.init_tablet( 'master', test_keyspace, '2') + shard_2_replica1.init_tablet('replica', test_keyspace, '2') - def _check_db_not_created(self, tablet): - # Broadly catch all exceptions, since the exception being raised is internal to MySQL. - # We're strictly checking the error message though, so should be fine. - with self.assertRaisesRegexp(Exception, '(1049, "Unknown database \'vt_test_keyspace\'")'): - tables = tablet.mquery('vt_test_keyspace', 'show tables') - - def test_complex_schema(self): - - utils.run_vtctl(['CreateKeyspace', 'test_keyspace']) - - shard_0_master.init_tablet( 'master', 'test_keyspace', '0') - shard_0_replica1.init_tablet('replica', 'test_keyspace', '0') - shard_0_replica2.init_tablet('replica', 'test_keyspace', '0') - shard_0_rdonly.init_tablet( 'rdonly', 'test_keyspace', '0') - shard_0_backup.init_tablet( 'backup', 'test_keyspace', '0') - shard_1_master.init_tablet( 'master', 'test_keyspace', '1') - shard_1_replica1.init_tablet('replica', 'test_keyspace', '1') - shard_2_master.init_tablet( 'master', 'test_keyspace', '2') - shard_2_replica1.init_tablet('replica', 'test_keyspace', '2') - - utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) + utils.run_vtctl(['RebuildKeyspaceGraph', test_keyspace], auto_log=True) # run checks now before we start the tablets utils.validate_topology() - # create databases, start the tablets for t in [shard_0_master, shard_0_replica1, shard_0_replica2, - shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1]: - t.create_db('vt_test_keyspace') + shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1, + shard_2_master, shard_2_replica1]: + t.create_db(db_name) t.start_vttablet(wait_for_state=None) - # we intentionally don't want to create db on these tablets - shard_2_master.start_vttablet(wait_for_state=None) - shard_2_replica1.start_vttablet(wait_for_state=None) - # wait for the tablets to start shard_0_master.wait_for_vttablet_state('SERVING') shard_0_replica1.wait_for_vttablet_state('SERVING') @@ -129,154 +104,106 @@ class TestSchema(unittest.TestCase): shard_0_backup.wait_for_vttablet_state('NOT_SERVING')
shard_1_master.wait_for_vttablet_state('SERVING') shard_1_replica1.wait_for_vttablet_state('SERVING') - shard_2_master.wait_for_vttablet_state('NOT_SERVING') - shard_2_replica1.wait_for_vttablet_state('NOT_SERVING') + shard_2_master.wait_for_vttablet_state('SERVING') + shard_2_replica1.wait_for_vttablet_state('SERVING') # make sure all replication is good for t in [shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1, shard_2_master, shard_2_replica1]: t.reset_replication() - utils.run_vtctl(['InitShardMaster', 'test_keyspace/0', + + utils.run_vtctl(['InitShardMaster', test_keyspace+'/0', shard_0_master.tablet_alias], auto_log=True) - utils.run_vtctl(['InitShardMaster', 'test_keyspace/1', + utils.run_vtctl(['InitShardMaster', test_keyspace+'/1', shard_1_master.tablet_alias], auto_log=True) - utils.run_vtctl(['InitShardMaster', 'test_keyspace/2', + utils.run_vtctl(['InitShardMaster', test_keyspace+'/2', shard_2_master.tablet_alias], auto_log=True) - utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', 'test_keyspace']) + utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', test_keyspace]) # check after all tablets are here and replication is fixed utils.validate_topology(ping_tablets=True) - # shard 0: apply the schema using a complex schema upgrade, no - # reparenting yet - utils.run_vtctl(['ApplySchemaShard', - '-sql='+create_vt_select_test[0], - 'test_keyspace/0'], - auto_log=True) - - # check all expected hosts have the change: - # - master won't have it as it's a complex change - self._check_tables(shard_0_master, 0) - self._check_tables(shard_0_replica1, 1) - self._check_tables(shard_0_replica2, 1) - self._check_tables(shard_0_rdonly, 1) - self._check_tables(shard_0_backup, 1) - self._check_tables(shard_1_master, 0) - self._check_tables(shard_1_replica1, 0) - - # shard 0: apply schema change to just master directly - # (to test its state is not changed) - utils.run_vtctl(['ApplySchema', - '-stop-replication', - '-sql='+create_vt_select_test[0], - shard_0_master.tablet_alias], - auto_log=True) - self._check_tables(shard_0_master, 1) - - # shard 0: apply new schema change, with reparenting - utils.run_vtctl(['ApplySchemaShard', - '-new-parent='+shard_0_replica1.tablet_alias, - '-sql='+create_vt_select_test[1], - 'test_keyspace/0'], - auto_log=True) - self._check_tables(shard_0_master, 1) - self._check_tables(shard_0_replica1, 2) - self._check_tables(shard_0_replica2, 2) - self._check_tables(shard_0_rdonly, 2) - self._check_tables(shard_0_backup, 2) - - # verify GetSchema --tables works - s = utils.run_vtctl_json(['GetSchema', '--tables=vt_select_test0', - shard_0_replica1.tablet_alias]) - self.assertEqual(len(s['TableDefinitions']), 1) - self.assertEqual(s['TableDefinitions'][0]['Name'], 'vt_select_test0') - - # CopySchemaShard is responsible for creating the db; one shouldn't exist before - # the command is run. 
- self._check_db_not_created(shard_2_master) - self._check_db_not_created(shard_2_replica1) - - utils.run_vtctl(['CopySchemaShard', - shard_0_replica1.tablet_alias, - 'test_keyspace/2'], - auto_log=True) - - # shard_2_master should look the same as the replica we copied from - self._check_tables(shard_2_master, 2) - self._check_tables(shard_2_replica1, 2) - # shard_2_replica1 should have gotten an identical schema applied to it via replication - self.assertEqual( - utils.run_vtctl_json(['GetSchema', shard_0_replica1.tablet_alias]), - utils.run_vtctl_json(['GetSchema', shard_2_replica1.tablet_alias]), - ) - - # keyspace: try to apply a keyspace-wide schema change, should fail - # as the preflight would be different in shard1 vs the others - out, err = utils.run_vtctl(['ApplySchemaKeyspace', - '-sql='+create_vt_select_test[2], - 'test_keyspace'], - trap_output=True, - log_level='INFO', - raise_on_error=False) - if err.find('ApplySchemaKeyspace Shard 1 has inconsistent schema') == -1: - self.fail('Unexpected ApplySchemaKeyspace output: %s' % err) - - # shard 1: catch it up with simple updates - utils.run_vtctl(['ApplySchemaShard', - '-simple', - '-sql='+create_vt_select_test[0], - 'test_keyspace/1'], - auto_log=True) - utils.run_vtctl(['ApplySchemaShard', - '-simple', - '-sql='+create_vt_select_test[1], - 'test_keyspace/1'], - auto_log=True) - self._check_tables(shard_1_master, 2) - self._check_tables(shard_1_replica1, 2) - - # keyspace: apply a keyspace-wide simple schema change, should work now - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', - '-sql='+create_vt_select_test[2], - 'test_keyspace'], - auto_log=True) - - # check all expected hosts have the change - self._check_tables(shard_0_master, 2) # was stuck a long time ago as scrap - self._check_tables(shard_0_replica1, 3) # current master - self._check_tables(shard_0_replica2, 3) - self._check_tables(shard_0_rdonly, 3) - self._check_tables(shard_0_backup, 3) - self._check_tables(shard_1_master, 3) # current master - self._check_tables(shard_1_replica1, 3) - self._check_tables(shard_2_master, 3) # current master - self._check_tables(shard_2_replica1, 3) - - # keyspace: apply a keyspace-wide complex schema change, should work too - utils.run_vtctl(['ApplySchemaKeyspace', - '-sql='+create_vt_select_test[3], - 'test_keyspace'], - auto_log=True) - - # check all expected hosts have the change: - # - master won't have it as it's a complex change - # - backup won't have it as IsReplicatingType is false - self._check_tables(shard_0_master, 2) # was stuck a long time ago as scrap - self._check_tables(shard_0_replica1, 3) # current master - self._check_tables(shard_0_replica2, 4) - self._check_tables(shard_0_rdonly, 4) - self._check_tables(shard_0_backup, 4) - self._check_tables(shard_1_master, 3) # current master - self._check_tables(shard_1_replica1, 4) - self._check_tables(shard_2_master, 3) # current master - self._check_tables(shard_2_replica1, 4) - - utils.pause("Look at schema now!") - + def tearDown(self): tablet.kill_tablets([shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1, shard_2_master, shard_2_replica1]) + def _check_tables(self, tablet, expectedCount): + tables = tablet.mquery(db_name, 'show tables') + self.assertEqual(len(tables), expectedCount, + 'Unexpected table count on %s (not %u): %s' % + (tablet.tablet_alias, expectedCount, str(tables))) + + def _check_db_not_created(self, tablet): + # Broadly catch all exceptions, since the exception being raised is 
internal to MySQL. + # We're strictly checking the error message though, so should be fine. + with self.assertRaisesRegexp(Exception, '(1049, "Unknown database \'%s\'")' % db_name): + tables = tablet.mquery(db_name, 'show tables') + + def _apply_schema(self, keyspace, sql): + out, err, returncode = utils.run_vtctl(['ApplySchema', + '-sql='+sql, + keyspace], + trap_output=True, + log_level='INFO', + raise_on_error=False) + self.assertEqual(0, returncode) + + return out + + def _get_schema(self, tablet_alias, tables): + out, err, returncode = utils.run_vtctl(['GetSchema', + '-tables='+tables, + tablet_alias], + trap_output=True, + log_level='INFO', + raise_on_error=False) + self.assertEqual(0, returncode) + return out + + def _create_test_table_sql(self, table): + return 'CREATE TABLE %s ( \ + `id` BIGINT(20) not NULL, \ + `msg` varchar(64), \ + PRIMARY KEY (`id`) \ + ) ENGINE=InnoDB' % table + + def _alter_test_table_sql(self, table, index_column_name): + return 'ALTER TABLE %s \ + ADD COLUMN new_id bigint(20) NOT NULL AUTO_INCREMENT FIRST, \ + DROP PRIMARY KEY, \ + ADD PRIMARY KEY (new_id), \ + ADD INDEX idx_column(%s) \ + ' % (table, index_column_name) + + def test_schema_changes(self): + schema_changes = ';'.join([ + self._create_test_table_sql('vt_select_test01'), + self._create_test_table_sql('vt_select_test02'), + self._create_test_table_sql('vt_select_test03'), + self._alter_test_table_sql('vt_select_test03', 'msg'), + self._create_test_table_sql('vt_select_test04')]) + + tables = ','.join([ + 'vt_select_test01', 'vt_select_test02', + 'vt_select_test03', 'vt_select_test04']) + + # apply schema changes to the test keyspace + self._apply_schema(test_keyspace, schema_changes) + + # check number of tables + self._check_tables(shard_0_master, 4) + self._check_tables(shard_1_master, 4) + self._check_tables(shard_2_master, 4) + + # get schema for each shard + shard_0_schema = self._get_schema(shard_0_master.tablet_alias, tables) + shard_1_schema = self._get_schema(shard_1_master.tablet_alias, tables) + shard_2_schema = self._get_schema(shard_2_master.tablet_alias, tables) + + # all shards should have the same schema + self.assertEqual(shard_0_schema, shard_1_schema) + self.assertEqual(shard_0_schema, shard_2_schema) + if __name__ == '__main__': utils.main()
From 56f3def56257b6a7524fad2760649393c0c2246e Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 12 May 2015 20:20:52 -0700 Subject: [PATCH 013/128] make schemamanager unit test not rely on go/vt/topo/test/faketopo The go/vt/topo/test/faketopo package relies on the wrangler package, which causes an import cycle in tests.
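An aside on the replacement fake below: since it hand-stubs every topology method, a compile-time assertion (the same idiom patch 007 uses for EventHandler) would catch interface drift as soon as topo.Server changes. A one-line sketch, assuming fakeTopo is meant to satisfy the topo.Server interface:

package schemamanager

import "github.com/youtube/vitess/go/vt/topo"

// Hypothetical compile-time check: if topo.Server gains a method that
// fakeTopo does not stub, the test stops compiling here instead of
// failing at runtime.
var _ topo.Server = (*fakeTopo)(nil)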
--- go/vt/schemamanager/schemamanager_test.go | 155 +++++++++++++++++++++- 1 file changed, 149 insertions(+), 6 deletions(-) diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index bdf3248252..cc9f00563b 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -10,10 +10,9 @@ import ( "testing" "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/test/faketopo" - _ "github.com/youtube/vitess/go/vt/tabletmanager/gorpctmclient" + "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) var ( @@ -102,9 +101,7 @@ func newFakeExecutor() *TabletExecutor { "test_keyspace") } -type fakeTopo struct { - faketopo.FakeTopo -} +type fakeTopo struct{} func newFakeTopo() *fakeTopo { return &fakeTopo{} @@ -137,6 +134,152 @@ func (topoServer *fakeTopo) GetTablet(tabletAlias topo.TabletAlias) (*topo.Table }, nil } +func (topoServer *fakeTopo) GetSrvKeyspaceNames(cell string) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) Close() {} + +func (topoServer *fakeTopo) GetKnownCells() ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) CreateKeyspace(keyspace string, value *topo.Keyspace) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) { + return 0, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) GetKeyspaces() ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) DeleteKeyspaceShards(keyspace string) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) CreateShard(keyspace, shard string, value *topo.Shard) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UpdateShard(si *topo.ShardInfo, existingVersion int64) (int64, error) { + return 0, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) ValidateShard(keyspace, shard string) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) DeleteShard(keyspace, shard string) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) CreateTablet(tablet *topo.Tablet) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UpdateTablet(tablet *topo.TabletInfo, existingVersion int64) (newVersion int64, err error) { + return 0, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UpdateTabletFields(tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) DeleteTablet(alias topo.TabletAlias) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) GetTabletsByCell(cell string) ([]topo.TabletAlias, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) 
UpdateShardReplicationFields(cell, keyspace, shard string, update func(*topo.ShardReplication) error) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) GetShardReplication(cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) DeleteShardReplication(cell, keyspace, shard string) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) LockSrvShardForAction(ctx context.Context, cell, keyspace, shard, contents string) (string, error) { + return "", fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results string) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) GetSrvTabletTypesPerShard(cell, keyspace, shard string) ([]topo.TabletType, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UpdateEndPoints(cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) DeleteEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) WatchEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) { + return nil, nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UpdateSrvShard(cell, keyspace, shard string, srvShard *topo.SrvShard) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) GetSrvShard(cell, keyspace, shard string) (*topo.SrvShard, error) { + return nil, fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) DeleteSrvShard(cell, keyspace, shard string) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UpdateSrvKeyspace(cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UpdateTabletEndpoint(cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) LockKeyspaceForAction(ctx context.Context, keyspace, contents string) (string, error) { + return "", fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UnlockKeyspaceForAction(keyspace, lockPath, results string) error { + return fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) LockShardForAction(ctx context.Context, keyspace, shard, contents string) (string, error) { + return "", fmt.Errorf("not implemented") +} + +func (topoServer *fakeTopo) UnlockShardForAction(keyspace, shard, lockPath, results string) error { + return fmt.Errorf("not implemented") +} + type fakeDataSourcer struct { sqls []string openFail bool
From 9ad065b90611ebabc9dda7937ade299bfddf95f2 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 13 May 2015 10:20:51 -0700 Subject: [PATCH 014/128] Fixing reparent code to restart replication appropriately. EmergencyReparent used to stop replication on all slaves, then SetMaster would see replication is off, and not restart it. Fixed unit and integration tests to test this case. MysqlDaemon fake now only has one place to store replication status.
API changes: - SlaveStatus returns a myproto.ReplicationStatus (not a pointer any more) - StopReplicationAndGetPosition is changed to StopReplicationAndGetStatus - SetMaster has an extra forceStartSlave boolean. --- go/vt/mysqlctl/clone.go | 2 +- go/vt/mysqlctl/mysql_daemon.go | 33 +++++++--- go/vt/mysqlctl/mysql_flavor.go | 2 +- go/vt/mysqlctl/mysql_flavor_mariadb.go | 6 +- go/vt/mysqlctl/mysql_flavor_mysql56.go | 6 +- go/vt/mysqlctl/mysql_flavor_test.go | 4 +- go/vt/mysqlctl/query.go | 3 + go/vt/mysqlctl/replication.go | 21 ++++--- go/vt/tabletmanager/actionnode/actionnode.go | 6 +- go/vt/tabletmanager/agent_rpc_actions.go | 55 +++++++++------- .../agentrpctest/test_agent_rpc.go | 30 ++++----- .../tabletmanager/faketmclient/fake_client.go | 12 ++-- go/vt/tabletmanager/gorpcproto/structs.go | 7 ++- .../gorpctmclient/gorpc_client.go | 21 ++++--- .../gorpctmserver/gorpc_server.go | 14 ++--- .../tabletmanager/tmclient/rpc_client_api.go | 10 +-- go/vt/worker/split_clone_test.go | 6 +- go/vt/worker/vertical_split_clone_test.go | 6 +- go/vt/wrangler/reparent.go | 37 ++++++----- .../testlib/emergency_reparent_shard_test.go | 14 +++-- .../testlib/planned_reparent_shard_test.go | 8 +-- go/vt/wrangler/testlib/reparent_utils_test.go | 19 +++--- test/reparent.py | 63 ++++++++++++------- 23 files changed, 218 insertions(+), 167 deletions(-) diff --git a/go/vt/mysqlctl/clone.go b/go/vt/mysqlctl/clone.go index d79375439a..339c39ff0f 100644 --- a/go/vt/mysqlctl/clone.go +++ b/go/vt/mysqlctl/clone.go @@ -276,7 +276,7 @@ func (mysqld *Mysqld) CreateSnapshot(logger logutil.Logger, dbName, sourceAddr s if err = mysqld.StopSlave(hookExtraEnv); err != nil { return } - var slaveStatus *proto.ReplicationStatus + var slaveStatus proto.ReplicationStatus slaveStatus, err = mysqld.SlaveStatus() if err != nil { return diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index d05699fe15..885b876b26 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -29,7 +29,7 @@ type MysqlDaemon interface { // replication related methods StartSlave(hookExtraEnv map[string]string) error StopSlave(hookExtraEnv map[string]string) error - SlaveStatus() (*proto.ReplicationStatus, error) + SlaveStatus() (proto.ReplicationStatus, error) // reparenting related methods ResetReplicationCommands() ([]string, error) @@ -77,9 +77,6 @@ type FakeMysqlDaemon struct { // test owner responsibility to have these two match) Replicating bool - // CurrentSlaveStatus is returned by SlaveStatus - CurrentSlaveStatus *proto.ReplicationStatus - // ResetReplicationResult is returned by ResetReplication ResetReplicationResult []string @@ -87,8 +84,15 @@ // ResetReplicationError is returned by ResetReplication ResetReplicationError error // CurrentMasterPosition is returned by MasterPosition + // and SlaveStatus CurrentMasterPosition proto.ReplicationPosition + // CurrentMasterHost is returned by SlaveStatus + CurrentMasterHost string + + // CurrentMasterPort is returned by SlaveStatus + CurrentMasterPort int + // ReadOnly is the current value of the flag ReadOnly bool @@ -175,11 +179,14 @@ func (fmd *FakeMysqlDaemon) StopSlave(hookExtraEnv map[string]string) error { } // SlaveStatus is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) SlaveStatus() (*proto.ReplicationStatus, error) { - if fmd.CurrentSlaveStatus == nil { - return nil, fmt.Errorf("no slave status defined") - } - return fmd.CurrentSlaveStatus, nil +func (fmd *FakeMysqlDaemon) SlaveStatus() (proto.ReplicationStatus, error) { + return
proto.ReplicationStatus{ + Position: fmd.CurrentMasterPosition, + SlaveIORunning: fmd.Replicating, + SlaveSQLRunning: fmd.Replicating, + MasterHost: fmd.CurrentMasterHost, + MasterPort: fmd.CurrentMasterPort, + }, nil } // ResetReplicationCommands is part of the MysqlDaemon interface @@ -261,6 +268,14 @@ func (fmd *FakeMysqlDaemon) ExecuteSuperQueryList(queryList []string) error { if expected != query { return fmt.Errorf("wrong query for ExecuteSuperQueryList: expected %v got %v", expected, query) } + + // intercept some queries to update our status + switch query { + case SqlStartSlave: + fmd.Replicating = true + case SqlStopSlave: + fmd.Replicating = false + } } return nil } diff --git a/go/vt/mysqlctl/mysql_flavor.go b/go/vt/mysqlctl/mysql_flavor.go index 9c48a7e226..e7f7248822 100644 --- a/go/vt/mysqlctl/mysql_flavor.go +++ b/go/vt/mysqlctl/mysql_flavor.go @@ -30,7 +30,7 @@ type MysqlFlavor interface { MasterPosition(mysqld *Mysqld) (proto.ReplicationPosition, error) // SlaveStatus returns the ReplicationStatus of a slave. - SlaveStatus(mysqld *Mysqld) (*proto.ReplicationStatus, error) + SlaveStatus(mysqld *Mysqld) (proto.ReplicationStatus, error) // ResetReplicationCommands returns the commands to completely reset // replication on the host. diff --git a/go/vt/mysqlctl/mysql_flavor_mariadb.go b/go/vt/mysqlctl/mysql_flavor_mariadb.go index cb5c00b6e4..f54b6f4225 100644 --- a/go/vt/mysqlctl/mysql_flavor_mariadb.go +++ b/go/vt/mysqlctl/mysql_flavor_mariadb.go @@ -40,16 +40,16 @@ func (flavor *mariaDB10) MasterPosition(mysqld *Mysqld) (rp proto.ReplicationPos } // SlaveStatus implements MysqlFlavor.SlaveStatus(). -func (flavor *mariaDB10) SlaveStatus(mysqld *Mysqld) (*proto.ReplicationStatus, error) { +func (flavor *mariaDB10) SlaveStatus(mysqld *Mysqld) (proto.ReplicationStatus, error) { fields, err := mysqld.fetchSuperQueryMap("SHOW ALL SLAVES STATUS") if err != nil { - return nil, ErrNotSlave + return proto.ReplicationStatus{}, ErrNotSlave } status := parseSlaveStatus(fields) status.Position, err = flavor.ParseReplicationPosition(fields["Gtid_Slave_Pos"]) if err != nil { - return nil, fmt.Errorf("SlaveStatus can't parse MariaDB GTID (Gtid_Slave_Pos: %#v): %v", fields["Gtid_Slave_Pos"], err) + return proto.ReplicationStatus{}, fmt.Errorf("SlaveStatus can't parse MariaDB GTID (Gtid_Slave_Pos: %#v): %v", fields["Gtid_Slave_Pos"], err) } return status, nil } diff --git a/go/vt/mysqlctl/mysql_flavor_mysql56.go b/go/vt/mysqlctl/mysql_flavor_mysql56.go index 55aa6a7180..b2e2df579f 100644 --- a/go/vt/mysqlctl/mysql_flavor_mysql56.go +++ b/go/vt/mysqlctl/mysql_flavor_mysql56.go @@ -41,16 +41,16 @@ func (flavor *mysql56) MasterPosition(mysqld *Mysqld) (rp proto.ReplicationPosit } // SlaveStatus implements MysqlFlavor.SlaveStatus(). 
-func (flavor *mysql56) SlaveStatus(mysqld *Mysqld) (*proto.ReplicationStatus, error) { +func (flavor *mysql56) SlaveStatus(mysqld *Mysqld) (proto.ReplicationStatus, error) { fields, err := mysqld.fetchSuperQueryMap("SHOW SLAVE STATUS") if err != nil { - return nil, ErrNotSlave + return proto.ReplicationStatus{}, ErrNotSlave } status := parseSlaveStatus(fields) status.Position, err = flavor.ParseReplicationPosition(fields["Executed_Gtid_Set"]) if err != nil { - return nil, fmt.Errorf("SlaveStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v): %v", fields["Executed_Gtid_Set"], err) + return proto.ReplicationStatus{}, fmt.Errorf("SlaveStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v): %v", fields["Executed_Gtid_Set"], err) } return status, nil } diff --git a/go/vt/mysqlctl/mysql_flavor_test.go b/go/vt/mysqlctl/mysql_flavor_test.go index 828f3b37ba..c760886872 100644 --- a/go/vt/mysqlctl/mysql_flavor_test.go +++ b/go/vt/mysqlctl/mysql_flavor_test.go @@ -33,7 +33,9 @@ func (fakeMysqlFlavor) WaitMasterPos(mysqld *Mysqld, targetPos proto.Replication func (fakeMysqlFlavor) MasterPosition(mysqld *Mysqld) (proto.ReplicationPosition, error) { return proto.ReplicationPosition{}, nil } -func (fakeMysqlFlavor) SlaveStatus(mysqld *Mysqld) (*proto.ReplicationStatus, error) { return nil, nil } +func (fakeMysqlFlavor) SlaveStatus(mysqld *Mysqld) (proto.ReplicationStatus, error) { + return proto.ReplicationStatus{}, nil +} func (fakeMysqlFlavor) StartReplicationCommands(params *sqldb.ConnParams, status *proto.ReplicationStatus) ([]string, error) { return nil, nil } diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go index 5b729c6dd8..ef12065f7b 100644 --- a/go/vt/mysqlctl/query.go +++ b/go/vt/mysqlctl/query.go @@ -69,6 +69,9 @@ func (mysqld *Mysqld) fetchSuperQueryMap(query string) (map[string]string, error return rowMap, nil } +const masterPasswordStart = " MASTER_PASSWORD = '" +const masterPasswordEnd = "',\n" + func redactMasterPassword(input string) string { i := strings.Index(input, masterPasswordStart) if i == -1 { diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 9c526c0d58..6270e3ae35 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -30,8 +30,13 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl/proto" ) -var masterPasswordStart = " MASTER_PASSWORD = '" -var masterPasswordEnd = "',\n" +const ( + // SqlStartSlave is the SQL command issued to start MySQL replication + SqlStartSlave = "START SLAVE" + + // SqlStopSlave is the SQL command issued to stop MySQL replication + SqlStopSlave = "STOP SLAVE" +) func fillStringTemplate(tmpl string, vars interface{}) (string, error) { myTemplate := template.Must(template.New("").Parse(tmpl)) @@ -69,8 +74,8 @@ func changeMasterArgs(params *sqldb.ConnParams, masterHost string, masterPort in } // parseSlaveStatus parses the common fields of SHOW SLAVE STATUS.
-func parseSlaveStatus(fields map[string]string) *proto.ReplicationStatus { - status := &proto.ReplicationStatus{ +func parseSlaveStatus(fields map[string]string) proto.ReplicationStatus { + status := proto.ReplicationStatus{ MasterHost: fields["Master_Host"], SlaveIORunning: fields["Slave_IO_Running"] == "Yes", SlaveSQLRunning: fields["Slave_SQL_Running"] == "Yes", @@ -114,7 +119,7 @@ func (mysqld *Mysqld) WaitForSlaveStart(slaveStartDeadline int) error { // StartSlave starts a slave func (mysqld *Mysqld) StartSlave(hookExtraEnv map[string]string) error { - if err := mysqld.ExecuteSuperQuery("START SLAVE"); err != nil { + if err := mysqld.ExecuteSuperQuery(SqlStartSlave); err != nil { return err } @@ -131,7 +136,7 @@ func (mysqld *Mysqld) StopSlave(hookExtraEnv map[string]string) error { return err } - return mysqld.ExecuteSuperQuery("STOP SLAVE") + return mysqld.ExecuteSuperQuery(SqlStopSlave) } // GetMasterAddr returns master address @@ -202,10 +207,10 @@ func (mysqld *Mysqld) WaitMasterPos(targetPos proto.ReplicationPosition, waitTim } // SlaveStatus returns the slave replication statuses -func (mysqld *Mysqld) SlaveStatus() (*proto.ReplicationStatus, error) { +func (mysqld *Mysqld) SlaveStatus() (proto.ReplicationStatus, error) { flavor, err := mysqld.flavor() if err != nil { - return nil, fmt.Errorf("SlaveStatus needs flavor: %v", err) + return proto.ReplicationStatus{}, fmt.Errorf("SlaveStatus needs flavor: %v", err) } return flavor.SlaveStatus(mysqld) } diff --git a/go/vt/tabletmanager/actionnode/actionnode.go b/go/vt/tabletmanager/actionnode/actionnode.go index 76ef1f4ecf..c00e4ae6ab 100644 --- a/go/vt/tabletmanager/actionnode/actionnode.go +++ b/go/vt/tabletmanager/actionnode/actionnode.go @@ -87,9 +87,9 @@ const ( // case, and update its own topology record. TabletActionSlaveWasRestarted = "SlaveWasRestarted" - // TabletActionStopReplicationAndGetPosition will stop replication, - // and return the current position. - TabletActionStopReplicationAndGetPosition = "StopReplicationAndGetPosition" + // TabletActionStopReplicationAndGetStatus will stop replication, + // and return the current replication status. 
+ TabletActionStopReplicationAndGetStatus = "StopReplicationAndGetStatus" // TabletActionPromoteSlave will make this tablet the master TabletActionPromoteSlave = "PromoteSlave" diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index e76d0502ef..d0cbaf9d03 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -74,7 +74,7 @@ type RPCAgent interface { // Replication related methods - SlaveStatus(ctx context.Context) (*myproto.ReplicationStatus, error) + SlaveStatus(ctx context.Context) (myproto.ReplicationStatus, error) MasterPosition(ctx context.Context) (myproto.ReplicationPosition, error) @@ -112,11 +112,11 @@ type RPCAgent interface { SlaveWasPromoted(ctx context.Context) error - SetMaster(ctx context.Context, parent topo.TabletAlias, timeCreatedNS int64) error + SetMaster(ctx context.Context, parent topo.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error SlaveWasRestarted(ctx context.Context, swrd *actionnode.SlaveWasRestartedArgs) error - StopReplicationAndGetPosition(ctx context.Context) (myproto.ReplicationPosition, error) + StopReplicationAndGetStatus(ctx context.Context) (myproto.ReplicationStatus, error) PromoteSlave(ctx context.Context) (myproto.ReplicationPosition, error) @@ -321,7 +321,7 @@ func (agent *ActionAgent) ExecuteFetchAsApp(ctx context.Context, query string, m // SlaveStatus returns the replication status // Should be called under RPCWrap. -func (agent *ActionAgent) SlaveStatus(ctx context.Context) (*myproto.ReplicationStatus, error) { +func (agent *ActionAgent) SlaveStatus(ctx context.Context) (myproto.ReplicationStatus, error) { return agent.MysqlDaemon.SlaveStatus() } @@ -556,31 +556,37 @@ func (agent *ActionAgent) SlaveWasPromoted(ctx context.Context) error { // SetMaster sets replication master, and waits for the // reparent_journal table entry up to context timeout -func (agent *ActionAgent) SetMaster(ctx context.Context, parent topo.TabletAlias, timeCreatedNS int64) error { +func (agent *ActionAgent) SetMaster(ctx context.Context, parent topo.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { ti, err := agent.TopoServer.GetTablet(parent) if err != nil { return err } - // See if we are replicating at all - replicating := false + // See if we were replicating at all, and should be replicating + wasReplicating := false + shouldbeReplicating := false rs, err := agent.MysqlDaemon.SlaveStatus() if err == nil && (rs.SlaveIORunning || rs.SlaveSQLRunning) { - replicating = true + wasReplicating = true + shouldbeReplicating = true + } + if forceStartSlave { + shouldbeReplicating = true } // Create the list of commands to set the master - cmds, err := agent.MysqlDaemon.SetMasterCommands(ti.Hostname, ti.Portmap["mysql"]) + cmds := []string{} + if wasReplicating { + cmds = append(cmds, mysqlctl.SqlStopSlave) + } + smc, err := agent.MysqlDaemon.SetMasterCommands(ti.Hostname, ti.Portmap["mysql"]) if err != nil { return err } - if replicating { - newCmds := []string{"STOP SLAVE"} - newCmds = append(newCmds, cmds...) - newCmds = append(newCmds, "START SLAVE") - cmds = newCmds + cmds = append(cmds, smc...) 
+ if shouldbeReplicating { + cmds = append(cmds, mysqlctl.SqlStartSlave) } - if err := agent.MysqlDaemon.ExecuteSuperQueryList(cmds); err != nil { return err } @@ -600,7 +606,7 @@ func (agent *ActionAgent) SetMaster(ctx context.Context, parent topo.TabletAlias // if needed, wait until we get the replicated row, or our // context times out - if !replicating || timeCreatedNS == 0 { + if !shouldbeReplicating || timeCreatedNS == 0 { return nil } return agent.MysqlDaemon.WaitForReparentJournal(ctx, timeCreatedNS) @@ -633,14 +639,17 @@ func (agent *ActionAgent) SlaveWasRestarted(ctx context.Context, swrd *actionnod return nil } -// StopReplicationAndGetPosition stops MySQL replication, and returns the -// current position -func (agent *ActionAgent) StopReplicationAndGetPosition(ctx context.Context) (myproto.ReplicationPosition, error) { - if err := agent.MysqlDaemon.StopSlave(agent.hookExtraEnv()); err != nil { - return myproto.ReplicationPosition{}, err +// StopReplicationAndGetStatus stops MySQL replication, and returns the +// current status +func (agent *ActionAgent) StopReplicationAndGetStatus(ctx context.Context) (myproto.ReplicationStatus, error) { + rs, err := agent.MysqlDaemon.SlaveStatus() + if err != nil { + return myproto.ReplicationStatus{}, err } - - return agent.MysqlDaemon.MasterPosition() + if err := agent.MysqlDaemon.StopSlave(agent.hookExtraEnv()); err != nil { + return myproto.ReplicationStatus{}, err + } + return rs, nil } // PromoteSlave makes the current tablet the master diff --git a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go index 801b3d1118..da5c35e647 100644 --- a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go +++ b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go @@ -632,7 +632,7 @@ func agentRPCTestExecuteFetchPanic(ctx context.Context, t *testing.T, client tmc // Replication related methods // -var testReplicationStatus = &myproto.ReplicationStatus{ +var testReplicationStatus = myproto.ReplicationStatus{ Position: myproto.ReplicationPosition{ GTIDSet: myproto.MariadbGTID{ Domain: 1, @@ -648,7 +648,7 @@ var testReplicationStatus = &myproto.ReplicationStatus{ MasterConnectRetry: 12, } -func (fra *fakeRPCAgent) SlaveStatus(ctx context.Context) (*myproto.ReplicationStatus, error) { +func (fra *fakeRPCAgent) SlaveStatus(ctx context.Context) (myproto.ReplicationStatus, error) { if fra.panics { panic(fmt.Errorf("test-triggered panic")) } @@ -1039,24 +1039,26 @@ func agentRPCTestSlaveWasPromotedPanic(ctx context.Context, t *testing.T, client } var testSetMasterCalled = false +var testForceStartSlave = true -func (fra *fakeRPCAgent) SetMaster(ctx context.Context, parent topo.TabletAlias, timeCreatedNS int64) error { +func (fra *fakeRPCAgent) SetMaster(ctx context.Context, parent topo.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { if fra.panics { panic(fmt.Errorf("test-triggered panic")) } compare(fra.t, "SetMaster parent", parent, testMasterAlias) compare(fra.t, "SetMaster timeCreatedNS", timeCreatedNS, testTimeCreatedNS) + compare(fra.t, "SetMaster forceStartSlave", forceStartSlave, testForceStartSlave) testSetMasterCalled = true return nil } func agentRPCTestSetMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SetMaster(ctx, ti, testMasterAlias, testTimeCreatedNS) + err := client.SetMaster(ctx, ti, testMasterAlias, testTimeCreatedNS, testForceStartSlave) compareError(t, "SetMaster", err, true, testSetMasterCalled) } func 
agentRPCTestSetMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SetMaster(ctx, ti, testMasterAlias, testTimeCreatedNS) + err := client.SetMaster(ctx, ti, testMasterAlias, testTimeCreatedNS, testForceStartSlave) expectRPCWrapLockActionPanic(t, err) } @@ -1087,20 +1089,20 @@ func agentRPCTestSlaveWasRestartedPanic(ctx context.Context, t *testing.T, clien expectRPCWrapLockActionPanic(t, err) } -func (fra *fakeRPCAgent) StopReplicationAndGetPosition(ctx context.Context) (myproto.ReplicationPosition, error) { +func (fra *fakeRPCAgent) StopReplicationAndGetStatus(ctx context.Context) (myproto.ReplicationStatus, error) { if fra.panics { panic(fmt.Errorf("test-triggered panic")) } - return testReplicationPosition, nil + return testReplicationStatus, nil } -func agentRPCTestStopReplicationAndGetPosition(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - rp, err := client.StopReplicationAndGetPosition(ctx, ti) - compareError(t, "StopReplicationAndGetPosition", err, rp, testReplicationPosition) +func agentRPCTestStopReplicationAndGetStatus(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { + rp, err := client.StopReplicationAndGetStatus(ctx, ti) + compareError(t, "StopReplicationAndGetStatus", err, rp, testReplicationStatus) } -func agentRPCTestStopReplicationAndGetPositionPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - _, err := client.StopReplicationAndGetPosition(ctx, ti) +func agentRPCTestStopReplicationAndGetStatusPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { + _, err := client.StopReplicationAndGetStatus(ctx, ti) expectRPCWrapLockActionPanic(t, err) } @@ -1358,7 +1360,7 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, agentRPCTestSlaveWasPromoted(ctx, t, client, ti) agentRPCTestSetMaster(ctx, t, client, ti) agentRPCTestSlaveWasRestarted(ctx, t, client, ti) - agentRPCTestStopReplicationAndGetPosition(ctx, t, client, ti) + agentRPCTestStopReplicationAndGetStatus(ctx, t, client, ti) agentRPCTestPromoteSlave(ctx, t, client, ti) // Backup / restore related methods @@ -1414,7 +1416,7 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, agentRPCTestSlaveWasPromotedPanic(ctx, t, client, ti) agentRPCTestSetMasterPanic(ctx, t, client, ti) agentRPCTestSlaveWasRestartedPanic(ctx, t, client, ti) - agentRPCTestStopReplicationAndGetPositionPanic(ctx, t, client, ti) + agentRPCTestStopReplicationAndGetStatusPanic(ctx, t, client, ti) agentRPCTestPromoteSlavePanic(ctx, t, client, ti) // Backup / restore related methods diff --git a/go/vt/tabletmanager/faketmclient/fake_client.go b/go/vt/tabletmanager/faketmclient/fake_client.go index 448910b09a..877f7db1e5 100644 --- a/go/vt/tabletmanager/faketmclient/fake_client.go +++ b/go/vt/tabletmanager/faketmclient/fake_client.go @@ -158,9 +158,9 @@ func (client *FakeTabletManagerClient) ExecuteFetchAsApp(ctx context.Context, ta // // SlaveStatus is part of the tmclient.TabletManagerClient interface -func (client *FakeTabletManagerClient) SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (*myproto.ReplicationStatus, error) { +func (client *FakeTabletManagerClient) SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationStatus, error) { var status myproto.ReplicationStatus - return &status, nil + return status, 
nil } // MasterPosition is part of the tmclient.TabletManagerClient interface @@ -269,7 +269,7 @@ func (client *FakeTabletManagerClient) SlaveWasPromoted(ctx context.Context, tab } // SetMaster is part of the tmclient.TabletManagerClient interface -func (client *FakeTabletManagerClient) SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent topo.TabletAlias, timeCreatedNS int64) error { +func (client *FakeTabletManagerClient) SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent topo.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { return nil } @@ -278,9 +278,9 @@ func (client *FakeTabletManagerClient) SlaveWasRestarted(ctx context.Context, ta return nil } -// StopReplicationAndGetPosition is part of the tmclient.TabletManagerClient interface -func (client *FakeTabletManagerClient) StopReplicationAndGetPosition(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationPosition, error) { - var rp myproto.ReplicationPosition +// StopReplicationAndGetStatus is part of the tmclient.TabletManagerClient interface +func (client *FakeTabletManagerClient) StopReplicationAndGetStatus(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationStatus, error) { + var rp myproto.ReplicationStatus return rp, nil } diff --git a/go/vt/tabletmanager/gorpcproto/structs.go b/go/vt/tabletmanager/gorpcproto/structs.go index 8ac033b84d..7699ff2c8e 100644 --- a/go/vt/tabletmanager/gorpcproto/structs.go +++ b/go/vt/tabletmanager/gorpcproto/structs.go @@ -48,9 +48,10 @@ type InitSlaveArgs struct { // SetMasterArgs has arguments for SetMaster type SetMasterArgs struct { - Parent topo.TabletAlias - TimeCreatedNS int64 - WaitTimeout time.Duration // pass in zero to wait indefinitely + Parent topo.TabletAlias + TimeCreatedNS int64 + ForceStartSlave bool + WaitTimeout time.Duration // pass in zero to wait indefinitely } // GetSchemaArgs has arguments for GetSchema diff --git a/go/vt/tabletmanager/gorpctmclient/gorpc_client.go b/go/vt/tabletmanager/gorpctmclient/gorpc_client.go index a4026df022..0ed8fe4e6a 100644 --- a/go/vt/tabletmanager/gorpctmclient/gorpc_client.go +++ b/go/vt/tabletmanager/gorpctmclient/gorpc_client.go @@ -255,12 +255,12 @@ func (client *GoRPCTabletManagerClient) ExecuteFetchAsApp(ctx context.Context, t // // SlaveStatus is part of the tmclient.TabletManagerClient interface -func (client *GoRPCTabletManagerClient) SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (*myproto.ReplicationStatus, error) { +func (client *GoRPCTabletManagerClient) SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationStatus, error) { var status myproto.ReplicationStatus if err := client.rpcCallTablet(ctx, tablet, actionnode.TabletActionSlaveStatus, &rpc.Unused{}, &status); err != nil { - return nil, err + return myproto.ReplicationStatus{}, err } - return &status, nil + return status, nil } // MasterPosition is part of the tmclient.TabletManagerClient interface @@ -413,10 +413,11 @@ func (client *GoRPCTabletManagerClient) SlaveWasPromoted(ctx context.Context, ta } // SetMaster is part of the tmclient.TabletManagerClient interface -func (client *GoRPCTabletManagerClient) SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent topo.TabletAlias, timeCreatedNS int64) error { +func (client *GoRPCTabletManagerClient) SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent topo.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { args := &gorpcproto.SetMasterArgs{ - Parent: parent, - TimeCreatedNS: timeCreatedNS, + Parent: 
parent, + TimeCreatedNS: timeCreatedNS, + ForceStartSlave: forceStartSlave, } deadline, ok := ctx.Deadline() if ok { @@ -434,10 +435,10 @@ func (client *GoRPCTabletManagerClient) SlaveWasRestarted(ctx context.Context, t return client.rpcCallTablet(ctx, tablet, actionnode.TabletActionSlaveWasRestarted, args, &rpc.Unused{}) } -// StopReplicationAndGetPosition is part of the tmclient.TabletManagerClient interface -func (client *GoRPCTabletManagerClient) StopReplicationAndGetPosition(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationPosition, error) { - var rp myproto.ReplicationPosition - if err := client.rpcCallTablet(ctx, tablet, actionnode.TabletActionStopReplicationAndGetPosition, &rpc.Unused{}, &rp); err != nil { +// StopReplicationAndGetStatus is part of the tmclient.TabletManagerClient interface +func (client *GoRPCTabletManagerClient) StopReplicationAndGetStatus(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationStatus, error) { + var rp myproto.ReplicationStatus + if err := client.rpcCallTablet(ctx, tablet, actionnode.TabletActionStopReplicationAndGetStatus, &rpc.Unused{}, &rp); err != nil { return rp, err } return rp, nil diff --git a/go/vt/tabletmanager/gorpctmserver/gorpc_server.go b/go/vt/tabletmanager/gorpctmserver/gorpc_server.go index aa4efc2473..41ace9195d 100644 --- a/go/vt/tabletmanager/gorpctmserver/gorpc_server.go +++ b/go/vt/tabletmanager/gorpctmserver/gorpc_server.go @@ -234,7 +234,7 @@ func (tm *TabletManager) SlaveStatus(ctx context.Context, args *rpc.Unused, repl return tm.agent.RPCWrap(ctx, actionnode.TabletActionSlaveStatus, args, reply, func() error { status, err := tm.agent.SlaveStatus(ctx) if err == nil { - *reply = *status + *reply = status } return err }) @@ -429,7 +429,7 @@ func (tm *TabletManager) SetMaster(ctx context.Context, args *gorpcproto.SetMast ctx, cancel = context.WithTimeout(ctx, args.WaitTimeout) defer cancel() } - return tm.agent.SetMaster(ctx, args.Parent, args.TimeCreatedNS) + return tm.agent.SetMaster(ctx, args.Parent, args.TimeCreatedNS, args.ForceStartSlave) }) } @@ -441,13 +441,13 @@ func (tm *TabletManager) SlaveWasRestarted(ctx context.Context, args *actionnode }) } -// StopReplicationAndGetPosition wraps RPCAgent.StopReplicationAndGetPosition -func (tm *TabletManager) StopReplicationAndGetPosition(ctx context.Context, args *rpc.Unused, reply *myproto.ReplicationPosition) error { +// StopReplicationAndGetStatus wraps RPCAgent.StopReplicationAndGetStatus +func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, args *rpc.Unused, reply *myproto.ReplicationStatus) error { ctx = callinfo.RPCWrapCallInfo(ctx) - return tm.agent.RPCWrapLockAction(ctx, actionnode.TabletActionStopReplicationAndGetPosition, args, reply, true, func() error { - position, err := tm.agent.StopReplicationAndGetPosition(ctx) + return tm.agent.RPCWrapLockAction(ctx, actionnode.TabletActionStopReplicationAndGetStatus, args, reply, true, func() error { + status, err := tm.agent.StopReplicationAndGetStatus(ctx) if err == nil { - *reply = position + *reply = status } return err }) diff --git a/go/vt/tabletmanager/tmclient/rpc_client_api.go b/go/vt/tabletmanager/tmclient/rpc_client_api.go index 65ffabb287..6325d40433 100644 --- a/go/vt/tabletmanager/tmclient/rpc_client_api.go +++ b/go/vt/tabletmanager/tmclient/rpc_client_api.go @@ -94,7 +94,7 @@ type TabletManagerClient interface { // // SlaveStatus returns the tablet's mysql slave status. 
- SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (*myproto.ReplicationStatus, error) + SlaveStatus(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationStatus, error) // MasterPosition returns the tablet's master position MasterPosition(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationPosition, error) @@ -170,15 +170,15 @@ type TabletManagerClient interface { // SetMaster tells a tablet to make itself a slave to the // passed in master tablet alias, and wait for the row in the - // reparent_journal table. - SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent topo.TabletAlias, timeCreatedNS int64) error + // reparent_journal table (if timeCreatedNS is non-zero). + SetMaster(ctx context.Context, tablet *topo.TabletInfo, parent topo.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error // SlaveWasRestarted tells the remote tablet its master has changed SlaveWasRestarted(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.SlaveWasRestartedArgs) error - // StopReplicationAndGetPosition stops replication and returns the + // StopReplicationAndGetStatus stops replication and returns the // current position. - StopReplicationAndGetPosition(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationPosition, error) + StopReplicationAndGetStatus(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationStatus, error) // PromoteSlave makes the tablet the new master PromoteSlave(ctx context.Context, tablet *topo.TabletInfo) (myproto.ReplicationPosition, error) diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 307051cdf3..b632315ddb 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -306,10 +306,8 @@ func testSplitClone(t *testing.T, strategy string) { }, } sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = SourceRdonlyFactory(t) - sourceRdonly.FakeMysqlDaemon.CurrentSlaveStatus = &myproto.ReplicationStatus{ - Position: myproto.ReplicationPosition{ - GTIDSet: myproto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}, - }, + sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ + GTIDSet: myproto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}, } sourceRdonly.RPCServer.Register(gorpcqueryservice.New(&testQueryService{t: t})) } diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index de64119022..8c952c387d 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -294,10 +294,8 @@ func testVerticalSplitClone(t *testing.T, strategy string) { }, } sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = VerticalSourceRdonlyFactory(t) - sourceRdonly.FakeMysqlDaemon.CurrentSlaveStatus = &myproto.ReplicationStatus{ - Position: myproto.ReplicationPosition{ - GTIDSet: myproto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}, - }, + sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ + GTIDSet: myproto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}, } sourceRdonly.RPCServer.Register(gorpcqueryservice.New(&verticalSqlQuery{t: t})) } diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 1137597749..3ead55c410 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -99,7 +99,7 @@ func (wr *Wrangler) tabletReplicationStatuses(ctx context.Context, tablets []*to rec.RecordError(fmt.Errorf("SlaveStatus(%v) failed: %v", ti.Alias, err)) return } - result[i] = 
status + result[i] = &status }(i, ti) } } @@ -142,7 +142,7 @@ func (wr *Wrangler) ReparentTablet(ctx context.Context, tabletAlias topo.TabletA } // and do the remote command - return wr.TabletManagerClient().SetMaster(ctx, ti, shardInfo.MasterAlias, 0) + return wr.TabletManagerClient().SetMaster(ctx, ti, shardInfo.MasterAlias, 0, false) } // InitShardMaster will make the provided tablet the master for the shard. @@ -401,17 +401,12 @@ func (wr *Wrangler) plannedReparentShardLocked(ctx context.Context, ev *events.R go func(alias topo.TabletAlias, tabletInfo *topo.TabletInfo) { defer wgSlaves.Done() wr.logger.Infof("setting new master on slave %v", alias) - if err := wr.TabletManagerClient().SetMaster(ctx, tabletInfo, masterElectTabletAlias, now); err != nil { + // also restart replication on old master + forceStartSlave := alias == oldMasterTabletInfo.Alias + if err := wr.TabletManagerClient().SetMaster(ctx, tabletInfo, masterElectTabletAlias, now, forceStartSlave); err != nil { rec.RecordError(fmt.Errorf("Tablet %v SetMaster failed: %v", alias, err)) return } - - // also restart replication on old master - if alias == oldMasterTabletInfo.Alias { - if err := wr.TabletManagerClient().StartSlave(ctx, tabletInfo); err != nil { - rec.RecordError(fmt.Errorf("old master %v StartSlave failed: %v", alias, err)) - } - } }(alias, tabletInfo) } } @@ -531,7 +526,7 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events event.DispatchUpdate(ev, "stop replication on all slaves") wg := sync.WaitGroup{} mu := sync.Mutex{} - positionMap := make(map[topo.TabletAlias]myproto.ReplicationPosition) + statusMap := make(map[topo.TabletAlias]myproto.ReplicationStatus) for alias, tabletInfo := range tabletMap { wg.Add(1) go func(alias topo.TabletAlias, tabletInfo *topo.TabletInfo) { @@ -539,29 +534,29 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events wr.logger.Infof("getting replication position from %v", alias) ctx, cancel := context.WithTimeout(ctx, waitSlaveTimeout) defer cancel() - rp, err := wr.TabletManagerClient().StopReplicationAndGetPosition(ctx, tabletInfo) + rp, err := wr.TabletManagerClient().StopReplicationAndGetStatus(ctx, tabletInfo) if err != nil { - wr.logger.Warningf("failed to get replication position from %v, ignoring tablet", alias) + wr.logger.Warningf("failed to get replication status from %v, ignoring tablet", alias) return } mu.Lock() - positionMap[alias] = rp + statusMap[alias] = rp mu.Unlock() }(alias, tabletInfo) } wg.Wait() // Verify masterElect is alive and has the most advanced position - masterElectPosition, ok := positionMap[masterElectTabletAlias] + masterElectStatus, ok := statusMap[masterElectTabletAlias] if !ok { return fmt.Errorf("couldn't get master elect %v replication position", masterElectTabletAlias) } - for alias, pos := range positionMap { + for alias, status := range statusMap { if alias == masterElectTabletAlias { continue } - if !masterElectPosition.AtLeast(pos) { - return fmt.Errorf("tablet %v is more advanced than master elect tablet %v: %v > %v", alias, masterElectTabletAlias, pos, masterElectPosition) + if !masterElectStatus.Position.AtLeast(status.Position) { + return fmt.Errorf("tablet %v is more advanced than master elect tablet %v: %v > %v", alias, masterElectTabletAlias, status.Position, masterElectStatus) } } @@ -597,7 +592,11 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events go func(alias topo.TabletAlias, tabletInfo *topo.TabletInfo) { defer wgSlaves.Done() 
wr.logger.Infof("setting new master on slave %v", alias) - if err := wr.TabletManagerClient().SetMaster(ctx, tabletInfo, masterElectTabletAlias, now); err != nil { + forceStartSlave := false + if status, ok := statusMap[alias]; ok { + forceStartSlave = status.SlaveIORunning || status.SlaveSQLRunning + } + if err := wr.TabletManagerClient().SetMaster(ctx, tabletInfo, masterElectTabletAlias, now, forceStartSlave); err != nil { rec.RecordError(fmt.Errorf("Tablet %v SetMaster failed: %v", alias, err)) } }(alias, tabletInfo) diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 03719c16e8..ba3ad09970 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -54,6 +54,7 @@ func TestEmergencyReparentShard(t *testing.T) { // good slave 1 is replicating goodSlave1.FakeMysqlDaemon.ReadOnly = true + goodSlave1.FakeMysqlDaemon.Replicating = true goodSlave1.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ GTIDSet: myproto.MariadbGTID{ Domain: 2, @@ -61,14 +62,9 @@ func TestEmergencyReparentShard(t *testing.T) { Sequence: 455, }, } - goodSlave1.FakeMysqlDaemon.CurrentSlaveStatus = &myproto.ReplicationStatus{ - SlaveIORunning: true, - SlaveSQLRunning: true, - } goodSlave1.FakeMysqlDaemon.SetMasterCommandsInput = fmt.Sprintf("%v:%v", newMaster.Tablet.Hostname, newMaster.Tablet.Portmap["mysql"]) goodSlave1.FakeMysqlDaemon.SetMasterCommandsResult = []string{"set master cmd 1"} goodSlave1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", "set master cmd 1", "START SLAVE", } @@ -77,6 +73,7 @@ func TestEmergencyReparentShard(t *testing.T) { // good slave 2 is not replicating goodSlave2.FakeMysqlDaemon.ReadOnly = true + goodSlave2.FakeMysqlDaemon.Replicating = false goodSlave2.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ GTIDSet: myproto.MariadbGTID{ Domain: 2, @@ -120,7 +117,12 @@ func TestEmergencyReparentShard(t *testing.T) { if !goodSlave2.FakeMysqlDaemon.ReadOnly { t.Errorf("goodSlave2.FakeMysqlDaemon.ReadOnly not set") } - + if !goodSlave1.FakeMysqlDaemon.Replicating { + t.Errorf("goodSlave1.FakeMysqlDaemon.Replicating not set") + } + if goodSlave2.FakeMysqlDaemon.Replicating { + t.Errorf("goodSlave2.FakeMysqlDaemon.Replicating set") + } } // TestEmergencyReparentShardMasterElectNotBest tries to emergency reparent diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index c4d1b87dbf..8ec6d505d2 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -64,6 +64,7 @@ func TestPlannedReparentShard(t *testing.T) { oldMaster.FakeMysqlDaemon.SetMasterCommandsResult = []string{"set master cmd 1"} oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "set master cmd 1", + "START SLAVE", } oldMaster.StartActionLoop(t, wr) defer oldMaster.StopActionLoop(t) @@ -72,10 +73,6 @@ func TestPlannedReparentShard(t *testing.T) { // good slave 1 is replicating goodSlave1.FakeMysqlDaemon.ReadOnly = true goodSlave1.FakeMysqlDaemon.Replicating = true - goodSlave1.FakeMysqlDaemon.CurrentSlaveStatus = &myproto.ReplicationStatus{ - SlaveIORunning: true, - SlaveSQLRunning: true, - } goodSlave1.FakeMysqlDaemon.SetMasterCommandsInput = fmt.Sprintf("%v:%v", newMaster.Tablet.Hostname, newMaster.Tablet.Portmap["mysql"]) 
goodSlave1.FakeMysqlDaemon.SetMasterCommandsResult = []string{"set master cmd 1"} goodSlave1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ @@ -136,6 +133,9 @@ func TestPlannedReparentShard(t *testing.T) { if !oldMaster.FakeMysqlDaemon.Replicating { t.Errorf("oldMaster.FakeMysqlDaemon.Replicating not set") } + if !goodSlave1.FakeMysqlDaemon.Replicating { + t.Errorf("goodSlave1.FakeMysqlDaemon.Replicating not set") + } if goodSlave2.FakeMysqlDaemon.Replicating { t.Errorf("goodSlave2.FakeMysqlDaemon.Replicating set") } diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index 0612e21c89..55faffa1db 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -52,18 +52,15 @@ func TestShardReplicationStatuses(t *testing.T) { defer master.StopActionLoop(t) // slave loop - slave.FakeMysqlDaemon.CurrentSlaveStatus = &myproto.ReplicationStatus{ - Position: myproto.ReplicationPosition{ - GTIDSet: myproto.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 890, - }, + slave.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ + GTIDSet: myproto.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 890, }, - MasterHost: master.Tablet.Hostname, - MasterPort: master.Tablet.Portmap["mysql"], - MasterConnectRetry: 10, } + slave.FakeMysqlDaemon.CurrentMasterHost = master.Tablet.Hostname + slave.FakeMysqlDaemon.CurrentMasterPort = master.Tablet.Portmap["mysql"] slave.StartActionLoop(t, wr) defer slave.StopActionLoop(t) @@ -75,7 +72,7 @@ func TestShardReplicationStatuses(t *testing.T) { // check result (make master first in the array) if len(ti) != 2 || len(rs) != 2 { - t.Fatalf("ShardReplicationStatuses returend wrong results: %v %v", ti, rs) + t.Fatalf("ShardReplicationStatuses returned wrong results: %v %v", ti, rs) } if ti[0].Alias == slave.Tablet.Alias { ti[0], ti[1] = ti[1], ti[0] diff --git a/test/reparent.py b/test/reparent.py index fabe7f2cda..fe9fabd64a 100755 --- a/test/reparent.py +++ b/test/reparent.py @@ -75,6 +75,30 @@ class TestReparent(unittest.TestCase): t.clean_dbs() super(TestReparent, self).tearDown() + _create_vt_insert_test = '''create table vt_insert_test ( + id bigint, + msg varchar(64), + primary key (id) + ) Engine=InnoDB''' + + def _populate_vt_insert_test(self, master_tablet, index): + q = "insert into vt_insert_test(id, msg) values (%d, 'test %d')" % \ + (index, index) + master_tablet.mquery('vt_test_keyspace', q, write=True) + + def _check_vt_insert_test(self, tablet, index): + # wait until it gets the data + timeout = 10.0 + while True: + result = tablet.mquery('vt_test_keyspace', + 'select msg from vt_insert_test where id=%d' % + index) + if len(result) == 1: + break + timeout = utils.wait_step('waiting for replication to catch up on %s' % + tablet.tablet_alias, + timeout, sleep_time=0.1) + def _check_db_addr(self, shard, db_type, expected_port, cell='test_nj'): ep = utils.run_vtctl_json(['GetEndPoints', cell, 'test_keyspace/' + shard, db_type]) @@ -140,6 +164,7 @@ class TestReparent(unittest.TestCase): utils.run_vtctl(['InitShardMaster', 'test_keyspace/0', tablet_62344.tablet_alias], auto_log=True) utils.validate_topology() + tablet_62344.mquery('vt_test_keyspace', self._create_vt_insert_test) # Make the current master agent and database unavailable. 
tablet_62344.kill_vttablet() @@ -178,6 +203,11 @@ class TestReparent(unittest.TestCase): utils.validate_topology() self._check_db_addr('0', 'master', tablet_62044.port) + # insert data into the new master, check the connected slaves work + self._populate_vt_insert_test(tablet_62044, 2) + self._check_vt_insert_test(tablet_41983, 2) + self._check_vt_insert_test(tablet_31981, 2) + utils.run_vtctl(['ChangeSlaveType', '-force', tablet_62344.tablet_alias, 'idle']) @@ -311,6 +341,7 @@ class TestReparent(unittest.TestCase): utils.run_vtctl(['InitShardMaster', 'test_keyspace/' + shard_id, tablet_62344.tablet_alias]) utils.validate_topology(ping_tablets=True) + tablet_62344.mquery('vt_test_keyspace', self._create_vt_insert_test) self._check_db_addr(shard_id, 'master', tablet_62344.port) @@ -341,6 +372,11 @@ class TestReparent(unittest.TestCase): self._check_db_addr(shard_id, 'master', tablet_62044.port) + # insert data into the new master, check the connected slaves work + self._populate_vt_insert_test(tablet_62044, 1) + self._check_vt_insert_test(tablet_41983, 1) + self._check_vt_insert_test(tablet_62344, 1) + # Verify MasterCell is set to new cell. srvShard = utils.run_vtctl_json(['GetSrvShard', 'test_nj', 'test_keyspace/%s' % (shard_id)]) @@ -540,16 +576,6 @@ class TestReparent(unittest.TestCase): tablet_62044_master_status = tablet_62044.get_status() self.assertIn('Serving graph: test_keyspace 0 master', tablet_62044_master_status) - _create_vt_insert_test = '''create table vt_insert_test ( - id bigint auto_increment, - msg varchar(64), - primary key (id) - ) Engine=InnoDB''' - - _populate_vt_insert_test = [ - "insert into vt_insert_test (msg) values ('test %s')" % x - for x in xrange(4)] - # See if a missing slave can be safely reparented after the fact. 
def test_reparent_with_down_slave(self, shard_id='0'): utils.run_vtctl(['CreateKeyspace', 'test_keyspace']) @@ -588,7 +614,6 @@ class TestReparent(unittest.TestCase): utils.run_vtctl(['InitShardMaster', 'test_keyspace/' + shard_id, tablet_62344.tablet_alias]) utils.validate_topology(ping_tablets=True) - tablet_62344.mquery('vt_test_keyspace', self._create_vt_insert_test) utils.wait_procs([tablet_41983.shutdown_mysql()]) @@ -603,9 +628,10 @@ class TestReparent(unittest.TestCase): "didn't find the right error strings in failed PlannedReparentShard: " + stderr) - # insert data into the new master - for q in self._populate_vt_insert_test: - tablet_62044.mquery('vt_test_keyspace', q, write=True) + # insert data into the new master, check the connected slaves work + self._populate_vt_insert_test(tablet_62044, 3) + self._check_vt_insert_test(tablet_31981, 3) + self._check_vt_insert_test(tablet_62344, 3) # restart mysql on the old slave, should still be connecting to the # old master @@ -619,14 +645,7 @@ class TestReparent(unittest.TestCase): utils.run_vtctl(['StartSlave', tablet_41983.tablet_alias]) # wait until it gets the data - timeout = 10.0 - while True: - result = tablet_41983.mquery('vt_test_keyspace', - 'select msg from vt_insert_test where id=1') - if len(result) == 1: - break - timeout = utils.wait_step('waiting for replication to catch up', - timeout, sleep_time=0.1) + self._check_vt_insert_test(tablet_41983, 3) tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983, tablet_31981]) From 18099a706b3a7cc35167a925d01351f5df3063b0 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 13 May 2015 10:40:26 -0700 Subject: [PATCH 015/128] add vtctl ReloadSchema back --- go/vt/vtctl/vtctl.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index ad3877bd63..4762393747 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -237,6 +237,9 @@ var commands = []commandGroup{ command{"GetSchema", commandGetSchema, "[-tables=,,...] [-exclude_tables=,,...] 
[-include-views] ", "Display the full schema for a tablet, or just the schema for the provided tables."}, + command{"ReloadSchema", commandReloadSchema, + "", + "Asks a remote tablet to reload its schema."}, command{"ApplySchema", commandApplySchema, "[-force] {-sql= || -sql-file=} ", "Apply the schema change to the specified keyspace."}, @@ -1752,6 +1755,20 @@ func commandGetSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag return err } +func commandReloadSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + if err := subFlags.Parse(args); err != nil { + return err + } + if subFlags.NArg() != 1 { + return fmt.Errorf("action ReloadSchema requires ") + } + tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) + if err != nil { + return err + } + return wr.ReloadSchema(ctx, tabletAlias) +} + func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { force := subFlags.Bool("force", false, "will apply the schema even if preflight schema doesn't match") sql := subFlags.String("sql", "", "a list of sql commands separated by semicolon") From 6869f2e1354ff665fae6b57d4c8f195321592150 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 13 May 2015 10:46:53 -0700 Subject: [PATCH 016/128] not return returncode from utils.run method --- test/schema.py | 26 ++++++++++++-------------- test/utils.py | 2 +- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/test/schema.py b/test/schema.py index 5dbb4d433d..2ae47be764 100755 --- a/test/schema.py +++ b/test/schema.py @@ -141,24 +141,22 @@ class TestSchema(unittest.TestCase): tables = tablet.mquery(db_name, 'show tables') def _apply_schema(self, keyspace, sql): - out, err, returncode = utils.run_vtctl(['ApplySchema', - '-sql='+sql, - keyspace], - trap_output=True, - log_level='INFO', - raise_on_error=False) - self.assertEqual(0, returncode) + out, err = utils.run_vtctl(['ApplySchema', + '-sql='+sql, + keyspace], + trap_output=True, + log_level='INFO', + raise_on_error=True) return out def _get_schema(self, tablet_alias, tables): - out, err, returncode = utils.run_vtctl(['GetSchema', - '-tables='+tables, - tablet_alias], - trap_output=True, - log_level='INFO', - raise_on_error=False) - self.assertEqual(0, returncode) + out, err = utils.run_vtctl(['GetSchema', + '-tables='+tables, + tablet_alias], + trap_output=True, + log_level='INFO', + raise_on_error=True) return out def _create_test_table_sql(self, table): diff --git a/test/utils.py b/test/utils.py index 995824134e..274b7a062b 100644 --- a/test/utils.py +++ b/test/utils.py @@ -204,7 +204,7 @@ def run(cmd, trap_output=False, raise_on_error=True, **kargs): raise TestError('cmd fail:', args, stdout, stderr) else: logging.debug('cmd fail: %s %s %s', str(args), stdout, stderr) - return stdout, stderr, proc.returncode + return stdout, stderr # run sub-process, expects failure def run_fail(cmd, **kargs): From de3f07fccb120ae452c069ef1968ee437ce417d6 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 13 May 2015 11:14:06 -0700 Subject: [PATCH 017/128] remove simple flag in vtctl.ApplySchema --- go/vt/vtctl/vtctl.go | 3 +-- test/binlog.py | 3 +-- test/initial_sharding.py | 9 +++------ test/java_vtgate_test_helper.py | 2 +- test/keyspace_util.py | 2 +- test/primecache.py | 3 +-- test/resharding.py | 15 +++++---------- test/sharded.py | 20 ++++---------------- test/vertical_split.py | 6 ++---- 9 files changed, 19 insertions(+), 44 deletions(-) diff --git 
a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 4762393747..3ddf983fcb 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -1773,7 +1773,6 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl force := subFlags.Bool("force", false, "will apply the schema even if preflight schema doesn't match") sql := subFlags.String("sql", "", "a list of sql commands separated by semicolon") sqlFile := subFlags.String("sql-file", "", "file containing the sql commands") - simple := subFlags.Bool("simple", false, "just apply change on master and let replication do the rest") waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time to wait for slaves to catch up in reparenting") if err := subFlags.Parse(args); err != nil { return err @@ -1787,7 +1786,7 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl if err != nil { return err } - scr, err := wr.ApplySchemaKeyspace(ctx, keyspace, change, *simple, *force, *waitSlaveTimeout) + scr, err := wr.ApplySchemaKeyspace(ctx, keyspace, change, true, *force, *waitSlaveTimeout) if err == nil { log.Infof(scr.String()) } diff --git a/test/binlog.py b/test/binlog.py index ddd9984fb7..dfd175d53b 100755 --- a/test/binlog.py +++ b/test/binlog.py @@ -79,8 +79,7 @@ def setUpModule(): index by_msg (msg) ) Engine=InnoDB''' - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + create_table, 'test_keyspace'], auto_log=True) diff --git a/test/initial_sharding.py b/test/initial_sharding.py index e7905269d5..684bbfcb1f 100755 --- a/test/initial_sharding.py +++ b/test/initial_sharding.py @@ -110,8 +110,7 @@ primary key (id), index by_msg (msg) ) Engine=InnoDB''' - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + create_table_template % ("resharding1"), 'test_keyspace'], auto_log=True) @@ -122,8 +121,7 @@ index by_msg (msg) else: t = 'bigint(20) unsigned' sql = 'alter table %s add keyspace_id ' + t - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + sql % ("resharding1"), 'test_keyspace'], auto_log=True) @@ -134,8 +132,7 @@ index by_msg (msg) else: t = 'bigint(20) unsigned' sql = 'alter table %s modify keyspace_id ' + t + ' not null' - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + sql % ("resharding1"), 'test_keyspace'], auto_log=True) diff --git a/test/java_vtgate_test_helper.py b/test/java_vtgate_test_helper.py index 511e70e68d..72da9cb4c5 100644 --- a/test/java_vtgate_test_helper.py +++ b/test/java_vtgate_test_helper.py @@ -81,7 +81,7 @@ class TestEnv(object): utils.run_vtctl(['InitShardMaster', self.keyspace+'/'+t.shard, t.tablet_alias], auto_log=True) utils.run_vtctl(['RebuildKeyspaceGraph', self.keyspace], auto_log=True) if self.schema: - utils.run_vtctl(['ApplySchemaKeyspace', '-simple', '-sql', self.schema, self.keyspace]) + utils.run_vtctl(['ApplySchema', '-sql', self.schema, self.keyspace]) if self.vschema: if self.vschema[0] == '{': utils.run_vtctl(['ApplyVSchema', "-vschema", self.vschema]) diff --git a/test/keyspace_util.py b/test/keyspace_util.py index 09ffec9e7b..cf0f07d9e1 100644 --- a/test/keyspace_util.py +++ b/test/keyspace_util.py @@ -58,7 +58,7 @@ class TestEnv(object): fname = os.path.join(environment.tmproot, "ddl.sql") with open(fname, "w") as f: f.write(ddl) - utils.run_vtctl(['ApplySchemaKeyspace', '-simple', '-sql-file', fname, keyspace]) + 
utils.run_vtctl(['ApplySchema', '-sql-file', fname, keyspace]) def teardown(self): all_tablets = self.tablet_map.values() diff --git a/test/primecache.py b/test/primecache.py index 43368fcd10..1b4bff3472 100755 --- a/test/primecache.py +++ b/test/primecache.py @@ -70,8 +70,7 @@ ts datetime, msg varchar(4096), primary key (id) ) Engine=InnoDB''' - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + create_table_template, 'test_keyspace'], auto_log=True) diff --git a/test/resharding.py b/test/resharding.py index fd60ab3a78..45a3f75ca5 100755 --- a/test/resharding.py +++ b/test/resharding.py @@ -211,28 +211,23 @@ name varchar(64), primary key (name) ) Engine=InnoDB''' - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + create_table_template % ("resharding1"), 'test_keyspace'], auto_log=True) - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + create_table_template % ("resharding2"), 'test_keyspace'], auto_log=True) - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + create_view_template % ("view1", "resharding1"), 'test_keyspace'], auto_log=True) - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + create_timestamp_table, 'test_keyspace'], auto_log=True) - utils.run_vtctl(['ApplySchemaKeyspace', - '-simple', + utils.run_vtctl(['ApplySchema', '-sql=' + create_unrelated_table, 'test_keyspace'], auto_log=True) diff --git a/test/sharded.py b/test/sharded.py index 3c5888e574..362466e066 100755 --- a/test/sharded.py +++ b/test/sharded.py @@ -107,16 +107,10 @@ class TestSharded(unittest.TestCase): t.wait_for_vttablet_state('SERVING') # apply the schema on the first shard through vtctl, so all tablets - # are the same (replication is not enabled yet, so allow_replication=false - # is just there to be tested) + # are the same. 
     utils.run_vtctl(['ApplySchema',
-                     '-stop-replication',
                      '-sql=' + create_vt_select_test.replace("\n", ""),
-                     shard_0_master.tablet_alias])
-    utils.run_vtctl(['ApplySchema',
-                     '-stop-replication',
-                     '-sql=' + create_vt_select_test.replace("\n", ""),
-                     shard_0_replica.tablet_alias])
+                     'test_keyspace'])
 
     # start vtgate, we'll use it later
     vtgate_server, vtgate_port = utils.vtgate_start()
 
@@ -128,12 +122,6 @@ class TestSharded(unittest.TestCase):
     utils.run_vtctl(['InitShardMaster', 'test_keyspace/80-',
                      shard_1_master.tablet_alias], auto_log=True)
 
-    # apply the schema on the second shard using a simple schema upgrade
-    utils.run_vtctl(['ApplySchemaShard',
-                     '-simple',
-                     '-sql=' + create_vt_select_test_reverse.replace("\n", ""),
-                     'test_keyspace/80-'])
-
     # insert some values directly (db is RO after minority reparent)
     # FIXME(alainjobart) these values don't match the shard map
     utils.run_vtctl(['SetReadWrite', shard_0_master.tablet_alias])
@@ -193,9 +181,9 @@ class TestSharded(unittest.TestCase):
     utils.run_vtctl(['ValidateSchemaShard', 'test_keyspace/-80'])
     utils.run_vtctl(['ValidateSchemaShard', 'test_keyspace/80-'])
     out, err = utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'],
-                            trap_output=True, raise_on_error=False)
+                               trap_output=True, raise_on_error=False)
     if 'test_nj-0000062344 and test_nj-0000062346 disagree on schema for table vt_select_test:\nCREATE TABLE' not in err or \
-       'test_nj-0000062344 and test_nj-0000062347 disagree on schema for table vt_select_test:\nCREATE TABLE' not in err:
+        'test_nj-0000062344 and test_nj-0000062347 disagree on schema for table vt_select_test:\nCREATE TABLE' not in err:
       self.fail('wrong ValidateSchemaKeyspace output: ' + err)
 
     # validate versions
diff --git a/test/vertical_split.py b/test/vertical_split.py
index 227d3a7c62..36266382ea 100755
--- a/test/vertical_split.py
+++ b/test/vertical_split.py
@@ -113,13 +113,11 @@ index by_msg (msg)
   create_view_template = '''create view %s(id, msg) as select id, msg from %s'''
 
   for t in ['moving1', 'moving2', 'staying1', 'staying2']:
-    utils.run_vtctl(['ApplySchemaKeyspace',
-                     '-simple',
+    utils.run_vtctl(['ApplySchema',
                      '-sql=' + create_table_template % (t),
                      'source_keyspace'],
                     auto_log=True)
 
-  utils.run_vtctl(['ApplySchemaKeyspace',
-                   '-simple',
+  utils.run_vtctl(['ApplySchema',
                    '-sql=' + create_view_template % ('view1', 'moving1'),
                    'source_keyspace'],
                   auto_log=True)

From 2dfa25ea4c1a737cb574ede82d86527d8b666a68 Mon Sep 17 00:00:00 2001
From: Alain Jobart
Date: Wed, 13 May 2015 11:24:28 -0700
Subject: [PATCH 018/128] Refactoring {Start,Stop}Slave in mysqlctl.

With the new way of doing things, we keep replication flags in mocks in
sync better, and unit tests test more.
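The gist: StartSlave and StopSlave become free functions over the
MysqlDaemon interface and issue their SQL through ExecuteSuperQueryList,
so a fake daemon sees the exact statements. A minimal sketch of the idea
(hook handling omitted; the fakeDaemon type and its fields below are
illustrative stand-ins, not the real FakeMysqlDaemon):

package example

import "fmt"

// MysqlDaemon is a cut-down stand-in for the real interface; only the
// method the refactored helpers go through is shown.
type MysqlDaemon interface {
	ExecuteSuperQueryList(queries []string) error
}

// StartSlave mirrors the shape of the refactored helper: plain SQL
// through the interface instead of a method on *Mysqld.
func StartSlave(md MysqlDaemon) error {
	return md.ExecuteSuperQueryList([]string{"START SLAVE"})
}

// StopSlave is the symmetric helper.
func StopSlave(md MysqlDaemon) error {
	return md.ExecuteSuperQueryList([]string{"STOP SLAVE"})
}

// fakeDaemon checks every statement against an expected list and keeps
// its Replicating flag in sync with the SQL it saw; that is what lets
// the tests assert both the queries issued and the final flag.
type fakeDaemon struct {
	expected    []string
	Replicating bool
}

func (fd *fakeDaemon) ExecuteSuperQueryList(queries []string) error {
	for _, q := range queries {
		if len(fd.expected) == 0 || fd.expected[0] != q {
			return fmt.Errorf("unexpected query: %v", q)
		}
		fd.expected = fd.expected[1:]
		switch q {
		case "START SLAVE":
			fd.Replicating = true
		case "STOP SLAVE":
			fd.Replicating = false
		}
	}
	return nil
}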
--- go/vt/mysqlctl/clone.go | 4 +-- go/vt/mysqlctl/mysql_daemon.go | 14 --------- go/vt/mysqlctl/reparent.go | 10 +++--- go/vt/mysqlctl/replication.go | 12 +++---- go/vt/tabletmanager/agent_rpc_actions.go | 31 +++++++++++++------ go/vt/tabletmanager/rpc_server.go | 6 ++-- go/vt/worker/split_clone_test.go | 4 +++ go/vt/worker/vertical_split_clone_test.go | 4 +++ go/vt/wrangler/reparent.go | 2 +- .../testlib/emergency_reparent_shard_test.go | 11 +++++++ 10 files changed, 57 insertions(+), 41 deletions(-) diff --git a/go/vt/mysqlctl/clone.go b/go/vt/mysqlctl/clone.go index 339c39ff0f..4aff118eba 100644 --- a/go/vt/mysqlctl/clone.go +++ b/go/vt/mysqlctl/clone.go @@ -273,7 +273,7 @@ func (mysqld *Mysqld) CreateSnapshot(logger logutil.Logger, dbName, sourceAddr s } masterAddr = mysqld.IPAddr() } else { - if err = mysqld.StopSlave(hookExtraEnv); err != nil { + if err = StopSlave(mysqld, hookExtraEnv); err != nil { return } var slaveStatus proto.ReplicationStatus @@ -354,7 +354,7 @@ func (mysqld *Mysqld) SnapshotSourceEnd(slaveStartRequired, readOnly, deleteSnap // Restore original mysqld state that we saved above. if slaveStartRequired { - if err := mysqld.StartSlave(hookExtraEnv); err != nil { + if err := StartSlave(mysqld, hookExtraEnv); err != nil { return err } diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 885b876b26..e365b69e0c 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -27,8 +27,6 @@ type MysqlDaemon interface { GetMysqlPort() (int, error) // replication related methods - StartSlave(hookExtraEnv map[string]string) error - StopSlave(hookExtraEnv map[string]string) error SlaveStatus() (proto.ReplicationStatus, error) // reparenting related methods @@ -166,18 +164,6 @@ func (fmd *FakeMysqlDaemon) GetMysqlPort() (int, error) { return fmd.MysqlPort, nil } -// StartSlave is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) StartSlave(hookExtraEnv map[string]string) error { - fmd.Replicating = true - return nil -} - -// StopSlave is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) StopSlave(hookExtraEnv map[string]string) error { - fmd.Replicating = false - return nil -} - // SlaveStatus is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) SlaveStatus() (proto.ReplicationStatus, error) { return proto.ReplicationStatus{ diff --git a/go/vt/mysqlctl/reparent.go b/go/vt/mysqlctl/reparent.go index 53cea0d5f7..4735f6563d 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -82,12 +82,10 @@ func (mysqld *Mysqld) DemoteMaster() (rp proto.ReplicationPosition, err error) { return mysqld.MasterPosition() } -// PromoteSlave will promote a slave to be the new master +// PromoteSlave will promote a slave to be the new master. func (mysqld *Mysqld) PromoteSlave(hookExtraEnv map[string]string) (proto.ReplicationPosition, error) { - // stop replication for good - if err := mysqld.StopSlave(hookExtraEnv); err != nil { - return proto.ReplicationPosition{}, err - } + // we handle replication, just stop it + cmds := []string{SqlStopSlave} // Promote to master. flavor, err := mysqld.flavor() @@ -95,7 +93,7 @@ func (mysqld *Mysqld) PromoteSlave(hookExtraEnv map[string]string) (proto.Replic err = fmt.Errorf("PromoteSlave needs flavor: %v", err) return proto.ReplicationPosition{}, err } - cmds := flavor.PromoteSlaveCommands() + cmds = append(cmds, flavor.PromoteSlaveCommands()...) 
if err := mysqld.ExecuteSuperQueryList(cmds); err != nil { return proto.ReplicationPosition{}, err } diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 6270e3ae35..9a25a4e0bd 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -117,9 +117,9 @@ func (mysqld *Mysqld) WaitForSlaveStart(slaveStartDeadline int) error { return nil } -// StartSlave starts a slave -func (mysqld *Mysqld) StartSlave(hookExtraEnv map[string]string) error { - if err := mysqld.ExecuteSuperQuery(SqlStartSlave); err != nil { +// StartSlave starts a slave on the provided MysqldDaemon +func StartSlave(md MysqlDaemon, hookExtraEnv map[string]string) error { + if err := md.ExecuteSuperQueryList([]string{SqlStartSlave}); err != nil { return err } @@ -128,15 +128,15 @@ func (mysqld *Mysqld) StartSlave(hookExtraEnv map[string]string) error { return h.ExecuteOptional() } -// StopSlave stops a slave -func (mysqld *Mysqld) StopSlave(hookExtraEnv map[string]string) error { +// StopSlave stops a slave on the provided MysqldDaemon +func StopSlave(md MysqlDaemon, hookExtraEnv map[string]string) error { h := hook.NewSimpleHook("preflight_stop_slave") h.ExtraEnv = hookExtraEnv if err := h.ExecuteOptional(); err != nil { return err } - return mysqld.ExecuteSuperQuery(SqlStopSlave) + return md.ExecuteSuperQueryList([]string{SqlStopSlave}) } // GetMasterAddr returns master address diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index d0cbaf9d03..eb5ea882a1 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -331,28 +331,31 @@ func (agent *ActionAgent) MasterPosition(ctx context.Context) (myproto.Replicati return agent.MysqlDaemon.MasterPosition() } -// StopSlave will stop the replication +// StopSlave will stop the replication Works both when Vitess manages +// replication or not (using hook if not). // Should be called under RPCWrapLock. func (agent *ActionAgent) StopSlave(ctx context.Context) error { - return agent.MysqlDaemon.StopSlave(agent.hookExtraEnv()) + return mysqlctl.StopSlave(agent.MysqlDaemon, agent.hookExtraEnv()) } // StopSlaveMinimum will stop the slave after it reaches at least the -// provided position. +// provided position. Works both when Vitess manages +// replication or not (using hook if not). func (agent *ActionAgent) StopSlaveMinimum(ctx context.Context, position myproto.ReplicationPosition, waitTime time.Duration) (myproto.ReplicationPosition, error) { if err := agent.Mysqld.WaitMasterPos(position, waitTime); err != nil { return myproto.ReplicationPosition{}, err } - if err := agent.Mysqld.StopSlave(agent.hookExtraEnv()); err != nil { + if err := mysqlctl.StopSlave(agent.MysqlDaemon, agent.hookExtraEnv()); err != nil { return myproto.ReplicationPosition{}, err } return agent.Mysqld.MasterPosition() } -// StartSlave will start the replication +// StartSlave will start the replication. Works both when Vitess manages +// replication or not (using hook if not). // Should be called under RPCWrapLock. 
func (agent *ActionAgent) StartSlave(ctx context.Context) error { - return agent.MysqlDaemon.StartSlave(agent.hookExtraEnv()) + return mysqlctl.StartSlave(agent.MysqlDaemon, agent.hookExtraEnv()) } // GetSlaves returns the address of all the slaves @@ -642,12 +645,22 @@ func (agent *ActionAgent) SlaveWasRestarted(ctx context.Context, swrd *actionnod // StopReplicationAndGetStatus stops MySQL replication, and returns the // current status func (agent *ActionAgent) StopReplicationAndGetStatus(ctx context.Context) (myproto.ReplicationStatus, error) { + // get the status before we stop replication rs, err := agent.MysqlDaemon.SlaveStatus() if err != nil { - return myproto.ReplicationStatus{}, err + return myproto.ReplicationStatus{}, fmt.Errorf("before status failed: %v", err) } - if err := agent.MysqlDaemon.StopSlave(agent.hookExtraEnv()); err != nil { - return myproto.ReplicationStatus{}, err + if !rs.SlaveIORunning && !rs.SlaveSQLRunning { + // no replication is running, just return what we got + return rs, nil + } + if err := mysqlctl.StopSlave(agent.MysqlDaemon, agent.hookExtraEnv()); err != nil { + return myproto.ReplicationStatus{}, fmt.Errorf("stop slave failed: %v", err) + } + // now patch in the current position + rs.Position, err = agent.MysqlDaemon.MasterPosition() + if err != nil { + return myproto.ReplicationStatus{}, fmt.Errorf("after position failed: %v", err) } return rs, nil } diff --git a/go/vt/tabletmanager/rpc_server.go b/go/vt/tabletmanager/rpc_server.go index 30d12ed1ef..2e86c3bc1f 100644 --- a/go/vt/tabletmanager/rpc_server.go +++ b/go/vt/tabletmanager/rpc_server.go @@ -32,7 +32,7 @@ const rpcTimeout = time.Second * 30 func (agent *ActionAgent) rpcWrapper(ctx context.Context, name string, args, reply interface{}, verbose bool, f func() error, lock, runAfterAction bool) (err error) { defer func() { if x := recover(); x != nil { - log.Errorf("TabletManager.%v(%v) panic: %v\n%s", name, args, x, tb.Stack(4)) + log.Errorf("TabletManager.%v(%v) on %v panic: %v\n%s", name, args, agent.TabletAlias, x, tb.Stack(4)) err = fmt.Errorf("caught panic during %v: %v", name, x) } }() @@ -53,11 +53,11 @@ func (agent *ActionAgent) rpcWrapper(ctx context.Context, name string, args, rep } if err = f(); err != nil { - log.Warningf("TabletManager.%v(%v)(from %v) error: %v", name, args, from, err.Error()) + log.Warningf("TabletManager.%v(%v)(on %v from %v) error: %v", name, args, agent.TabletAlias, from, err.Error()) return fmt.Errorf("TabletManager.%v on %v error: %v", name, agent.TabletAlias, err) } if verbose { - log.Infof("TabletManager.%v(%v)(from %v): %#v", name, args, from, reply) + log.Infof("TabletManager.%v(%v)(on %v from %v): %#v", name, args, agent.TabletAlias, from, reply) } if runAfterAction { err = agent.refreshTablet(ctx, "RPC("+name+")") diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index b632315ddb..0484ad6382 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -309,6 +309,10 @@ func testSplitClone(t *testing.T, strategy string) { sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ GTIDSet: myproto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}, } + sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "STOP SLAVE", + "START SLAVE", + } sourceRdonly.RPCServer.Register(gorpcqueryservice.New(&testQueryService{t: t})) } diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index 8c952c387d..dc2e14fd5b 100644 --- 
a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -297,6 +297,10 @@ func testVerticalSplitClone(t *testing.T, strategy string) { sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ GTIDSet: myproto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}, } + sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "STOP SLAVE", + "START SLAVE", + } sourceRdonly.RPCServer.Register(gorpcqueryservice.New(&verticalSqlQuery{t: t})) } diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 3ead55c410..6b0096a928 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -536,7 +536,7 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events defer cancel() rp, err := wr.TabletManagerClient().StopReplicationAndGetStatus(ctx, tabletInfo) if err != nil { - wr.logger.Warningf("failed to get replication status from %v, ignoring tablet", alias) + wr.logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", alias, err) return } mu.Lock() diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index ba3ad09970..c826581ed0 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -32,6 +32,7 @@ func TestEmergencyReparentShard(t *testing.T) { // new master newMaster.FakeMysqlDaemon.ReadOnly = true + newMaster.FakeMysqlDaemon.Replicating = true newMaster.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ GTIDSet: myproto.MariadbGTID{ Domain: 2, @@ -40,6 +41,7 @@ func TestEmergencyReparentShard(t *testing.T) { }, } newMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "STOP SLAVE", "CREATE DATABASE IF NOT EXISTS _vt", "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, master_alias, replication_position) VALUES", @@ -65,6 +67,7 @@ func TestEmergencyReparentShard(t *testing.T) { goodSlave1.FakeMysqlDaemon.SetMasterCommandsInput = fmt.Sprintf("%v:%v", newMaster.Tablet.Hostname, newMaster.Tablet.Portmap["mysql"]) goodSlave1.FakeMysqlDaemon.SetMasterCommandsResult = []string{"set master cmd 1"} goodSlave1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "STOP SLAVE", "set master cmd 1", "START SLAVE", } @@ -138,6 +141,7 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { moreAdvancedSlave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA) // new master + newMaster.FakeMysqlDaemon.Replicating = true newMaster.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ GTIDSet: myproto.MariadbGTID{ Domain: 2, @@ -145,6 +149,9 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { Sequence: 456, }, } + newMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "STOP SLAVE", + } newMaster.StartActionLoop(t, wr) defer newMaster.StopActionLoop(t) @@ -153,6 +160,7 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { defer oldMaster.StopActionLoop(t) // more advanced slave + moreAdvancedSlave.FakeMysqlDaemon.Replicating = true moreAdvancedSlave.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ GTIDSet: myproto.MariadbGTID{ Domain: 2, @@ -160,6 +168,9 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { Sequence: 457, }, } + moreAdvancedSlave.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = 
[]string{
+		"STOP SLAVE",
+	}
 	moreAdvancedSlave.StartActionLoop(t, wr)
 	defer moreAdvancedSlave.StopActionLoop(t)

From a6daf21bb8b4f24ee1057d9217f3d0ef22413450 Mon Sep 17 00:00:00 2001
From: Shengzhe Yao
Date: Wed, 13 May 2015 11:35:30 -0700
Subject: [PATCH 019/128] add ValidateSchemaKeyspace and ValidateSchemaShard back

---
 go/vt/vtctl/vtctl.go | 46 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go
index 3ddf983fcb..e5891271a7 100644
--- a/go/vt/vtctl/vtctl.go
+++ b/go/vt/vtctl/vtctl.go
@@ -240,6 +240,13 @@ var commands = []commandGroup{
 		command{"ReloadSchema", commandReloadSchema,
 			"",
 			"Asks a remote tablet to reload its schema."},
+		command{"ValidateSchemaShard", commandValidateSchemaShard,
+			"[-exclude_tables=''] [-include-views] ",
+			"Validate the master schema matches all the slaves."},
+		command{"ValidateSchemaKeyspace", commandValidateSchemaKeyspace,
+			"[-exclude_tables=''] [-include-views] ",
+			"Validate the master schema from shard 0 matches all the other tablets in the keyspace."},
+
 		command{"ApplySchema", commandApplySchema,
 			"[-force] {-sql= || -sql-file=} ",
 			"Apply the schema change to the specified keyspace."},
@@ -1769,6 +1776,45 @@ func commandReloadSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *f
 	return wr.ReloadSchema(ctx, tabletAlias)
 }
 
+func commandValidateSchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
+	excludeTables := subFlags.String("exclude_tables", "", "comma separated list of regexps for tables to exclude")
+	includeViews := subFlags.Bool("include-views", false, "include views in the validation")
+	if err := subFlags.Parse(args); err != nil {
+		return err
+	}
+	if subFlags.NArg() != 1 {
+		return fmt.Errorf("action ValidateSchemaShard requires ")
+	}
+
+	keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0))
+	if err != nil {
+		return err
+	}
+	var excludeTableArray []string
+	if *excludeTables != "" {
+		excludeTableArray = strings.Split(*excludeTables, ",")
+	}
+	return wr.ValidateSchemaShard(ctx, keyspace, shard, excludeTableArray, *includeViews)
+}
+
+func commandValidateSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
+	excludeTables := subFlags.String("exclude_tables", "", "comma separated list of regexps for tables to exclude")
+	includeViews := subFlags.Bool("include-views", false, "include views in the validation")
+	if err := subFlags.Parse(args); err != nil {
+		return err
+	}
+	if subFlags.NArg() != 1 {
+		return fmt.Errorf("action ValidateSchemaKeyspace requires ")
+	}
+
+	keyspace := subFlags.Arg(0)
+	var excludeTableArray []string
+	if *excludeTables != "" {
+		excludeTableArray = strings.Split(*excludeTables, ",")
+	}
+	return wr.ValidateSchemaKeyspace(ctx, keyspace, excludeTableArray, *includeViews)
+}
+
 func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
 	force := subFlags.Bool("force", false, "will apply the schema even if preflight schema doesn't match")
 	sql := subFlags.String("sql", "", "a list of sql commands separated by semicolon")

From 5b9d745e08162671a50a84d546093e1c1aadb425 Mon Sep 17 00:00:00 2001
From: Shengzhe Yao
Date: Wed, 13 May 2015 14:25:43 -0700
Subject: [PATCH 020/128] fix test/sharded.py to not rely on vtctl.ApplySchemaShard

---
 test/sharded.py | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/test/sharded.py
b/test/sharded.py index 362466e066..1f33134b2c 100755 --- a/test/sharded.py +++ b/test/sharded.py @@ -108,9 +108,19 @@ class TestSharded(unittest.TestCase): # apply the schema on the first shard through vtctl, so all tablets # are the same. - utils.run_vtctl(['ApplySchema', - '-sql=' + create_vt_select_test.replace("\n", ""), - 'test_keyspace']) + shard_0_master.mquery('vt_test_keyspace', + create_vt_select_test.replace("\n", ""), write=True) + shard_0_replica.mquery('vt_test_keyspace', + create_vt_select_test.replace("\n", ""), write=True) + + # apply the schema on the second shard. + shard_1_master.mquery('vt_test_keyspace', + create_vt_select_test_reverse.replace("\n", ""), write=True) + shard_1_replica.mquery('vt_test_keyspace', + create_vt_select_test_reverse.replace("\n", ""), write=True) + + for t in [shard_0_master, shard_0_replica, shard_1_master, shard_1_replica]: + utils.run_vtctl(['ReloadSchema', t.tablet_alias]) # start vtgate, we'll use it later vtgate_server, vtgate_port = utils.vtgate_start() @@ -181,9 +191,9 @@ class TestSharded(unittest.TestCase): utils.run_vtctl(['ValidateSchemaShard', 'test_keyspace/-80']) utils.run_vtctl(['ValidateSchemaShard', 'test_keyspace/80-']) out, err = utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], - trap_output=True, raise_on_error=False) + trap_output=True, raise_on_error=False) if 'test_nj-0000062344 and test_nj-0000062346 disagree on schema for table vt_select_test:\nCREATE TABLE' not in err or \ - 'test_nj-0000062344 and test_nj-0000062347 disagree on schema for table vt_select_test:\nCREATE TABLE' not in err: + 'test_nj-0000062344 and test_nj-0000062347 disagree on schema for table vt_select_test:\nCREATE TABLE' not in err: self.fail('wrong ValidateSchemaKeyspace output: ' + err) # validate versions From 119dd1b3101bd1739abf1c2ecd013cafda32ad3b Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 13 May 2015 16:43:23 -0700 Subject: [PATCH 021/128] Fixing comment. --- go/vt/tabletmanager/agent_rpc_actions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index eb5ea882a1..7856402f9b 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -331,7 +331,7 @@ func (agent *ActionAgent) MasterPosition(ctx context.Context) (myproto.Replicati return agent.MysqlDaemon.MasterPosition() } -// StopSlave will stop the replication Works both when Vitess manages +// StopSlave will stop the replication. Works both when Vitess manages // replication or not (using hook if not). // Should be called under RPCWrapLock. 
 func (agent *ActionAgent) StopSlave(ctx context.Context) error {

From 7bd078f8afb1c2f6746430ae0ffc27e910be0af4 Mon Sep 17 00:00:00 2001
From: Shengzhe Yao
Date: Wed, 13 May 2015 18:57:33 -0700
Subject: [PATCH 022/128] return error when schema change fails

---
 go/vt/schemamanager/schemamanager.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go
index dd1081538e..e0d51c2bdc 100644
--- a/go/vt/schemamanager/schemamanager.go
+++ b/go/vt/schemamanager/schemamanager.go
@@ -5,6 +5,9 @@
 package schemamanager
 
 import (
+	"encoding/json"
+	"fmt"
+
 	log "github.com/golang/glog"
 	mproto "github.com/youtube/vitess/go/mysql/proto"
 )
@@ -81,5 +84,9 @@ func Run(sourcer DataSourcer,
 	handler.OnValidationSuccess(sqls)
 	result := exec.Execute(sqls)
 	handler.OnExecutorComplete(result)
+	if result.ExecutorErr != "" || len(result.FailedShards) > 0 {
+		out, _ := json.MarshalIndent(result, "", "  ")
+		return fmt.Errorf("Schema change failed, ExecuteResult: %v\n", string(out))
+	}
 	return nil
 }

From 0245dfb5aef0bbee96cac78c40d71efc8f6af9bb Mon Sep 17 00:00:00 2001
From: Shengzhe Yao
Date: Wed, 13 May 2015 18:59:27 -0700
Subject: [PATCH 023/128] add schema diffs in schemamanager

1. Make DiffSchema compare table views.
2. Add schema diffs in schemamanager. Each schema change has to change
   the table structure, and schemamanager rejects a SQL statement that
   does not change any table definition.

---
 go/vt/mysqlctl/proto/schema.go            | 27 +++++++----
 go/vt/schemamanager/schemamanager_test.go | 56 +++++++++++++++++++++--
 go/vt/schemamanager/tablet_executor.go    | 36 ++++++++++++++-
 test/schema.py                            | 11 ++++-
 4 files changed, 114 insertions(+), 16 deletions(-)

diff --git a/go/vt/mysqlctl/proto/schema.go b/go/vt/mysqlctl/proto/schema.go
index d05a1d311b..5e539ee6e1 100644
--- a/go/vt/mysqlctl/proto/schema.go
+++ b/go/vt/mysqlctl/proto/schema.go
@@ -189,6 +189,12 @@ func (sd *SchemaDefinition) ToSQLStrings() []string {
 
 // generates a report on what's different between two SchemaDefinition
 // for now, we skip the VIEW entirely.
func DiffSchema(leftName string, left *SchemaDefinition, rightName string, right *SchemaDefinition, er concurrency.ErrorRecorder) { + if left == nil && right == nil { + return + } + if left == nil || right == nil { + er.RecordError(fmt.Errorf("%s and %s are different, %s: %v, %s: %v", leftName, rightName, leftName, rightName, left, right)) + } if left.DatabaseSchema != right.DatabaseSchema { er.RecordError(fmt.Errorf("%v and %v don't agree on database creation command:\n%v\n differs from:\n%v", leftName, rightName, left.DatabaseSchema, right.DatabaseSchema)) } @@ -196,16 +202,6 @@ func DiffSchema(leftName string, left *SchemaDefinition, rightName string, right leftIndex := 0 rightIndex := 0 for leftIndex < len(left.TableDefinitions) && rightIndex < len(right.TableDefinitions) { - // skip views - if left.TableDefinitions[leftIndex].Type == TABLE_VIEW { - leftIndex++ - continue - } - if right.TableDefinitions[rightIndex].Type == TABLE_VIEW { - rightIndex++ - continue - } - // extra table on the left side if left.TableDefinitions[leftIndex].Name < right.TableDefinitions[rightIndex].Name { er.RecordError(fmt.Errorf("%v has an extra table named %v", leftName, left.TableDefinitions[leftIndex].Name)) @@ -224,6 +220,11 @@ func DiffSchema(leftName string, left *SchemaDefinition, rightName string, right if left.TableDefinitions[leftIndex].Schema != right.TableDefinitions[rightIndex].Schema { er.RecordError(fmt.Errorf("%v and %v disagree on schema for table %v:\n%v\n differs from:\n%v", leftName, rightName, left.TableDefinitions[leftIndex].Name, left.TableDefinitions[leftIndex].Schema, right.TableDefinitions[rightIndex].Schema)) } + + if left.TableDefinitions[leftIndex].Type != right.TableDefinitions[rightIndex].Type { + er.RecordError(fmt.Errorf("%v and %v disagree on table type for table %v:\n%v\n differs from:\n%v", leftName, rightName, left.TableDefinitions[leftIndex].Name, left.TableDefinitions[leftIndex].Type, right.TableDefinitions[rightIndex].Type)) + } + leftIndex++ rightIndex++ } @@ -232,12 +233,18 @@ func DiffSchema(leftName string, left *SchemaDefinition, rightName string, right if left.TableDefinitions[leftIndex].Type == TABLE_BASE_TABLE { er.RecordError(fmt.Errorf("%v has an extra table named %v", leftName, left.TableDefinitions[leftIndex].Name)) } + if left.TableDefinitions[leftIndex].Type == TABLE_VIEW { + er.RecordError(fmt.Errorf("%v has an extra view named %v", leftName, left.TableDefinitions[leftIndex].Name)) + } leftIndex++ } for rightIndex < len(right.TableDefinitions) { if right.TableDefinitions[rightIndex].Type == TABLE_BASE_TABLE { er.RecordError(fmt.Errorf("%v has an extra table named %v", rightName, right.TableDefinitions[rightIndex].Name)) } + if right.TableDefinitions[rightIndex].Type == TABLE_VIEW { + er.RecordError(fmt.Errorf("%v has an extra view named %v", rightName, right.TableDefinitions[rightIndex].Name)) + } rightIndex++ } } diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index cc9f00563b..f925b983c2 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -9,8 +9,10 @@ import ( "fmt" "testing" + "github.com/youtube/vitess/go/vt/mysqlctl/proto" "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" _ "github.com/youtube/vitess/go/vt/tabletmanager/gorpctmclient" + "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "golang.org/x/net/context" ) @@ -60,7 +62,7 @@ func TestRunSchemaChangesExecutorOpenFail(t *testing.T) { 
dataSourcer := newFakeDataSourcer([]string{"create table test_table (pk int);"}, false, false, false) handler := newFakeHandler() exec := NewTabletExecutor( - faketmclient.NewFakeTabletManagerClient(), + newFakeTabletManagerClient(), newFakeTopo(), "unknown_keyspace") err := Run(dataSourcer, exec, handler) @@ -70,9 +72,29 @@ func TestRunSchemaChangesExecutorOpenFail(t *testing.T) { } func TestRunSchemaChanges(t *testing.T) { - dataSourcer := NewSimpleDataSourcer("create table test_table (pk int);") + sql := "create table test_table (pk int)" + dataSourcer := NewSimpleDataSourcer(sql) handler := newFakeHandler() - exec := newFakeExecutor() + fakeTmc := newFakeTabletManagerClient() + fakeTmc.AddSchemaChange(sql, &proto.SchemaChangeResult{ + BeforeSchema: &proto.SchemaDefinition{}, + AfterSchema: &proto.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", + TableDefinitions: []*proto.TableDefinition{ + &proto.TableDefinition{ + Name: "test_table", + Schema: sql, + Type: proto.TABLE_BASE_TABLE, + }, + }, + }, + }) + + exec := NewTabletExecutor( + fakeTmc, + newFakeTopo(), + "test_keyspace") + err := Run(dataSourcer, exec, handler) if err != nil { t.Fatalf("schema change should success but get error: %v", err) @@ -96,11 +118,37 @@ func TestRunSchemaChanges(t *testing.T) { func newFakeExecutor() *TabletExecutor { return NewTabletExecutor( - faketmclient.NewFakeTabletManagerClient(), + newFakeTabletManagerClient(), newFakeTopo(), "test_keyspace") } +func newFakeTabletManagerClient() *fakeTabletManagerClient { + return &fakeTabletManagerClient{ + TabletManagerClient: faketmclient.NewFakeTabletManagerClient(), + preflightSchemas: make(map[string]*proto.SchemaChangeResult), + } +} + +type fakeTabletManagerClient struct { + tmclient.TabletManagerClient + preflightSchemas map[string]*proto.SchemaChangeResult +} + +func (client *fakeTabletManagerClient) AddSchemaChange( + sql string, schemaResult *proto.SchemaChangeResult) { + client.preflightSchemas[sql] = schemaResult +} + +func (client *fakeTabletManagerClient) PreflightSchema(ctx context.Context, tablet *topo.TabletInfo, change string) (*proto.SchemaChangeResult, error) { + result, ok := client.preflightSchemas[change] + if !ok { + var scr proto.SchemaChangeResult + return &scr, nil + } + return result, nil +} + type fakeTopo struct{} func newFakeTopo() *fakeTopo { diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 1f310f8616..4fd11635b5 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -9,6 +9,7 @@ import ( "sync" log "github.com/golang/glog" + "github.com/youtube/vitess/go/vt/mysqlctl/proto" "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" @@ -21,6 +22,7 @@ type TabletExecutor struct { tmClient tmclient.TabletManagerClient topoServer topo.Server tabletInfos []*topo.TabletInfo + schemaDiffs []*proto.SchemaChangeResult isClosed bool } @@ -72,13 +74,38 @@ func (exec *TabletExecutor) Validate(sqls []string) error { if err != nil { return err } - if _, ok := stat.(*sqlparser.DDL); !ok { + _, ok := stat.(*sqlparser.DDL) + if !ok { return fmt.Errorf("schema change works for DDLs only, but get non DDL statement: %s", sql) } } return nil } +func (exec *TabletExecutor) preflightSchemaChanges(sqls []string) error { + if len(exec.tabletInfos) == 0 { + return nil + } + exec.schemaDiffs = 
make([]*proto.SchemaChangeResult, len(sqls)) + for i := range sqls { + schemaDiff, err := exec.tmClient.PreflightSchema( + context.Background(), exec.tabletInfos[0], sqls[i]) + if err != nil { + return err + } + exec.schemaDiffs[i] = schemaDiff + diffs := proto.DiffSchemaToArray( + "BeforeSchema", + exec.schemaDiffs[i].BeforeSchema, + "AfterSchema", + exec.schemaDiffs[i].AfterSchema) + if len(diffs) == 0 { + return fmt.Errorf("Schema change: '%s' does not introduce any table definition change.", sqls[i]) + } + } + return nil +} + // Execute applies schema changes func (exec *TabletExecutor) Execute(sqls []string) *ExecuteResult { execResult := ExecuteResult{} @@ -87,6 +114,13 @@ func (exec *TabletExecutor) Execute(sqls []string) *ExecuteResult { execResult.ExecutorErr = "executor is closed" return &execResult } + + // make sure every schema change introduces a table definition change + if err := exec.preflightSchemaChanges(sqls); err != nil { + execResult.ExecutorErr = err.Error() + return &execResult + } + for index, sql := range sqls { execResult.CurSqlIndex = index exec.executeOnAllTablets(&execResult, sql) diff --git a/test/schema.py b/test/schema.py index 2ae47be764..daa37c5970 100755 --- a/test/schema.py +++ b/test/schema.py @@ -179,7 +179,6 @@ class TestSchema(unittest.TestCase): self._create_test_table_sql('vt_select_test01'), self._create_test_table_sql('vt_select_test02'), self._create_test_table_sql('vt_select_test03'), - self._alter_test_table_sql('vt_select_test03', 'msg'), self._create_test_table_sql('vt_select_test04')]) tables = ','.join([ @@ -203,5 +202,15 @@ class TestSchema(unittest.TestCase): self.assertEqual(shard_0_schema, shard_1_schema) self.assertEqual(shard_0_schema, shard_2_schema) + self._apply_schema(test_keyspace, self._alter_test_table_sql('vt_select_test03', 'msg')) + + shard_0_schema = self._get_schema(shard_0_master.tablet_alias, tables) + shard_1_schema = self._get_schema(shard_1_master.tablet_alias, tables) + shard_2_schema = self._get_schema(shard_2_master.tablet_alias, tables) + + # all shards should have the same schema + self.assertEqual(shard_0_schema, shard_1_schema) + self.assertEqual(shard_0_schema, shard_2_schema) + if __name__ == '__main__': utils.main() From ce518953e8268e1952aff7c8cb474d33a80ab68f Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 13 May 2015 22:36:15 -0700 Subject: [PATCH 024/128] add more test cases for DiffSchema 1. Remove ByReverseDataLength since no one uses it. 2. Add more test cases for DiffSchema --- go/vt/mysqlctl/proto/schema.go | 20 ++-------- go/vt/mysqlctl/proto/schema_test.go | 62 ++++++++++++++++++++++++++++- 2 files changed, 64 insertions(+), 18 deletions(-) diff --git a/go/vt/mysqlctl/proto/schema.go b/go/vt/mysqlctl/proto/schema.go index 5e539ee6e1..667cba687b 100644 --- a/go/vt/mysqlctl/proto/schema.go +++ b/go/vt/mysqlctl/proto/schema.go @@ -9,7 +9,6 @@ import ( "encoding/hex" "fmt" "regexp" - "sort" "strings" "github.com/youtube/vitess/go/jscfg" @@ -43,15 +42,6 @@ func (tds TableDefinitions) Swap(i, j int) { tds[i], tds[j] = tds[j], tds[i] } -// sort by reverse DataLength -type ByReverseDataLength struct { - TableDefinitions -} - -func (bdl ByReverseDataLength) Less(i, j int) bool { - return bdl.TableDefinitions[j].DataLength < bdl.TableDefinitions[i].DataLength -} - type SchemaDefinition struct { // the 'CREATE DATABASE...' 
statement, with db name as {{.DatabaseName}} DatabaseSchema string @@ -67,10 +57,6 @@ func (sd *SchemaDefinition) String() string { return jscfg.ToJSON(sd) } -func (sd *SchemaDefinition) SortByReverseDataLength() { - sort.Sort(ByReverseDataLength{sd.TableDefinitions}) -} - // FilterTables returns a copy which includes only // whitelisted tables (tables), no blacklisted tables (excludeTables) and optionally views (includeViews). func (sd *SchemaDefinition) FilterTables(tables, excludeTables []string, includeViews bool) (*SchemaDefinition, error) { @@ -186,14 +172,14 @@ func (sd *SchemaDefinition) ToSQLStrings() []string { return append(sqlStrings, createViewSql...) } -// generates a report on what's different between two SchemaDefinition -// for now, we skip the VIEW entirely. +// generates a report on what's different between two SchemaDefinition, including views. func DiffSchema(leftName string, left *SchemaDefinition, rightName string, right *SchemaDefinition, er concurrency.ErrorRecorder) { if left == nil && right == nil { return } if left == nil || right == nil { - er.RecordError(fmt.Errorf("%s and %s are different, %s: %v, %s: %v", leftName, rightName, leftName, rightName, left, right)) + er.RecordError(fmt.Errorf("%v and %v are different, %s: %v, %s: %v", leftName, rightName, leftName, left, rightName, right)) + return } if left.DatabaseSchema != right.DatabaseSchema { er.RecordError(fmt.Errorf("%v and %v don't agree on database creation command:\n%v\n differs from:\n%v", leftName, rightName, left.DatabaseSchema, right.DatabaseSchema)) diff --git a/go/vt/mysqlctl/proto/schema_test.go b/go/vt/mysqlctl/proto/schema_test.go index baa1982c72..ccf6f1d549 100644 --- a/go/vt/mysqlctl/proto/schema_test.go +++ b/go/vt/mysqlctl/proto/schema_test.go @@ -6,6 +6,7 @@ package proto import ( "errors" + "fmt" "reflect" "testing" ) @@ -161,11 +162,70 @@ func TestSchemaDiff(t *testing.T) { }, }, } - testDiff(t, sd1, sd1, "sd1", "sd2", []string{}) sd2 := &SchemaDefinition{TableDefinitions: make([]*TableDefinition, 0, 2)} + + sd3 := &SchemaDefinition{ + TableDefinitions: []*TableDefinition{ + &TableDefinition{ + Name: "table2", + Schema: "schema2", + Type: TABLE_BASE_TABLE, + }, + }, + } + + sd4 := &SchemaDefinition{ + TableDefinitions: []*TableDefinition{ + &TableDefinition{ + Name: "table2", + Schema: "table2", + Type: TABLE_VIEW, + }, + }, + } + + sd5 := &SchemaDefinition{ + TableDefinitions: []*TableDefinition{ + &TableDefinition{ + Name: "table2", + Schema: "table2", + Type: TABLE_BASE_TABLE, + }, + }, + } + + testDiff(t, sd1, sd1, "sd1", "sd2", []string{}) + testDiff(t, sd2, sd2, "sd2", "sd2", []string{}) + // two schemas are considered the same if both nil + testDiff(t, nil, nil, "sd1", "sd2", nil) + + testDiff(t, sd1, nil, "sd1", "sd2", []string{ + fmt.Sprintf("sd1 and sd2 are different, sd1: %v, sd2: null", sd1), + }) + + testDiff(t, sd1, sd3, "sd1", "sd3", []string{ + "sd1 has an extra table named table1", + }) + + testDiff(t, sd3, sd1, "sd3", "sd1", []string{ + "sd1 has an extra table named table1", + }) + + testDiff(t, sd2, sd4, "sd2", "sd4", []string{ + "sd4 has an extra view named table2", + }) + + testDiff(t, sd4, sd2, "sd4", "sd2", []string{ + "sd4 has an extra view named table2", + }) + + testDiff(t, sd4, sd5, "sd4", "sd5", []string{ + fmt.Sprintf("sd4 and sd5 disagree on table type for table table2:\nVIEW\n differs from:\nBASE TABLE"), + }) + sd1.DatabaseSchema = "CREATE DATABASE {{.DatabaseName}}" sd2.DatabaseSchema = "DONT CREATE DATABASE {{.DatabaseName}}" testDiff(t, sd1, sd2, 
"sd1", "sd2", []string{"sd1 and sd2 don't agree on database creation command:\nCREATE DATABASE {{.DatabaseName}}\n differs from:\nDONT CREATE DATABASE {{.DatabaseName}}", "sd1 has an extra table named table1", "sd1 has an extra table named table2"}) From 86f64b676982d38af7e0c51e311bb6cf9e1cefd5 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 13 May 2015 22:46:20 -0700 Subject: [PATCH 025/128] fix mysqlctl/proto/schema.go coding styles --- go/vt/mysqlctl/proto/schema.go | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/go/vt/mysqlctl/proto/schema.go b/go/vt/mysqlctl/proto/schema.go index 667cba687b..27e9eddf0e 100644 --- a/go/vt/mysqlctl/proto/schema.go +++ b/go/vt/mysqlctl/proto/schema.go @@ -16,10 +16,13 @@ import ( ) const ( + // TABLE_BASE_TABLE indicates the table type is a base table. TABLE_BASE_TABLE = "BASE TABLE" - TABLE_VIEW = "VIEW" + // TABLE_VIEW indicates the table type is a view. + TABLE_VIEW = "VIEW" ) +// TableDefinition contains all schema information about a table. type TableDefinition struct { Name string // the table name Schema string // the SQL to run to create the table @@ -31,17 +34,20 @@ type TableDefinition struct { // be approximate count) } -// helper methods for sorting +// TableDefinitions is a list of TableDefinition. type TableDefinitions []*TableDefinition +// Len returns TableDefinitions length. func (tds TableDefinitions) Len() int { return len(tds) } +// Swap used for sorting TableDefinitions. func (tds TableDefinitions) Swap(i, j int) { tds[i], tds[j] = tds[j], tds[i] } +// SchemaDefinition defines schema for a certain database. type SchemaDefinition struct { // the 'CREATE DATABASE...' statement, with db name as {{.DatabaseName}} DatabaseSchema string @@ -127,6 +133,8 @@ func (sd *SchemaDefinition) FilterTables(tables, excludeTables []string, include return ©, nil } +// GenerateSchemaVersion return a unique schema version string based on +// its TableDefinitions. func (sd *SchemaDefinition) GenerateSchemaVersion() { hasher := md5.New() for _, td := range sd.TableDefinitions { @@ -137,6 +145,7 @@ func (sd *SchemaDefinition) GenerateSchemaVersion() { sd.Version = hex.EncodeToString(hasher.Sum(nil)) } +// GetTable returns TableDefinition for a given table name. func (sd *SchemaDefinition) GetTable(table string) (td *TableDefinition, ok bool) { for _, td := range sd.TableDefinitions { if td.Name == table { @@ -172,7 +181,8 @@ func (sd *SchemaDefinition) ToSQLStrings() []string { return append(sqlStrings, createViewSql...) } -// generates a report on what's different between two SchemaDefinition, including views. +// DiffSchema generates a report on what's different between two SchemaDefinitions +// including views. func DiffSchema(leftName string, left *SchemaDefinition, rightName string, right *SchemaDefinition, er concurrency.ErrorRecorder) { if left == nil && right == nil { return @@ -235,16 +245,17 @@ func DiffSchema(leftName string, left *SchemaDefinition, rightName string, right } } +// DiffSchemaToArray diffs two schemas and return the schema diffs if there is any. func DiffSchemaToArray(leftName string, left *SchemaDefinition, rightName string, right *SchemaDefinition) (result []string) { er := concurrency.AllErrorRecorder{} DiffSchema(leftName, left, rightName, right, &er) if er.HasErrors() { return er.ErrorStrings() - } else { - return nil } + return nil } +// SchemaChange contains all necessary information to apply a schema change. 
type SchemaChange struct { Sql string Force bool @@ -253,6 +264,8 @@ type SchemaChange struct { AfterSchema *SchemaDefinition } +// SchemaChangeResult contains before and after table schemas for +// a schema change SQL. type SchemaChangeResult struct { BeforeSchema *SchemaDefinition AfterSchema *SchemaDefinition From 7d69f86b1c7508079f9fea788a0c24a40191ed60 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 14 May 2015 08:39:08 -0700 Subject: [PATCH 026/128] Interface definition for BackupStorage, file implementation. --- go/vt/mysqlctl/backupstorage/file.go | 138 ++++++++++++++++++++++ go/vt/mysqlctl/backupstorage/interface.go | 73 ++++++++++++ 2 files changed, 211 insertions(+) create mode 100644 go/vt/mysqlctl/backupstorage/file.go create mode 100644 go/vt/mysqlctl/backupstorage/interface.go diff --git a/go/vt/mysqlctl/backupstorage/file.go b/go/vt/mysqlctl/backupstorage/file.go new file mode 100644 index 0000000000..d8ce6dab2b --- /dev/null +++ b/go/vt/mysqlctl/backupstorage/file.go @@ -0,0 +1,138 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package backupstorage + +import ( + "flag" + "fmt" + "io" + "os" + "path" ) + +// This file contains the local file system implementation of the +// BackupStorage interface. + +var ( + fileBackupStorageRoot = flag.String("file_backup_storage_root", "", "root directory for the file backup storage") +) + +// FileBackupHandle implements BackupHandle for local file system. +type FileBackupHandle struct { + bucket string + name string +} + +// Bucket is part of the BackupHandle interface +func (fbh *FileBackupHandle) Bucket() string { + return fbh.bucket +} + +// Name is part of the BackupHandle interface +func (fbh *FileBackupHandle) Name() string { + return fbh.name +} + +// FileBackupStorage implements BackupStorage for local file system. +type FileBackupStorage struct { + root string +} + +// ListBackups is part of the BackupStorage interface +func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) { + p := path.Join(fbs.root, bucket) + f, err := os.Open(p) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Readdir(-1) + if err != nil { + return nil, err + } + result := make([]BackupHandle, 0, len(fi)) + for _, info := range fi { + if !info.IsDir() { + continue + } + if info.Name() == "." || info.Name() == ".." 
{ + continue + } + result = append(result, &FileBackupHandle{ + bucket: bucket, + name: info.Name(), + }) + } + return result, nil +} + +// StartBackup is part of the BackupStorage interface +func (fbs *FileBackupStorage) StartBackup(bucket, name string) (BackupHandle, error) { + // make sure the bucket directory exists + p := path.Join(fbs.root, bucket) + if err := os.MkdirAll(p, os.ModePerm); err != nil { + return nil, err + } + + // creates the backup directory + p = path.Join(p, name) + if err := os.Mkdir(p, os.ModePerm); err != nil { + return nil, err + } + + return &FileBackupHandle{ + bucket: bucket, + name: name, + }, nil +} + +// AddFile is part of the BackupStorage interface +func (fbs *FileBackupStorage) AddFile(handle BackupHandle, filename string) (io.WriteCloser, error) { + fbh, ok := handle.(*FileBackupHandle) + if !ok { + return nil, fmt.Errorf("FileBackupStorage only accepts FileBackupHandle") + } + p := path.Join(fbs.root, fbh.bucket, fbh.name, filename) + return os.Create(p) +} + +// EndBackup is part of the BackupStorage interface +func (fbs *FileBackupStorage) EndBackup(handle BackupHandle) error { + return nil +} + +// AbortBackup is part of the BackupStorage interface +func (fbs *FileBackupStorage) AbortBackup(handle BackupHandle) error { + fbh, ok := handle.(*FileBackupHandle) + if !ok { + return fmt.Errorf("FileBackupStorage only accepts FileBackupHandle") + } + return fbs.RemoveBackup(fbh.bucket, fbh.name) +} + +// ReadFile is part of the BackupStorage interface +func (fbs *FileBackupStorage) ReadFile(handle BackupHandle, filename string) (io.ReadCloser, error) { + fbh, ok := handle.(*FileBackupHandle) + if !ok { + return nil, fmt.Errorf("FileBackupStorage only accepts FileBackupHandle") + } + p := path.Join(fbs.root, fbh.bucket, fbh.name, filename) + return os.Open(p) +} + +// RemoveBackup is part of the BackupStorage interface +func (fbs *FileBackupStorage) RemoveBackup(bucket, name string) error { + p := path.Join(fbs.root, bucket, name) + return os.RemoveAll(p) +} + +// RegisterFileBackupStorage should be called after Flags has been +// initialized, to register the FileBackupStorage implementation +func RegisterFileBackupStorage() { + BackupStorageMap["file"] = &FileBackupStorage{ + root: *fileBackupStorageRoot, + } +} diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go new file mode 100644 index 0000000000..59f6bb11f4 --- /dev/null +++ b/go/vt/mysqlctl/backupstorage/interface.go @@ -0,0 +1,73 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package backupstorage contains the interface and file system implementation +// of the backup system. +package backupstorage + +import ( + "flag" + "io" + + log "github.com/golang/glog" +) + +var ( + backupStorageImplementation = flag.String("backup_storage_implementation", "", "which implementation to use for the backup storage feature") +) + +// BackupHandle describes an individual backup. +type BackupHandle interface { + // Bucket is the location of the backup. Will contain keyspace/shard. + Bucket() string + + // Name is the individual name of the backup. Will contain + // tabletAlias-timestamp. + Name() string +} + +// BackupStorage is the interface to the storage system +type BackupStorage interface { + // ListBackups returns all the backups in a bucket. 
+ ListBackups(bucket string) ([]BackupHandle, error) + + // StartBackup creates a new backup with the given name. + // If a backup with the same name already exists, it's an error. + StartBackup(bucket, name string) (BackupHandle, error) + + // AddFile opens a new file to be added to the backup. + // filename is guaranteed to only contain alphanumerical + // characters and hyphens. + // It should be thread safe; it is possible to call AddFile from + // multiple goroutines once a backup has been started. + AddFile(handle BackupHandle, filename string) (io.WriteCloser, error) + + // EndBackup stops and closes a backup. The contents should be kept. + EndBackup(handle BackupHandle) error + + // AbortBackup stops a backup, and removes the contents that + // have been copied already. It is called if an error occurs + // while the backup is being taken, and the backup cannot be finished. + AbortBackup(handle BackupHandle) error + + // ReadFile starts reading a file from a backup. + ReadFile(handle BackupHandle, filename string) (io.ReadCloser, error) + + // RemoveBackup removes all the data associated with a backup. + // It will not appear in ListBackups after RemoveBackup succeeds. + RemoveBackup(bucket, name string) error +} + +// BackupStorageMap contains the registered implementations for BackupStorage +var BackupStorageMap = make(map[string]BackupStorage) + +// GetBackupStorage returns the current BackupStorage implementation. +// Should be called after flags have been initialized. +func GetBackupStorage() BackupStorage { + bs, ok := BackupStorageMap[*backupStorageImplementation] + if !ok { + log.Fatalf("no registered implementation of BackupStorage") + } + return bs +} From a748c47b1da974ab28fdd8534788207e1adc70a6 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 14 May 2015 08:41:17 -0700 Subject: [PATCH 027/128] Let optional hooks also work when VTROOT is not set. That's easier for unit tests. --- go/vt/hook/hook.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/go/vt/hook/hook.go b/go/vt/hook/hook.go index 301308a0cb..cac43d9824 100644 --- a/go/vt/hook/hook.go +++ b/go/vt/hook/hook.go @@ -116,9 +116,14 @@ func (hook *Hook) Execute() (result *HookResult) { // Execute an optional hook, returns a printable error func (hook *Hook) ExecuteOptional() error { hr := hook.Execute() - if hr.ExitStatus == HOOK_DOES_NOT_EXIST { + switch hr.ExitStatus { + case HOOK_DOES_NOT_EXIST: log.Infof("%v hook doesn't exist", hook.Name) - } else if hr.ExitStatus != HOOK_SUCCESS { + case HOOK_VTROOT_ERROR: + log.Infof("VTROOT not set, so %v hook doesn't exist", hook.Name) + case HOOK_SUCCESS: + // nothing to do here + default: return fmt.Errorf("%v hook failed(%v): %v", hook.Name, hr.ExitStatus, hr.Stderr) } return nil From bf3823d40f4b9ea44e6affd9693cc5582827e5a8 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 14 May 2015 10:41:35 -0700 Subject: [PATCH 028/128] Adding unit tests for FileBackupStorage. 
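For context, here is a minimal sketch (not part of this patch) of how a caller might drive the BackupStorage contract these tests cover. It assumes the registration flow added in the previous commit, with -backup_storage_implementation=file and -file_backup_storage_root pointing at a writable directory; the bucket and file names simply mirror the values used in the tests below.

package main

import (
	"flag"
	"fmt"

	"github.com/youtube/vitess/go/vt/mysqlctl/backupstorage"
)

func main() {
	// Registration reads the flags, so they must be parsed first.
	flag.Parse()
	backupstorage.RegisterFileBackupStorage()
	bs := backupstorage.GetBackupStorage()

	// Start a backup, add one file to it, then finalize it.
	bh, err := bs.StartBackup("keyspace/shard", "cell-0001-2015-01-14-10-00-00")
	if err != nil {
		fmt.Println("StartBackup failed:", err)
		return
	}
	wc, err := bs.AddFile(bh, "file1")
	if err != nil {
		bs.AbortBackup(bh) // remove the partial backup on error
		return
	}
	if _, err := wc.Write([]byte("backup contents")); err != nil {
		wc.Close()
		bs.AbortBackup(bh)
		return
	}
	if err := wc.Close(); err != nil {
		bs.AbortBackup(bh)
		return
	}
	if err := bs.EndBackup(bh); err != nil {
		fmt.Println("EndBackup failed:", err)
		return
	}

	// List what is now stored under the bucket.
	bhs, err := bs.ListBackups("keyspace/shard")
	if err != nil {
		fmt.Println("ListBackups failed:", err)
		return
	}
	for _, b := range bhs {
		fmt.Println(b.Bucket(), b.Name())
	}
}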
--- go/vt/mysqlctl/backupstorage/file.go | 4 + go/vt/mysqlctl/backupstorage/file_test.go | 173 ++++++++++++++++++++++ go/vt/mysqlctl/backupstorage/interface.go | 15 ++ 3 files changed, 192 insertions(+) create mode 100644 go/vt/mysqlctl/backupstorage/file_test.go diff --git a/go/vt/mysqlctl/backupstorage/file.go b/go/vt/mysqlctl/backupstorage/file.go index d8ce6dab2b..da82b31a78 100644 --- a/go/vt/mysqlctl/backupstorage/file.go +++ b/go/vt/mysqlctl/backupstorage/file.go @@ -45,6 +45,9 @@ func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) p := path.Join(fbs.root, bucket) f, err := os.Open(p) if err != nil { + if os.IsNotExist(err) { + return nil, nil + } return nil, err } defer f.Close() @@ -66,6 +69,7 @@ func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) name: info.Name(), }) } + SortBackupHandleArray(result) return result, nil } diff --git a/go/vt/mysqlctl/backupstorage/file_test.go b/go/vt/mysqlctl/backupstorage/file_test.go new file mode 100644 index 0000000000..4004ee0ec3 --- /dev/null +++ b/go/vt/mysqlctl/backupstorage/file_test.go @@ -0,0 +1,173 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package backupstorage + +import ( + "io" + "io/ioutil" + "os" + "testing" +) + +// This file tests the file BackupStorage engine. + +// Note this is a very generic test for BackupStorage implementations; +// we test the interface only. But making it a generic test library is +// more cumbersome; we'll do that when we have an actual need for +// another BackupStorage implementation. + +// setupFileBackupStorage creates a temporary directory, and +// returns a FileBackupStorage based on it +func setupFileBackupStorage(t *testing.T) *FileBackupStorage { + root, err := ioutil.TempDir("", "fbstest") + if err != nil { + t.Fatalf("ioutil.TempDir failed: %v", err) + } + return &FileBackupStorage{ + root: root, + } +} + +// cleanupFileBackupStorage removes the entire directory +func cleanupFileBackupStorage(fbs *FileBackupStorage) { + os.RemoveAll(fbs.root) +} + +func TestListBackups(t *testing.T) { + fbs := setupFileBackupStorage(t) + defer cleanupFileBackupStorage(fbs) + + // verify we have no entry now + bucket := "keyspace/shard" + bhs, err := fbs.ListBackups(bucket) + if err != nil { + t.Fatalf("ListBackups on empty fbs failed: %v", err) + } + if len(bhs) != 0 { + t.Fatalf("ListBackups on empty fbs returned results: %#v", bhs) + } + + // add one empty backup + firstBackup := "cell-0001-2015-01-14-10-00-00" + bh, err := fbs.StartBackup(bucket, firstBackup) + if err != nil { + t.Fatalf("fbs.StartBackup failed: %v", err) + } + if err := fbs.EndBackup(bh); err != nil { + t.Fatalf("fbs.EndBackup failed: %v", err) + } + + // verify we have one entry now + bhs, err = fbs.ListBackups(bucket) + if err != nil { + t.Fatalf("ListBackups with one backup failed: %v", err) + } + if len(bhs) != 1 || + bhs[0].Bucket() != bucket || + bhs[0].Name() != firstBackup { + t.Fatalf("ListBackups with one backup returned wrong results: %#v", bhs) + } + + // add another one, with earlier date + secondBackup := "cell-0001-2015-01-12-10-00-00" + bh, err = fbs.StartBackup(bucket, secondBackup) + if err != nil { + t.Fatalf("fbs.StartBackup failed: %v", err) + } + if err := fbs.EndBackup(bh); err != nil { + t.Fatalf("fbs.EndBackup failed: %v", err) + } + + // verify we have two sorted entries now + bhs, err = fbs.ListBackups(bucket) + if err != nil { 
t.Fatalf("ListBackups on empty fbs failed: %v", err) + } + if len(bhs) != 2 || + bhs[0].Bucket() != bucket || + bhs[0].Name() != secondBackup || + bhs[1].Bucket() != bucket || + bhs[1].Name() != firstBackup { + t.Fatalf("ListBackups with two backups returned wrong results: %#v", bhs) + } + + // remove a backup, back to one + if err := fbs.RemoveBackup(bucket, secondBackup); err != nil { + t.Fatalf("RemoveBackup failed: %v", err) + } + bhs, err = fbs.ListBackups(bucket) + if err != nil { + t.Fatalf("ListBackups after deletion failed: %v", err) + } + if len(bhs) != 1 || + bhs[0].Bucket() != bucket || + bhs[0].Name() != firstBackup { + t.Fatalf("ListBackups after deletion returned wrong results: %#v", bhs) + } + + // add a backup but abort it, should stay at one + bh, err = fbs.StartBackup(bucket, secondBackup) + if err != nil { + t.Fatalf("fbs.StartBackup failed: %v", err) + } + if err := fbs.AbortBackup(bh); err != nil { + t.Fatalf("fbs.AbortBackup failed: %v", err) + } + bhs, err = fbs.ListBackups(bucket) + if err != nil { + t.Fatalf("ListBackups after abort failed: %v", err) + } + if len(bhs) != 1 || + bhs[0].Bucket() != bucket || + bhs[0].Name() != firstBackup { + t.Fatalf("ListBackups after abort returned wrong results: %#v", bhs) + } +} + +func TestFileContents(t *testing.T) { + fbs := setupFileBackupStorage(t) + defer cleanupFileBackupStorage(fbs) + + bucket := "keyspace/shard" + name := "cell-0001-2015-01-14-10-00-00" + filename1 := "file1" + contents1 := "contents of the first file" + + // start a backup, add a file + bh, err := fbs.StartBackup(bucket, name) + if err != nil { + t.Fatalf("fbs.StartBackup failed: %v", err) + } + wc, err := fbs.AddFile(bh, filename1) + if err != nil { + t.Fatalf("fbs.AddFile failed: %v", err) + } + if _, err := wc.Write([]byte(contents1)); err != nil { + t.Fatalf("wc.Write failed: %v", err) + } + if err := wc.Close(); err != nil { + t.Fatalf("wc.Close failed: %v", err) + } + if err := fbs.EndBackup(bh); err != nil { + t.Fatalf("fbs.EndBackup failed: %v", err) + } + + // re-read the file + bhs, err := fbs.ListBackups(bucket) + if err != nil || len(bhs) != 1 { + t.Fatalf("ListBackups after abort returned wrong return: %v %v", err, bhs) + } + rc, err := fbs.ReadFile(bhs[0], filename1) + if err != nil { + t.Fatalf("fbs.ReadFile failed: %v", err) + } + buf := make([]byte, len(contents1)+10) + if n, err := rc.Read(buf); (err != nil && err != io.EOF) || n != len(contents1) { + t.Fatalf("rc.Read returned wrong result: %v %#v", err, n) + } + if err := rc.Close(); err != nil { + t.Fatalf("rc.Close failed: %v", err) + } +} diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go index 59f6bb11f4..dd51fef105 100644 --- a/go/vt/mysqlctl/backupstorage/interface.go +++ b/go/vt/mysqlctl/backupstorage/interface.go @@ -9,6 +9,7 @@ package backupstorage import ( "flag" "io" + "sort" log "github.com/golang/glog" ) @@ -59,6 +60,20 @@ type BackupStorage interface { RemoveBackup(bucket, name string) error } +// Helper code to sort BackupHandle arrays + +type byName []BackupHandle + +func (bha byName) Len() int { return len(bha) } +func (bha byName) Swap(i, j int) { bha[i], bha[j] = bha[j], bha[i] } +func (bha byName) Less(i, j int) bool { return bha[i].Name() < bha[j].Name() } + +// SortBackupHandleArray will sort the BackupHandle array by name. +// To be used by implementations on the result of ListBackups. 
+func SortBackupHandleArray(bha []BackupHandle) { + sort.Sort(byName(bha)) +} + // BackupStorageMap contains the registered implementations for BackupStorage var BackupStorageMap = make(map[string]BackupStorage) From cf9f348d4578c823adbf36aaf544d0751e45c60f Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 14 May 2015 11:18:14 -0700 Subject: [PATCH 029/128] Using agent.MysqlDaemon when we can. --- go/vt/tabletmanager/agent_rpc_actions.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index 7856402f9b..054933f786 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -164,7 +164,7 @@ func (agent *ActionAgent) GetPermissions(ctx context.Context) (*myproto.Permissi // SetReadOnly makes the mysql instance read-only or read-write // Should be called under RPCWrapLockAction. func (agent *ActionAgent) SetReadOnly(ctx context.Context, rdonly bool) error { - return agent.Mysqld.SetReadOnly(rdonly) + return agent.MysqlDaemon.SetReadOnly(rdonly) } // ChangeType changes the tablet type @@ -342,13 +342,13 @@ func (agent *ActionAgent) StopSlave(ctx context.Context) error { // provided position. Works both when Vitess manages // replication or not (using hook if not). func (agent *ActionAgent) StopSlaveMinimum(ctx context.Context, position myproto.ReplicationPosition, waitTime time.Duration) (myproto.ReplicationPosition, error) { - if err := agent.Mysqld.WaitMasterPos(position, waitTime); err != nil { + if err := agent.MysqlDaemon.WaitMasterPos(position, waitTime); err != nil { return myproto.ReplicationPosition{}, err } if err := mysqlctl.StopSlave(agent.MysqlDaemon, agent.hookExtraEnv()); err != nil { return myproto.ReplicationPosition{}, err } - return agent.Mysqld.MasterPosition() + return agent.MysqlDaemon.MasterPosition() } // StartSlave will start the replication. Works both when Vitess manages @@ -400,7 +400,7 @@ func (agent *ActionAgent) RunBlpUntil(ctx context.Context, bpl *blproto.BlpPosit if err := agent.BinlogPlayerMap.RunUntil(bpl, waitTime); err != nil { return nil, err } - rp, err := agent.Mysqld.MasterPosition() + rp, err := agent.MysqlDaemon.MasterPosition() return &rp, err } From db1c4be5f24f3faf73853bf56428d6ad511504c9 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Thu, 14 May 2015 11:24:57 -0700 Subject: [PATCH 030/128] enforce camel case for TABLE_VIEW and TABLE_BASE_TABLE --- go/vt/mysqlctl/proto/schema.go | 22 ++++++++--------- go/vt/mysqlctl/proto/schema_test.go | 24 +++++++++---------- go/vt/mysqlctl/schema.go | 6 ++--- go/vt/schemamanager/schemamanager_test.go | 2 +- .../agentrpctest/test_agent_rpc.go | 4 ++-- go/vt/worker/split_clone.go | 4 ++-- go/vt/worker/split_clone_test.go | 2 +- go/vt/worker/split_diff_test.go | 6 ++--- go/vt/worker/sqldiffer_test.go | 4 ++-- go/vt/worker/vertical_split_clone.go | 4 ++-- go/vt/worker/vertical_split_clone_test.go | 4 ++-- go/vt/worker/vertical_split_diff_test.go | 6 ++--- .../testlib/copy_schema_shard_test.go | 4 ++-- 13 files changed, 46 insertions(+), 46 deletions(-) diff --git a/go/vt/mysqlctl/proto/schema.go b/go/vt/mysqlctl/proto/schema.go index 27e9eddf0e..7f73d3420e 100644 --- a/go/vt/mysqlctl/proto/schema.go +++ b/go/vt/mysqlctl/proto/schema.go @@ -16,10 +16,10 @@ import ( ) const ( - // TABLE_BASE_TABLE indicates the table type is a base table. - TABLE_BASE_TABLE = "BASE TABLE" - // TABLE_VIEW indicates the table type is a view. 
- TABLE_VIEW = "VIEW" + // TableBaseTable indicates the table type is a base table. + TableBaseTable = "BASE TABLE" + // TableView indicates the table type is a view. + TableView = "VIEW" ) // TableDefinition contains all schema information about a table. @@ -28,7 +28,7 @@ type TableDefinition struct { Schema string // the SQL to run to create the table Columns []string // the columns in the order that will be used to dump and load the data PrimaryKeyColumns []string // the columns used by the primary key, in order - Type string // TABLE_BASE_TABLE or TABLE_VIEW + Type string // TableBaseTable or TableView DataLength uint64 // how much space the data file takes. RowCount uint64 // how many rows in the table (may // be approximate count) @@ -118,7 +118,7 @@ func (sd *SchemaDefinition) FilterTables(tables, excludeTables []string, include continue } - if !includeViews && table.Type == TABLE_VIEW { + if !includeViews && table.Type == TableView { continue } @@ -165,7 +165,7 @@ func (sd *SchemaDefinition) ToSQLStrings() []string { sqlStrings = append(sqlStrings, sd.DatabaseSchema) for _, td := range sd.TableDefinitions { - if td.Type == TABLE_VIEW { + if td.Type == TableView { createViewSql = append(createViewSql, td.Schema) } else { lines := strings.Split(td.Schema, "\n") @@ -226,19 +226,19 @@ func DiffSchema(leftName string, left *SchemaDefinition, rightName string, right } for leftIndex < len(left.TableDefinitions) { - if left.TableDefinitions[leftIndex].Type == TABLE_BASE_TABLE { + if left.TableDefinitions[leftIndex].Type == TableBaseTable { er.RecordError(fmt.Errorf("%v has an extra table named %v", leftName, left.TableDefinitions[leftIndex].Name)) } - if left.TableDefinitions[leftIndex].Type == TABLE_VIEW { + if left.TableDefinitions[leftIndex].Type == TableView { er.RecordError(fmt.Errorf("%v has an extra view named %v", leftName, left.TableDefinitions[leftIndex].Name)) } leftIndex++ } for rightIndex < len(right.TableDefinitions) { - if right.TableDefinitions[rightIndex].Type == TABLE_BASE_TABLE { + if right.TableDefinitions[rightIndex].Type == TableBaseTable { er.RecordError(fmt.Errorf("%v has an extra table named %v", rightName, right.TableDefinitions[rightIndex].Name)) } - if right.TableDefinitions[rightIndex].Type == TABLE_VIEW { + if right.TableDefinitions[rightIndex].Type == TableView { er.RecordError(fmt.Errorf("%v has an extra view named %v", rightName, right.TableDefinitions[rightIndex].Name)) } rightIndex++ diff --git a/go/vt/mysqlctl/proto/schema_test.go b/go/vt/mysqlctl/proto/schema_test.go index ccf6f1d549..62f76f1b2f 100644 --- a/go/vt/mysqlctl/proto/schema_test.go +++ b/go/vt/mysqlctl/proto/schema_test.go @@ -14,12 +14,12 @@ import ( var basicTable1 = &TableDefinition{ Name: "table1", Schema: "table schema 1", - Type: TABLE_BASE_TABLE, + Type: TableBaseTable, } var basicTable2 = &TableDefinition{ Name: "table2", Schema: "table schema 2", - Type: TABLE_BASE_TABLE, + Type: TableBaseTable, } var table3 = &TableDefinition{ @@ -27,19 +27,19 @@ var table3 = &TableDefinition{ Schema: "CREATE TABLE `table3` (\n" + "id bigint not null,\n" + ") Engine=InnoDB", - Type: TABLE_BASE_TABLE, + Type: TableBaseTable, } var view1 = &TableDefinition{ Name: "view1", Schema: "view schema 1", - Type: TABLE_VIEW, + Type: TableView, } var view2 = &TableDefinition{ Name: "view2", Schema: "view schema 2", - Type: TABLE_VIEW, + Type: TableView, } func TestToSQLStrings(t *testing.T) { @@ -153,12 +153,12 @@ func TestSchemaDiff(t *testing.T) { &TableDefinition{ Name: "table1", Schema: "schema1", - Type: 
TABLE_BASE_TABLE, + Type: TableBaseTable, }, &TableDefinition{ Name: "table2", Schema: "schema2", - Type: TABLE_BASE_TABLE, + Type: TableBaseTable, }, }, } @@ -170,7 +170,7 @@ func TestSchemaDiff(t *testing.T) { &TableDefinition{ Name: "table2", Schema: "schema2", - Type: TABLE_BASE_TABLE, + Type: TableBaseTable, }, }, } @@ -180,7 +180,7 @@ func TestSchemaDiff(t *testing.T) { &TableDefinition{ Name: "table2", Schema: "table2", - Type: TABLE_VIEW, + Type: TableView, }, }, } @@ -190,7 +190,7 @@ func TestSchemaDiff(t *testing.T) { &TableDefinition{ Name: "table2", Schema: "table2", - Type: TABLE_BASE_TABLE, + Type: TableBaseTable, }, }, } @@ -232,10 +232,10 @@ func TestSchemaDiff(t *testing.T) { sd2.DatabaseSchema = "CREATE DATABASE {{.DatabaseName}}" testDiff(t, sd2, sd1, "sd2", "sd1", []string{"sd1 has an extra table named table1", "sd1 has an extra table named table2"}) - sd2.TableDefinitions = append(sd2.TableDefinitions, &TableDefinition{Name: "table1", Schema: "schema1", Type: TABLE_BASE_TABLE}) + sd2.TableDefinitions = append(sd2.TableDefinitions, &TableDefinition{Name: "table1", Schema: "schema1", Type: TableBaseTable}) testDiff(t, sd1, sd2, "sd1", "sd2", []string{"sd1 has an extra table named table2"}) - sd2.TableDefinitions = append(sd2.TableDefinitions, &TableDefinition{Name: "table2", Schema: "schema3", Type: TABLE_BASE_TABLE}) + sd2.TableDefinitions = append(sd2.TableDefinitions, &TableDefinition{Name: "table2", Schema: "schema3", Type: TableBaseTable}) testDiff(t, sd1, sd2, "sd1", "sd2", []string{"sd1 and sd2 disagree on schema for table table2:\nschema2\n differs from:\nschema3"}) } diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index 321a63b55a..b2f2bea988 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -33,7 +33,7 @@ func (mysqld *Mysqld) GetSchema(dbName string, tables, excludeTables []string, i // get the list of tables we're interested in sql := "SELECT table_name, table_type, data_length, table_rows FROM information_schema.tables WHERE table_schema = '" + dbName + "'" if !includeViews { - sql += " AND table_type = '" + proto.TABLE_BASE_TABLE + "'" + sql += " AND table_type = '" + proto.TableBaseTable + "'" } qr, err := mysqld.fetchSuperQuery(sql) if err != nil { @@ -80,7 +80,7 @@ func (mysqld *Mysqld) GetSchema(dbName string, tables, excludeTables []string, i // vt/tabletserver/table_info.go:162 norm := qr.Rows[0][1].String() norm = autoIncr.ReplaceAllLiteralString(norm, "") - if tableType == proto.TABLE_VIEW { + if tableType == proto.TableView { // Views will have the dbname in there, replace it // with {{.DatabaseName}} norm = strings.Replace(norm, "`"+dbName+"`", "`{{.DatabaseName}}`", -1) @@ -212,7 +212,7 @@ func (mysqld *Mysqld) PreflightSchemaChange(dbName string, change string) (*prot sql += "CREATE DATABASE _vt_preflight;\n" sql += "USE _vt_preflight;\n" for _, td := range beforeSchema.TableDefinitions { - if td.Type == proto.TABLE_BASE_TABLE { + if td.Type == proto.TableBaseTable { sql += td.Schema + ";\n" } } diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index f925b983c2..6a921341ca 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -84,7 +84,7 @@ func TestRunSchemaChanges(t *testing.T) { &proto.TableDefinition{ Name: "test_table", Schema: sql, - Type: proto.TABLE_BASE_TABLE, + Type: proto.TableBaseTable, }, }, }, diff --git a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go 
b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go index da5c35e647..4291799585 100644 --- a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go +++ b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go @@ -145,7 +145,7 @@ var testGetSchemaReply = &myproto.SchemaDefinition{ Schema: "create table_name", Columns: []string{"col1", "col2"}, PrimaryKeyColumns: []string{"col1"}, - Type: myproto.TABLE_VIEW, + Type: myproto.TableView, DataLength: 12, RowCount: 6, }, @@ -154,7 +154,7 @@ var testGetSchemaReply = &myproto.SchemaDefinition{ Schema: "create table_name2", Columns: []string{"col1"}, PrimaryKeyColumns: []string{"col1"}, - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, DataLength: 12, RowCount: 6, }, diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index 63d2356dee..b2904f5394 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -466,7 +466,7 @@ func (scw *SplitCloneWorker) copy() error { // Find the column index for the sharding columns in all the databases, and count rows columnIndexes := make([]int, len(sourceSchemaDefinition.TableDefinitions)) for tableIndex, td := range sourceSchemaDefinition.TableDefinitions { - if td.Type == myproto.TABLE_BASE_TABLE { + if td.Type == myproto.TableBaseTable { // find the column to split on columnIndexes[tableIndex] = -1 for i, name := range td.Columns { @@ -534,7 +534,7 @@ func (scw *SplitCloneWorker) copy() error { for shardIndex := range scw.sourceShards { sema := sync2.NewSemaphore(scw.sourceReaderCount, 0) for tableIndex, td := range sourceSchemaDefinition.TableDefinitions { - if td.Type == myproto.TABLE_VIEW { + if td.Type == myproto.TableView { continue } diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 0484ad6382..3a0f7f0c93 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -299,7 +299,7 @@ func testSplitClone(t *testing.T, strategy string) { Name: "table1", Columns: []string{"id", "msg", "keyspace_id"}, PrimaryKeyColumns: []string{"id"}, - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, // This informs how many rows we can pack into a single insert DataLength: 2048, }, diff --git a/go/vt/worker/split_diff_test.go b/go/vt/worker/split_diff_test.go index 866c149b3c..21436ac0c1 100644 --- a/go/vt/worker/split_diff_test.go +++ b/go/vt/worker/split_diff_test.go @@ -204,17 +204,17 @@ func TestSplitDiff(t *testing.T) { Name: "table1", Columns: []string{"id", "msg", "keyspace_id"}, PrimaryKeyColumns: []string{"id"}, - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, }, &myproto.TableDefinition{ Name: excludedTable, Columns: []string{"id", "msg", "keyspace_id"}, PrimaryKeyColumns: []string{"id"}, - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, }, &myproto.TableDefinition{ Name: "view1", - Type: myproto.TABLE_VIEW, + Type: myproto.TableView, }, }, } diff --git a/go/vt/worker/sqldiffer_test.go b/go/vt/worker/sqldiffer_test.go index 463fa07255..a0af742cd7 100644 --- a/go/vt/worker/sqldiffer_test.go +++ b/go/vt/worker/sqldiffer_test.go @@ -123,11 +123,11 @@ func TestSqlDiffer(t *testing.T) { Name: "moving1", Columns: []string{"id", "msg"}, PrimaryKeyColumns: []string{"id"}, - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, }, &myproto.TableDefinition{ Name: "view1", - Type: myproto.TABLE_VIEW, + Type: myproto.TableView, }, }, } diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index 8c0b3e651c..d1d233cf9c 100644 --- 
a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -416,7 +416,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error { // Count rows for i, td := range sourceSchemaDefinition.TableDefinitions { vscw.tableStatus[i].mu.Lock() - if td.Type == myproto.TABLE_BASE_TABLE { + if td.Type == myproto.TableBaseTable { vscw.tableStatus[i].rowCount = td.RowCount } else { vscw.tableStatus[i].isView = true @@ -466,7 +466,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error { sourceWaitGroup := sync.WaitGroup{} sema := sync2.NewSemaphore(vscw.sourceReaderCount, 0) for tableIndex, td := range sourceSchemaDefinition.TableDefinitions { - if td.Type == myproto.TABLE_VIEW { + if td.Type == myproto.TableView { continue } diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index dc2e14fd5b..28ab443725 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -283,13 +283,13 @@ func testVerticalSplitClone(t *testing.T, strategy string) { Name: "moving1", Columns: []string{"id", "msg"}, PrimaryKeyColumns: []string{"id"}, - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, // This informs how many rows we can pack into a single insert DataLength: 2048, }, &myproto.TableDefinition{ Name: "view1", - Type: myproto.TABLE_VIEW, + Type: myproto.TableView, }, }, } diff --git a/go/vt/worker/vertical_split_diff_test.go b/go/vt/worker/vertical_split_diff_test.go index 622c9ed48d..8d8c9f933a 100644 --- a/go/vt/worker/vertical_split_diff_test.go +++ b/go/vt/worker/vertical_split_diff_test.go @@ -142,17 +142,17 @@ func TestVerticalSplitDiff(t *testing.T) { Name: "moving1", Columns: []string{"id", "msg"}, PrimaryKeyColumns: []string{"id"}, - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, }, &myproto.TableDefinition{ Name: excludedTable, Columns: []string{"id", "msg"}, PrimaryKeyColumns: []string{"id"}, - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, }, &myproto.TableDefinition{ Name: "view1", - Type: myproto.TABLE_VIEW, + Type: myproto.TableView, }, }, } diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go index 8575760b74..4a2c6779d8 100644 --- a/go/vt/wrangler/testlib/copy_schema_shard_test.go +++ b/go/vt/wrangler/testlib/copy_schema_shard_test.go @@ -149,12 +149,12 @@ func TestCopySchemaShard(t *testing.T) { &myproto.TableDefinition{ Name: "table1", Schema: "CREATE TABLE `resharding1` (\n `id` bigint(20) NOT NULL AUTO_INCREMENT,\n `msg` varchar(64) DEFAULT NULL,\n `keyspace_id` bigint(20) unsigned NOT NULL,\n PRIMARY KEY (`id`),\n KEY `by_msg` (`msg`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8", - Type: myproto.TABLE_BASE_TABLE, + Type: myproto.TableBaseTable, }, &myproto.TableDefinition{ Name: "view1", Schema: "CREATE TABLE `view1` (\n `id` bigint(20) NOT NULL AUTO_INCREMENT,\n `msg` varchar(64) DEFAULT NULL,\n `keyspace_id` bigint(20) unsigned NOT NULL,\n PRIMARY KEY (`id`),\n KEY `by_msg` (`msg`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8", - Type: myproto.TABLE_VIEW, + Type: myproto.TableView, }, }, } From 333b9cd0896d6619f4d423157103cecb9accc423 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 14 May 2015 11:26:18 -0700 Subject: [PATCH 031/128] Removing dependency from go/vt/health and go/vt/mysqlctl to topo. 
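The key point of this change is that health reporters no longer see topo types at all. As a rough illustration (a sketch, not part of this patch; the reporter name "demo" and the 3-second delay are made up), a custom reporter now plugs into the aggregator with plain booleans:

package main

import (
	"fmt"
	"time"

	"github.com/youtube/vitess/go/vt/health"
)

func main() {
	ag := health.NewAggregator()

	// A reporter only learns whether the tablet is a slave type and
	// whether its query service should be running; no topo import needed.
	ag.Register("demo", health.FunctionReporter(
		func(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) {
			if !isSlaveType {
				return 0, nil // master-like tablets report no replication delay
			}
			return 3 * time.Second, nil
		}))

	// The aggregate is the highest delay across all registered reporters.
	delay, err := ag.Report(true, true)
	fmt.Println(delay, err)
}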
--- go/vt/health/health.go | 15 +++++++-------- go/vt/health/health_test.go | 12 +++++------- go/vt/mysqlctl/health.go | 5 ++--- go/vt/tabletmanager/healthcheck.go | 2 +- go/vt/tabletmanager/healthcheck_test.go | 2 +- 5 files changed, 16 insertions(+), 20 deletions(-) diff --git a/go/vt/health/health.go b/go/vt/health/health.go index 4b2c705cae..4785dad102 100644 --- a/go/vt/health/health.go +++ b/go/vt/health/health.go @@ -9,7 +9,6 @@ import ( "time" "github.com/youtube/vitess/go/vt/concurrency" - "github.com/youtube/vitess/go/vt/topo" ) var ( @@ -26,11 +25,11 @@ func init() { type Reporter interface { // Report returns the replication delay gathered by this // module (or 0 if it thinks it's not behind), assuming that - // its tablet type is TabletType, and that its query service + // it is a slave type or not, and that its query service // should be running or not. If Report returns an error it // implies that the tablet is in a bad shape and not able to // handle queries. - Report(tabletType topo.TabletType, shouldQueryServiceBeRunning bool) (replicationDelay time.Duration, err error) + Report(isSlaveType, shouldQueryServiceBeRunning bool) (replicationDelay time.Duration, err error) // HTMLName returns a displayable name for the module. // Can be used to be displayed in the status page. @@ -38,11 +37,11 @@ type Reporter interface { } // FunctionReporter is a function that may act as a Reporter. -type FunctionReporter func(topo.TabletType, bool) (time.Duration, error) +type FunctionReporter func(bool, bool) (time.Duration, error) // Report implements Reporter.Report -func (fc FunctionReporter) Report(tabletType topo.TabletType, shouldQueryServiceBeRunning bool) (time.Duration, error) { - return fc(tabletType, shouldQueryServiceBeRunning) +func (fc FunctionReporter) Report(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) { + return fc(isSlaveType, shouldQueryServiceBeRunning) } // HTMLName implements Reporter.HTMLName @@ -71,7 +70,7 @@ func NewAggregator() *Aggregator { // The returned replication delay will be the highest of all the replication // delays returned by the Reporter implementations (although typically // only one implementation will actually return a meaningful one). 
-func (ag *Aggregator) Report(tabletType topo.TabletType, shouldQueryServiceBeRunning bool) (time.Duration, error) { +func (ag *Aggregator) Report(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) { var ( wg sync.WaitGroup rec concurrency.AllErrorRecorder @@ -83,7 +82,7 @@ func (ag *Aggregator) Report(tabletType topo.TabletType, shouldQueryServiceBeRun wg.Add(1) go func(name string, rep Reporter) { defer wg.Done() - replicationDelay, err := rep.Report(tabletType, shouldQueryServiceBeRunning) + replicationDelay, err := rep.Report(isSlaveType, shouldQueryServiceBeRunning) if err != nil { rec.RecordError(fmt.Errorf("%v: %v", name, err)) return diff --git a/go/vt/health/health_test.go b/go/vt/health/health_test.go index 7c71df422f..bd2e66f649 100644 --- a/go/vt/health/health_test.go +++ b/go/vt/health/health_test.go @@ -4,23 +4,21 @@ import ( "errors" "testing" "time" - - "github.com/youtube/vitess/go/vt/topo" ) func TestReporters(t *testing.T) { ag := NewAggregator() - ag.Register("a", FunctionReporter(func(topo.TabletType, bool) (time.Duration, error) { + ag.Register("a", FunctionReporter(func(bool, bool) (time.Duration, error) { return 10 * time.Second, nil })) - ag.Register("b", FunctionReporter(func(topo.TabletType, bool) (time.Duration, error) { + ag.Register("b", FunctionReporter(func(bool, bool) (time.Duration, error) { return 5 * time.Second, nil })) - delay, err := ag.Report(topo.TYPE_REPLICA, true) + delay, err := ag.Report(true, true) if err != nil { t.Error(err) @@ -29,10 +27,10 @@ func TestReporters(t *testing.T) { t.Errorf("delay=%v, want 10s", delay) } - ag.Register("c", FunctionReporter(func(topo.TabletType, bool) (time.Duration, error) { + ag.Register("c", FunctionReporter(func(bool, bool) (time.Duration, error) { return 0, errors.New("e error") })) - if _, err := ag.Report(topo.TYPE_REPLICA, false); err == nil { + if _, err := ag.Report(true, false); err == nil { t.Errorf("ag.Run: expected error") } diff --git a/go/vt/mysqlctl/health.go b/go/vt/mysqlctl/health.go index a9f240f77a..749fdeaa47 100644 --- a/go/vt/mysqlctl/health.go +++ b/go/vt/mysqlctl/health.go @@ -6,7 +6,6 @@ import ( "time" "github.com/youtube/vitess/go/vt/health" - "github.com/youtube/vitess/go/vt/topo" ) // mysqlReplicationLag implements health.Reporter @@ -15,8 +14,8 @@ type mysqlReplicationLag struct { } // Report is part of the health.Reporter interface -func (mrl *mysqlReplicationLag) Report(tabletType topo.TabletType, shouldQueryServiceBeRunning bool) (time.Duration, error) { - if !topo.IsSlaveType(tabletType) { +func (mrl *mysqlReplicationLag) Report(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) { + if !isSlaveType { return 0, nil } diff --git a/go/vt/tabletmanager/healthcheck.go b/go/vt/tabletmanager/healthcheck.go index bb602fcd4b..2c761fb364 100644 --- a/go/vt/tabletmanager/healthcheck.go +++ b/go/vt/tabletmanager/healthcheck.go @@ -156,7 +156,7 @@ func (agent *ActionAgent) runHealthCheck(targetTabletType topo.TabletType) { if tablet.Type == topo.TYPE_MASTER { typeForHealthCheck = topo.TYPE_MASTER } - replicationDelay, err := agent.HealthReporter.Report(typeForHealthCheck, shouldQueryServiceBeRunning) + replicationDelay, err := agent.HealthReporter.Report(topo.IsSlaveType(typeForHealthCheck), shouldQueryServiceBeRunning) health := make(map[string]string) if err == nil { if replicationDelay > *unhealthyThreshold { diff --git a/go/vt/tabletmanager/healthcheck_test.go b/go/vt/tabletmanager/healthcheck_test.go index 2d304b6173..7cd7cfed9a 100644 --- 
a/go/vt/tabletmanager/healthcheck_test.go +++ b/go/vt/tabletmanager/healthcheck_test.go @@ -103,7 +103,7 @@ type fakeHealthCheck struct { reportError error } -func (fhc *fakeHealthCheck) Report(tabletType topo.TabletType, shouldQueryServiceBeRunning bool) (replicationDelay time.Duration, err error) { +func (fhc *fakeHealthCheck) Report(isSlaveType, shouldQueryServiceBeRunning bool) (replicationDelay time.Duration, err error) { return fhc.reportReplicationDelay, fhc.reportError } From 0829bd0c8519d153357dd037a2b624d5bf781ae9 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 14 May 2015 11:34:23 -0700 Subject: [PATCH 032/128] Use ioutil.ReadDir, remove obsolete code. --- go/vt/mysqlctl/backupstorage/file.go | 10 +++------- go/vt/mysqlctl/backupstorage/interface.go | 15 --------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/go/vt/mysqlctl/backupstorage/file.go b/go/vt/mysqlctl/backupstorage/file.go index da82b31a78..d237697f35 100644 --- a/go/vt/mysqlctl/backupstorage/file.go +++ b/go/vt/mysqlctl/backupstorage/file.go @@ -8,6 +8,7 @@ import ( "flag" "fmt" "io" + "io/ioutil" "os" "path" ) @@ -42,20 +43,16 @@ type FileBackupStorage struct { // ListBackups is part of the BackupStorage interface func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) { + // ReadDir already sorts the results p := path.Join(fbs.root, bucket) - f, err := os.Open(p) + fi, err := ioutil.ReadDir(p) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } - defer f.Close() - fi, err := f.Readdir(-1) - if err != nil { - return nil, err - } result := make([]BackupHandle, 0, len(fi)) for _, info := range fi { if !info.IsDir() { @@ -69,7 +66,6 @@ func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) name: info.Name(), }) } - SortBackupHandleArray(result) return result, nil } diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go index dd51fef105..59f6bb11f4 100644 --- a/go/vt/mysqlctl/backupstorage/interface.go +++ b/go/vt/mysqlctl/backupstorage/interface.go @@ -9,7 +9,6 @@ package backupstorage import ( "flag" "io" - "sort" log "github.com/golang/glog" ) @@ -60,20 +59,6 @@ type BackupStorage interface { RemoveBackup(bucket, name string) error } -// Helper code to sort BackupHandle arrays - -type byName []BackupHandle - -func (bha byName) Len() int { return len(bha) } -func (bha byName) Swap(i, j int) { bha[i], bha[j] = bha[j], bha[i] } -func (bha byName) Less(i, j int) bool { return bha[i].Name() < bha[j].Name() } - -// SortBackupHandleArray will sort the BackupHandle array by name. -// To be used by implementations on the result of ListBackups. -func SortBackupHandleArray(bha []BackupHandle) { - sort.Sort(byName(bha)) -} - // BackupStorageMap contains the registered implementations for BackupStorage var BackupStorageMap = make(map[string]BackupStorage) From 69540e5030d1af82611853291ae2d7368197d49e Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Thu, 14 May 2015 16:22:03 -0700 Subject: [PATCH 033/128] Report timings of fast external reparents. 
Export counts and histogram as debug vars for: * time until new master is visible * time until full rebuild is complete --- go/vt/tabletmanager/reparent.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/go/vt/tabletmanager/reparent.go b/go/vt/tabletmanager/reparent.go index c239ad910d..080dcc58c9 100644 --- a/go/vt/tabletmanager/reparent.go +++ b/go/vt/tabletmanager/reparent.go @@ -12,6 +12,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/event" + "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/trace" "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/logutil" @@ -27,6 +28,8 @@ var ( fastReparent = flag.Bool("fast_external_reparent", false, "Skip updating of fields in topology that aren't needed if all MySQL reparents are done by an external tool, instead of by Vitess directly.") finalizeReparentTimeout = flag.Duration("finalize_external_reparent_timeout", 10*time.Second, "Timeout for the finalize stage of a fast external reparent reconciliation.") + + externalReparentStats = stats.NewTimings("ExternalReparents") ) // SetReparentFlags changes flag values. It should only be used in tests. @@ -38,6 +41,8 @@ func SetReparentFlags(fast bool, timeout time.Duration) { // fastTabletExternallyReparented completely replaces TabletExternallyReparented // if the -fast_external_reparent flag is specified. func (agent *ActionAgent) fastTabletExternallyReparented(ctx context.Context, externalID string) (err error) { + startTime := time.Now() + // If there is a finalize step running, wait for it to finish or time out // before checking the global shard record again. if agent.finalizeReparentCtx != nil { @@ -105,6 +110,7 @@ func (agent *ActionAgent) fastTabletExternallyReparented(ctx context.Context, ex if err != nil { return fmt.Errorf("fastTabletExternallyReparented: failed to update master endpoint: %v", err) } + externalReparentStats.Record("NewMasterVisible", startTime) // Start the finalize stage with a background context, but connect the trace. bgCtx, cancel := context.WithTimeout(agent.batchCtx, *finalizeReparentTimeout) @@ -117,7 +123,9 @@ func (agent *ActionAgent) fastTabletExternallyReparented(ctx context.Context, ex if err != nil { log.Warningf("finalizeTabletExternallyReparented error: %v", err) event.DispatchUpdate(ev, "failed: "+err.Error()) + return } + externalReparentStats.Record("FullRebuild", startTime) }() return nil From 82a0a18ac1bc38ebd7c77e4f530ecf795fe45930 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Thu, 14 May 2015 15:40:40 -0700 Subject: [PATCH 034/128] add unit test for ConsoleEventHandler --- go/vt/schemamanager/console_event_handler.go | 4 +- .../console_event_handler_test.go | 41 +++++++++++++++++++ go/vt/schemamanager/schemamanager.go | 6 ++- go/vt/schemamanager/schemamanager_test.go | 13 ++++++ 4 files changed, 60 insertions(+), 4 deletions(-) create mode 100644 go/vt/schemamanager/console_event_handler_test.go diff --git a/go/vt/schemamanager/console_event_handler.go b/go/vt/schemamanager/console_event_handler.go index 010d4fbb69..54d08f065f 100644 --- a/go/vt/schemamanager/console_event_handler.go +++ b/go/vt/schemamanager/console_event_handler.go @@ -26,7 +26,7 @@ func (handler *ConsoleEventHandler) OnDataSourcerReadSuccess(sql []string) error // OnDataSourcerReadFail is called when schemamanager fails to read all sql statements. 
func (handler *ConsoleEventHandler) OnDataSourcerReadFail(err error) error { fmt.Printf("Failed to read schema changes, error: %v\n", err) - return nil + return err } // OnValidationSuccess is called when schemamanager successfully validates all sql statements. @@ -38,7 +38,7 @@ func (handler *ConsoleEventHandler) OnValidationSuccess([]string) error { // OnValidationFail is called when schemamanager fails to validate sql statements. func (handler *ConsoleEventHandler) OnValidationFail(err error) error { fmt.Printf("Failed to validate sqls, error: %v\n", err) - return nil + return err } // OnExecutorComplete is called when schemamanager finishes applying schema changes. diff --git a/go/vt/schemamanager/console_event_handler_test.go b/go/vt/schemamanager/console_event_handler_test.go new file mode 100644 index 0000000000..f1b4b55fe1 --- /dev/null +++ b/go/vt/schemamanager/console_event_handler_test.go @@ -0,0 +1,41 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "fmt" + "testing" +) + +func TestConsoleEventHandler(t *testing.T) { + sqls := []string{"CREATE TABLE test_table (pk int)"} + handler := NewConsoleEventHandler() + err := handler.OnDataSourcerReadSuccess(sqls) + if err != nil { + t.Fatalf("OnDataSourcerReadSuccess should succeed") + } + + errReadFail := fmt.Errorf("read fail") + err = handler.OnDataSourcerReadFail(errReadFail) + if err != errReadFail { + t.Fatalf("should get error:%v, but get: %v", errReadFail, err) + } + + err = handler.OnValidationSuccess(sqls) + if err != nil { + t.Fatalf("OnValidationSuccess should succeed") + } + + errValidationFail := fmt.Errorf("validation fail") + err = handler.OnValidationFail(errValidationFail) + if err != errValidationFail { + t.Fatalf("should get error:%v, but get: %v", errValidationFail, err) + } + + err = handler.OnExecutorComplete(&ExecuteResult{}) + if err != nil { + t.Fatalf("OnExecutorComplete should succeed") + } +} diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index e0d51c2bdc..9884778f2e 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -69,7 +69,8 @@ func Run(sourcer DataSourcer, sqls, err := sourcer.Read() if err != nil { log.Errorf("failed to read data from data sourcer: %v", err) - return handler.OnDataSourcerReadFail(err) + handler.OnDataSourcerReadFail(err) + return err } handler.OnDataSourcerReadSuccess(sqls) if err := exec.Open(); err != nil { @@ -79,7 +80,8 @@ func Run(sourcer DataSourcer, defer exec.Close() if err := exec.Validate(sqls); err != nil { log.Errorf("validation fail: %v", err) - return handler.OnValidationFail(err) + handler.OnValidationFail(err) + return err } handler.OnValidationSuccess(sqls) result := exec.Execute(sqls) diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 6a921341ca..ff389ee589 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -71,6 +71,19 @@ func TestRunSchemaChangesExecutorOpenFail(t *testing.T) { } } +func TestRunSchemaChangesExecutorExecuteFail(t *testing.T) { + dataSourcer := newFakeDataSourcer([]string{"create table test_table (pk int);"}, false, false, false) + handler := newFakeHandler() + exec := NewTabletExecutor( + newFakeTabletManagerClient(), + newFakeTopo(), + "test_keyspace") + err := Run(dataSourcer, exec, handler) + if err == 
nil { + t.Fatalf("run schema change should fail due to executor.Execute fail") + } +} + func TestRunSchemaChanges(t *testing.T) { sql := "create table test_table (pk int)" dataSourcer := NewSimpleDataSourcer(sql) From d48c3188f79f4b1540a2e398c1ed471d5c8349d1 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Thu, 14 May 2015 19:35:19 -0700 Subject: [PATCH 035/128] detect big schema change and reject it 1. Detect big schema change based on table rows. A schema change is considered big if 1) alter more than 100,000 rows, or 2) change a table with more than 2,000,000 rows. 2. Add unit test for TabletExecutor and also improve existing test cases. --- go/vt/schemamanager/schemamanager_test.go | 19 +++- go/vt/schemamanager/tablet_executor.go | 55 ++++++++- go/vt/schemamanager/tablet_executor_test.go | 118 ++++++++++++++++++++ 3 files changed, 185 insertions(+), 7 deletions(-) create mode 100644 go/vt/schemamanager/tablet_executor_test.go diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index ff389ee589..4d071de61f 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -103,6 +103,8 @@ func TestRunSchemaChanges(t *testing.T) { }, }) + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &proto.SchemaDefinition{}) + exec := NewTabletExecutor( fakeTmc, newFakeTopo(), @@ -140,12 +142,14 @@ func newFakeTabletManagerClient() *fakeTabletManagerClient { return &fakeTabletManagerClient{ TabletManagerClient: faketmclient.NewFakeTabletManagerClient(), preflightSchemas: make(map[string]*proto.SchemaChangeResult), + schemaDefinitions: make(map[string]*proto.SchemaDefinition), } } type fakeTabletManagerClient struct { tmclient.TabletManagerClient - preflightSchemas map[string]*proto.SchemaChangeResult + preflightSchemas map[string]*proto.SchemaChangeResult + schemaDefinitions map[string]*proto.SchemaDefinition } func (client *fakeTabletManagerClient) AddSchemaChange( @@ -153,6 +157,11 @@ func (client *fakeTabletManagerClient) AddSchemaChange( client.preflightSchemas[sql] = schemaResult } +func (client *fakeTabletManagerClient) AddSchemaDefinition( + dbName string, schemaDefinition *proto.SchemaDefinition) { + client.schemaDefinitions[dbName] = schemaDefinition +} + func (client *fakeTabletManagerClient) PreflightSchema(ctx context.Context, tablet *topo.TabletInfo, change string) (*proto.SchemaChangeResult, error) { result, ok := client.preflightSchemas[change] if !ok { @@ -162,6 +171,14 @@ func (client *fakeTabletManagerClient) PreflightSchema(ctx context.Context, tabl return result, nil } +func (client *fakeTabletManagerClient) GetSchema(ctx context.Context, tablet *topo.TabletInfo, tables, excludeTables []string, includeViews bool) (*proto.SchemaDefinition, error) { + result, ok := client.schemaDefinitions[tablet.DbName()] + if !ok { + return nil, fmt.Errorf("unknown database: %s", tablet.DbName()) + } + return result, nil +} + type fakeTopo struct{} func newFakeTopo() *fakeTopo { diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 4fd11635b5..39d737dade 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -63,29 +63,72 @@ func (exec *TabletExecutor) Open() error { exec.tabletInfos[i] = tabletInfo log.Infof("\t\tTabletInfo: %+v\n", tabletInfo) } + + if len(exec.tabletInfos) == 0 { + return fmt.Errorf("keyspace: %s does not contain any master tablets", exec.keyspace) + } exec.isClosed = false return nil } // Validate 
validates a list of sql statements
 func (exec *TabletExecutor) Validate(sqls []string) error {
-	for _, sql := range sqls {
+	if exec.isClosed {
+		return fmt.Errorf("executor is closed")
+	}
+	parsedDDLs := make([]*sqlparser.DDL, len(sqls))
+	for i, sql := range sqls {
 		stat, err := sqlparser.Parse(sql)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to parse sql: %s, got error: %v", sql, err)
 		}
-		_, ok := stat.(*sqlparser.DDL)
+		ddl, ok := stat.(*sqlparser.DDL)
 		if !ok {
 			return fmt.Errorf("schema change works for DDLs only, but get non DDL statement: %s", sql)
 		}
+		parsedDDLs[i] = ddl
+	}
+	return exec.detectBigSchemaChanges(parsedDDLs)
+}
+
+// a schema change that satisfies any of the following conditions is considered
+// to be a big schema change and will be rejected.
+// 1. Alter more than 100,000 rows.
+// 2. Change a table with more than 2,000,000 rows (Drops are fine).
+func (exec *TabletExecutor) detectBigSchemaChanges(parsedDDLs []*sqlparser.DDL) error {
+	// exec.tabletInfos is guaranteed to have at least one element;
+	// otherwise, Open would have failed and the executor would not be usable.
+	masterTabletInfo := exec.tabletInfos[0]
+	// get database schema, excluding views.
+	dbSchema, err := exec.tmClient.GetSchema(
+		context.Background(), masterTabletInfo, []string{}, []string{}, false)
+	if err != nil {
+		return fmt.Errorf("unable to get database schema, error: %v", err)
+	}
+	tableWithCount := make(map[string]uint64, dbSchema.TableDefinitions.Len())
+	for _, tableSchema := range dbSchema.TableDefinitions {
+		tableWithCount[tableSchema.Name] = tableSchema.RowCount
+	}
+	for _, ddl := range parsedDDLs {
+		if ddl.Action == sqlparser.AST_DROP {
+			continue
+		}
+		tableName := string(ddl.Table)
+		if rowCount, ok := tableWithCount[tableName]; ok {
+			if rowCount > 100000 && ddl.Action == sqlparser.AST_ALTER {
+				return fmt.Errorf(
+					"big schema change, ddl: %v alters a table with more than 100 thousand rows", ddl)
+			}
+			if rowCount > 2000000 {
+				return fmt.Errorf(
+					"big schema change, ddl: %v changes a table with more than 2 million rows", ddl)
+			}
+		}
 	}
 	return nil
 }
 
 func (exec *TabletExecutor) preflightSchemaChanges(sqls []string) error {
-	if len(exec.tabletInfos) == 0 {
-		return nil
-	}
 	exec.schemaDiffs = make([]*proto.SchemaChangeResult, len(sqls))
 	for i := range sqls {
 		schemaDiff, err := exec.tmClient.PreflightSchema(
diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go
new file mode 100644
index 0000000000..394edae46f
--- /dev/null
+++ b/go/vt/schemamanager/tablet_executor_test.go
@@ -0,0 +1,118 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package schemamanager + +import ( + "testing" + + "github.com/youtube/vitess/go/vt/mysqlctl/proto" +) + +func TestTabletExecutorOpen(t *testing.T) { + executor := newFakeExecutor() + if err := executor.Open(); err != nil { + t.Fatalf("executor.Open() should succeed") + } + + defer executor.Close() + + if err := executor.Open(); err != nil { + t.Fatalf("open an opened executor should also succeed") + } +} + +func TestTabletExecutorValidate(t *testing.T) { + fakeTmc := newFakeTabletManagerClient() + + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &proto.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", + TableDefinitions: []*proto.TableDefinition{ + &proto.TableDefinition{ + Name: "test_table", + Schema: "table schema", + Type: proto.TableBaseTable, + }, + &proto.TableDefinition{ + Name: "test_table_03", + Schema: "table schema", + Type: proto.TableBaseTable, + RowCount: 200000, + }, + &proto.TableDefinition{ + Name: "test_table_04", + Schema: "table schema", + Type: proto.TableBaseTable, + RowCount: 3000000, + }, + }, + }) + + executor := NewTabletExecutor( + fakeTmc, + newFakeTopo(), + "test_keyspace") + + sqls := []string{ + "ALTER TABLE test_table ADD COLUMN new_id bigint(20)", + "CREATE TABLE test_table_02 (pk int)", + } + + if err := executor.Validate(sqls); err == nil { + t.Fatalf("validate should fail because executor is closed") + } + + executor.Open() + defer executor.Close() + + // schema changes with DMLs should fail + if err := executor.Validate([]string{ + "INSERT INTO test_table VALUES(1)"}); err == nil { + t.Fatalf("schema changes are for DDLs") + } + + // validates valid ddls + if err := executor.Validate(sqls); err != nil { + t.Fatalf("executor.Validate should succeed, but got error: %v", err) + } + + // alter a table with more than 100,000 rows + if err := executor.Validate([]string{ + "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", + }); err == nil { + t.Fatalf("executor.Validate should fail, alter a table more than 100,000 rows") + } + + // change a table with more than 2,000,000 rows + if err := executor.Validate([]string{ + "RENAME TABLE test_table_04 TO test_table_05", + }); err == nil { + t.Fatalf("executor.Validate should fail, change a table more than 2,000,000 rows") + } + + if err := executor.Validate([]string{ + "DROP TABLE test_table_04", + }); err != nil { + t.Fatalf("executor.Validate should succeed, drop a table with more than 2,000,000 rows is allowed") + } +} + +func TestTabletExecutorExecute(t *testing.T) { + executor := newFakeExecutor() + + sqls := []string{"DROP TABLE unknown_table"} + + result := executor.Execute(sqls) + if result.ExecutorErr == "" { + t.Fatalf("execute should fail, call execute.Open first") + } + + executor.Open() + defer executor.Close() + + result = executor.Execute(sqls) + if result.ExecutorErr == "" { + t.Fatalf("execute should fail, ddl does not introduce any table schema change") + } +} From 0a95327707026cccafa505c34e2e48a68edcc6a1 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Fri, 15 May 2015 18:52:02 -0700 Subject: [PATCH 036/128] remove deprecated information in schema management doc 1. Old vtctl commands ApplySchemaShard, ApplySchemaKeyspace, PreflightSchema have been removed. 2. ApplySchema only supports doing schema changes for the whole keyspace. 
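For reference, the size limits described in the updated doc below reduce to a
small gate on each parsed DDL. A minimal standalone sketch (hypothetical
helper name, simplified from detectBigSchemaChanges in
go/vt/schemamanager/tablet_executor.go, which works on real sqlparser actions
and per-table row counts):

```go
package main

import "fmt"

// rejectBigSchemaChange is a simplified stand-in for detectBigSchemaChanges:
// drops always pass, alters are rejected above 100,000 rows, and any other
// non-drop change is rejected above 2,000,000 rows.
func rejectBigSchemaChange(action string, rowCount uint64) error {
	if action == "drop" {
		return nil
	}
	if action == "alter" && rowCount > 100000 {
		return fmt.Errorf("big schema change: alters a table with more than 100,000 rows")
	}
	if rowCount > 2000000 {
		return fmt.Errorf("big schema change: changes a table with more than 2,000,000 rows")
	}
	return nil
}
```

The cap exists because changes are applied only on the masters: a
long-running ALTER would stall replication for its whole duration.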
---
 doc/SchemaManagement.md | 89 ++++-------------------------------
 1 file changed, 7 insertions(+), 82 deletions(-)

diff --git a/doc/SchemaManagement.md b/doc/SchemaManagement.md
index b81d452b05..7c5511ea01 100644
--- a/doc/SchemaManagement.md
+++ b/doc/SchemaManagement.md
@@ -61,89 +61,14 @@ type SchemaChangeResult struct {
 }
 ```
 
-The ApplySchema action applies a schema change. It is described by the following structure (also returns a SchemaChangeResult):
+The ApplySchema action applies a schema change to a specified keyspace. The performed steps are:
 
-```go
-type SchemaChange struct {
-  Sql              string
-  Force            bool
-  AllowReplication bool
-  BeforeSchema     *SchemaDefinition
-  AfterSchema      *SchemaDefinition
-}
-```
-
-And the associated ApplySchema remote action for a tablet. Then the performed steps are:
-
-* The database to use is either derived from the tablet dbName if UseVt is false, or is the _vt database. A ‘use dbname’ is prepended to the Sql.
-* (if BeforeSchema is not nil) read the schema, make sure it is equal to BeforeSchema. If not equal: if Force is not set, we will abort, if Force is set, we’ll issue a warning and keep going.
-* if AllowReplication is false, we’ll disable replication (adding SET sql_log_bin=0 before the Sql).
-* We will then apply the Sql command.
-* (if AfterSchema is not nil) read the schema again, make sure it is equal to AfterSchema. If not equal: if Force is not set, we will issue an error, if Force is set, we’ll issue a warning.
-
-We will return the following information:
-
-* whether it worked or not (doh!)
-* BeforeSchema
-* AfterSchema
-
-### Use case 1: Single tablet update:
-
-* we first do a Preflight (to know what BeforeSchema and AfterSchema will be). This can be disabled, but is not recommended.
-* we then do the schema upgrade. We will check BeforeSchema before the upgrade, and AfterSchema after the upgrade.
-
-### Use case 2: Single Shard update:
-
-* need to figure out (or be told) if it’s a simple or complex schema update (does it require the shell game?). For now we'll use a command line flag.
-* in any case, do a Preflight on the master, to get the BeforeSchema and AfterSchema values.
-* in any case, gather the schema on all databases, to see which ones have been upgraded already or not. This guarantees we can interrupt and restart a schema change. Also, this makes sure no action is currently running on the databases we're about to change.
-* if simple:
-  * nobody has it: apply to master, very similar to a single tablet update.
-  * some tablets have it but not others: error out
-* if complex: do the shell game while disabling replication. Skip the tablets that already have it. Have an option to re-parent at the end.
-  * Note the Backup, and Lag servers won't apply a complex schema change. Only the servers actively in the replication graph will.
-  * the process can be interrupted at any time, restarting it as a complex schema upgrade should just work.
-
-### Use case 3: Keyspace update:
-
-* Similar to Single Shard, but the BeforeSchema and AfterSchema values are taken from the first shard, and used in all shards after that.
-* We don't know the new masters to use on each shard, so just skip re-parenting all together.
-
-This translates into the following vtctl commands:
+* It first finds the shards belonging to this keyspace, including newly added shards in the presence of a [resharding event](Resharding.md).
+* Validate the SQL syntax and reject the schema change if it 1) alters more than 100,000 rows, or 2) targets a table with more than 2,000,000 rows. The rationale behind this is that ApplySchema simply applies schema changes to the masters; therefore, a big schema change that takes too much time slows down replication and may reduce the availability of the overall system.
+* Create a temporary database that has the same schema as the targeted table. Apply the SQL to it and make sure it changes the table structure.
+* Apply the SQL command to the database.
+* Read the schema again and make sure it is equal to AfterSchema.
 
 ```
-PreflightSchema {-sql= || -sql_file=}
+ApplySchema {-sql= || -sql_file=}
 ```
-
-apply the schema change to a temporary database to gather before and after schema and validate the change. The sql can be inlined or read from a file.
-This will create a temporary database, copy the existing keyspace schema into it, apply the schema change, and re-read the resulting schema.
-
-```
-$ echo "create table test_table(id int);" > change.sql
-$ vtctl PreflightSchema -sql_file=change.sql nyc-0002009001
-```
-
-```
-ApplySchema {-sql= || -sql_file=} [-skip_preflight] [-stop_replication]
-```
-
-apply the schema change to the specific tablet (allowing replication by default). The sql can be inlined or read from a file.
-a PreflightSchema operation will first be used to make sure the schema is OK (unless skip_preflight is specified).
-
-```
-ApplySchemaShard {-sql= || -sql_file=} [-simple] [-new_parent=]
-```
-
-apply the schema change to the specific shard. If simple is specified, we just apply on the live master. Otherwise, we do the shell game and will optionally re-parent.
-if new_parent is set, we will also reparent (otherwise the master won't be touched at all). Using the force flag will cause a bunch of checks to be ignored, use with care.
-
-```
-$ vtctl ApplySchemaShard --sql-file=change.sql -simple vtx/0
-$ vtctl ApplySchemaShard --sql-file=change.sql -new_parent=nyc-0002009002 vtx/0
-```
-
-```
-ApplySchemaKeyspace {-sql= || -sql_file=} [-simple]
-```
-
-apply the schema change to the specified shard. If simple is specified, we just apply on the live master. Otherwise we will need to do the shell game. So we will apply the schema change to every single slave.

From 01efe324e602fa49abc977eaffc949c8b24a6504 Mon Sep 17 00:00:00 2001
From: Shengzhe Yao
Date: Sun, 17 May 2015 20:14:16 -0700
Subject: [PATCH 037/128] delete go/vt/concurrency/resource_constraint.go

resource_constraint.go is not used by any other package; delete it.
---
 go/vt/concurrency/resource_constraint.go | 106 -----------------------
 1 file changed, 106 deletions(-)
 delete mode 100644 go/vt/concurrency/resource_constraint.go

diff --git a/go/vt/concurrency/resource_constraint.go b/go/vt/concurrency/resource_constraint.go
deleted file mode 100644
index dd2881112b..0000000000
--- a/go/vt/concurrency/resource_constraint.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2013, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package concurrency
-
-import (
-	"fmt"
-	"sync"
-
-	"github.com/youtube/vitess/go/sync2"
-)
-
-// ResourceConstraint combines 3 different features:
-// - a WaitGroup to wait for all tasks to be done
-// - a Semaphore to control concurrency
-// - an ErrorRecorder
-type ResourceConstraint struct {
-	semaphore *sync2.Semaphore
-	wg        sync.WaitGroup
-	FirstErrorRecorder
-}
-
-// NewResourceConstraint creates a ResourceConstraint with
-// max concurrency.
-func NewResourceConstraint(max int) *ResourceConstraint {
-	return &ResourceConstraint{semaphore: sync2.NewSemaphore(max, 0)}
-}
-
-func (rc *ResourceConstraint) Add(n int) {
-	rc.wg.Add(n)
-}
-
-func (rc *ResourceConstraint) Done() {
-	rc.wg.Done()
-}
-
-// Wait waits for the WG and returns the firstError we encountered, or nil
-func (rc *ResourceConstraint) Wait() error {
-	rc.wg.Wait()
-	return rc.Error()
-}
-
-// Acquire will wait until we have a resource to use
-func (rc *ResourceConstraint) Acquire() {
-	rc.semaphore.Acquire()
-}
-
-func (rc *ResourceConstraint) Release() {
-	rc.semaphore.Release()
-}
-
-func (rc *ResourceConstraint) ReleaseAndDone() {
-	rc.Release()
-	rc.Done()
-}
-
-// MultiResourceConstraint combines 3 different features:
-// - a WaitGroup to wait for all tasks to be done
-// - a Semaphore map to control multiple concurrencies
-// - an ErrorRecorder
-type MultiResourceConstraint struct {
-	semaphoreMap map[string]*sync2.Semaphore
-	wg           sync.WaitGroup
-	FirstErrorRecorder
-}
-
-func NewMultiResourceConstraint(semaphoreMap map[string]*sync2.Semaphore) *MultiResourceConstraint {
-	return &MultiResourceConstraint{semaphoreMap: semaphoreMap}
-}
-
-func (mrc *MultiResourceConstraint) Add(n int) {
-	mrc.wg.Add(n)
-}
-
-func (mrc *MultiResourceConstraint) Done() {
-	mrc.wg.Done()
-}
-
-// Returns the firstError we encountered, or nil
-func (mrc *MultiResourceConstraint) Wait() error {
-	mrc.wg.Wait()
-	return mrc.Error()
-}
-
-// Acquire will wait until we have a resource to use
-func (mrc *MultiResourceConstraint) Acquire(name string) {
-	s, ok := mrc.semaphoreMap[name]
-	if !ok {
-		panic(fmt.Errorf("MultiResourceConstraint: No resource named %v in semaphore map", name))
-	}
-	s.Acquire()
-}
-
-func (mrc *MultiResourceConstraint) Release(name string) {
-	s, ok := mrc.semaphoreMap[name]
-	if !ok {
-		panic(fmt.Errorf("MultiResourceConstraint: No resource named %v in semaphore map", name))
-	}
-	s.Release()
-}
-
-func (mrc *MultiResourceConstraint) ReleaseAndDone(name string) {
-	mrc.Release(name)
-	mrc.Done()
-}

From 60fc388dbfc8ff1df3d79fa0095720c092180156 Mon Sep 17 00:00:00 2001
From: Alain Jobart
Date: Mon, 18 May 2015 12:00:16 -0700
Subject: [PATCH 038/128] Implementation of the backup core functionality.

The new integration test only checks that the backup appears to work,
nothing serious yet. A couple more things still need to be refactored
before it can all be covered by unit tests.
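The heart of the per-file backup is the pipeline in backupFiles below:
source file -> gzip -> tee into the destination and a hasher, so compression
and checksumming happen in a single pass. A minimal sketch with
standard-library stand-ins (the real code uses cgzip, a buffered writer, and
its own hasher helper; md5 here is only an assumed placeholder):

```go
package backupsketch

import (
	"compress/gzip"
	"crypto/md5"
	"encoding/hex"
	"io"
)

// compressAndHash copies src into dst while gzip-compressing it, and returns
// the hex hash of the compressed bytes (the hash is taken after compression,
// matching how the backup stores per-file hashes in its MANIFEST).
func compressAndHash(src io.Reader, dst io.Writer) (string, error) {
	hasher := md5.New() // stand-in for the hasher helper used by the real code
	tee := io.MultiWriter(dst, hasher)
	gz := gzip.NewWriter(tee)
	if _, err := io.Copy(gz, src); err != nil {
		return "", err
	}
	// Close flushes the gzip stream; only then does the hash cover all bytes.
	if err := gz.Close(); err != nil {
		return "", err
	}
	return hex.EncodeToString(hasher.Sum(nil)), nil
}
```

Hashing the compressed bytes means a stored file can be verified as-is,
without decompressing it first.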
--- go/cmd/vttablet/plugin_filebackupstorage.go | 16 + go/vt/mysqlctl/backup.go | 381 ++++++++++++++++++ go/vt/mysqlctl/backupstorage/file.go | 6 +- go/vt/tabletmanager/actionnode/actionnode.go | 3 + go/vt/tabletmanager/agent_rpc_actions.go | 50 +++ .../agentrpctest/test_agent_rpc.go | 37 ++ .../tabletmanager/faketmclient/fake_client.go | 8 + go/vt/tabletmanager/gorpcproto/structs.go | 5 + .../gorpctmclient/gorpc_client.go | 49 +++ .../gorpctmserver/gorpc_server.go | 27 ++ .../tabletmanager/tmclient/rpc_client_api.go | 3 + go/vt/vtctl/vtctl.go | 30 ++ test/backup.py | 112 +++++ test/tablet.py | 8 +- 14 files changed, 732 insertions(+), 3 deletions(-) create mode 100644 go/cmd/vttablet/plugin_filebackupstorage.go create mode 100644 go/vt/mysqlctl/backup.go create mode 100755 test/backup.py diff --git a/go/cmd/vttablet/plugin_filebackupstorage.go b/go/cmd/vttablet/plugin_filebackupstorage.go new file mode 100644 index 0000000000..ae1bec8e4a --- /dev/null +++ b/go/cmd/vttablet/plugin_filebackupstorage.go @@ -0,0 +1,16 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" + "github.com/youtube/vitess/go/vt/servenv" +) + +func init() { + servenv.OnRun(func() { + backupstorage.RegisterFileBackupStorage() + }) +} diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go new file mode 100644 index 0000000000..dd8c47b41f --- /dev/null +++ b/go/vt/mysqlctl/backup.go @@ -0,0 +1,381 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mysqlctl + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "sync" + + "github.com/youtube/vitess/go/cgzip" + "github.com/youtube/vitess/go/sync2" + "github.com/youtube/vitess/go/vt/concurrency" + "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" + "github.com/youtube/vitess/go/vt/mysqlctl/proto" +) + +// This file handles the backup and restore related code + +const ( + // the three bases for files to restore + backupInnodbDataHomeDir = "InnoDBData" + backupInnodbLogGroupHomeDir = "InnoDBLog" + backupData = "Data" + + // the manifest file name + backupManifest = "MANIFEST" +) + +// FileEntry is one file to backup +type FileEntry struct { + // Base is one of: + // - backupInnodbDataHomeDir for files that go into Mycnf.InnodbDataHomeDir + // - backupInnodbLogGroupHomeDir for files that go into Mycnf.InnodbLogGroupHomeDir + // - backupData for files that go into Mycnf.DataDir + Base string + + // Name is the file name, relative to Base + Name string + + // Hash is the hash of the gzip compressed data stored in the + // BackupStorage. 
+	Hash string
+}
+
+func (fe *FileEntry) open(cnf *Mycnf) (*os.File, error) {
+	// find the root to use
+	var root string
+	switch fe.Base {
+	case backupInnodbDataHomeDir:
+		root = cnf.InnodbDataHomeDir
+	case backupInnodbLogGroupHomeDir:
+		root = cnf.InnodbLogGroupHomeDir
+	case backupData:
+		root = cnf.DataDir
+	default:
+		return nil, fmt.Errorf("unknown base: %v", fe.Base)
+	}
+
+	// and open the file
+	name := path.Join(root, fe.Name)
+	fd, err := os.Open(name)
+	if err != nil {
+		return nil, fmt.Errorf("cannot open source file %v: %v", name, err)
+	}
+	return fd, nil
+}
+
+// BackupManifest represents the backup. It lists all the files, and
+// the ReplicationPosition that the backup was taken at.
+type BackupManifest struct {
+	// FileEntries contains all the files in the backup
+	FileEntries []FileEntry
+
+	// ReplicationPosition is the position at which the backup was taken
+	ReplicationPosition proto.ReplicationPosition
+}
+
+// isDbDir returns true if the given directory contains a DB
+func isDbDir(p string) bool {
+	// db.opt is there
+	if _, err := os.Stat(path.Join(p, "db.opt")); err == nil {
+		return true
+	}
+
+	// Look for at least one .frm file
+	fis, err := ioutil.ReadDir(p)
+	if err != nil {
+		return false
+	}
+	for _, fi := range fis {
+		if strings.HasSuffix(fi.Name(), ".frm") {
+			return true
+		}
+	}
+
+	return false
+}
+
+func addDirectory(fes []FileEntry, base string, baseDir string, subDir string) ([]FileEntry, error) {
+	p := path.Join(baseDir, subDir)
+
+	fis, err := ioutil.ReadDir(p)
+	if err != nil {
+		return nil, err
+	}
+	for _, fi := range fis {
+		fes = append(fes, FileEntry{
+			Base: base,
+			Name: path.Join(subDir, fi.Name()),
+		})
+	}
+	return fes, nil
+}
+
+func findFilesTobackup(cnf *Mycnf, logger logutil.Logger) ([]FileEntry, error) {
+	var err error
+	var result []FileEntry
+
+	// first add inno db files
+	result, err = addDirectory(result, backupInnodbDataHomeDir, cnf.InnodbDataHomeDir, "")
+	if err != nil {
+		return nil, err
+	}
+	result, err = addDirectory(result, backupInnodbLogGroupHomeDir, cnf.InnodbLogGroupHomeDir, "")
+	if err != nil {
+		return nil, err
+	}
+
+	// then add DB directories
+	fis, err := ioutil.ReadDir(cnf.DataDir)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, fi := range fis {
+		p := path.Join(cnf.DataDir, fi.Name())
+
+		// If this is not a directory, try to eval it as a symlink.
+		if !fi.IsDir() {
+			p, err = filepath.EvalSymlinks(p)
+			if err != nil {
+				return nil, err
+			}
+			fi, err = os.Stat(p)
+			if err != nil {
+				return nil, err
+			}
+		}
+		if isDbDir(p) {
+			result, err = addDirectory(result, backupData, cnf.DataDir, fi.Name())
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return result, nil
+}
+
+// Backup is the main entry point for a backup:
+// - uses the BackupStorage service to store a new backup
+// - shuts down Mysqld during the backup
+// - remembers if we were replicating, and restores the exact same state
+func (mysqld *Mysqld) Backup(logger logutil.Logger, bucket, name string, backupConcurrency int, hookExtraEnv map[string]string) error {
+
+	// start the backup with the BackupStorage
+	bs := backupstorage.GetBackupStorage()
+	bh, err := bs.StartBackup(bucket, name)
+	if err != nil {
+		return fmt.Errorf("StartBackup failed: %v", err)
+	}
+
+	if err = mysqld.backup(logger, bs, bh, backupConcurrency, hookExtraEnv); err != nil {
+		if err := bs.AbortBackup(bh); err != nil {
+			logger.Errorf("failed to abort backup: %v", err)
+		}
+	} else {
+		err = bs.EndBackup(bh)
+	}
+
+	return err
+}
+
+func (mysqld *Mysqld) backup(logger logutil.Logger, bs backupstorage.BackupStorage, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) error {
+
+	// save initial state so we can restore
+	slaveStartRequired := false
+	sourceIsMaster := false
+	readOnly := true
+	var replicationPosition proto.ReplicationPosition
+
+	// see if we need to restart replication after backup
+	logger.Infof("getting current replication status")
+	slaveStatus, err := mysqld.SlaveStatus()
+	switch err {
+	case nil:
+		slaveStartRequired = slaveStatus.SlaveRunning()
+	case ErrNotSlave:
+		// keep going if we're the master, might be a degenerate case
+		sourceIsMaster = true
+	default:
+		return fmt.Errorf("cannot get slave status: %v", err)
+	}
+
+	// get the read-only flag
+	readOnly, err = mysqld.IsReadOnly()
+	if err != nil {
+		return fmt.Errorf("cannot get read only status: %v", err)
+	}
+
+	// get the replication position
+	if sourceIsMaster {
+		if !readOnly {
+			logger.Infof("turning master read-only before backup")
+			if err = mysqld.SetReadOnly(true); err != nil {
+				return fmt.Errorf("cannot set read-only mode: %v", err)
+			}
+		}
+		replicationPosition, err = mysqld.MasterPosition()
+		if err != nil {
+			return fmt.Errorf("cannot get master position: %v", err)
+		}
+	} else {
+		if err = StopSlave(mysqld, hookExtraEnv); err != nil {
+			return fmt.Errorf("cannot stop slave: %v", err)
+		}
+		var slaveStatus proto.ReplicationStatus
+		slaveStatus, err = mysqld.SlaveStatus()
+		if err != nil {
+			return fmt.Errorf("cannot get slave status: %v", err)
+		}
+		replicationPosition = slaveStatus.Position
+	}
+	logger.Infof("using replication position: %#v", replicationPosition)
+
+	// shutdown mysqld
+	if err = mysqld.Shutdown(true, MysqlWaitTime); err != nil {
+		return fmt.Errorf("cannot shutdown mysqld: %v", err)
+	}
+
+	// get the files to backup
+	fes, err := findFilesTobackup(mysqld.config, logger)
+	if err != nil {
+		return fmt.Errorf("cannot find files to backup: %v", err)
+	}
+	logger.Infof("found %v files to backup", len(fes))
+
+	// backup everything
+	if err := mysqld.backupFiles(logger, bs, bh, fes, replicationPosition, backupConcurrency); err != nil {
+		return fmt.Errorf("cannot backup files: %v", err)
+	}
+
+	// Try to restart mysqld
+	if err := mysqld.Start(MysqlWaitTime); err != nil {
+		return fmt.Errorf("cannot restart mysqld: %v", err)
+	}
+
+	// Restore original mysqld state that we saved above.
+	if slaveStartRequired {
+		logger.Infof("restarting mysql replication")
+		if err := StartSlave(mysqld, hookExtraEnv); err != nil {
+			return fmt.Errorf("cannot restart slave: %v", err)
+		}
+
+		// this should be quick, but we might as well just wait
+		if err := mysqld.WaitForSlaveStart(slaveStartDeadline); err != nil {
+			return fmt.Errorf("slave is not restarting: %v", err)
+		}
+	}
+
+	// And set read-only mode
+	logger.Infof("resetting mysqld read-only to %v", readOnly)
+	if err := mysqld.SetReadOnly(readOnly); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bs backupstorage.BackupStorage, bh backupstorage.BackupHandle, fes []FileEntry, replicationPosition proto.ReplicationPosition, backupConcurrency int) error {
+
+	sema := sync2.NewSemaphore(backupConcurrency, 0)
+	rec := concurrency.AllErrorRecorder{}
+	wg := sync.WaitGroup{}
+	for i, fe := range fes {
+		wg.Add(1)
+		go func(i int, fe FileEntry) {
+			defer wg.Done()
+
+			// wait until we are ready to go, skip if we already
+			// encountered an error
+			sema.Acquire()
+			defer sema.Release()
+			if rec.HasErrors() {
+				return
+			}
+
+			// open the source file for reading
+			source, err := fe.open(mysqld.config)
+			if err != nil {
+				rec.RecordError(err)
+				return
+			}
+			defer source.Close()
+
+			// open the destination file for writing, and a buffer
+			name := fmt.Sprintf("%v", i)
+			wc, err := bs.AddFile(bh, name)
+			if err != nil {
+				rec.RecordError(fmt.Errorf("cannot add file: %v", err))
+				return
+			}
+			defer wc.Close()
+			dst := bufio.NewWriterSize(wc, 2*1024*1024)
+
+			// create the hasher and the tee on top
+			hasher := newHasher()
+			tee := io.MultiWriter(dst, hasher)
+
+			// create the gzip compression filter
+			gzip, err := cgzip.NewWriterLevel(tee, cgzip.Z_BEST_SPEED)
+			if err != nil {
+				rec.RecordError(fmt.Errorf("cannot create gzip writer: %v", err))
+				return
+			}
+
+			// copy from the source file to gzip to tee to output file and hasher
+			_, err = io.Copy(gzip, source)
+			if err != nil {
+				rec.RecordError(fmt.Errorf("cannot copy data: %v", err))
+				return
+			}
+
+			// close gzip to flush it, after that the hash is good
+			if err = gzip.Close(); err != nil {
+				rec.RecordError(fmt.Errorf("cannot close gzip: %v", err))
+				return
+			}
+
+			// flush the buffer to finish writing, save the hash
+			dst.Flush()
+			fes[i].Hash = hasher.HashString()
+		}(i, fe)
+	}
+
+	wg.Wait()
+	if rec.HasErrors() {
+		return rec.Error()
+	}
+
+	// open the MANIFEST
+	wc, err := bs.AddFile(bh, backupManifest)
+	if err != nil {
+		return fmt.Errorf("cannot add %v to backup: %v", backupManifest, err)
+	}
+	defer wc.Close()
+
+	// JSON-encode and write the MANIFEST
+	bm := &BackupManifest{
+		FileEntries:         fes,
+		ReplicationPosition: replicationPosition,
+	}
+	data, err := json.MarshalIndent(bm, "", "  ")
+	if err != nil {
+		return fmt.Errorf("cannot JSON encode %v: %v", backupManifest, err)
+	}
+	if _, err := wc.Write([]byte(data)); err != nil {
+		return fmt.Errorf("cannot write %v: %v", backupManifest, err)
+	}
+
+	return nil
+}
diff --git a/go/vt/mysqlctl/backupstorage/file.go b/go/vt/mysqlctl/backupstorage/file.go
index d237697f35..1e467e4e69 100644
--- a/go/vt/mysqlctl/backupstorage/file.go
+++ b/go/vt/mysqlctl/backupstorage/file.go
@@ -132,7 +132,9 @@ func (fbs *FileBackupStorage) RemoveBackup(bucket, name string) error {
 // RegisterFileBackupStorage should be called after Flags has been
 // initialized, to register the FileBackupStorage implementation
 func RegisterFileBackupStorage() {
-
BackupStorageMap["file"] = &FileBackupStorage{ - root: *fileBackupStorageRoot, + if *fileBackupStorageRoot != "" { + BackupStorageMap["file"] = &FileBackupStorage{ + root: *fileBackupStorageRoot, + } } } diff --git a/go/vt/tabletmanager/actionnode/actionnode.go b/go/vt/tabletmanager/actionnode/actionnode.go index c00e4ae6ab..341481a3d6 100644 --- a/go/vt/tabletmanager/actionnode/actionnode.go +++ b/go/vt/tabletmanager/actionnode/actionnode.go @@ -165,6 +165,9 @@ const ( // replication slaves. TabletActionGetSlaves = "GetSlaves" + // TabletActionBackup takes a db backup and stores it into BackupStorage + TabletActionBackup = "Backup" + // TabletActionSnapshot takes a db snapshot TabletActionSnapshot = "Snapshot" diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index 054933f786..7d28f60118 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -122,6 +122,8 @@ type RPCAgent interface { // Backup / restore related methods + Backup(ctx context.Context, concurrency int, logger logutil.Logger) error + Snapshot(ctx context.Context, args *actionnode.SnapshotArgs, logger logutil.Logger) (*actionnode.SnapshotReply, error) SnapshotSourceEnd(ctx context.Context, args *actionnode.SnapshotSourceEndArgs) error @@ -714,6 +716,54 @@ func (agent *ActionAgent) updateReplicationGraphForPromotedSlave(ctx context.Con // Backup / restore related methods // +// Backup takes a db backup and sends it to the BackupStorage +// Should be called under RPCWrapLockAction. +func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger logutil.Logger) error { + // update our type to TYPE_BACKUP + tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) + if err != nil { + return err + } + if tablet.Type == topo.TYPE_MASTER { + return fmt.Errorf("type MASTER cannot take backup, if you really need to do this, restart vttablet in replica mode") + } + originalType := tablet.Type + if err := topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, topo.TYPE_BACKUP, make(map[string]string)); err != nil { + return err + } + + // let's update our internal state (stop query service and other things) + if err := agent.refreshTablet(ctx, "backup"); err != nil { + return fmt.Errorf("failed to update state before backup: %v", err) + } + + // create the loggers: tee to console and source + l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) + + // now we can run the backup + bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) + name := fmt.Sprintf("%v-%v", tablet.Alias, time.Now().Unix()) + returnErr := agent.Mysqld.Backup(l, bucket, name, concurrency, agent.hookExtraEnv()) + + // and change our type back to the appropriate value: + // - if healthcheck is enabled, go to spare + // - if not, go back to original type + if agent.IsRunningHealthCheck() { + originalType = topo.TYPE_SPARE + } + err = topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, originalType, nil) + if err != nil { + // failure in changing the topology type is probably worse, + // so returning that (we logged the snapshot error anyway) + if returnErr != nil { + l.Errorf("mysql backup command returned error: %v", returnErr) + } + returnErr = err + } + + return returnErr +} + // Snapshot takes a db snapshot // Should be called under RPCWrapLockAction. 
func (agent *ActionAgent) Snapshot(ctx context.Context, args *actionnode.SnapshotArgs, logger logutil.Logger) (*actionnode.SnapshotReply, error) { diff --git a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go index da5c35e647..a4e0b4faa3 100644 --- a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go +++ b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go @@ -1127,6 +1127,41 @@ func agentRPCTestPromoteSlavePanic(ctx context.Context, t *testing.T, client tmc // Backup / restore related methods // +var testBackupConcurrency = 24 +var testBackupCalled = false + +func (fra *fakeRPCAgent) Backup(ctx context.Context, concurrency int, logger logutil.Logger) error { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + compare(fra.t, "Backup args", concurrency, testBackupConcurrency) + logStuff(logger, 10) + testBackupCalled = true + return nil +} + +func agentRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { + logChannel, errFunc, err := client.Backup(ctx, ti, testBackupConcurrency) + if err != nil { + t.Fatalf("Backup failed: %v", err) + } + compareLoggedStuff(t, "Backup", logChannel, 10) + err = errFunc() + compareError(t, "Backup", err, true, testBackupCalled) +} + +func agentRPCTestBackupPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { + logChannel, errFunc, err := client.Backup(ctx, ti, testBackupConcurrency) + if err != nil { + t.Fatalf("Backup failed: %v", err) + } + if e, ok := <-logChannel; ok { + t.Fatalf("Unexpected Backup logs: %v", e) + } + err = errFunc() + expectRPCWrapLockActionPanic(t, err) +} + var testSnapshotArgs = &actionnode.SnapshotArgs{ Concurrency: 42, ServerMode: true, @@ -1364,6 +1399,7 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, agentRPCTestPromoteSlave(ctx, t, client, ti) // Backup / restore related methods + agentRPCTestBackup(ctx, t, client, ti) agentRPCTestSnapshot(ctx, t, client, ti) agentRPCTestSnapshotSourceEnd(ctx, t, client, ti) agentRPCTestReserveForRestore(ctx, t, client, ti) @@ -1420,6 +1456,7 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, agentRPCTestPromoteSlavePanic(ctx, t, client, ti) // Backup / restore related methods + agentRPCTestBackupPanic(ctx, t, client, ti) agentRPCTestSnapshotPanic(ctx, t, client, ti) agentRPCTestSnapshotSourceEndPanic(ctx, t, client, ti) agentRPCTestReserveForRestorePanic(ctx, t, client, ti) diff --git a/go/vt/tabletmanager/faketmclient/fake_client.go b/go/vt/tabletmanager/faketmclient/fake_client.go index 877f7db1e5..472e8e2153 100644 --- a/go/vt/tabletmanager/faketmclient/fake_client.go +++ b/go/vt/tabletmanager/faketmclient/fake_client.go @@ -294,6 +294,14 @@ func (client *FakeTabletManagerClient) PromoteSlave(ctx context.Context, tablet // Backup related methods // +// Backup is part of the tmclient.TabletManagerClient interface +func (client *FakeTabletManagerClient) Backup(ctx context.Context, tablet *topo.TabletInfo, concurrency int) (<-chan *logutil.LoggerEvent, tmclient.ErrFunc, error) { + logstream := make(chan *logutil.LoggerEvent, 10) + return logstream, func() error { + return nil + }, nil +} + // Snapshot is part of the tmclient.TabletManagerClient interface func (client *FakeTabletManagerClient) Snapshot(ctx context.Context, tablet *topo.TabletInfo, sa *actionnode.SnapshotArgs) (<-chan *logutil.LoggerEvent, tmclient.SnapshotReplyFunc, error) { logstream := 
make(chan *logutil.LoggerEvent, 10) diff --git a/go/vt/tabletmanager/gorpcproto/structs.go b/go/vt/tabletmanager/gorpcproto/structs.go index 7699ff2c8e..a763f6b29a 100644 --- a/go/vt/tabletmanager/gorpcproto/structs.go +++ b/go/vt/tabletmanager/gorpcproto/structs.go @@ -94,6 +94,11 @@ type ExecuteFetchArgs struct { ReloadSchema bool } +// BackupArgs has arguments for Backup +type BackupArgs struct { + Concurrency int +} + // gorpc doesn't support returning a streaming type during streaming // and a final return value, so using structures with either one set. diff --git a/go/vt/tabletmanager/gorpctmclient/gorpc_client.go b/go/vt/tabletmanager/gorpctmclient/gorpc_client.go index 0ed8fe4e6a..23e9d7bce4 100644 --- a/go/vt/tabletmanager/gorpctmclient/gorpc_client.go +++ b/go/vt/tabletmanager/gorpctmclient/gorpc_client.go @@ -457,6 +457,55 @@ func (client *GoRPCTabletManagerClient) PromoteSlave(ctx context.Context, tablet // Backup related methods // +// Backup is part of the tmclient.TabletManagerClient interface +func (client *GoRPCTabletManagerClient) Backup(ctx context.Context, tablet *topo.TabletInfo, concurrency int) (<-chan *logutil.LoggerEvent, tmclient.ErrFunc, error) { + var connectTimeout time.Duration + deadline, ok := ctx.Deadline() + if ok { + connectTimeout = deadline.Sub(time.Now()) + if connectTimeout < 0 { + return nil, nil, timeoutError{fmt.Errorf("timeout connecting to TabletManager.Backup on %v", tablet.Alias)} + } + } + rpcClient, err := bsonrpc.DialHTTP("tcp", tablet.Addr(), connectTimeout, nil) + if err != nil { + return nil, nil, err + } + + logstream := make(chan *logutil.LoggerEvent, 10) + rpcstream := make(chan *logutil.LoggerEvent, 10) + c := rpcClient.StreamGo("TabletManager.Backup", &gorpcproto.BackupArgs{ + Concurrency: concurrency, + }, rpcstream) + interrupted := false + go func() { + for { + select { + case <-ctx.Done(): + // context is done + interrupted = true + close(logstream) + rpcClient.Close() + return + case ssr, ok := <-rpcstream: + if !ok { + close(logstream) + rpcClient.Close() + return + } + logstream <- ssr + } + } + }() + return logstream, func() error { + // this is only called after streaming is done + if interrupted { + return fmt.Errorf("TabletManager.Backup interrupted by context") + } + return c.Error + }, nil +} + // Snapshot is part of the tmclient.TabletManagerClient interface func (client *GoRPCTabletManagerClient) Snapshot(ctx context.Context, tablet *topo.TabletInfo, sa *actionnode.SnapshotArgs) (<-chan *logutil.LoggerEvent, tmclient.SnapshotReplyFunc, error) { var connectTimeout time.Duration diff --git a/go/vt/tabletmanager/gorpctmserver/gorpc_server.go b/go/vt/tabletmanager/gorpctmserver/gorpc_server.go index 41ace9195d..153f9a4e69 100644 --- a/go/vt/tabletmanager/gorpctmserver/gorpc_server.go +++ b/go/vt/tabletmanager/gorpctmserver/gorpc_server.go @@ -467,6 +467,33 @@ func (tm *TabletManager) PromoteSlave(ctx context.Context, args *rpc.Unused, rep // backup related methods +// Backup wraps RPCAgent.Backup +func (tm *TabletManager) Backup(ctx context.Context, args *gorpcproto.BackupArgs, sendReply func(interface{}) error) error { + ctx = callinfo.RPCWrapCallInfo(ctx) + return tm.agent.RPCWrapLockAction(ctx, actionnode.TabletActionBackup, args, nil, true, func() error { + // create a logger, send the result back to the caller + logger := logutil.NewChannelLogger(10) + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + for e := range logger { + // Note we don't interrupt the loop here, as + // we still need to flush and finish the + 
// command, even if the channel to the client + // has been broken. We'll just keep trying + // to send. + sendReply(&e) + } + wg.Done() + }() + + err := tm.agent.Backup(ctx, args.Concurrency, logger) + close(logger) + wg.Wait() + return err + }) +} + // Snapshot wraps RPCAgent.Snapshot func (tm *TabletManager) Snapshot(ctx context.Context, args *actionnode.SnapshotArgs, sendReply func(interface{}) error) error { ctx = callinfo.RPCWrapCallInfo(ctx) diff --git a/go/vt/tabletmanager/tmclient/rpc_client_api.go b/go/vt/tabletmanager/tmclient/rpc_client_api.go index 6325d40433..b358db3546 100644 --- a/go/vt/tabletmanager/tmclient/rpc_client_api.go +++ b/go/vt/tabletmanager/tmclient/rpc_client_api.go @@ -187,6 +187,9 @@ type TabletManagerClient interface { // Backup / restore related methods // + // Backup creates a database backup + Backup(ctx context.Context, tablet *topo.TabletInfo, concurrency int) (<-chan *logutil.LoggerEvent, ErrFunc, error) + // Snapshot takes a database snapshot Snapshot(ctx context.Context, tablet *topo.TabletInfo, sa *actionnode.SnapshotArgs) (<-chan *logutil.LoggerEvent, SnapshotReplyFunc, error) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index e5891271a7..e4d8f27f7c 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -104,6 +104,9 @@ var commands = []commandGroup{ command{"Sleep", commandSleep, " ", "Block the action queue for the specified duration (mostly for testing)."}, + command{"Backup", commandBackup, + "[-concurrency=4] ", + "Stop mysqld and copy data to BackupStorage."}, command{"Snapshot", commandSnapshot, "[-force] [-server-mode] [-concurrency=4] ", "Stop mysqld and copy compressed data aside."}, @@ -901,6 +904,33 @@ func commandSleep(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fla return wr.TabletManagerClient().Sleep(ctx, ti, duration) } +func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + concurrency := subFlags.Int("concurrency", 4, "how many compression/checksum jobs to run simultaneously") + if err := subFlags.Parse(args); err != nil { + return err + } + if subFlags.NArg() != 1 { + return fmt.Errorf("action Backup requires ") + } + + tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) + if err != nil { + return err + } + tabletInfo, err := wr.TopoServer().GetTablet(tabletAlias) + if err != nil { + return err + } + logStream, errFunc, err := wr.TabletManagerClient().Backup(ctx, tabletInfo, *concurrency) + if err != nil { + return err + } + for e := range logStream { + wr.Logger().Infof("%v", e) + } + return errFunc() +} + func commandSnapshotSourceEnd(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { slaveStartRequired := subFlags.Bool("slave-start", false, "will restart replication") readWrite := subFlags.Bool("read-write", false, "will make the server read-write") diff --git a/test/backup.py b/test/backup.py new file mode 100755 index 0000000000..d68c57b0dc --- /dev/null +++ b/test/backup.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python + +import warnings +# Dropping a table inexplicably produces a warning despite +# the "IF EXISTS" clause. Squelch these warnings. 
+warnings.simplefilter("ignore") + +import gzip +import logging +import os +import shutil +from subprocess import call +import unittest + +import environment +import utils +import tablet + +use_mysqlctld = True + +tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld) +tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld) + +def setUpModule(): + try: + environment.topo_server().setup() + + # start mysql instance external to the test + global setup_procs + setup_procs = [ + tablet_master.init_mysql(), + tablet_replica1.init_mysql(), + ] + if use_mysqlctld: + tablet_master.wait_for_mysqlctl_socket() + tablet_replica1.wait_for_mysqlctl_socket() + else: + utils.wait_procs(setup_procs) + except: + tearDownModule() + raise + +def tearDownModule(): + if utils.options.skip_teardown: + return + + if use_mysqlctld: + # Try to terminate mysqlctld gracefully, so it kills its mysqld. + for proc in setup_procs: + utils.kill_sub_process(proc, soft=True) + teardown_procs = setup_procs + else: + teardown_procs = [ + tablet_master.teardown_mysql(), + tablet_replica1.teardown_mysql(), + ] + utils.wait_procs(teardown_procs, raise_on_error=False) + + environment.topo_server().teardown() + utils.kill_sub_processes() + utils.remove_tmp_files() + + tablet_master.remove_tree() + tablet_replica1.remove_tree() + + +class TestBackup(unittest.TestCase): + def tearDown(self): + tablet.Tablet.check_vttablet_count() + environment.topo_server().wipe() + for t in [tablet_master, tablet_replica1]: + t.reset_replication() + t.clean_dbs() + + _create_vt_insert_test = '''create table vt_insert_test ( + id bigint auto_increment, + msg varchar(64), + primary key (id) + ) Engine=InnoDB''' + + _populate_vt_insert_test = [ + "insert into vt_insert_test (msg) values ('test %s')" % x + for x in xrange(4)] + + def test_backup(self): + for t in tablet_master, tablet_replica1: + t.create_db('vt_test_keyspace') + + tablet_master.init_tablet('master', 'test_keyspace', '0', start=True) + tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True, + supports_backups=True) + utils.run_vtctl(['InitShardMaster', 'test_keyspace/0', + tablet_master.tablet_alias]) + + # insert data on master, wait for slave to get it + tablet_master.populate('vt_test_keyspace', self._create_vt_insert_test, + self._populate_vt_insert_test) + timeout = 10 + while True: + result = tablet_replica1.mquery('vt_test_keyspace', 'select count(*) from vt_insert_test') + if result[0][0] == 4: + break + timeout = utils.wait_step('slave tablet getting data', timeout) + + # backup the slave + utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True) + + for t in tablet_master, tablet_replica1: + t.kill_vttablet() + +if __name__ == '__main__': + utils.main() diff --git a/test/tablet.py b/test/tablet.py index 95beaa4663..a6eaebf2af 100644 --- a/test/tablet.py +++ b/test/tablet.py @@ -418,7 +418,8 @@ class Tablet(object): target_tablet_type=None, full_mycnf_args=False, extra_args=None, extra_env=None, include_mysql_port=True, init_tablet_type=None, init_keyspace=None, - init_shard=None, init_db_name_override=None): + init_shard=None, init_db_name_override=None, + supports_backups=False): """Starts a vttablet process, and returns it. The process is also saved in self.proc, so it's easy to kill as well. 
@@ -485,6 +486,11 @@ class Tablet(object): else: self.dbname = 'vt_' + init_keyspace + if supports_backups: + args.extend(['-backup_storage_implementation', 'file', + '-file_backup_storage_root', + os.path.join(environment.tmproot, 'backupstorage')]) + if extra_args: args.extend(extra_args) From f71c09363e8ffe46ab59137b7aabc7a1ca23ba94 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 18 May 2015 12:20:32 -0700 Subject: [PATCH 039/128] Refactoring backup interface. --- go/vt/mysqlctl/backup.go | 18 ++--- go/vt/mysqlctl/backupstorage/file.go | 86 ++++++++++++----------- go/vt/mysqlctl/backupstorage/file_test.go | 42 +++++++---- go/vt/mysqlctl/backupstorage/interface.go | 37 ++++++---- 4 files changed, 108 insertions(+), 75 deletions(-) diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index dd8c47b41f..4e312ec731 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -179,18 +179,18 @@ func (mysqld *Mysqld) Backup(logger logutil.Logger, bucket, name string, backupC return fmt.Errorf("StartBackup failed: %v", err) } - if err = mysqld.backup(logger, bs, bh, backupConcurrency, hookExtraEnv); err != nil { - if err := bs.AbortBackup(bh); err != nil { + if err = mysqld.backup(logger, bh, backupConcurrency, hookExtraEnv); err != nil { + if err := bh.AbortBackup(); err != nil { logger.Errorf("failed to abort backup: %v", err) } } else { - err = bs.EndBackup(bh) + err = bh.EndBackup() } return err } -func (mysqld *Mysqld) backup(logger logutil.Logger, bs backupstorage.BackupStorage, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) error { +func (mysqld *Mysqld) backup(logger logutil.Logger, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) error { // save initial state so we can restore slaveStartRequired := false @@ -240,7 +240,7 @@ func (mysqld *Mysqld) backup(logger logutil.Logger, bs backupstorage.BackupStora } replicationPosition = slaveStatus.Position } - logger.Infof("using replication position: %#v", replicationPosition) + logger.Infof("using replication position: %v", replicationPosition) // shutdown mysqld if err = mysqld.Shutdown(true, MysqlWaitTime); err != nil { @@ -255,7 +255,7 @@ func (mysqld *Mysqld) backup(logger logutil.Logger, bs backupstorage.BackupStora logger.Infof("found %v files to backup", len(fes)) // backup everything - if err := mysqld.backupFiles(logger, bs, bh, fes, replicationPosition, backupConcurrency); err != nil { + if err := mysqld.backupFiles(logger, bh, fes, replicationPosition, backupConcurrency); err != nil { return fmt.Errorf("cannot backup files: %v", err) } @@ -286,7 +286,7 @@ func (mysqld *Mysqld) backup(logger logutil.Logger, bs backupstorage.BackupStora return nil } -func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bs backupstorage.BackupStorage, bh backupstorage.BackupHandle, fes []FileEntry, replicationPosition proto.ReplicationPosition, backupConcurrency int) error { +func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bh backupstorage.BackupHandle, fes []FileEntry, replicationPosition proto.ReplicationPosition, backupConcurrency int) error { sema := sync2.NewSemaphore(backupConcurrency, 0) rec := concurrency.AllErrorRecorder{} @@ -314,7 +314,7 @@ func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bs backupstorage.Backup // open the destination file for writing, and a buffer name := fmt.Sprintf("%v", i) - wc, err := bs.AddFile(bh, name) + wc, err := bh.AddFile(name) if err != nil { rec.RecordError(fmt.Errorf("cannot add 
file: %v", err)) return @@ -358,7 +358,7 @@ func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bs backupstorage.Backup } // open the MANIFEST - wc, err := bs.AddFile(bh, backupManifest) + wc, err := bh.AddFile(backupManifest) if err != nil { return fmt.Errorf("cannot add %v to backup: %v", backupManifest, err) } diff --git a/go/vt/mysqlctl/backupstorage/file.go b/go/vt/mysqlctl/backupstorage/file.go index 1e467e4e69..7245f0e8df 100644 --- a/go/vt/mysqlctl/backupstorage/file.go +++ b/go/vt/mysqlctl/backupstorage/file.go @@ -22,8 +22,10 @@ var ( // FileBackupHandle implements BackupHandle for local file system. type FileBackupHandle struct { - bucket string - name string + fbs *FileBackupStorage + bucket string + name string + readOnly bool } // Bucket is part of the BackupHandle interface @@ -36,6 +38,40 @@ func (fbh *FileBackupHandle) Name() string { return fbh.name } +// AddFile is part of the BackupHandle interface +func (fbh *FileBackupHandle) AddFile(filename string) (io.WriteCloser, error) { + if fbh.readOnly { + return nil, fmt.Errorf("AddFile cannot be called on read-only backup") + } + p := path.Join(fbh.fbs.root, fbh.bucket, fbh.name, filename) + return os.Create(p) +} + +// EndBackup is part of the BackupHandle interface +func (fbh *FileBackupHandle) EndBackup() error { + if fbh.readOnly { + return fmt.Errorf("EndBackup cannot be called on read-only backup") + } + return nil +} + +// AbortBackup is part of the BackupHandle interface +func (fbh *FileBackupHandle) AbortBackup() error { + if fbh.readOnly { + return fmt.Errorf("AbortBackup cannot be called on read-only backup") + } + return fbh.fbs.RemoveBackup(fbh.bucket, fbh.name) +} + +// ReadFile is part of the BackupHandle interface +func (fbh *FileBackupHandle) ReadFile(filename string) (io.ReadCloser, error) { + if !fbh.readOnly { + return nil, fmt.Errorf("ReadFile cannot be called on read-write backup") + } + p := path.Join(fbh.fbs.root, fbh.bucket, fbh.name, filename) + return os.Open(p) +} + // FileBackupStorage implements BackupStorage for local file system. 
type FileBackupStorage struct {
root string
@@ -62,8 +98,10 @@ func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error)
continue
}
result = append(result, &FileBackupHandle{
- bucket: bucket,
- name: info.Name(),
+ fbs: fbs,
+ bucket: bucket,
+ name: info.Name(),
+ readOnly: true,
})
}
return result, nil
@@ -84,45 +122,13 @@ func (fbs *FileBackupStorage) StartBackup(bucket, name string) (BackupHandle, er
}
return &FileBackupHandle{
- bucket: bucket,
- name: name,
+ fbs: fbs,
+ bucket: bucket,
+ name: name,
+ readOnly: false,
}, nil
}
-// AddFile is part of the BackupStorage interface
-func (fbs *FileBackupStorage) AddFile(handle BackupHandle, filename string) (io.WriteCloser, error) {
- fbh, ok := handle.(*FileBackupHandle)
- if !ok {
- return nil, fmt.Errorf("FileBackupStorage only accepts FileBackupHandle")
- }
- p := path.Join(fbs.root, fbh.bucket, fbh.name, filename)
- return os.Create(p)
-}
-
-// EndBackup is part of the BackupStorage interface
-func (fbs *FileBackupStorage) EndBackup(handle BackupHandle) error {
- return nil
-}
-
-// AbortBackup is part of the BackupStorage interface
-func (fbs *FileBackupStorage) AbortBackup(handle BackupHandle) error {
- fbh, ok := handle.(*FileBackupHandle)
- if !ok {
- return fmt.Errorf("FileBackupStorage only accepts FileBackupHandle")
- }
- return fbs.RemoveBackup(fbh.bucket, fbh.name)
-}
-
-// ReadFile is part of the BackupStorage interface
-func (fbs *FileBackupStorage) ReadFile(handle BackupHandle, filename string) (io.ReadCloser, error) {
- fbh, ok := handle.(*FileBackupHandle)
- if !ok {
- return nil, fmt.Errorf("FileBackupStorage only accepts FileBackupHandle")
- }
- p := path.Join(fbs.root, fbh.bucket, fbh.name, filename)
- return os.Open(p)
-}
-
// RemoveBackup is part of the BackupStorage interface
func (fbs *FileBackupStorage) RemoveBackup(bucket, name string) error {
p := path.Join(fbs.root, bucket, name)
diff --git a/go/vt/mysqlctl/backupstorage/file_test.go b/go/vt/mysqlctl/backupstorage/file_test.go
index 4004ee0ec3..1d0d58279c 100644
--- a/go/vt/mysqlctl/backupstorage/file_test.go
+++ b/go/vt/mysqlctl/backupstorage/file_test.go
@@ -55,8 +55,8 @@ func TestListBackups(t *testing.T) {
if err != nil {
t.Fatalf("fbs.StartBackup failed: %v", err)
}
- if err := fbs.EndBackup(bh); err != nil {
- t.Fatalf("fbs.EndBackup failed: %v", err)
+ if err := bh.EndBackup(); err != nil {
+ t.Fatalf("bh.EndBackup failed: %v", err)
}
// verify we have one entry now
@@ -76,8 +76,8 @@ func TestListBackups(t *testing.T) {
if err != nil {
t.Fatalf("fbs.StartBackup failed: %v", err)
}
- if err := fbs.EndBackup(bh); err != nil {
- t.Fatalf("fbs.EndBackup failed: %v", err)
+ if err := bh.EndBackup(); err != nil {
+ t.Fatalf("bh.EndBackup failed: %v", err)
}
// verify we have two sorted entries now
@@ -112,8 +112,8 @@ func TestListBackups(t *testing.T) {
if err != nil {
t.Fatalf("fbs.StartBackup failed: %v", err)
}
- if err := fbs.AbortBackup(bh); err != nil {
- t.Fatalf("fbs.AbortBackup failed: %v", err)
+ if err := bh.AbortBackup(); err != nil {
+ t.Fatalf("bh.AbortBackup failed: %v", err)
}
bhs, err = fbs.ListBackups(bucket)
if err != nil {
@@ -124,6 +124,17 @@ func TestListBackups(t *testing.T) {
bhs[0].Name() != firstBackup {
t.Fatalf("ListBackups after abort returned wrong results: %#v", bhs)
}
+
+ // check we cannot change a backup we listed
+ if _, err := bhs[0].AddFile("test"); err == nil {
+ t.Fatalf("was able to AddFile to read-only backup")
+ }
+ if err := bhs[0].EndBackup(); err == nil {
+ t.Fatalf("was able to EndBackup a read-only backup")
+ }
+ if err := bhs[0].AbortBackup(); err == nil {
+ t.Fatalf("was able to AbortBackup a read-only backup")
+ }
}
func TestFileContents(t *testing.T) {
@@ -140,9 +151,9 @@ func TestFileContents(t *testing.T) {
if err != nil {
t.Fatalf("fbs.StartBackup failed: %v", err)
}
- wc, err := fbs.AddFile(bh, filename1)
+ wc, err := bh.AddFile(filename1)
if err != nil {
- t.Fatalf("fbs.AddFile failed: %v", err)
+ t.Fatalf("bh.AddFile failed: %v", err)
}
if _, err := wc.Write([]byte(contents1)); err != nil {
t.Fatalf("wc.Write failed: %v", err)
@@ -150,8 +161,15 @@ func TestFileContents(t *testing.T) {
if err := wc.Close(); err != nil {
t.Fatalf("wc.Close failed: %v", err)
}
- if err := fbs.EndBackup(bh); err != nil {
- t.Fatalf("fbs.EndBackup failed: %v", err)
+
+ // test we can't read back from a read-write backup
+ if _, err := bh.ReadFile(filename1); err == nil {
+ t.Fatalf("was able to ReadFile on a read-write backup")
+ }
+
+ // and close
+ if err := bh.EndBackup(); err != nil {
+ t.Fatalf("bh.EndBackup failed: %v", err)
}
// re-read the file
@@ -159,9 +177,9 @@
if err != nil || len(bhs) != 1 {
t.Fatalf("ListBackups after abort returned wrong return: %v %v", err, bhs)
}
- rc, err := fbs.ReadFile(bhs[0], filename1)
+ rc, err := bhs[0].ReadFile(filename1)
if err != nil {
- t.Fatalf("fbs.ReadFile failed: %v", err)
+ t.Fatalf("bhs[0].ReadFile failed: %v", err)
}
buf := make([]byte, len(contents1)+10)
if n, err := rc.Read(buf); (err != nil && err != io.EOF) || n != len(contents1) {
diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go
index 59f6bb11f4..e28dc8d842 100644
--- a/go/vt/mysqlctl/backupstorage/interface.go
+++ b/go/vt/mysqlctl/backupstorage/interface.go
@@ -25,34 +25,43 @@ type BackupHandle interface {
// Name is the individual name of the backup. Will contain
// tabletAlias-timestamp.
Name() string
-}
-
-// BackupStorage is the interface to the storage system
-type BackupStorage interface {
- // ListBackups returns all the backups in a bucket.
- ListBackups(bucket string) ([]BackupHandle, error)
-
- // StartBackup creates a new backup with the given name.
- // If a backup with the same name already exists, it's an error.
- StartBackup(bucket, name string) (BackupHandle, error)
// AddFile opens a new file to be added to the backup.
+ // Only works for read-write backups (created by StartBackup).
// filename is guaranteed to only contain alphanumerical
// characters and hyphens.
// It should be thread safe, it is possible to call AddFile in
// multiple go routines once a backup has been started.
- AddFile(handle BackupHandle, filename string) (io.WriteCloser, error)
+ AddFile(filename string) (io.WriteCloser, error)
// EndBackup stops and closes a backup. The contents should be kept.
- EndBackup(handle BackupHandle) error
+ // Only works for read-write backups (created by StartBackup).
+ EndBackup() error
// AbortBackup stops a backup, and removes the contents that
// have been copied already. It is called if an error occurs
// while the backup is being taken, and the backup cannot be finished.
- AbortBackup(handle BackupHandle) error
+ // Only works for read-write backups (created by StartBackup).
+ AbortBackup() error
// ReadFile starts reading a file from a backup.
- ReadFile(handle BackupHandle, filename string) (io.ReadCloser, error)
+ // Only works for read-only backups (created by ListBackups).
+ ReadFile(filename string) (io.ReadCloser, error)
+}
+
+// BackupStorage is the interface to the storage system
+type BackupStorage interface {
+ // ListBackups returns all the backups in a bucket. The
+ // returned backups are read-only (ReadFile can be called, but
+ // AddFile/EndBackup/AbortBackup cannot)
+ ListBackups(bucket string) ([]BackupHandle, error)
+
+ // StartBackup creates a new backup with the given name. If a
+ // backup with the same name already exists, it's an error.
+ // The returned backup is read-write
+ // (AddFile/EndBackup/AbortBackup can all be called, but not
+ // ReadFile)
+ StartBackup(bucket, name string) (BackupHandle, error)
// RemoveBackup removes all the data associated with a backup.
// It will not appear in ListBackups after RemoveBackup succeeds.
From ded4a347c392280c936a938417958089d84b2425 Mon Sep 17 00:00:00 2001
From: Alain Jobart
Date: Mon, 18 May 2015 16:22:26 -0700
Subject: [PATCH 040/128] Refactoring Worker to have an explicit context.Context.
---
 go/cmd/vtworker/command.go | 5 +-
 go/cmd/vtworker/status.go | 38 ++++---
 go/cmd/vtworker/vtworker.go | 37 +++++--
 go/vt/worker/clone_utils.go | 2 +-
 go/vt/worker/split_clone.go | 111 ++++++++------------
 go/vt/worker/split_clone_test.go | 2 +-
 go/vt/worker/split_diff.go | 103 ++++++++-----------
 go/vt/worker/split_diff_test.go | 2 +-
 go/vt/worker/sqldiffer.go | 112 +++++++++------------
 go/vt/worker/sqldiffer_test.go | 2 +-
 go/vt/worker/vertical_split_clone.go | 117 +++++++++-------------
 go/vt/worker/vertical_split_clone_test.go | 2 +-
 go/vt/worker/vertical_split_diff.go | 89 +++++++---------
 go/vt/worker/vertical_split_diff_test.go | 2 +-
 go/vt/worker/worker.go | 20 ++--
 15 files changed, 276 insertions(+), 368 deletions(-)
diff --git a/go/cmd/vtworker/command.go b/go/cmd/vtworker/command.go
index 34790efc29..ee1cf5a2eb 100644
--- a/go/cmd/vtworker/command.go
+++ b/go/cmd/vtworker/command.go
@@ -112,7 +112,10 @@ func runCommand(args []string) error {
case <-done:
log.Infof("Command is done:")
log.Info(wrk.StatusAsText())
- if wrk.Error() != nil {
+ currentWorkerMutex.Lock()
+ err := lastRunError
+ currentWorkerMutex.Unlock()
+ if err != nil {
os.Exit(1)
}
os.Exit(0)
diff --git a/go/cmd/vtworker/status.go b/go/cmd/vtworker/status.go
index 6a43f85e3f..accb92d3bf 100644
--- a/go/cmd/vtworker/status.go
+++ b/go/cmd/vtworker/status.go
@@ -68,16 +68,14 @@ func initStatusHandling() {
currentWorkerMutex.Lock()
wrk := currentWorker
logger := currentMemoryLogger
- done := currentDone
+ ctx := currentContext
currentWorkerMutex.Unlock()
data := make(map[string]interface{})
if wrk != nil {
data["Status"] = wrk.StatusAsHTML()
- select {
- case <-done:
+ if ctx == nil {
data["Done"] = true
- default:
}
if logger != nil {
data["Logs"] = template.HTML(strings.Replace(logger.String(), "\n", "
\n", -1)) @@ -99,29 +97,27 @@ func initStatusHandling() { acl.SendError(w, err) return } + currentWorkerMutex.Lock() - wrk := currentWorker - done := currentDone - currentWorkerMutex.Unlock() // no worker, we go to the menu - if wrk == nil { + if currentWorker == nil { + currentWorkerMutex.Unlock() http.Redirect(w, r, "/", http.StatusTemporaryRedirect) return } // check the worker is really done - select { - case <-done: - currentWorkerMutex.Lock() + if currentContext == nil { currentWorker = nil currentMemoryLogger = nil - currentDone = nil currentWorkerMutex.Unlock() http.Redirect(w, r, "/", http.StatusTemporaryRedirect) - default: - httpError(w, "worker still executing", nil) + return } + + currentWorkerMutex.Unlock() + httpError(w, "worker still executing", nil) }) // cancel handler @@ -130,18 +126,20 @@ func initStatusHandling() { acl.SendError(w, err) return } - currentWorkerMutex.Lock() - wrk := currentWorker - currentWorkerMutex.Unlock() - // no worker, we go to the menu - if wrk == nil { + currentWorkerMutex.Lock() + + // no worker, or not running, we go to the menu + if currentWorker == nil || currentCancelFunc == nil { + currentWorkerMutex.Unlock() http.Redirect(w, r, "/", http.StatusTemporaryRedirect) return } // otherwise, cancel the running worker and go back to the status page - wrk.Cancel() + cancel := currentCancelFunc + currentWorkerMutex.Unlock() + cancel() http.Redirect(w, r, servenv.StatusURLPath(), http.StatusTemporaryRedirect) }) diff --git a/go/cmd/vtworker/vtworker.go b/go/cmd/vtworker/vtworker.go index 781f6b7b70..6dea5befe6 100644 --- a/go/cmd/vtworker/vtworker.go +++ b/go/cmd/vtworker/vtworker.go @@ -29,6 +29,7 @@ import ( "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/worker" "github.com/youtube/vitess/go/vt/wrangler" + "golang.org/x/net/context" ) var ( @@ -44,10 +45,20 @@ var ( wr *wrangler.Wrangler // mutex is protecting all the following variables + // 3 states here: + // - no job ever ran (or reset was run): currentWorker is nil, + // currentContext/currentCancelFunc is nil, lastRunError is nil + // - one worker running: currentWorker is set, + // currentContext/currentCancelFunc is set, lastRunError is nil + // - (at least) one worker already ran, none is running atm: + // currentWorker is set, currentContext is nil, lastRunError + // has the error returned by the worker. 
currentWorkerMutex sync.Mutex currentWorker worker.Worker currentMemoryLogger *logutil.MemoryLogger - currentDone chan struct{} + currentContext context.Context + currentCancelFunc context.CancelFunc + lastRunError error ) // signal handling, centralized here @@ -59,7 +70,9 @@ func installSignalHandlers() { // we got a signal, notify our modules currentWorkerMutex.Lock() defer currentWorkerMutex.Unlock() - currentWorker.Cancel() + if currentCancelFunc != nil { + currentCancelFunc() + } }() } @@ -75,17 +88,27 @@ func setAndStartWorker(wrk worker.Worker) (chan struct{}, error) { currentWorker = wrk currentMemoryLogger = logutil.NewMemoryLogger() - currentDone = make(chan struct{}) + currentContext, currentCancelFunc = context.WithCancel(context.Background()) + lastRunError = nil + done := make(chan struct{}) wr.SetLogger(logutil.NewTeeLogger(currentMemoryLogger, logutil.NewConsoleLogger())) - // one go function runs the worker, closes 'done' when done + // one go function runs the worker, changes state when done go func() { + // run will take a long time log.Infof("Starting worker...") - wrk.Run() - close(currentDone) + err := wrk.Run(currentContext) + + // it's done, let's save our state + currentWorkerMutex.Lock() + currentContext = nil + currentCancelFunc = nil + lastRunError = err + currentWorkerMutex.Unlock() + close(done) }() - return currentDone, nil + return done, nil } func main() { diff --git a/go/vt/worker/clone_utils.go b/go/vt/worker/clone_utils.go index 19e5379b9c..6f23f55e60 100644 --- a/go/vt/worker/clone_utils.go +++ b/go/vt/worker/clone_utils.go @@ -212,7 +212,7 @@ func executeFetchWithRetries(ctx context.Context, wr *wrangler.Wrangler, ti *top return ti, fmt.Errorf("interrupted while trying to run %v on tablet %v", command, ti) case <-t.C: // Re-resolve and retry 30s after the failure - err = r.ResolveDestinationMasters() + err = r.ResolveDestinationMasters(ctx) if err != nil { return ti, fmt.Errorf("unable to re-resolve masters for ExecuteFetch, due to: %v", err) } diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index b2904f5394..b6d8a7f988 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -51,8 +51,6 @@ type SplitCloneWorker struct { minTableSizeForSplit uint64 destinationWriterCount int cleaner *wrangler.Cleaner - ctx context.Context - ctxCancel context.CancelFunc // all subsequent fields are protected by the mutex mu sync.Mutex @@ -94,7 +92,6 @@ func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, ex if err != nil { return nil, err } - ctx, cancel := context.WithCancel(context.Background()) return &SplitCloneWorker{ wr: wr, cell: cell, @@ -107,8 +104,6 @@ func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, ex minTableSizeForSplit: minTableSizeForSplit, destinationWriterCount: destinationWriterCount, cleaner: &wrangler.Cleaner{}, - ctx: ctx, - ctxCancel: cancel, state: stateSCNotSarted, ev: &events.SplitClone{ @@ -195,28 +190,21 @@ func (scw *SplitCloneWorker) StatusAsText() string { return result } -// Cancel is part of the Worker interface -func (scw *SplitCloneWorker) Cancel() { - scw.ctxCancel() -} - -func (scw *SplitCloneWorker) checkInterrupted() bool { +func (scw *SplitCloneWorker) checkInterrupted(ctx context.Context) error { select { - case <-scw.ctx.Done(): - if scw.ctx.Err() == context.DeadlineExceeded { - return false - } - scw.recordError(topo.ErrInterrupted) - return true + case <-ctx.Done(): + err := ctx.Err() + scw.recordError(err) + return err default: 
} - return false + return nil } // Run implements the Worker interface -func (scw *SplitCloneWorker) Run() { +func (scw *SplitCloneWorker) Run(ctx context.Context) error { resetVars() - err := scw.run() + err := scw.run(ctx) scw.setState(stateSCCleanUp) cerr := scw.cleaner.CleanUp(scw.wr) @@ -229,47 +217,33 @@ func (scw *SplitCloneWorker) Run() { } if err != nil { scw.recordError(err) - return + return err } scw.setState(stateSCDone) + return nil } -func (scw *SplitCloneWorker) Error() error { - return scw.err -} - -func (scw *SplitCloneWorker) run() error { +func (scw *SplitCloneWorker) run(ctx context.Context) error { // first state: read what we need to do if err := scw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if scw.checkInterrupted() { - return topo.ErrInterrupted + if err := scw.checkInterrupted(ctx); err != nil { + return err } // second state: find targets - if err := scw.findTargets(); err != nil { - // A canceled context can appear to cause an application error - if scw.checkInterrupted() { - return topo.ErrInterrupted - } + if err := scw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if scw.checkInterrupted() { - return topo.ErrInterrupted + if err := scw.checkInterrupted(ctx); err != nil { + return err } // third state: copy data - if err := scw.copy(); err != nil { - // A canceled context can appear to cause an application error - if scw.checkInterrupted() { - return topo.ErrInterrupted - } + if err := scw.copy(ctx); err != nil { return fmt.Errorf("copy() failed: %v", err) } - if scw.checkInterrupted() { - return topo.ErrInterrupted - } return nil } @@ -331,14 +305,14 @@ func (scw *SplitCloneWorker) init() error { // - find one rdonly in the source shard // - mark it as 'checker' pointing back to us // - get the aliases of all the targets -func (scw *SplitCloneWorker) findTargets() error { +func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { scw.setState(stateSCFindTargets) var err error // find an appropriate endpoint in the source shards scw.sourceAliases = make([]topo.TabletAlias, len(scw.sourceShards)) for i, si := range scw.sourceShards { - scw.sourceAliases[i], err = findChecker(scw.ctx, scw.wr, scw.cleaner, scw.cell, si.Keyspace(), si.ShardName()) + scw.sourceAliases[i], err = findChecker(ctx, scw.wr, scw.cleaner, scw.cell, si.Keyspace(), si.ShardName()) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", scw.cell, si.Keyspace(), si.ShardName(), err) } @@ -353,8 +327,8 @@ func (scw *SplitCloneWorker) findTargets() error { return fmt.Errorf("cannot read tablet %v: %v", alias, err) } - ctx, cancel := context.WithTimeout(scw.ctx, 60*time.Second) - err := scw.wr.TabletManagerClient().StopSlave(ctx, scw.sourceTablets[i]) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + err := scw.wr.TabletManagerClient().StopSlave(shortCtx, scw.sourceTablets[i]) cancel() if err != nil { return fmt.Errorf("cannot stop replication on tablet %v", alias) @@ -368,13 +342,13 @@ func (scw *SplitCloneWorker) findTargets() error { action.TabletType = topo.TYPE_SPARE } - return scw.ResolveDestinationMasters() + return scw.ResolveDestinationMasters(ctx) } // ResolveDestinationMasters implements the Resolver interface. // It will attempt to resolve all shards and update scw.destinationShardsToTablets; // if it is unable to do so, it will not modify scw.destinationShardsToTablets at all. 
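Distilled from the run() rewrite above, the control flow that replaces the old Cancel/currentDone machinery is just context threading plus a poll between phases. A sketch with invented names (the real phases are init, findTargets, and copy; context is the golang.org/x/net/context package used throughout this patch):

    // runPhases is an invented distillation: every phase receives the
    // caller's ctx, and cancellation is polled between phases, so an
    // interrupted run surfaces ctx.Err() instead of topo.ErrInterrupted.
    func runPhases(ctx context.Context, phases ...func(context.Context) error) error {
        for _, phase := range phases {
            select {
            case <-ctx.Done():
                return ctx.Err() // context.Canceled or context.DeadlineExceeded
            default:
            }
            if err := phase(ctx); err != nil {
                return err
            }
        }
        return nil
    }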
-func (scw *SplitCloneWorker) ResolveDestinationMasters() error {
+func (scw *SplitCloneWorker) ResolveDestinationMasters(ctx context.Context) error {
statsDestinationAttemptedResolves.Add(1)
destinationShardsToTablets := make(map[string]*topo.TabletInfo)
@@ -389,7 +363,7 @@
}
for _, si := range scw.destinationShards {
- ti, err := resolveDestinationShardMaster(scw.ctx, si.Keyspace(), si.ShardName(), scw.wr)
+ ti, err := resolveDestinationShardMaster(ctx, si.Keyspace(), si.ShardName(), scw.wr)
if err != nil {
return err
}
@@ -415,12 +389,12 @@
// Find all tablets on all destination shards. This should be done immediately before reloading
// the schema on these tablets, to minimize the chances of the topo changing in between.
-func (scw *SplitCloneWorker) findReloadTargets() error {
+func (scw *SplitCloneWorker) findReloadTargets(ctx context.Context) error {
scw.reloadAliases = make([][]topo.TabletAlias, len(scw.destinationShards))
scw.reloadTablets = make([]map[topo.TabletAlias]*topo.TabletInfo, len(scw.destinationShards))
for shardIndex, si := range scw.destinationShards {
- reloadAliases, reloadTablets, err := resolveReloadTabletsForShard(scw.ctx, si.Keyspace(), si.ShardName(), scw.wr)
+ reloadAliases, reloadTablets, err := resolveReloadTabletsForShard(ctx, si.Keyspace(), si.ShardName(), scw.wr)
if err != nil {
return err
}
@@ -434,7 +408,7 @@
// - copy the data from source tablets to destination masters (with replication on)
// Assumes that the schema has already been created on each destination tablet
// (probably from vtctl's CopySchemaShard)
-func (scw *SplitCloneWorker) copy() error {
+func (scw *SplitCloneWorker) copy(ctx context.Context) error {
scw.setState(stateSCCopy)
// get source schema from the first shard
// TODO(alainjobart): for now, we assume the schema is the same
// on all source shards. Furthermore, we estimate the number of rows
// in each source shard for each table to be about the same
// (rowCount is used to estimate an ETA)
- ctx, cancel := context.WithTimeout(scw.ctx, 60*time.Second)
- sourceSchemaDefinition, err := scw.wr.GetSchema(ctx, scw.sourceAliases[0], nil, scw.excludeTables, true)
+ shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+ sourceSchemaDefinition, err := scw.wr.GetSchema(shortCtx, scw.sourceAliases[0], nil, scw.excludeTables, true)
cancel()
if err != nil {
return fmt.Errorf("cannot get schema from source %v: %v", scw.sourceAliases[0], err)
@@ -498,8 +472,7 @@
processError := func(format string, args ...interface{}) {
scw.wr.Logger().Errorf(format, args...)
mu.Lock()
- if !scw.checkInterrupted() {
- scw.Cancel()
+ if firstError == nil {
firstError = fmt.Errorf(format, args...)
} mu.Unlock() @@ -520,7 +493,7 @@ func (scw *SplitCloneWorker) copy() error { destinationWaitGroup.Add(1) go func() { defer destinationWaitGroup.Done() - if err := executeFetchLoop(scw.ctx, scw.wr, scw, shardName, insertChannel); err != nil { + if err := executeFetchLoop(ctx, scw.wr, scw, shardName, insertChannel); err != nil { processError("executeFetchLoop failed: %v", err) } }() @@ -540,7 +513,7 @@ func (scw *SplitCloneWorker) copy() error { rowSplitter := NewRowSplitter(scw.destinationShards, scw.keyspaceInfo.ShardingColumnType, columnIndexes[tableIndex]) - chunks, err := findChunks(scw.ctx, scw.wr, scw.sourceTablets[shardIndex], td, scw.minTableSizeForSplit, scw.sourceReaderCount) + chunks, err := findChunks(ctx, scw.wr, scw.sourceTablets[shardIndex], td, scw.minTableSizeForSplit, scw.sourceReaderCount) if err != nil { return err } @@ -558,7 +531,7 @@ func (scw *SplitCloneWorker) copy() error { // build the query, and start the streaming selectSQL := buildSQLFromChunks(scw.wr, td, chunks, chunkIndex, scw.sourceAliases[shardIndex].String()) - qrr, err := NewQueryResultReaderForTablet(scw.ctx, scw.wr.TopoServer(), scw.sourceAliases[shardIndex], selectSQL) + qrr, err := NewQueryResultReaderForTablet(ctx, scw.wr.TopoServer(), scw.sourceAliases[shardIndex], selectSQL) if err != nil { processError("NewQueryResultReaderForTablet failed: %v", err) return @@ -566,7 +539,7 @@ func (scw *SplitCloneWorker) copy() error { defer qrr.Close() // process the data - if err := scw.processData(td, tableIndex, qrr, rowSplitter, insertChannels, scw.destinationPackCount, scw.ctx.Done()); err != nil { + if err := scw.processData(td, tableIndex, qrr, rowSplitter, insertChannels, scw.destinationPackCount, ctx.Done()); err != nil { processError("processData failed: %v", err) } scw.tableStatus[tableIndex].threadDone() @@ -595,8 +568,8 @@ func (scw *SplitCloneWorker) copy() error { // get the current position from the sources for shardIndex := range scw.sourceShards { - ctx, cancel := context.WithTimeout(scw.ctx, 60*time.Second) - status, err := scw.wr.TabletManagerClient().SlaveStatus(ctx, scw.sourceTablets[shardIndex]) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + status, err := scw.wr.TabletManagerClient().SlaveStatus(shortCtx, scw.sourceTablets[shardIndex]) cancel() if err != nil { return err @@ -610,7 +583,7 @@ func (scw *SplitCloneWorker) copy() error { go func(shardName string) { defer destinationWaitGroup.Done() scw.wr.Logger().Infof("Making and populating blp_checkpoint table") - if err := runSqlCommands(scw.ctx, scw.wr, scw, shardName, queries); err != nil { + if err := runSqlCommands(ctx, scw.wr, scw, shardName, queries); err != nil { processError("blp_checkpoint queries failed: %v", err) } }(si.ShardName()) @@ -630,8 +603,8 @@ func (scw *SplitCloneWorker) copy() error { } else { for _, si := range scw.destinationShards { scw.wr.Logger().Infof("Setting SourceShard on shard %v/%v", si.Keyspace(), si.ShardName()) - ctx, cancel := context.WithTimeout(scw.ctx, 60*time.Second) - err := scw.wr.SetSourceShards(ctx, si.Keyspace(), si.ShardName(), scw.sourceAliases, nil) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + err := scw.wr.SetSourceShards(shortCtx, si.Keyspace(), si.ShardName(), scw.sourceAliases, nil) cancel() if err != nil { return fmt.Errorf("failed to set source shards: %v", err) @@ -639,7 +612,7 @@ func (scw *SplitCloneWorker) copy() error { } } - err = scw.findReloadTargets() + err = scw.findReloadTargets(ctx) if err != nil { return fmt.Errorf("failed before 
reloading schema on destination tablets: %v", err) } @@ -652,8 +625,8 @@ func (scw *SplitCloneWorker) copy() error { go func(ti *topo.TabletInfo) { defer destinationWaitGroup.Done() scw.wr.Logger().Infof("Reloading schema on tablet %v", ti.Alias) - ctx, cancel := context.WithTimeout(scw.ctx, 60*time.Second) - err := scw.wr.TabletManagerClient().ReloadSchema(ctx, ti) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + err := scw.wr.TabletManagerClient().ReloadSchema(shortCtx, ti) cancel() if err != nil { processError("ReloadSchema failed on tablet %v: %v", ti.Alias, err) diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 3a0f7f0c93..37cde4ea15 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -331,7 +331,7 @@ func testSplitClone(t *testing.T, strategy string) { // Only wait 1 ms between retries, so that the test passes faster executeFetchRetryTime = (1 * time.Millisecond) - wrk.Run() + wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) if wrk.err != nil || wrk.state != stateSCDone { diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go index 99ec4a6057..8d91a409d2 100644 --- a/go/vt/worker/split_diff.go +++ b/go/vt/worker/split_diff.go @@ -43,8 +43,6 @@ type SplitDiffWorker struct { shard string excludeTables []string cleaner *wrangler.Cleaner - ctx context.Context - ctxCancel context.CancelFunc // all subsequent fields are protected by the mutex mu sync.Mutex @@ -68,7 +66,6 @@ type SplitDiffWorker struct { // NewSplitDiffWorker returns a new SplitDiffWorker object. func NewSplitDiffWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string) Worker { - ctx, cancel := context.WithCancel(context.Background()) return &SplitDiffWorker{ wr: wr, cell: cell, @@ -76,8 +73,6 @@ func NewSplitDiffWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, exc shard: shard, excludeTables: excludeTables, cleaner: &wrangler.Cleaner{}, - ctx: ctx, - ctxCancel: cancel, state: stateSDNotSarted, } @@ -133,28 +128,21 @@ func (sdw *SplitDiffWorker) StatusAsText() string { return result } -// Cancel is part of the Worker interface -func (sdw *SplitDiffWorker) Cancel() { - sdw.ctxCancel() -} - -func (sdw *SplitDiffWorker) checkInterrupted() bool { +func (sdw *SplitDiffWorker) checkInterrupted(ctx context.Context) error { select { - case <-sdw.ctx.Done(): - if sdw.ctx.Err() == context.DeadlineExceeded { - return false - } - sdw.recordError(topo.ErrInterrupted) - return true + case <-ctx.Done(): + err := ctx.Err() + sdw.recordError(err) + return err default: } - return false + return nil } // Run is mostly a wrapper to run the cleanup at the end. 
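The same Run shape now repeats in every worker: do the work with the caller's context, always run the cleaner, and return the error instead of stashing it behind an Error() method. As a sketch (invented helper; the exact merging of work and cleanup errors varies slightly per worker):

    // runWithCleanup is illustrative only: cleanup always runs, and the
    // work error takes precedence when both fail.
    func runWithCleanup(ctx context.Context, work func(context.Context) error, cleanup func() error) error {
        err := work(ctx)
        if cerr := cleanup(); cerr != nil && err == nil {
            err = cerr
        }
        return err
    }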
-func (sdw *SplitDiffWorker) Run() { +func (sdw *SplitDiffWorker) Run(ctx context.Context) error { resetVars() - err := sdw.run() + err := sdw.run(ctx) sdw.setState(stateSDCleanUp) cerr := sdw.cleaner.CleanUp(sdw.wr) @@ -167,48 +155,39 @@ func (sdw *SplitDiffWorker) Run() { } if err != nil { sdw.recordError(err) - return + return err } sdw.setState(stateSDDone) + return nil } -func (sdw *SplitDiffWorker) Error() error { - return sdw.err -} - -func (sdw *SplitDiffWorker) run() error { +func (sdw *SplitDiffWorker) run(ctx context.Context) error { // first state: read what we need to do if err := sdw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if sdw.checkInterrupted() { - return topo.ErrInterrupted + if err := sdw.checkInterrupted(ctx); err != nil { + return err } // second state: find targets - if err := sdw.findTargets(); err != nil { + if err := sdw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if sdw.checkInterrupted() { - return topo.ErrInterrupted + if err := sdw.checkInterrupted(ctx); err != nil { + return err } // third phase: synchronize replication - if err := sdw.synchronizeReplication(); err != nil { - if sdw.checkInterrupted() { - return topo.ErrInterrupted - } + if err := sdw.synchronizeReplication(ctx); err != nil { return fmt.Errorf("synchronizeReplication() failed: %v", err) } - if sdw.checkInterrupted() { - return topo.ErrInterrupted + if err := sdw.checkInterrupted(ctx); err != nil { + return err } // fourth phase: diff - if err := sdw.diff(); err != nil { - if sdw.checkInterrupted() { - return topo.ErrInterrupted - } + if err := sdw.diff(ctx); err != nil { return fmt.Errorf("diff() failed: %v", err) } @@ -244,12 +223,12 @@ func (sdw *SplitDiffWorker) init() error { // - find one rdonly per source shard // - find one rdonly in destination shard // - mark them all as 'checker' pointing back to us -func (sdw *SplitDiffWorker) findTargets() error { +func (sdw *SplitDiffWorker) findTargets(ctx context.Context) error { sdw.setState(stateSDFindTargets) // find an appropriate endpoint in destination shard var err error - sdw.destinationAlias, err = findChecker(sdw.ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, sdw.shard) + sdw.destinationAlias, err = findChecker(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, sdw.shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", sdw.cell, sdw.keyspace, sdw.shard, err) } @@ -257,7 +236,7 @@ func (sdw *SplitDiffWorker) findTargets() error { // find an appropriate endpoint in the source shards sdw.sourceAliases = make([]topo.TabletAlias, len(sdw.shardInfo.SourceShards)) for i, ss := range sdw.shardInfo.SourceShards { - sdw.sourceAliases[i], err = findChecker(sdw.ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, ss.Shard) + sdw.sourceAliases[i], err = findChecker(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, ss.Shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", sdw.cell, sdw.keyspace, ss.Shard, err) } @@ -284,7 +263,7 @@ func (sdw *SplitDiffWorker) findTargets() error { // (remove the cleanup task that does the same) // At this point, all checker instances are stopped at the same point. 
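Every RPC in the synchronization code below derives a short-lived child context from the caller's ctx; the shortCtx rename keeps the request timeout separate from the worker's lifetime. The pattern in isolation (invented wrapper; callRPC stands for any TabletManagerClient call):

    // withShortCtx shows the per-RPC timeout pattern: the child context
    // inherits cancellation from ctx and adds its own 60s deadline;
    // cancel releases the timer as soon as the call returns.
    func withShortCtx(ctx context.Context, callRPC func(context.Context) error) error {
        shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
        defer cancel()
        return callRPC(shortCtx)
    }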
-func (sdw *SplitDiffWorker) synchronizeReplication() error { +func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { sdw.setState(stateSDSynchronizeReplication) masterInfo, err := sdw.wr.TopoServer().GetTablet(sdw.shardInfo.MasterAlias) @@ -294,8 +273,8 @@ func (sdw *SplitDiffWorker) synchronizeReplication() error { // 1 - stop the master binlog replication, get its current position sdw.wr.Logger().Infof("Stopping master binlog replication on %v", sdw.shardInfo.MasterAlias) - ctx, cancel := context.WithTimeout(sdw.ctx, 60*time.Second) - blpPositionList, err := sdw.wr.TabletManagerClient().StopBlp(ctx, masterInfo) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + blpPositionList, err := sdw.wr.TabletManagerClient().StopBlp(shortCtx, masterInfo) cancel() if err != nil { return fmt.Errorf("StopBlp for %v failed: %v", sdw.shardInfo.MasterAlias, err) @@ -322,8 +301,8 @@ func (sdw *SplitDiffWorker) synchronizeReplication() error { // stop replication sdw.wr.Logger().Infof("Stopping slave[%v] %v at a minimum of %v", i, sdw.sourceAliases[i], blpPos.Position) - ctx, cancel := context.WithTimeout(sdw.ctx, 60*time.Second) - stoppedAt, err := sdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, sourceTablet, blpPos.Position, 30*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + stoppedAt, err := sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet, blpPos.Position, 30*time.Second) cancel() if err != nil { return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", sdw.sourceAliases[i], blpPos.Position, err) @@ -344,8 +323,8 @@ func (sdw *SplitDiffWorker) synchronizeReplication() error { // 3 - ask the master of the destination shard to resume filtered // replication up to the new list of positions sdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", sdw.shardInfo.MasterAlias, stopPositionList) - ctx, cancel = context.WithTimeout(sdw.ctx, 60*time.Second) - masterPos, err := sdw.wr.TabletManagerClient().RunBlpUntil(ctx, masterInfo, &stopPositionList, 30*time.Second) + shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second) + masterPos, err := sdw.wr.TabletManagerClient().RunBlpUntil(shortCtx, masterInfo, &stopPositionList, 30*time.Second) cancel() if err != nil { return fmt.Errorf("RunBlpUntil for %v until %v failed: %v", sdw.shardInfo.MasterAlias, stopPositionList, err) @@ -358,8 +337,8 @@ func (sdw *SplitDiffWorker) synchronizeReplication() error { if err != nil { return err } - ctx, cancel = context.WithTimeout(sdw.ctx, 60*time.Second) - _, err = sdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, destinationTablet, masterPos, 30*time.Second) + shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second) + _, err = sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, destinationTablet, masterPos, 30*time.Second) cancel() if err != nil { return fmt.Errorf("StopSlaveMinimum for %v at %v failed: %v", sdw.destinationAlias, masterPos, err) @@ -373,8 +352,8 @@ func (sdw *SplitDiffWorker) synchronizeReplication() error { // 5 - restart filtered replication on destination master sdw.wr.Logger().Infof("Restarting filtered replication on master %v", sdw.shardInfo.MasterAlias) - ctx, cancel = context.WithTimeout(sdw.ctx, 60*time.Second) - err = sdw.wr.TabletManagerClient().StartBlp(ctx, masterInfo) + shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second) + err = sdw.wr.TabletManagerClient().StartBlp(shortCtx, masterInfo) if err := 
sdw.cleaner.RemoveActionByName(wrangler.StartBlpActionName, sdw.shardInfo.MasterAlias.String()); err != nil { sdw.wr.Logger().Warningf("Cannot find cleaning action %v/%v: %v", wrangler.StartBlpActionName, sdw.shardInfo.MasterAlias.String(), err) } @@ -391,7 +370,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication() error { // - if some table schema mismatches, record them (use existing schema diff tools). // - for each table in destination, run a diff pipeline. -func (sdw *SplitDiffWorker) diff() error { +func (sdw *SplitDiffWorker) diff(ctx context.Context) error { sdw.setState(stateSDDiff) sdw.wr.Logger().Infof("Gathering schema information...") @@ -401,9 +380,9 @@ func (sdw *SplitDiffWorker) diff() error { wg.Add(1) go func() { var err error - ctx, cancel := context.WithTimeout(sdw.ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) sdw.destinationSchemaDefinition, err = sdw.wr.GetSchema( - ctx, sdw.destinationAlias, nil /* tables */, sdw.excludeTables, false /* includeViews */) + shortCtx, sdw.destinationAlias, nil /* tables */, sdw.excludeTables, false /* includeViews */) cancel() rec.RecordError(err) sdw.wr.Logger().Infof("Got schema from destination %v", sdw.destinationAlias) @@ -413,9 +392,9 @@ func (sdw *SplitDiffWorker) diff() error { wg.Add(1) go func(i int, sourceAlias topo.TabletAlias) { var err error - ctx, cancel := context.WithTimeout(sdw.ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) sdw.sourceSchemaDefinitions[i], err = sdw.wr.GetSchema( - ctx, sourceAlias, nil /* tables */, sdw.excludeTables, false /* includeViews */) + shortCtx, sourceAlias, nil /* tables */, sdw.excludeTables, false /* includeViews */) cancel() rec.RecordError(err) sdw.wr.Logger().Infof("Got schema from source[%v] %v", i, sourceAlias) @@ -462,14 +441,14 @@ func (sdw *SplitDiffWorker) diff() error { sdw.wr.Logger().Errorf("Source shard doesn't overlap with destination????: %v", err) return } - sourceQueryResultReader, err := TableScanByKeyRange(sdw.ctx, sdw.wr.Logger(), sdw.wr.TopoServer(), sdw.sourceAliases[0], tableDefinition, overlap, sdw.keyspaceInfo.ShardingColumnType) + sourceQueryResultReader, err := TableScanByKeyRange(ctx, sdw.wr.Logger(), sdw.wr.TopoServer(), sdw.sourceAliases[0], tableDefinition, overlap, sdw.keyspaceInfo.ShardingColumnType) if err != nil { sdw.wr.Logger().Errorf("TableScanByKeyRange(source) failed: %v", err) return } defer sourceQueryResultReader.Close() - destinationQueryResultReader, err := TableScanByKeyRange(sdw.ctx, sdw.wr.Logger(), sdw.wr.TopoServer(), sdw.destinationAlias, tableDefinition, key.KeyRange{}, sdw.keyspaceInfo.ShardingColumnType) + destinationQueryResultReader, err := TableScanByKeyRange(ctx, sdw.wr.Logger(), sdw.wr.TopoServer(), sdw.destinationAlias, tableDefinition, key.KeyRange{}, sdw.keyspaceInfo.ShardingColumnType) if err != nil { sdw.wr.Logger().Errorf("TableScanByKeyRange(destination) failed: %v", err) return diff --git a/go/vt/worker/split_diff_test.go b/go/vt/worker/split_diff_test.go index 21436ac0c1..667400780e 100644 --- a/go/vt/worker/split_diff_test.go +++ b/go/vt/worker/split_diff_test.go @@ -225,7 +225,7 @@ func TestSplitDiff(t *testing.T) { sourceRdonly1.RPCServer.Register(gorpcqueryservice.New(&sourceSqlQuery{t: t, excludedTable: excludedTable})) sourceRdonly2.RPCServer.Register(gorpcqueryservice.New(&sourceSqlQuery{t: t, excludedTable: excludedTable})) - wrk.Run() + wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) if wrk.err != nil || 
wrk.state != stateSCDone {
diff --git a/go/vt/worker/sqldiffer.go b/go/vt/worker/sqldiffer.go
index d573c267fd..ddc35f19ad 100644
--- a/go/vt/worker/sqldiffer.go
+++ b/go/vt/worker/sqldiffer.go
@@ -50,12 +50,10 @@ type SourceSpec struct {
// database: any row in the subset spec needs to have a counterpart in
// the superset spec.
type SQLDiffWorker struct {
- wr *wrangler.Wrangler
- cell string
- shard string
- cleaner *wrangler.Cleaner
- ctx context.Context
- ctxCancel context.CancelFunc
+ wr *wrangler.Wrangler
+ cell string
+ shard string
+ cleaner *wrangler.Cleaner
// alias in the following 2 fields is during
// SQLDifferFindTargets, read-only after that.
@@ -72,15 +70,12 @@ type SQLDiffWorker struct {
// NewSQLDiffWorker returns a new SQLDiffWorker object.
func NewSQLDiffWorker(wr *wrangler.Wrangler, cell string, superset, subset SourceSpec) Worker {
- ctx, cancel := context.WithCancel(context.Background())
return &SQLDiffWorker{
- wr: wr,
- cell: cell,
- superset: superset,
- subset: subset,
- cleaner: new(wrangler.Cleaner),
- ctx: ctx,
- ctxCancel: cancel,
+ wr: wr,
+ cell: cell,
+ superset: superset,
+ subset: subset,
+ cleaner: new(wrangler.Cleaner),
state: sqlDiffNotSarted,
}
@@ -139,28 +134,21 @@ func (worker *SQLDiffWorker) StatusAsText() string {
return result
}
-// Cancel is part of the Worker interface
-func (worker *SQLDiffWorker) Cancel() {
- worker.ctxCancel()
-}
-
-func (worker *SQLDiffWorker) checkInterrupted() bool {
+func (worker *SQLDiffWorker) checkInterrupted(ctx context.Context) error {
select {
- case <-worker.ctx.Done():
- if worker.ctx.Err() == context.DeadlineExceeded {
- return false
- }
- worker.recordError(topo.ErrInterrupted)
- return true
+ case <-ctx.Done():
+ err := ctx.Err()
+ worker.recordError(err)
+ return err
default:
}
- return false
+ return nil
}
// Run is mostly a wrapper to run the cleanup at the end.
-func (worker *SQLDiffWorker) Run() { +func (worker *SQLDiffWorker) Run(ctx context.Context) error { resetVars() - err := worker.run() + err := worker.run(ctx) worker.setState(sqlDiffCleanUp) cerr := worker.cleaner.CleanUp(worker.wr) @@ -173,37 +161,31 @@ func (worker *SQLDiffWorker) Run() { } if err != nil { worker.recordError(err) - return - } - worker.setState(sqlDiffDone) -} - -func (worker *SQLDiffWorker) Error() error { - return worker.err -} - -func (worker *SQLDiffWorker) run() error { - // first state: find targets - if err := worker.findTargets(); err != nil { return err } - if worker.checkInterrupted() { - return topo.ErrInterrupted + worker.setState(sqlDiffDone) + return nil +} + +func (worker *SQLDiffWorker) run(ctx context.Context) error { + // first state: find targets + if err := worker.findTargets(ctx); err != nil { + return err + } + if err := worker.checkInterrupted(ctx); err != nil { + return err } // second phase: synchronize replication - if err := worker.synchronizeReplication(); err != nil { - if worker.checkInterrupted() { - return topo.ErrInterrupted - } + if err := worker.synchronizeReplication(ctx); err != nil { return err } - if worker.checkInterrupted() { - return topo.ErrInterrupted + if err := worker.checkInterrupted(ctx); err != nil { + return err } // third phase: diff - if err := worker.diff(); err != nil { + if err := worker.diff(ctx); err != nil { return err } @@ -214,18 +196,18 @@ func (worker *SQLDiffWorker) run() error { // - find one rdonly in superset // - find one rdonly in subset // - mark them all as 'checker' pointing back to us -func (worker *SQLDiffWorker) findTargets() error { +func (worker *SQLDiffWorker) findTargets(ctx context.Context) error { worker.setState(sqlDiffFindTargets) // find an appropriate endpoint in superset var err error - worker.superset.alias, err = findChecker(worker.ctx, worker.wr, worker.cleaner, worker.cell, worker.superset.Keyspace, worker.superset.Shard) + worker.superset.alias, err = findChecker(ctx, worker.wr, worker.cleaner, worker.cell, worker.superset.Keyspace, worker.superset.Shard) if err != nil { return err } // find an appropriate endpoint in subset - worker.subset.alias, err = findChecker(worker.ctx, worker.wr, worker.cleaner, worker.cell, worker.subset.Keyspace, worker.subset.Shard) + worker.subset.alias, err = findChecker(ctx, worker.wr, worker.cleaner, worker.cell, worker.subset.Keyspace, worker.subset.Shard) if err != nil { return err } @@ -238,7 +220,7 @@ func (worker *SQLDiffWorker) findTargets() error { // 2 - sleep for 5 seconds // 3 - ask the superset slave to stop replication // Note this is not 100% correct, but good enough for now -func (worker *SQLDiffWorker) synchronizeReplication() error { +func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { worker.setState(sqlDiffSynchronizeReplication) // stop replication on subset slave @@ -247,14 +229,14 @@ func (worker *SQLDiffWorker) synchronizeReplication() error { if err != nil { return err } - ctx, cancel := context.WithTimeout(worker.ctx, 60*time.Second) - err = worker.wr.TabletManagerClient().StopSlave(ctx, subsetTablet) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + err = worker.wr.TabletManagerClient().StopSlave(shortCtx, subsetTablet) cancel() if err != nil { return fmt.Errorf("Cannot stop slave %v: %v", worker.subset.alias, err) } - if worker.checkInterrupted() { - return topo.ErrInterrupted + if err := worker.checkInterrupted(ctx); err != nil { + return err } // change the cleaner actions from 
ChangeSlaveType(rdonly) @@ -268,8 +250,8 @@ func (worker *SQLDiffWorker) synchronizeReplication() error { // sleep for a few seconds time.Sleep(5 * time.Second) - if worker.checkInterrupted() { - return topo.ErrInterrupted + if err := worker.checkInterrupted(ctx); err != nil { + return err } // stop replication on superset slave @@ -278,8 +260,8 @@ func (worker *SQLDiffWorker) synchronizeReplication() error { if err != nil { return err } - ctx, cancel = context.WithTimeout(worker.ctx, 60*time.Second) - err = worker.wr.TabletManagerClient().StopSlave(ctx, supersetTablet) + shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second) + err = worker.wr.TabletManagerClient().StopSlave(shortCtx, supersetTablet) cancel() if err != nil { return fmt.Errorf("Cannot stop slave %v: %v", worker.superset.alias, err) @@ -302,20 +284,20 @@ func (worker *SQLDiffWorker) synchronizeReplication() error { // - if some table schema mismatches, record them (use existing schema diff tools). // - for each table in destination, run a diff pipeline. -func (worker *SQLDiffWorker) diff() error { +func (worker *SQLDiffWorker) diff(ctx context.Context) error { worker.setState(sqlDiffRunning) // run the diff worker.wr.Logger().Infof("Running the diffs...") - supersetQueryResultReader, err := NewQueryResultReaderForTablet(worker.ctx, worker.wr.TopoServer(), worker.superset.alias, worker.superset.SQL) + supersetQueryResultReader, err := NewQueryResultReaderForTablet(ctx, worker.wr.TopoServer(), worker.superset.alias, worker.superset.SQL) if err != nil { worker.wr.Logger().Errorf("NewQueryResultReaderForTablet(superset) failed: %v", err) return err } defer supersetQueryResultReader.Close() - subsetQueryResultReader, err := NewQueryResultReaderForTablet(worker.ctx, worker.wr.TopoServer(), worker.subset.alias, worker.subset.SQL) + subsetQueryResultReader, err := NewQueryResultReaderForTablet(ctx, worker.wr.TopoServer(), worker.subset.alias, worker.subset.SQL) if err != nil { worker.wr.Logger().Errorf("NewQueryResultReaderForTablet(subset) failed: %v", err) return err diff --git a/go/vt/worker/sqldiffer_test.go b/go/vt/worker/sqldiffer_test.go index a0af742cd7..1552345745 100644 --- a/go/vt/worker/sqldiffer_test.go +++ b/go/vt/worker/sqldiffer_test.go @@ -134,7 +134,7 @@ func TestSqlDiffer(t *testing.T) { rdonly.RPCServer.Register(gorpcqueryservice.New(&sqlDifferSqlQuery{t: t})) } - wrk.Run() + wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) if wrk.err != nil || wrk.state != stateSCDone { diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index d1d233cf9c..55736622c4 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -50,8 +50,6 @@ type VerticalSplitCloneWorker struct { minTableSizeForSplit uint64 destinationWriterCount int cleaner *wrangler.Cleaner - ctx context.Context - ctxCancel context.CancelFunc // all subsequent fields are protected by the mutex mu sync.Mutex @@ -89,7 +87,6 @@ func NewVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, destinationKeyspac if err != nil { return nil, err } - ctx, cancel := context.WithCancel(context.Background()) return &VerticalSplitCloneWorker{ wr: wr, cell: cell, @@ -102,8 +99,6 @@ func NewVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, destinationKeyspac minTableSizeForSplit: minTableSizeForSplit, destinationWriterCount: destinationWriterCount, cleaner: &wrangler.Cleaner{}, - ctx: ctx, - ctxCancel: cancel, state: stateVSCNotSarted, ev: &events.VerticalSplitClone{ @@ 
-182,28 +177,21 @@ func (vscw *VerticalSplitCloneWorker) StatusAsText() string { return result } -// Cancel is part of the Worker interface -func (vscw *VerticalSplitCloneWorker) Cancel() { - vscw.ctxCancel() -} - -func (vscw *VerticalSplitCloneWorker) checkInterrupted() bool { +func (vscw *VerticalSplitCloneWorker) checkInterrupted(ctx context.Context) error { select { - case <-vscw.ctx.Done(): - if vscw.ctx.Err() == context.DeadlineExceeded { - return false - } - vscw.recordError(topo.ErrInterrupted) - return true + case <-ctx.Done(): + err := ctx.Err() + vscw.recordError(err) + return err default: } - return false + return nil } // Run implements the Worker interface -func (vscw *VerticalSplitCloneWorker) Run() { +func (vscw *VerticalSplitCloneWorker) Run(ctx context.Context) error { resetVars() - err := vscw.run() + err := vscw.run(ctx) vscw.setState(stateVSCCleanUp) cerr := vscw.cleaner.CleanUp(vscw.wr) @@ -216,46 +204,35 @@ func (vscw *VerticalSplitCloneWorker) Run() { } if err != nil { vscw.recordError(err) - return + return err } vscw.setState(stateVSCDone) + return nil } -func (vscw *VerticalSplitCloneWorker) Error() error { - return vscw.err -} - -func (vscw *VerticalSplitCloneWorker) run() error { +func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { // first state: read what we need to do if err := vscw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if vscw.checkInterrupted() { - return topo.ErrInterrupted + if err := vscw.checkInterrupted(ctx); err != nil { + return err } // second state: find targets - if err := vscw.findTargets(); err != nil { - // A canceled context can appear to cause an application error - if vscw.checkInterrupted() { - return topo.ErrInterrupted - } + if err := vscw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if vscw.checkInterrupted() { - return topo.ErrInterrupted + if err := vscw.checkInterrupted(ctx); err != nil { + return err } // third state: copy data - if err := vscw.copy(); err != nil { - // A canceled context can appear to cause an application error - if vscw.checkInterrupted() { - return topo.ErrInterrupted - } + if err := vscw.copy(ctx); err != nil { return fmt.Errorf("copy() failed: %v", err) } - if vscw.checkInterrupted() { - return topo.ErrInterrupted + if err := vscw.checkInterrupted(ctx); err != nil { + return err } return nil @@ -300,12 +277,12 @@ func (vscw *VerticalSplitCloneWorker) init() error { // - find one rdonly in the source shard // - mark it as 'checker' pointing back to us // - get the aliases of all the targets -func (vscw *VerticalSplitCloneWorker) findTargets() error { +func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error { vscw.setState(stateVSCFindTargets) // find an appropriate endpoint in the source shard var err error - vscw.sourceAlias, err = findChecker(vscw.ctx, vscw.wr, vscw.cleaner, vscw.cell, vscw.sourceKeyspace, "0") + vscw.sourceAlias, err = findChecker(ctx, vscw.wr, vscw.cleaner, vscw.cell, vscw.sourceKeyspace, "0") if err != nil { return fmt.Errorf("cannot find checker for %v/%v/0: %v", vscw.cell, vscw.sourceKeyspace, err) } @@ -318,8 +295,8 @@ func (vscw *VerticalSplitCloneWorker) findTargets() error { } // stop replication on it - ctx, cancel := context.WithTimeout(vscw.ctx, 60*time.Second) - err = vscw.wr.TabletManagerClient().StopSlave(ctx, vscw.sourceTablet) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + err = vscw.wr.TabletManagerClient().StopSlave(shortCtx, vscw.sourceTablet) 
cancel()
if err != nil {
return fmt.Errorf("cannot stop replication on tablet %v", vscw.sourceAlias)
@@ -332,13 +309,13 @@
}
action.TabletType = topo.TYPE_SPARE
- return vscw.ResolveDestinationMasters()
+ return vscw.ResolveDestinationMasters(ctx)
}
// ResolveDestinationMasters implements the Resolver interface.
// It will attempt to resolve all shards and update vscw.destinationShardsToTablets;
// if it is unable to do so, it will not modify vscw.destinationShardsToTablets at all.
-func (vscw *VerticalSplitCloneWorker) ResolveDestinationMasters() error {
+func (vscw *VerticalSplitCloneWorker) ResolveDestinationMasters(ctx context.Context) error {
statsDestinationAttemptedResolves.Add(1)
// Allow at most one resolution request at a time; if there are concurrent requests, only
// one of them will actually hit the topo server.
@@ -350,7 +327,7 @@
return nil
}
- ti, err := resolveDestinationShardMaster(vscw.ctx, vscw.destinationKeyspace, vscw.destinationShard, vscw.wr)
+ ti, err := resolveDestinationShardMaster(ctx, vscw.destinationKeyspace, vscw.destinationShard, vscw.wr)
if err != nil {
return err
}
@@ -375,8 +352,8 @@
// Find all tablets on the destination shard. This should be done immediately before reloading
// the schema on these tablets, to minimize the chances of the topo changing in between.
-func (vscw *VerticalSplitCloneWorker) findReloadTargets() error {
- reloadAliases, reloadTablets, err := resolveReloadTabletsForShard(vscw.ctx, vscw.destinationKeyspace, vscw.destinationShard, vscw.wr)
+func (vscw *VerticalSplitCloneWorker) findReloadTargets(ctx context.Context) error {
+ reloadAliases, reloadTablets, err := resolveReloadTabletsForShard(ctx, vscw.destinationKeyspace, vscw.destinationShard, vscw.wr)
if err != nil {
return err
}
@@ -388,12 +365,12 @@
// - copy the data from source tablets to destination masters (with replication on)
// Assumes that the schema has already been created on each destination tablet
// (probably from vtctl's CopySchemaShard)
-func (vscw *VerticalSplitCloneWorker) copy() error {
+func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error {
vscw.setState(stateVSCCopy)
// get source schema
- ctx, cancel := context.WithTimeout(vscw.ctx, 60*time.Second)
- sourceSchemaDefinition, err := vscw.wr.GetSchema(ctx, vscw.sourceAlias, vscw.tables, nil, true)
+ shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+ sourceSchemaDefinition, err := vscw.wr.GetSchema(shortCtx, vscw.sourceAlias, vscw.tables, nil, true)
cancel()
if err != nil {
return fmt.Errorf("cannot get schema from source %v: %v", vscw.sourceAlias, err)
@@ -424,17 +401,17 @@
vscw.tableStatus[i].mu.Unlock()
}
- // In parallel, setup the channels to send SQL data chunks to for each destination tablet.
+ // In parallel, setup the channels to send SQL data chunks to
+ // for each destination tablet.
//
- // mu protects the context for cancelation, and firstError
+ // mu protects firstError
mu := sync.Mutex{}
var firstError error
processError := func(format string, args ...interface{}) {
vscw.wr.Logger().Errorf(format, args...)
mu.Lock()
- if !vscw.checkInterrupted() {
- vscw.Cancel()
+ if firstError == nil {
firstError = fmt.Errorf(format, args...)
} mu.Unlock() @@ -455,7 +432,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error { go func() { defer destinationWaitGroup.Done() - if err := executeFetchLoop(vscw.ctx, vscw.wr, vscw, shardName, insertChannel); err != nil { + if err := executeFetchLoop(ctx, vscw.wr, vscw, shardName, insertChannel); err != nil { processError("executeFetchLoop failed: %v", err) } }() @@ -470,7 +447,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error { continue } - chunks, err := findChunks(vscw.ctx, vscw.wr, vscw.sourceTablet, td, vscw.minTableSizeForSplit, vscw.sourceReaderCount) + chunks, err := findChunks(ctx, vscw.wr, vscw.sourceTablet, td, vscw.minTableSizeForSplit, vscw.sourceReaderCount) if err != nil { return err } @@ -488,7 +465,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error { // build the query, and start the streaming selectSQL := buildSQLFromChunks(vscw.wr, td, chunks, chunkIndex, vscw.sourceAlias.String()) - qrr, err := NewQueryResultReaderForTablet(vscw.ctx, vscw.wr.TopoServer(), vscw.sourceAlias, selectSQL) + qrr, err := NewQueryResultReaderForTablet(ctx, vscw.wr.TopoServer(), vscw.sourceAlias, selectSQL) if err != nil { processError("NewQueryResultReaderForTablet failed: %v", err) return @@ -496,7 +473,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error { defer qrr.Close() // process the data - if err := vscw.processData(td, tableIndex, qrr, insertChannel, vscw.destinationPackCount, vscw.ctx.Done()); err != nil { + if err := vscw.processData(td, tableIndex, qrr, insertChannel, vscw.destinationPackCount, ctx.Done()); err != nil { processError("QueryResultReader failed: %v", err) } vscw.tableStatus[tableIndex].threadDone() @@ -514,8 +491,8 @@ func (vscw *VerticalSplitCloneWorker) copy() error { // then create and populate the blp_checkpoint table if vscw.strategy.PopulateBlpCheckpoint { // get the current position from the source - ctx, cancel := context.WithTimeout(vscw.ctx, 60*time.Second) - status, err := vscw.wr.TabletManagerClient().SlaveStatus(ctx, vscw.sourceTablet) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + status, err := vscw.wr.TabletManagerClient().SlaveStatus(shortCtx, vscw.sourceTablet) cancel() if err != nil { return err @@ -532,7 +509,7 @@ func (vscw *VerticalSplitCloneWorker) copy() error { go func(shardName string) { defer destinationWaitGroup.Done() vscw.wr.Logger().Infof("Making and populating blp_checkpoint table") - if err := runSqlCommands(vscw.ctx, vscw.wr, vscw, shardName, queries); err != nil { + if err := runSqlCommands(ctx, vscw.wr, vscw, shardName, queries); err != nil { processError("blp_checkpoint queries failed: %v", err) } }(vscw.destinationShard) @@ -547,15 +524,15 @@ func (vscw *VerticalSplitCloneWorker) copy() error { vscw.wr.Logger().Infof("Skipping setting SourceShard on destination shard.") } else { vscw.wr.Logger().Infof("Setting SourceShard on shard %v/%v", vscw.destinationKeyspace, vscw.destinationShard) - ctx, cancel := context.WithTimeout(vscw.ctx, 60*time.Second) - err := vscw.wr.SetSourceShards(ctx, vscw.destinationKeyspace, vscw.destinationShard, []topo.TabletAlias{vscw.sourceAlias}, vscw.tables) + shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + err := vscw.wr.SetSourceShards(shortCtx, vscw.destinationKeyspace, vscw.destinationShard, []topo.TabletAlias{vscw.sourceAlias}, vscw.tables) cancel() if err != nil { return fmt.Errorf("Failed to set source shards: %v", err) } } - err = vscw.findReloadTargets() + err = vscw.findReloadTargets(ctx) if err != nil { return fmt.Errorf("failed before 
reloading schema on destination tablets: %v", err) } @@ -567,8 +544,8 @@ func (vscw *VerticalSplitCloneWorker) copy() error { go func(ti *topo.TabletInfo) { defer destinationWaitGroup.Done() vscw.wr.Logger().Infof("Reloading schema on tablet %v", ti.Alias) - ctx, cancel := context.WithTimeout(vscw.ctx, 30*time.Second) - err := vscw.wr.TabletManagerClient().ReloadSchema(ctx, ti) + shortCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + err := vscw.wr.TabletManagerClient().ReloadSchema(shortCtx, ti) cancel() if err != nil { processError("ReloadSchema failed on tablet %v: %v", ti.Alias, err) diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index 28ab443725..972d59460c 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -316,7 +316,7 @@ func testVerticalSplitClone(t *testing.T, strategy string) { // Only wait 1 ms between retries, so that the test passes faster executeFetchRetryTime = (1 * time.Millisecond) - wrk.Run() + wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) if wrk.err != nil || wrk.state != stateSCDone { diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index f0456a53b8..2d4fb2b8c5 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -43,8 +43,6 @@ type VerticalSplitDiffWorker struct { shard string excludeTables []string cleaner *wrangler.Cleaner - ctx context.Context - ctxCancel context.CancelFunc // all subsequent fields are protected by the mutex mu sync.Mutex @@ -68,7 +66,6 @@ type VerticalSplitDiffWorker struct { // NewVerticalSplitDiffWorker returns a new VerticalSplitDiffWorker object. func NewVerticalSplitDiffWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string) Worker { - ctx, cancel := context.WithCancel(context.Background()) return &VerticalSplitDiffWorker{ wr: wr, cell: cell, @@ -76,8 +73,6 @@ func NewVerticalSplitDiffWorker(wr *wrangler.Wrangler, cell, keyspace, shard str shard: shard, excludeTables: excludeTables, cleaner: &wrangler.Cleaner{}, - ctx: ctx, - ctxCancel: cancel, state: stateVSDNotSarted, } @@ -133,28 +128,21 @@ func (vsdw *VerticalSplitDiffWorker) StatusAsText() string { return result } -// Cancel is part of the Worker interface -func (vsdw *VerticalSplitDiffWorker) Cancel() { - vsdw.ctxCancel() -} - -func (vsdw *VerticalSplitDiffWorker) checkInterrupted() bool { +func (vsdw *VerticalSplitDiffWorker) checkInterrupted(ctx context.Context) error { select { - case <-vsdw.ctx.Done(): - if vsdw.ctx.Err() == context.DeadlineExceeded { - return false - } - vsdw.recordError(topo.ErrInterrupted) - return true + case <-ctx.Done(): + err := ctx.Err() + vsdw.recordError(err) + return err default: } - return false + return nil } // Run is mostly a wrapper to run the cleanup at the end. 
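With Cancel() and Error() gone from the Worker interface, cancellation and error reporting both travel through Run's context argument and return value. Below is a minimal, self-contained sketch (not part of the patch; runWithDeadline and its names are illustrative only) of how a caller can drive a worker under the new contract:

```go
package workerdemo

import (
	"time"

	"golang.org/x/net/context"
)

// worker mirrors the reduced interface introduced by this patch: a single
// Run(context.Context) error entry point instead of Run/Cancel/Error.
type worker interface {
	Run(ctx context.Context) error
}

// runWithDeadline replaces the old Cancel() call with a context
// cancellation: any checkInterrupted(ctx) probe inside the worker
// observes ctx.Err() and unwinds.
func runWithDeadline(w worker, d time.Duration) error {
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(d, cancel)
	// The error comes back directly, instead of being fetched through
	// a separate Error() method after Run() has finished.
	return w.Run(ctx)
}
```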
-func (vsdw *VerticalSplitDiffWorker) Run() { +func (vsdw *VerticalSplitDiffWorker) Run(ctx context.Context) error { resetVars() - err := vsdw.run() + err := vsdw.run(ctx) vsdw.setState(stateVSDCleanUp) cerr := vsdw.cleaner.CleanUp(vsdw.wr) @@ -167,48 +155,39 @@ func (vsdw *VerticalSplitDiffWorker) Run() { } if err != nil { vsdw.recordError(err) - return + return err } vsdw.setState(stateVSDDone) + return nil } -func (vsdw *VerticalSplitDiffWorker) Error() error { - return vsdw.err -} - -func (vsdw *VerticalSplitDiffWorker) run() error { +func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { // first state: read what we need to do if err := vsdw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if vsdw.checkInterrupted() { - return topo.ErrInterrupted + if err := vsdw.checkInterrupted(ctx); err != nil { + return err } // second state: find targets - if err := vsdw.findTargets(); err != nil { + if err := vsdw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if vsdw.checkInterrupted() { - return topo.ErrInterrupted + if err := vsdw.checkInterrupted(ctx); err != nil { + return err } // third phase: synchronize replication - if err := vsdw.synchronizeReplication(); err != nil { - if vsdw.checkInterrupted() { - return topo.ErrInterrupted - } + if err := vsdw.synchronizeReplication(ctx); err != nil { return fmt.Errorf("synchronizeReplication() failed: %v", err) } - if vsdw.checkInterrupted() { - return topo.ErrInterrupted + if err := vsdw.checkInterrupted(ctx); err != nil { + return err } // fourth phase: diff - if err := vsdw.diff(); err != nil { - if vsdw.checkInterrupted() { - return topo.ErrInterrupted - } + if err := vsdw.diff(ctx); err != nil { return fmt.Errorf("diff() failed: %v", err) } @@ -253,18 +232,18 @@ func (vsdw *VerticalSplitDiffWorker) init() error { // - find one rdonly per source shard // - find one rdonly in destination shard // - mark them all as 'checker' pointing back to us -func (vsdw *VerticalSplitDiffWorker) findTargets() error { +func (vsdw *VerticalSplitDiffWorker) findTargets(ctx context.Context) error { vsdw.setState(stateVSDFindTargets) // find an appropriate endpoint in destination shard var err error - vsdw.destinationAlias, err = findChecker(vsdw.ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.keyspace, vsdw.shard) + vsdw.destinationAlias, err = findChecker(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.keyspace, vsdw.shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", vsdw.cell, vsdw.keyspace, vsdw.shard, err) } // find an appropriate endpoint in the source shard - vsdw.sourceAlias, err = findChecker(vsdw.ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard) + vsdw.sourceAlias, err = findChecker(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard, err) } @@ -290,7 +269,7 @@ func (vsdw *VerticalSplitDiffWorker) findTargets() error { // (remove the cleanup task that does the same) // At this point, all checker instances are stopped at the same point. 
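The rewritten run() above reduces every worker to the same shape: execute a phase, then make a non-blocking check of the context before starting the next one. A condensed sketch of that pattern (the phase type and runPhases are invented for illustration; the worker's own checkInterrupted additionally records the error for status reporting):

```go
package workerdemo

import (
	"fmt"

	"golang.org/x/net/context"
)

// checkInterrupted is the non-blocking probe the workers now use: it
// returns the context's error once cancellation has happened and nil
// otherwise, without ever blocking on the Done channel.
func checkInterrupted(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

type phase struct {
	name string
	run  func(context.Context) error
}

// runPhases chains phases together, bailing out between any two of
// them as soon as the shared context has been cancelled.
func runPhases(ctx context.Context, phases []phase) error {
	for _, p := range phases {
		if err := p.run(ctx); err != nil {
			return fmt.Errorf("%v() failed: %v", p.name, err)
		}
		if err := checkInterrupted(ctx); err != nil {
			return err
		}
	}
	return nil
}
```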
-func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error { +func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) error { vsdw.setState(stateVSDSynchronizeReplication) masterInfo, err := vsdw.wr.TopoServer().GetTablet(vsdw.shardInfo.MasterAlias) @@ -300,7 +279,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error { // 1 - stop the master binlog replication, get its current position vsdw.wr.Logger().Infof("Stopping master binlog replication on %v", vsdw.shardInfo.MasterAlias) - ctx, cancel := context.WithTimeout(vsdw.ctx, 60*time.Second) + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) blpPositionList, err := vsdw.wr.TabletManagerClient().StopBlp(ctx, masterInfo) cancel() if err != nil { @@ -326,7 +305,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error { if err != nil { return err } - ctx, cancel = context.WithTimeout(vsdw.ctx, 60*time.Second) + ctx, cancel = context.WithTimeout(ctx, 60*time.Second) stoppedAt, err := vsdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, sourceTablet, pos.Position, 30*time.Second) cancel() if err != nil { @@ -347,7 +326,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error { // 3 - ask the master of the destination shard to resume filtered // replication up to the new list of positions vsdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", vsdw.shardInfo.MasterAlias, stopPositionList) - ctx, cancel = context.WithTimeout(vsdw.ctx, 60*time.Second) + ctx, cancel = context.WithTimeout(ctx, 60*time.Second) masterPos, err := vsdw.wr.TabletManagerClient().RunBlpUntil(ctx, masterInfo, &stopPositionList, 30*time.Second) cancel() if err != nil { @@ -361,7 +340,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error { if err != nil { return err } - ctx, cancel = context.WithTimeout(vsdw.ctx, 60*time.Second) + ctx, cancel = context.WithTimeout(ctx, 60*time.Second) _, err = vsdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, destinationTablet, masterPos, 30*time.Second) cancel() if err != nil { @@ -376,7 +355,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error { // 5 - restart filtered replication on destination master vsdw.wr.Logger().Infof("Restarting filtered replication on master %v", vsdw.shardInfo.MasterAlias) - ctx, cancel = context.WithTimeout(vsdw.ctx, 60*time.Second) + ctx, cancel = context.WithTimeout(ctx, 60*time.Second) err = vsdw.wr.TabletManagerClient().StartBlp(ctx, masterInfo) if err := vsdw.cleaner.RemoveActionByName(wrangler.StartBlpActionName, vsdw.shardInfo.MasterAlias.String()); err != nil { vsdw.wr.Logger().Warningf("Cannot find cleaning action %v/%v: %v", wrangler.StartBlpActionName, vsdw.shardInfo.MasterAlias.String(), err) @@ -394,7 +373,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication() error { // - if some table schema mismatches, record them (use existing schema diff tools). // - for each table in destination, run a diff pipeline. 
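Each RPC in these methods now derives a short-lived, 60-second child context from the worker's long-running one and cancels it as soon as the call returns; copy() names it shortCtx to avoid shadowing the caller's ctx. A minimal sketch of the idiom, with rpc standing in for any TabletManagerClient call:

```go
package workerdemo

import (
	"time"

	"golang.org/x/net/context"
)

// callWithTimeout wraps one RPC in its own 60-second child context,
// derived from the long-running worker context, and cancels it as soon
// as the call returns so the timer's resources are released promptly.
func callWithTimeout(ctx context.Context, rpc func(context.Context) error) error {
	shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
	err := rpc(shortCtx)
	cancel()
	return err
}
```

Cancelling the parent ctx cancels every shortCtx derived from it, which is how a single cancellation request interrupts whichever RPC happens to be in flight.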
-func (vsdw *VerticalSplitDiffWorker) diff() error { +func (vsdw *VerticalSplitDiffWorker) diff(ctx context.Context) error { vsdw.setState(stateVSDDiff) vsdw.wr.Logger().Infof("Gathering schema information...") @@ -403,7 +382,7 @@ func (vsdw *VerticalSplitDiffWorker) diff() error { wg.Add(1) go func() { var err error - ctx, cancel := context.WithTimeout(vsdw.ctx, 60*time.Second) + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) vsdw.destinationSchemaDefinition, err = vsdw.wr.GetSchema( ctx, vsdw.destinationAlias, nil /* tables */, vsdw.excludeTables, false /* includeViews */) cancel() @@ -414,7 +393,7 @@ func (vsdw *VerticalSplitDiffWorker) diff() error { wg.Add(1) go func() { var err error - ctx, cancel := context.WithTimeout(vsdw.ctx, 60*time.Second) + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) vsdw.sourceSchemaDefinition, err = vsdw.wr.GetSchema( ctx, vsdw.sourceAlias, nil /* tables */, vsdw.excludeTables, false /* includeViews */) cancel() @@ -476,14 +455,14 @@ func (vsdw *VerticalSplitDiffWorker) diff() error { defer sem.Release() vsdw.wr.Logger().Infof("Starting the diff on table %v", tableDefinition.Name) - sourceQueryResultReader, err := TableScan(vsdw.ctx, vsdw.wr.Logger(), vsdw.wr.TopoServer(), vsdw.sourceAlias, tableDefinition) + sourceQueryResultReader, err := TableScan(ctx, vsdw.wr.Logger(), vsdw.wr.TopoServer(), vsdw.sourceAlias, tableDefinition) if err != nil { vsdw.wr.Logger().Errorf("TableScan(source) failed: %v", err) return } defer sourceQueryResultReader.Close() - destinationQueryResultReader, err := TableScan(vsdw.ctx, vsdw.wr.Logger(), vsdw.wr.TopoServer(), vsdw.destinationAlias, tableDefinition) + destinationQueryResultReader, err := TableScan(ctx, vsdw.wr.Logger(), vsdw.wr.TopoServer(), vsdw.destinationAlias, tableDefinition) if err != nil { vsdw.wr.Logger().Errorf("TableScan(destination) failed: %v", err) return diff --git a/go/vt/worker/vertical_split_diff_test.go b/go/vt/worker/vertical_split_diff_test.go index 8d8c9f933a..99d7e53063 100644 --- a/go/vt/worker/vertical_split_diff_test.go +++ b/go/vt/worker/vertical_split_diff_test.go @@ -159,7 +159,7 @@ func TestVerticalSplitDiff(t *testing.T) { rdonly.RPCServer.Register(gorpcqueryservice.New(&verticalDiffSqlQuery{t: t, excludedTable: excludedTable})) } - wrk.Run() + wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) if wrk.err != nil || wrk.state != stateSCDone { diff --git a/go/vt/worker/worker.go b/go/vt/worker/worker.go index 8c993d7c62..29ea28b386 100644 --- a/go/vt/worker/worker.go +++ b/go/vt/worker/worker.go @@ -12,6 +12,8 @@ import ( "html/template" "time" + "golang.org/x/net/context" + "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/topo" ) @@ -24,18 +26,10 @@ type Worker interface { // StatusAsText returns the current worker status in plain text StatusAsText() string - // Run is the main entry point for the worker. It will be called - // in a go routine. - // When Cancel() is called, Run should exit as soon as possible. - Run() - - // Cancel should attempt to force the Worker to exit as soon as possible. - // Note that cleanup actions may still run after cancellation. - Cancel() - - // Error returns the error status of the job, if any. - // It will only be called after Run() has completed. - Error() error + // Run is the main entry point for the worker. It will be + // called in a go routine. When the passed in context is + // cancelled, Run should exit as soon as possible. 
+ Run(context.Context) error } // Resolver is an interface that should be implemented by any workers that need to @@ -43,7 +37,7 @@ type Worker interface { type Resolver interface { // ResolveDestinationMasters forces the worker to (re)resolve the topology and update // the destination masters that it knows about. - ResolveDestinationMasters() error + ResolveDestinationMasters(ctx context.Context) error // GetDestinationMaster returns the most recently resolved destination master for a particular shard. GetDestinationMaster(shardName string) (*topo.TabletInfo, error) From 383e2c52e48dd7f573e8a363ea145aa8ae1f92c0 Mon Sep 17 00:00:00 2001 From: Ammar Aijazi Date: Wed, 1 Apr 2015 10:53:03 -0700 Subject: [PATCH 041/128] Added worker.py to cover worker integration tests --- Makefile | 6 +- doc/HorizontalReshardingGuide.md | 2 +- go/vt/worker/clone_utils.go | 8 +- go/vt/worker/split_clone.go | 2 +- go/vt/worker/split_clone_test.go | 2 +- go/vt/worker/vertical_split_clone.go | 2 +- go/vt/worker/vertical_split_clone_test.go | 2 +- go/vt/worker/worker.go | 8 +- test/config.json | 3 + test/tablet.py | 2 +- test/utils.py | 101 ++++- test/worker.py | 438 ++++++++++++++++++++++ 12 files changed, 549 insertions(+), 27 deletions(-) create mode 100755 test/worker.py diff --git a/Makefile b/Makefile index d040aed694..bf1815a0db 100644 --- a/Makefile +++ b/Makefile @@ -100,7 +100,8 @@ medium_integration_test_files = \ reparent.py \ vtdb_test.py \ vtgate_utils_test.py \ - rowcache_invalidator.py + rowcache_invalidator.py \ + worker.py large_integration_test_files = \ vtgatev2_test.py \ @@ -122,7 +123,8 @@ worker_integration_test_files = \ vertical_split.py \ vertical_split_vtgate.py \ initial_sharding.py \ - initial_sharding_bytes.py + initial_sharding_bytes.py \ + worker.py .ONESHELL: SHELL = /bin/bash diff --git a/doc/HorizontalReshardingGuide.md b/doc/HorizontalReshardingGuide.md index bf27985b5f..faa764a081 100644 --- a/doc/HorizontalReshardingGuide.md +++ b/doc/HorizontalReshardingGuide.md @@ -9,7 +9,7 @@ Let’s assume that you’ve already got a keyspace up and running, with a singl The first thing that we need to do is add a column to the soon-to-be-sharded keyspace which will be used as the "sharding key". This column will tell Vitess which shard a particular row of data should go to. You can add the column by running an alter on the unsharded keyspace - probably by running something like: -`vtctl ApplySchemaKeyspace -simple -sql="alter table add keyspace_id" test_keyspace` +`vtctl ApplySchema -sql="alter table
add keyspace_id" test_keyspace` for each table in the keyspace. Once the column is added everywhere, each row needs to be backfilled with the appropriate keyspace ID. diff --git a/go/vt/worker/clone_utils.go b/go/vt/worker/clone_utils.go index 19e5379b9c..74eeaec411 100644 --- a/go/vt/worker/clone_utils.go +++ b/go/vt/worker/clone_utils.go @@ -151,9 +151,6 @@ func formatTableStatuses(tableStatuses []*tableStatus, startTime time.Time) ([]s var errExtract = regexp.MustCompile(`\(errno (\d+)\)`) -// The amount of time we should wait before retrying ExecuteFetch calls -var executeFetchRetryTime = (30 * time.Second) - // executeFetchWithRetries will attempt to run ExecuteFetch for a single command, with a reasonably small timeout. // If will keep retrying the ExecuteFetch (for a finite but longer duration) if it fails due to a timeout or a // retriable application error. @@ -190,6 +187,9 @@ func executeFetchWithRetries(ctx context.Context, wr *wrangler.Wrangler, ti *top case errNo == "1290": wr.Logger().Warningf("ExecuteFetch failed on %v; will reresolve and retry because it's due to a MySQL read-only error: %v", ti, err) statsRetryCounters.Add("ReadOnly", 1) + case errNo == "2002" || errNo == "2006": + wr.Logger().Warningf("ExecuteFetch failed on %v; will reresolve and retry because it's due to a MySQL connection error: %v", ti, err) + statsRetryCounters.Add("ConnectionError", 1) case errNo == "1062": if !isRetry { return ti, fmt.Errorf("ExecuteFetch failed on %v on the first attempt; not retrying as this is not a recoverable error: %v", ti, err) @@ -200,7 +200,7 @@ func executeFetchWithRetries(ctx context.Context, wr *wrangler.Wrangler, ti *top // Unknown error return ti, err } - t := time.NewTimer(executeFetchRetryTime) + t := time.NewTimer(*executeFetchRetryTime) // don't leak memory if the timer isn't triggered defer t.Stop() diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index b2904f5394..fe74ac4b84 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -384,7 +384,7 @@ func (scw *SplitCloneWorker) ResolveDestinationMasters() error { defer scw.resolveMu.Unlock() // If the last resolution was fresh enough, return it. - if time.Now().Sub(scw.resolveTime) < resolveTTL { + if time.Now().Sub(scw.resolveTime) < *resolveTTL { return nil } diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 3a0f7f0c93..45b46dbc08 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -329,7 +329,7 @@ func testSplitClone(t *testing.T, strategy string) { rightRdonly.FakeMysqlDaemon.DbAppConnectionFactory = DestinationsFactory(t, 30) // Only wait 1 ms between retries, so that the test passes faster - executeFetchRetryTime = (1 * time.Millisecond) + *executeFetchRetryTime = (1 * time.Millisecond) wrk.Run() status := wrk.StatusAsText() diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index d1d233cf9c..341fd09c2a 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -346,7 +346,7 @@ func (vscw *VerticalSplitCloneWorker) ResolveDestinationMasters() error { defer vscw.resolveMu.Unlock() // If the last resolution was fresh enough, return it. 
- if time.Now().Sub(vscw.resolveTime) < resolveTTL { + if time.Now().Sub(vscw.resolveTime) < *resolveTTL { return nil } diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index 28ab443725..c4e136723f 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -314,7 +314,7 @@ func testVerticalSplitClone(t *testing.T, strategy string) { destRdonly.FakeMysqlDaemon.DbAppConnectionFactory = VerticalDestinationsFactory(t, 30) // Only wait 1 ms between retries, so that the test passes faster - executeFetchRetryTime = (1 * time.Millisecond) + *executeFetchRetryTime = (1 * time.Millisecond) wrk.Run() status := wrk.StatusAsText() diff --git a/go/vt/worker/worker.go b/go/vt/worker/worker.go index 8c993d7c62..9030049202 100644 --- a/go/vt/worker/worker.go +++ b/go/vt/worker/worker.go @@ -9,6 +9,7 @@ functions for long running actions. 'vtworker' binary will use these. package worker import ( + "flag" "html/template" "time" @@ -49,11 +50,10 @@ type Resolver interface { GetDestinationMaster(shardName string) (*topo.TabletInfo, error) } -// Resolvers should attempt to keep the previous topo resolution cached for at -// least this long. -const resolveTTL = 15 * time.Second - var ( + resolveTTL = flag.Duration("resolve_ttl", 15*time.Second, "Amount of time that a topo resolution can be cached for") + executeFetchRetryTime = flag.Duration("executefetch_retry_time", 30*time.Second, "Amount of time we should wait before retrying ExecuteFetch calls") + statsState = stats.NewString("WorkerState") // the number of times that the worker attempts to reresolve the masters statsDestinationAttemptedResolves = stats.NewInt("WorkerDestinationAttemptedResolves") diff --git a/test/config.json b/test/config.json index ad0ac12a9e..2aab23eda5 100644 --- a/test/config.json +++ b/test/config.json @@ -75,6 +75,9 @@ }, { "File": "resharding.py" + }, + { + "File": "worker.py" } ] } diff --git a/test/tablet.py b/test/tablet.py index 95beaa4663..90eae7a350 100644 --- a/test/tablet.py +++ b/test/tablet.py @@ -240,7 +240,7 @@ class Tablet(object): rows = self.mquery('', 'show databases') for row in rows: dbname = row[0] - if dbname in ['information_schema', '_vt', 'mysql']: + if dbname in ['information_schema', 'mysql']: continue self.drop_db(dbname) diff --git a/test/utils.py b/test/utils.py index 274b7a062b..14963f332a 100644 --- a/test/utils.py +++ b/test/utils.py @@ -20,6 +20,7 @@ import environment from vtctl import vtctl_client from mysql_flavor import set_mysql_flavor +from mysql_flavor import mysql_flavor from protocols_flavor import set_protocols_flavor, protocols_flavor from topo_flavor.server import set_topo_server_flavor @@ -79,7 +80,13 @@ def set_options(opts): # main executes the test classes contained in the passed module, or # __main__ if empty. -def main(mod=None): +def main(mod=None, test_options=None): + """The replacement main method, which parses args and runs tests. + + Args: + test_options - a function which adds OptionParser options that are specific + to a test file.
+ """ if mod == None: mod = sys.modules['__main__'] @@ -87,6 +94,8 @@ def main(mod=None): parser = optparse.OptionParser(usage="usage: %prog [options] [test_names]") add_options(parser) + if test_options: + test_options(parser) (options, args) = parser.parse_args() if options.verbose == 0: @@ -256,12 +265,6 @@ def wait_procs(proc_list, raise_on_error=True): if raise_on_error: raise CalledProcessError(proc.returncode, ' '.join(proc.args)) -def run_procs(cmds, raise_on_error=True): - procs = [] - for cmd in cmds: - procs.append(run_bg(cmd)) - wait_procs(procs, raise_on_error=raise_on_error) - def validate_topology(ping_tablets=False): if ping_tablets: run_vtctl(['Validate', '-ping-tablets']) @@ -325,6 +328,40 @@ def wait_for_vars(name, port, var=None): break timeout = wait_step('waiting for /debug/vars of %s' % name, timeout) +def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None): + """Polls for debug variables to exist, or match specific conditions, within a timeout. + + This function polls in a tight loop, with no sleeps. This is useful for + variables that are expected to be short-lived (e.g., a 'Done' state + immediately before a process exits). + + Args: + name - the name of the process that we're trying to poll vars from. + port - the port number that we should poll for variables. + condition_msg - string describing the conditions that we're polling for, + used for error messaging. + timeout - number of seconds that we should attempt to poll for. + condition_fn - a function that takes the debug vars dict as input, and + returns a truthy value if it matches the success conditions. + + Raises: + TestError, if the conditions aren't met within the given timeout + + Returns: + dict of debug variables + """ + start_time = time.time() + while True: + if (time.time() - start_time) >= timeout: + raise TestError('Timed out polling for vars from %s; condition "%s" not met' % (name, condition_msg)) + _vars = get_vars(port) + if _vars is None: + continue + if condition_fn is None: + return _vars + elif condition_fn(_vars): + return _vars + def apply_vschema(vschema): fname = os.path.join(environment.tmproot, "vschema.json") with open(fname, "w") as f: @@ -345,6 +382,23 @@ def wait_for_tablet_type(tablet_alias, expected_type, timeout=10): timeout ) +def wait_for_replication_pos(tablet_a, tablet_b, timeout=60.0): + """Waits for tablet B to catch up to the replication position of tablet A. + + If the replication position does not catch up within timeout seconds, it will + raise a TestError. 
+ """ + replication_pos_a = mysql_flavor().master_position(tablet_a) + while True: + replication_pos_b = mysql_flavor().master_position(tablet_b) + if mysql_flavor().position_at_least(replication_pos_b, replication_pos_a): + break + timeout = wait_step( + "%s's replication position to catch up %s's; currently at: %s, waiting to catch up to: %s" % ( + tablet_b.tablet_alias, tablet_a.tablet_alias, replication_pos_b, replication_pos_a), + timeout + ) + # vtgate helpers, assuming it always restarts on the same port def vtgate_start(vtport=None, cell='test_nj', retry_delay=1, retry_count=2, topo_impl=None, tablet_bson_encrypted=False, cache_ttl='1s', @@ -463,10 +517,37 @@ def run_vtctl_json(clargs): # vtworker helpers def run_vtworker(clargs, log_level='', auto_log=False, expect_fail=False, **kwargs): + """Runs a vtworker process, returning the stdout and stderr""" + cmd, _ = _get_vtworker_cmd(clargs, log_level, auto_log) + if expect_fail: + return run_fail(cmd, **kwargs) + return run(cmd, **kwargs) + +def run_vtworker_bg(clargs, log_level='', auto_log=False, **kwargs): + """Starts a background vtworker process. + + Returns: + proc - process returned by subprocess.Popen + port - int with the port number that the vtworker is running with + """ + cmd, port = _get_vtworker_cmd(clargs, log_level, auto_log) + return run_bg(cmd, **kwargs), port + +def _get_vtworker_cmd(clargs, log_level='', auto_log=False): + """Assembles the command that is needed to run a vtworker. + + Returns: + cmd - list of cmd arguments, can be passed to any `run`-like functions + port - int with the port number that the vtworker is running with + """ + port = environment.reserve_ports(1) args = environment.binary_args('vtworker') + [ '-log_dir', environment.vtlogroot, '-min_healthy_rdonly_endpoints', '1', - '-port', str(environment.reserve_ports(1))] + '-port', str(port), + '-resolve_ttl', '2s', + '-executefetch_retry_time', '1s', + ] args.extend(environment.topo_server().flags()) args.extend(protocols_flavor().tablet_manager_protocol_flags()) @@ -481,9 +562,7 @@ def run_vtworker(clargs, log_level='', auto_log=False, expect_fail=False, **kwar args.append('--stderrthreshold=%s' % log_level) cmd = args + clargs - if expect_fail: - return run_fail(cmd, **kwargs) - return run(cmd, **kwargs) + return cmd, port # vtclient2 helpers # driver is one of: diff --git a/test/worker.py b/test/worker.py new file mode 100755 index 0000000000..3f5beb9d97 --- /dev/null +++ b/test/worker.py @@ -0,0 +1,438 @@ +#!/usr/bin/env python +# +# Copyright 2013, Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can +# be found in the LICENSE file. +""" +Tests the robustness and resiliency of vtworkers. +""" + +import logging +import unittest +from collections import namedtuple + +from vtdb import keyrange_constants + +import environment +import utils +import tablet + + +KEYSPACE_ID_TYPE = keyrange_constants.KIT_UINT64 + + +class ShardTablets(namedtuple('ShardTablets', 'master replicas rdonlys')): + """ShardTablets is a container for all the tablet.Tablets of a shard. + + `master` should be a single Tablet, while `replicas` and `rdonlys` should be + lists of Tablets of the appropriate types. + """ + + @property + def all_tablets(self): + """Returns a list of all the tablets of the shard. + + Does not guarantee any ordering on the returned tablets. 
+ """ + return [self.master] + self.replicas + self.rdonlys + + @property + def replica(self): + """Returns the first replica Tablet instance for the shard, or None.""" + if self.replicas: + return self.replicas[0] + else: + return None + + @property + def rdonly(self): + """Returns the first replica Tablet instance for the shard, or None.""" + if self.rdonlys: + return self.rdonlys[0] + else: + return None + +# initial shard, covers everything +shard_master = tablet.Tablet() +shard_replica = tablet.Tablet() +shard_rdonly1 = tablet.Tablet() + +# split shards +# range "" - 80 +shard_0_master = tablet.Tablet() +shard_0_replica = tablet.Tablet() +shard_0_rdonly1 = tablet.Tablet() +# range 80 - "" +shard_1_master = tablet.Tablet() +shard_1_replica = tablet.Tablet() +shard_1_rdonly1 = tablet.Tablet() + +shard_tablets = ShardTablets(shard_master, [shard_replica], [shard_rdonly1]) +shard_0_tablets = ShardTablets(shard_0_master, [shard_0_replica], [shard_0_rdonly1]) +shard_1_tablets = ShardTablets(shard_1_master, [shard_1_replica], [shard_1_rdonly1]) + + +def init_keyspace(): + """Creates a `test_keyspace` keyspace with a sharding key.""" + utils.run_vtctl(['CreateKeyspace', '-sharding_column_name', 'keyspace_id', + '-sharding_column_type', KEYSPACE_ID_TYPE,'test_keyspace']) + + +def setUpModule(): + try: + environment.topo_server().setup() + + setup_procs = [ + shard_master.init_mysql(), + shard_replica.init_mysql(), + shard_rdonly1.init_mysql(), + shard_0_master.init_mysql(), + shard_0_replica.init_mysql(), + shard_0_rdonly1.init_mysql(), + shard_1_master.init_mysql(), + shard_1_replica.init_mysql(), + shard_1_rdonly1.init_mysql(), + ] + utils.wait_procs(setup_procs) + init_keyspace() + except: + tearDownModule() + raise + + +def tearDownModule(): + if utils.options.skip_teardown: + return + + teardown_procs = [ + shard_master.teardown_mysql(), + shard_replica.teardown_mysql(), + shard_rdonly1.teardown_mysql(), + shard_0_master.teardown_mysql(), + shard_0_replica.teardown_mysql(), + shard_0_rdonly1.teardown_mysql(), + shard_1_master.teardown_mysql(), + shard_1_replica.teardown_mysql(), + shard_1_rdonly1.teardown_mysql(), + ] + utils.wait_procs(teardown_procs, raise_on_error=False) + + environment.topo_server().teardown() + utils.kill_sub_processes() + utils.remove_tmp_files() + + shard_master.remove_tree() + shard_replica.remove_tree() + shard_rdonly1.remove_tree() + shard_0_master.remove_tree() + shard_0_replica.remove_tree() + shard_0_rdonly1.remove_tree() + shard_1_master.remove_tree() + shard_1_replica.remove_tree() + shard_1_rdonly1.remove_tree() + + +class TestBaseSplitCloneResiliency(unittest.TestCase): + """Tests that the SplitClone worker is resilient to particular failures.""" + + def run_shard_tablets(self, shard_name, shard_tablets, create_db=True, create_table=True, wait_state='SERVING'): + """Handles all the necessary work for initially running a shard's tablets. + + This encompasses the following steps: + 1. InitTablet for the appropriate tablets and types + 2. (optional) Create db + 3. Starting vttablets + 4. Waiting for the appropriate vttablet state + 5. Force reparent to the master tablet + 6. RebuildKeyspaceGraph + 7. 
(optional) Running initial schema setup + + Args: + shard_name - the name of the shard to start tablets in + shard_tablets - an instance of ShardTablets for the given shard + wait_state - string, the vttablet state that we should wait for + create_db - boolean, True iff we should create a db on the tablets + create_table - boolean, True iff we should create a table on the tablets + """ + shard_tablets.master.init_tablet('master', 'test_keyspace', shard_name) + for tablet in shard_tablets.replicas: + tablet.init_tablet('replica', 'test_keyspace', shard_name) + for tablet in shard_tablets.rdonlys: + tablet.init_tablet('rdonly', 'test_keyspace', shard_name) + + # Start tablets (and possibly create databases) + for tablet in shard_tablets.all_tablets: + if create_db: + tablet.create_db('vt_test_keyspace') + tablet.start_vttablet(wait_for_state=None) + + # Wait for tablet state to change after starting all tablets. This allows + # us to start all tablets at once, instead of sequentially waiting. + for tablet in shard_tablets.all_tablets: + tablet.wait_for_vttablet_state(wait_state) + + # Reparent to choose an initial master + utils.run_vtctl(['InitShardMaster', 'test_keyspace/%s' % shard_name, + shard_tablets.master.tablet_alias], auto_log=True) + utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) + + create_table_sql = ( + 'create table worker_test(' + 'id bigint unsigned,' + 'msg varchar(64),' + 'keyspace_id bigint(20) unsigned not null,' + 'primary key (id),' + 'index by_msg (msg)' + ') Engine=InnoDB' + ) + + if create_table: + utils.run_vtctl(['ApplySchema', + '-sql=' + create_table_sql, + 'test_keyspace'], + auto_log=True) + + def _insert_value(self, tablet, id, msg, keyspace_id): + """Inserts a value in the MySQL database along with the required routing comments. + + Args: + tablet - the Tablet instance to insert into + id - the value of `id` column + msg - the value of `msg` column + keyspace_id - the value of `keyspace_id` column + """ + k = "%u" % keyspace_id + tablet.mquery('vt_test_keyspace', [ + 'begin', + 'insert into worker_test(id, msg, keyspace_id) values(%u, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%u */' % (id, msg, keyspace_id, k, id), + 'commit' + ], write=True) + + def insert_values(self, tablet, num_values, num_shards, offset=0, keyspace_id_range=2**64): + """Inserts simple values, one for each potential shard. + + Each row is given a message that contains the shard number, so we can easily + verify that the source and destination shards have the same data. + + Args: + tablet - the Tablet instance to insert into + num_values - the number of values to insert + num_shards - the number of shards that we expect to have + offset - amount that we should offset the `id`s by. This is useful for + inserting values multiple times. + keyspace_id_range - the number of distinct values that the keyspace id can have + """ + shard_width = keyspace_id_range / num_shards + shard_offsets = [i * shard_width for i in xrange(num_shards)] + for i in xrange(num_values): + for shard_num in xrange(num_shards): + self._insert_value(tablet, shard_offsets[shard_num] + offset + i, + 'msg-shard-%u' % shard_num, + shard_offsets[shard_num] + i) + + def assert_shard_data_equal(self, shard_num, source_tablet, destination_tablet): + """Asserts that a shard's data is identical on source and destination tablets. 
+ + Args: + shard_num - the shard number of the shard that we want to verify the data of + source_tablet - Tablet instance of the source shard + destination_tablet - Tablet instance of the destination shard + """ + select_query = 'select * from worker_test where msg="msg-shard-%s" order by id asc' % shard_num + + source_rows = source_tablet.mquery('vt_test_keyspace', select_query) + destination_rows = destination_tablet.mquery('vt_test_keyspace', select_query) + self.assertEqual(source_rows, destination_rows) + + def run_split_diff(self, keyspace_shard, source_tablets, destination_tablets): + """Runs a vtworker SplitDiff on the given keyspace/shard, and then sets all + former rdonly slaves back to rdonly. + + Args: + keyspace_shard - keyspace/shard to run SplitDiff on (string) + source_tablets - ShardTablets instance for the source shard + destination_tablets - ShardTablets instance for the destination shard + """ + logging.debug("Running vtworker SplitDiff for %s" % keyspace_shard) + stdout, stderr = utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', + keyspace_shard], auto_log=True) + + for shard_tablets in (source_tablets, destination_tablets): + for tablet in shard_tablets.rdonlys: + utils.run_vtctl(['ChangeSlaveType', tablet.tablet_alias, 'rdonly'], + auto_log=True) + + def setUp(self): + """Creates the necessary shards, starts the tablets, and inserts some data.""" + self.run_shard_tablets('0', shard_tablets) + # create the split shards + self.run_shard_tablets('-80', shard_0_tablets, create_db=False, + create_table=False, wait_state='NOT_SERVING') + self.run_shard_tablets('80-', shard_1_tablets, create_db=False, + create_table=False, wait_state='NOT_SERVING') + + # Copy the schema to the destination shards + for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'): + utils.run_vtctl(['CopySchemaShard', + '--exclude_tables', 'unrelated', + shard_rdonly1.tablet_alias, + keyspace_shard], + auto_log=True) + + logging.debug("Start inserting initial data: %s rows", utils.options.num_insert_rows) + self.insert_values(shard_master, utils.options.num_insert_rows, 2) + logging.debug("Done inserting initial data, waiting for replication to catch up") + utils.wait_for_replication_pos(shard_master, shard_rdonly1) + logging.debug("Replication on source rdonly tablet is caught up") + + def tearDown(self): + """Tries to do the minimum to reset topology and tablets to their initial states. + + When benchmarked, this seemed to take around 30% of the time of (setUpModule + + tearDownModule). + """ + for shard_tablet in [shard_tablets, shard_0_tablets, shard_1_tablets]: + for tablet in shard_tablet.all_tablets: + tablet.clean_dbs() + tablet.scrap(force=True, skip_rebuild=True) + utils.run_vtctl(['DeleteTablet', tablet.tablet_alias], auto_log=True) + tablet.kill_vttablet() + utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) + for shard in ['0', '-80', '80-']: + utils.run_vtctl(['DeleteShard', 'test_keyspace/%s' % shard], auto_log=True) + + def verify_successful_worker_copy_with_reparent(self, mysql_down=False): + """Verifies that vtworker can successfully copy data for a SplitClone. + + Order of operations: + 1. Run a background vtworker + 2. Wait until the worker successfully resolves the destination masters. + 3. Reparent the destination tablets + 4. Wait until the vtworker copy is finished + 5. Verify that the worker was forced to reresolve topology and retry writes + due to the reparent. + 6.
Verify that the data was copied successfully to both new shards + + Args: + mysql_down - boolean, True iff we expect the MySQL instances on the + destination masters to be down. + + Raises: + AssertionError if things didn't go as expected. + """ + worker_proc, worker_port = utils.run_vtworker_bg(['--cell', 'test_nj', + 'SplitClone', + '--source_reader_count', '1', + '--destination_pack_count', '1', + '--destination_writer_count', '1', + '--strategy=-populate_blp_checkpoint', + 'test_keyspace/0'], + auto_log=True) + + if mysql_down: + # If MySQL is down, we wait until resolving at least twice (to verify that + # we do reresolve and retry due to MySQL being down). + worker_vars = utils.poll_for_vars('vtworker', worker_port, + 'WorkerDestinationActualResolves >= 2', + condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 2) + self.assertNotEqual(worker_vars['WorkerRetryCount'], {}, + "expected vtworker to retry, but it didn't") + logging.debug("Worker has resolved at least twice, starting reparent now") + + # Original masters have no running MySQL, so need to force the reparent + utils.run_vtctl(['EmergencyReparentShard', 'test_keyspace/-80', + shard_0_replica.tablet_alias], auto_log=True) + utils.run_vtctl(['EmergencyReparentShard', 'test_keyspace/80-', + shard_1_replica.tablet_alias], auto_log=True) + + else: + utils.poll_for_vars('vtworker', worker_port, + 'WorkerDestinationActualResolves >= 1', + condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 1) + logging.debug("Worker has resolved at least once, starting reparent now") + + utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/-80', + shard_0_replica.tablet_alias], auto_log=True) + utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/80-', + shard_1_replica.tablet_alias], auto_log=True) + + logging.debug("Polling for worker state") + # There are a couple of race conditions around this, that we need to be careful of: + # 1. It's possible for the reparent step to take so long that the worker will + # actually finish before we get to the polling step. To work around this, + # the test takes a parameter to increase the number of rows that the worker + # has to copy (with the idea being to slow the worker down). + # 2. If the worker has a huge number of rows to copy, it's possible for the + # polling to time out before the worker has finished copying the data. + # + # You should choose a value for num_insert_rows, such that this test passes + # for your environment (trial-and-error...) + worker_vars = utils.poll_for_vars('vtworker', worker_port, + 'WorkerState == cleaning up', + condition_fn=lambda v: v.get('WorkerState') == 'cleaning up') + + # Verify that we were forced to reresolve and retry.
+ self.assertGreater(worker_vars['WorkerDestinationActualResolves'], 1) + self.assertGreater(worker_vars['WorkerDestinationAttemptedResolves'], 1) + self.assertNotEqual(worker_vars['WorkerRetryCount'], {}, + "expected vtworker to retry, but it didn't") + + utils.wait_procs([worker_proc]) + + utils.run_vtctl(['ChangeSlaveType', shard_rdonly1.tablet_alias, 'rdonly'], + auto_log=True) + + # Make sure that everything is caught up to the same replication point + self.run_split_diff('test_keyspace/-80', shard_tablets, shard_0_tablets) + self.run_split_diff('test_keyspace/80-', shard_tablets, shard_1_tablets) + + self.assert_shard_data_equal(0, shard_master, shard_0_tablets.replica) + self.assert_shard_data_equal(1, shard_master, shard_1_tablets.replica) + + +class TestReparentDuringWorkerCopy(TestBaseSplitCloneResiliency): + + def test_reparent_during_worker_copy(self): + """This test simulates a destination reparent during a worker SplitClone copy. + + The SplitClone command should be able to gracefully handle the reparent and + end up with the correct data on the destination. + + Note: this test has a small possibility of flaking, due to the timing issues + involved. It's possible for the worker to finish the copy step before the + reparent succeeds, in which case there are assertions that will fail. This + seems better than having the test silently pass. + """ + self.verify_successful_worker_copy_with_reparent() + + +class TestMysqlDownDuringWorkerCopy(TestBaseSplitCloneResiliency): + + def setUp(self): + """Shuts down MySQL on the destination masters (in addition to the base setup)""" + logging.debug("Starting base setup for MysqlDownDuringWorkerCopy") + super(TestMysqlDownDuringWorkerCopy, self).setUp() + logging.debug("Starting MysqlDownDuringWorkerCopy-specific setup") + utils.wait_procs([shard_0_master.shutdown_mysql(), + shard_1_master.shutdown_mysql()]) + logging.debug("Finished MysqlDownDuringWorkerCopy-specific setup") + + def tearDown(self): + """Restarts the MySQL processes that were killed during the setup.""" + logging.debug("Starting MysqlDownDuringWorkerCopy-specific tearDown") + utils.wait_procs([shard_0_master.start_mysql(), + shard_1_master.start_mysql()]) + logging.debug("Finished MysqlDownDuringWorkerCopy-specific tearDown") + super(TestMysqlDownDuringWorkerCopy, self).tearDown() + logging.debug("Finished base tearDown for MysqlDownDuringWorkerCopy") + + def test_mysql_down_during_worker_copy(self): + """This test simulates MySQL being down on the destination masters.""" + self.verify_successful_worker_copy_with_reparent(mysql_down=True) + +def add_test_options(parser): + parser.add_option('--num_insert_rows', type="int", default=3000, + help="The number of rows, per shard, that we should insert before resharding for this test.") + +if __name__ == '__main__': + utils.main(test_options=add_test_options) \ No newline at end of file From 7baba1c2f9ecd698863cb8cb112d7122e56e9c95 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 19 May 2015 08:32:32 -0700 Subject: [PATCH 042/128] Refactoring worker code some more: - worker error is remembered and displayed by the framework - adding a StatusWorker that has common code for all workers which maintain a state with a mutex. 
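The new status_worker.go itself is not reproduced in this excerpt. Judging from how the workers use it below (an embedded StatusWorker, a shared Mu, an exported State, and a SetState method), a minimal version would need roughly the following shape; this is an assumption-based sketch, and the real file also updates the WorkerState stats variable, which is omitted here:

```go
package workerdemo

import "sync"

// StatusWorkerState is the state shared by all status workers.
type StatusWorkerState string

func (s StatusWorkerState) String() string { return string(s) }

const (
	WorkerStateNotStarted StatusWorkerState = "not started"
	WorkerStateDone       StatusWorkerState = "done"
	WorkerStateError      StatusWorkerState = "error"
)

// StatusWorker carries the state/mutex pair that every worker used to
// duplicate. Workers embed it and reuse Mu to protect their own fields.
type StatusWorker struct {
	Mu    sync.Mutex
	State StatusWorkerState
}

// NewStatusWorker returns a StatusWorker in the initial state, ready to
// be embedded via a composite literal as the workers below do.
func NewStatusWorker() StatusWorker {
	return StatusWorker{State: WorkerStateNotStarted}
}

// SetState transitions the worker to a new state under the mutex.
func (w *StatusWorker) SetState(state StatusWorkerState) {
	w.Mu.Lock()
	w.State = state
	w.Mu.Unlock()
}
```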
--- go/cmd/vtworker/command.go | 1 + go/cmd/vtworker/status.go | 8 +- go/vt/worker/split_clone.go | 105 ++++++------------- go/vt/worker/split_clone_test.go | 4 +- go/vt/worker/split_diff.go | 103 ++++++------------- go/vt/worker/split_diff_test.go | 4 +- go/vt/worker/sqldiffer.go | 117 ++++++---------------- go/vt/worker/sqldiffer_test.go | 4 +- go/vt/worker/status_worker.go | 73 ++++++++++++++ go/vt/worker/vertical_split_clone.go | 109 +++++++------------- go/vt/worker/vertical_split_clone_test.go | 4 +- go/vt/worker/vertical_split_diff.go | 103 ++++++------------- go/vt/worker/vertical_split_diff_test.go | 4 +- go/vt/worker/worker.go | 10 ++ 14 files changed, 256 insertions(+), 393 deletions(-) create mode 100644 go/vt/worker/status_worker.go diff --git a/go/cmd/vtworker/command.go b/go/cmd/vtworker/command.go index ee1cf5a2eb..4a103b1bc0 100644 --- a/go/cmd/vtworker/command.go +++ b/go/cmd/vtworker/command.go @@ -116,6 +116,7 @@ func runCommand(args []string) error { err := lastRunError currentWorkerMutex.Unlock() if err != nil { + log.Errorf("Ended with an error: %v", err) os.Exit(1) } os.Exit(0) diff --git a/go/cmd/vtworker/status.go b/go/cmd/vtworker/status.go index accb92d3bf..f7b10a24a9 100644 --- a/go/cmd/vtworker/status.go +++ b/go/cmd/vtworker/status.go @@ -5,6 +5,7 @@ package main import ( + "fmt" "html/template" "net/http" "strings" @@ -69,14 +70,19 @@ func initStatusHandling() { wrk := currentWorker logger := currentMemoryLogger ctx := currentContext + err := lastRunError currentWorkerMutex.Unlock() data := make(map[string]interface{}) if wrk != nil { - data["Status"] = wrk.StatusAsHTML() + status := template.HTML("Current worker:
\n") + wrk.StatusAsHTML() if ctx == nil { data["Done"] = true + if err != nil { + status += template.HTML(fmt.Sprintf("
\nEnded with an error: %v
\n", err)) + } } + data["Status"] = status if logger != nil { data["Logs"] = template.HTML(strings.Replace(logger.String(), "\n", "
\n", -1)) } else { diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index 4823d11c98..e097686609 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -25,21 +25,11 @@ import ( "github.com/youtube/vitess/go/vt/wrangler" ) -const ( - // all the states for the worker - stateSCNotSarted = "not started" - stateSCDone = "done" - stateSCError = "error" - - stateSCInit = "initializing" - stateSCFindTargets = "finding target instances" - stateSCCopy = "copying the data" - stateSCCleanUp = "cleaning up" -) - // SplitCloneWorker will clone the data within a keyspace from a // source set of shards to a destination set of shards. type SplitCloneWorker struct { + StatusWorker + wr *wrangler.Wrangler cell string keyspace string @@ -53,22 +43,17 @@ type SplitCloneWorker struct { cleaner *wrangler.Cleaner // all subsequent fields are protected by the mutex - mu sync.Mutex - state string - // populated if state == stateSCError - err error - - // populated during stateSCInit, read-only after that + // populated during WorkerStateInit, read-only after that keyspaceInfo *topo.KeyspaceInfo sourceShards []*topo.ShardInfo destinationShards []*topo.ShardInfo - // populated during stateSCFindTargets, read-only after that + // populated during WorkerStateFindTargets, read-only after that sourceAliases []topo.TabletAlias sourceTablets []*topo.TabletInfo - // populated during stateSCCopy + // populated during WorkerStateCopy tableStatus []*tableStatus startTime time.Time // aliases of tablets that need to have their schema reloaded. @@ -93,6 +78,7 @@ func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, ex return nil, err } return &SplitCloneWorker{ + StatusWorker: NewStatusWorker(), wr: wr, cell: cell, keyspace: keyspace, @@ -105,7 +91,6 @@ func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, ex destinationWriterCount: destinationWriterCount, cleaner: &wrangler.Cleaner{}, - state: stateSCNotSarted, ev: &events.SplitClone{ Cell: cell, Keyspace: keyspace, @@ -116,22 +101,13 @@ func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, ex }, nil } -func (scw *SplitCloneWorker) setState(state string) { - scw.mu.Lock() - scw.state = state - statsState.Set(state) - scw.mu.Unlock() - - event.DispatchUpdate(scw.ev, state) +func (scw *SplitCloneWorker) setState(state StatusWorkerState) { + scw.SetState(state) + event.DispatchUpdate(scw.ev, state.String()) } -func (scw *SplitCloneWorker) recordError(err error) { - scw.mu.Lock() - scw.state = stateSCError - statsState.Set(stateSCError) - scw.err = err - scw.mu.Unlock() - +func (scw *SplitCloneWorker) setErrorState(err error) { + scw.SetState(WorkerStateError) event.DispatchUpdate(scw.ev, "error: "+err.Error()) } @@ -145,20 +121,18 @@ func (scw *SplitCloneWorker) formatSources() string { // StatusAsHTML implements the Worker interface func (scw *SplitCloneWorker) StatusAsHTML() template.HTML { - scw.mu.Lock() - defer scw.mu.Unlock() + scw.Mu.Lock() + defer scw.Mu.Unlock() result := "Working on: " + scw.keyspace + "/" + scw.shard + "
\n" - result += "State: " + scw.state + "
\n" - switch scw.state { - case stateSCError: - result += "Error: " + scw.err.Error() + "
\n" - case stateSCCopy: + result += "State: " + scw.State.String() + "
\n" + switch scw.State { + case WorkerStateCopy: result += "Running:
\n" result += "Copying from: " + scw.formatSources() + "
\n" statuses, eta := formatTableStatuses(scw.tableStatus, scw.startTime) result += "ETA: " + eta.String() + "
\n" result += strings.Join(statuses, "
\n") - case stateSCDone: + case WorkerStateDone: result += "Success:
\n" statuses, _ := formatTableStatuses(scw.tableStatus, scw.startTime) result += strings.Join(statuses, "
\n") @@ -169,20 +143,18 @@ func (scw *SplitCloneWorker) StatusAsHTML() template.HTML { // StatusAsText implements the Worker interface func (scw *SplitCloneWorker) StatusAsText() string { - scw.mu.Lock() - defer scw.mu.Unlock() + scw.Mu.Lock() + defer scw.Mu.Unlock() result := "Working on: " + scw.keyspace + "/" + scw.shard + "\n" - result += "State: " + scw.state + "\n" - switch scw.state { - case stateSCError: - result += "Error: " + scw.err.Error() + "\n" - case stateSCCopy: + result += "State: " + scw.State.String() + "\n" + switch scw.State { + case WorkerStateCopy: result += "Running:\n" result += "Copying from: " + scw.formatSources() + "\n" statuses, eta := formatTableStatuses(scw.tableStatus, scw.startTime) result += "ETA: " + eta.String() + "\n" result += strings.Join(statuses, "\n") - case stateSCDone: + case WorkerStateDone: result += "Success:\n" statuses, _ := formatTableStatuses(scw.tableStatus, scw.startTime) result += strings.Join(statuses, "\n") @@ -190,23 +162,12 @@ func (scw *SplitCloneWorker) StatusAsText() string { return result } -func (scw *SplitCloneWorker) checkInterrupted(ctx context.Context) error { - select { - case <-ctx.Done(): - err := ctx.Err() - scw.recordError(err) - return err - default: - } - return nil -} - // Run implements the Worker interface func (scw *SplitCloneWorker) Run(ctx context.Context) error { resetVars() err := scw.run(ctx) - scw.setState(stateSCCleanUp) + scw.setState(WorkerStateCleanUp) cerr := scw.cleaner.CleanUp(scw.wr) if cerr != nil { if err != nil { @@ -216,10 +177,10 @@ func (scw *SplitCloneWorker) Run(ctx context.Context) error { } } if err != nil { - scw.recordError(err) + scw.setErrorState(err) return err } - scw.setState(stateSCDone) + scw.setState(WorkerStateDone) return nil } @@ -228,7 +189,7 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error { if err := scw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if err := scw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -236,7 +197,7 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error { if err := scw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if err := scw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -251,7 +212,7 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error { // init phase: // - read the destination keyspace, make sure it has 'servedFrom' values func (scw *SplitCloneWorker) init() error { - scw.setState(stateSCInit) + scw.setState(WorkerStateInit) var err error // read the keyspace and validate it @@ -306,7 +267,7 @@ func (scw *SplitCloneWorker) init() error { // - mark it as 'checker' pointing back to us // - get the aliases of all the targets func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { - scw.setState(stateSCFindTargets) + scw.setState(WorkerStateFindTargets) var err error // find an appropriate endpoint in the source shards @@ -409,7 +370,7 @@ func (scw *SplitCloneWorker) findReloadTargets(ctx context.Context) error { // Assumes that the schema has already been created on each destination tablet // (probably from vtctl's CopySchemaShard) func (scw *SplitCloneWorker) copy(ctx context.Context) error { - scw.setState(stateSCCopy) + scw.setState(WorkerStateCopy) // get source schema from the first shard // TODO(alainjobart): for now, we assume the schema is compatible @@ -426,7 +387,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) 
error { return fmt.Errorf("no tables matching the table filter in tablet %v", scw.sourceAliases[0]) } scw.wr.Logger().Infof("Source tablet 0 has %v tables to copy", len(sourceSchemaDefinition.TableDefinitions)) - scw.mu.Lock() + scw.Mu.Lock() scw.tableStatus = make([]*tableStatus, len(sourceSchemaDefinition.TableDefinitions)) for i, td := range sourceSchemaDefinition.TableDefinitions { scw.tableStatus[i] = &tableStatus{ @@ -435,7 +396,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { } } scw.startTime = time.Now() - scw.mu.Unlock() + scw.Mu.Unlock() // Find the column index for the sharding columns in all the databases, and count rows columnIndexes := make([]int, len(sourceSchemaDefinition.TableDefinitions)) diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 7997cb1b27..d1e3030358 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -331,10 +331,10 @@ func testSplitClone(t *testing.T, strategy string) { // Only wait 1 ms between retries, so that the test passes faster *executeFetchRetryTime = (1 * time.Millisecond) - wrk.Run(ctx) + err = wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) - if wrk.err != nil || wrk.state != stateSCDone { + if err != nil || wrk.State != WorkerStateDone { t.Errorf("Worker run failed") } diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go index 8d91a409d2..88318efe7b 100644 --- a/go/vt/worker/split_diff.go +++ b/go/vt/worker/split_diff.go @@ -21,22 +21,11 @@ import ( "github.com/youtube/vitess/go/vt/wrangler" ) -const ( - // all the states for the worker - stateSDNotSarted = "not started" - stateSDDone = "done" - stateSDError = "error" - - stateSDInit = "initializing" - stateSDFindTargets = "finding target instances" - stateSDSynchronizeReplication = "synchronizing replication" - stateSDDiff = "running the diff" - stateSDCleanUp = "cleaning up" -) - // SplitDiffWorker executes a diff between a destination shard and its // source shards in a shard split case. type SplitDiffWorker struct { + StatusWorker + wr *wrangler.Wrangler cell string keyspace string @@ -45,21 +34,16 @@ type SplitDiffWorker struct { cleaner *wrangler.Cleaner // all subsequent fields are protected by the mutex - mu sync.Mutex - state string - // populated if state == stateSDError - err error - - // populated during stateSDInit, read-only after that + // populated during WorkerStateInit, read-only after that keyspaceInfo *topo.KeyspaceInfo shardInfo *topo.ShardInfo - // populated during stateSDFindTargets, read-only after that + // populated during WorkerStateFindTargets, read-only after that sourceAliases []topo.TabletAlias destinationAlias topo.TabletAlias - // populated during stateSDDiff + // populated during WorkerStateDiff sourceSchemaDefinitions []*myproto.SchemaDefinition destinationSchemaDefinition *myproto.SchemaDefinition } @@ -67,44 +51,26 @@ type SplitDiffWorker struct { // NewSplitDiffWorker returns a new SplitDiffWorker object. 
func NewSplitDiffWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string) Worker { return &SplitDiffWorker{ + StatusWorker: NewStatusWorker(), wr: wr, cell: cell, keyspace: keyspace, shard: shard, excludeTables: excludeTables, cleaner: &wrangler.Cleaner{}, - - state: stateSDNotSarted, } } -func (sdw *SplitDiffWorker) setState(state string) { - sdw.mu.Lock() - sdw.state = state - statsState.Set(state) - sdw.mu.Unlock() -} - -func (sdw *SplitDiffWorker) recordError(err error) { - sdw.mu.Lock() - sdw.state = stateSDError - statsState.Set(stateSDError) - sdw.err = err - sdw.mu.Unlock() -} - // StatusAsHTML is part of the Worker interface func (sdw *SplitDiffWorker) StatusAsHTML() template.HTML { - sdw.mu.Lock() - defer sdw.mu.Unlock() + sdw.Mu.Lock() + defer sdw.Mu.Unlock() result := "Working on: " + sdw.keyspace + "/" + sdw.shard + "
\n" - result += "State: " + sdw.state + "
\n" - switch sdw.state { - case stateSDError: - result += "Error: " + sdw.err.Error() + "
\n" - case stateSDDiff: + result += "State: " + sdw.State.String() + "
\n" + switch sdw.State { + case WorkerStateDiff: result += "Running...
\n" - case stateSDDone: + case WorkerStateDone: result += "Success.
\n" } @@ -113,38 +79,25 @@ func (sdw *SplitDiffWorker) StatusAsHTML() template.HTML { // StatusAsText is part of the Worker interface func (sdw *SplitDiffWorker) StatusAsText() string { - sdw.mu.Lock() - defer sdw.mu.Unlock() + sdw.Mu.Lock() + defer sdw.Mu.Unlock() result := "Working on: " + sdw.keyspace + "/" + sdw.shard + "\n" - result += "State: " + sdw.state + "\n" - switch sdw.state { - case stateSDError: - result += "Error: " + sdw.err.Error() + "\n" - case stateSDDiff: + result += "State: " + sdw.State.String() + "\n" + switch sdw.State { + case WorkerStateDiff: result += "Running...\n" - case stateSDDone: + case WorkerStateDone: result += "Success.\n" } return result } -func (sdw *SplitDiffWorker) checkInterrupted(ctx context.Context) error { - select { - case <-ctx.Done(): - err := ctx.Err() - sdw.recordError(err) - return err - default: - } - return nil -} - // Run is mostly a wrapper to run the cleanup at the end. func (sdw *SplitDiffWorker) Run(ctx context.Context) error { resetVars() err := sdw.run(ctx) - sdw.setState(stateSDCleanUp) + sdw.SetState(WorkerStateCleanUp) cerr := sdw.cleaner.CleanUp(sdw.wr) if cerr != nil { if err != nil { @@ -154,10 +107,10 @@ func (sdw *SplitDiffWorker) Run(ctx context.Context) error { } } if err != nil { - sdw.recordError(err) + sdw.SetState(WorkerStateError) return err } - sdw.setState(stateSDDone) + sdw.SetState(WorkerStateDone) return nil } @@ -166,7 +119,7 @@ func (sdw *SplitDiffWorker) run(ctx context.Context) error { if err := sdw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if err := sdw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -174,7 +127,7 @@ func (sdw *SplitDiffWorker) run(ctx context.Context) error { if err := sdw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if err := sdw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -182,7 +135,7 @@ func (sdw *SplitDiffWorker) run(ctx context.Context) error { if err := sdw.synchronizeReplication(ctx); err != nil { return fmt.Errorf("synchronizeReplication() failed: %v", err) } - if err := sdw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -197,7 +150,7 @@ func (sdw *SplitDiffWorker) run(ctx context.Context) error { // init phase: // - read the shard info, make sure it has sources func (sdw *SplitDiffWorker) init() error { - sdw.setState(stateSDInit) + sdw.SetState(WorkerStateInit) var err error sdw.keyspaceInfo, err = sdw.wr.TopoServer().GetKeyspace(sdw.keyspace) @@ -224,7 +177,7 @@ func (sdw *SplitDiffWorker) init() error { // - find one rdonly in destination shard // - mark them all as 'checker' pointing back to us func (sdw *SplitDiffWorker) findTargets(ctx context.Context) error { - sdw.setState(stateSDFindTargets) + sdw.SetState(WorkerStateFindTargets) // find an appropriate endpoint in destination shard var err error @@ -264,7 +217,7 @@ func (sdw *SplitDiffWorker) findTargets(ctx context.Context) error { // At this point, all checker instances are stopped at the same point. 
func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { - sdw.setState(stateSDSynchronizeReplication) + sdw.SetState(WorkerStateSyncReplication) masterInfo, err := sdw.wr.TopoServer().GetTablet(sdw.shardInfo.MasterAlias) if err != nil { @@ -371,7 +324,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // - for each table in destination, run a diff pipeline. func (sdw *SplitDiffWorker) diff(ctx context.Context) error { - sdw.setState(stateSDDiff) + sdw.SetState(WorkerStateDiff) sdw.wr.Logger().Infof("Gathering schema information...") sdw.sourceSchemaDefinitions = make([]*myproto.SchemaDefinition, len(sdw.sourceAliases)) diff --git a/go/vt/worker/split_diff_test.go b/go/vt/worker/split_diff_test.go index 667400780e..5ce2fe2057 100644 --- a/go/vt/worker/split_diff_test.go +++ b/go/vt/worker/split_diff_test.go @@ -225,10 +225,10 @@ func TestSplitDiff(t *testing.T) { sourceRdonly1.RPCServer.Register(gorpcqueryservice.New(&sourceSqlQuery{t: t, excludedTable: excludedTable})) sourceRdonly2.RPCServer.Register(gorpcqueryservice.New(&sourceSqlQuery{t: t, excludedTable: excludedTable})) - wrk.Run(ctx) + err := wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) - if wrk.err != nil || wrk.state != stateSCDone { + if err != nil || wrk.State != WorkerStateDone { t.Errorf("Worker run failed") } } diff --git a/go/vt/worker/sqldiffer.go b/go/vt/worker/sqldiffer.go index ddc35f19ad..b83ec30503 100644 --- a/go/vt/worker/sqldiffer.go +++ b/go/vt/worker/sqldiffer.go @@ -7,7 +7,6 @@ package worker import ( "fmt" "html/template" - "sync" "time" "golang.org/x/net/context" @@ -18,24 +17,7 @@ import ( // This file contains the code to run a sanity check in a system with // a lookup database: any row in the actual database needs to have a -// conuterpart in the lookup database. - -type sqlDiffWorkerState string - -const ( - // all the states for the worker - sqlDiffNotSarted sqlDiffWorkerState = "not started" - sqlDiffDone sqlDiffWorkerState = "done" - sqlDiffError sqlDiffWorkerState = "error" - sqlDiffFindTargets sqlDiffWorkerState = "finding target instances" - sqlDiffSynchronizeReplication sqlDiffWorkerState = "synchronizing replication" - sqlDiffRunning sqlDiffWorkerState = "running the diff" - sqlDiffCleanUp sqlDiffWorkerState = "cleaning up" -) - -func (state sqlDiffWorkerState) String() string { - return string(state) -} +// counterpart in the lookup database. // SourceSpec specifies a SQL query in some keyspace and shard. type SourceSpec struct { @@ -50,6 +32,8 @@ type SourceSpec struct { // database: any row in the subset spec needs to have a conuterpart in // the superset spec. type SQLDiffWorker struct { + StatusWorker + wr *wrangler.Wrangler cell string shard string @@ -59,57 +43,31 @@ type SQLDiffWorker struct { // SQLDifferFindTargets, read-only after that. superset SourceSpec subset SourceSpec - - // all subsequent fields are protected by the mutex - mu sync.Mutex - state sqlDiffWorkerState - - // populated if state == SQLDiffError - err error } // NewSQLDiffWorker returns a new SQLDiffWorker object. 
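The test hunks in this patch (split_clone_test.go above and the sibling *_test.go files below) make the same switch from the removed private fields to the exported surface: Run now reports its error directly, and the final state is read from the embedded StatusWorker. A condensed sketch of the new assertion, assuming wrk is one of the concrete worker types and ctx a test context:

    err := wrk.Run(ctx)
    t.Logf("Got status: %v", wrk.StatusAsText())
    if err != nil || wrk.State != WorkerStateDone {
        t.Errorf("Worker run failed")
    }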
func NewSQLDiffWorker(wr *wrangler.Wrangler, cell string, superset, subset SourceSpec) Worker { return &SQLDiffWorker{ - wr: wr, - cell: cell, - superset: superset, - subset: subset, - cleaner: new(wrangler.Cleaner), - - state: sqlDiffNotSarted, + StatusWorker: NewStatusWorker(), + wr: wr, + cell: cell, + superset: superset, + subset: subset, + cleaner: new(wrangler.Cleaner), } } -func (worker *SQLDiffWorker) setState(state sqlDiffWorkerState) { - worker.mu.Lock() - worker.state = state - statsState.Set(string(state)) - worker.mu.Unlock() -} - -func (worker *SQLDiffWorker) recordError(err error) { - worker.mu.Lock() - defer worker.mu.Unlock() - - worker.state = sqlDiffError - statsState.Set(string(sqlDiffError)) - worker.err = err -} - // StatusAsHTML is part of the Worker interface func (worker *SQLDiffWorker) StatusAsHTML() template.HTML { - worker.mu.Lock() - defer worker.mu.Unlock() + worker.Mu.Lock() + defer worker.Mu.Unlock() result := "Working on: " + worker.subset.Keyspace + "/" + worker.subset.Shard + "
\n" - result += "State: " + worker.state.String() + "
\n" - switch worker.state { - case sqlDiffError: - result += "Error: " + worker.err.Error() + "
\n" - case sqlDiffRunning: + result += "State: " + worker.State.String() + "
\n" + switch worker.State { + case WorkerStateDiff: result += "Running...
\n" - case sqlDiffDone: + case WorkerStateDone: result += "Success.
\n" } @@ -118,39 +76,26 @@ func (worker *SQLDiffWorker) StatusAsHTML() template.HTML { // StatusAsText is part of the Worker interface func (worker *SQLDiffWorker) StatusAsText() string { - worker.mu.Lock() - defer worker.mu.Unlock() + worker.Mu.Lock() + defer worker.Mu.Unlock() result := "Working on: " + worker.subset.Keyspace + "/" + worker.subset.Shard + "\n" - result += "State: " + worker.state.String() + "\n" - switch worker.state { - case sqlDiffError: - result += "Error: " + worker.err.Error() + "\n" - case sqlDiffRunning: + result += "State: " + worker.State.String() + "\n" + switch worker.State { + case WorkerStateDiff: result += "Running...\n" - case sqlDiffDone: + case WorkerStateDone: result += "Success.\n" } return result } -func (worker *SQLDiffWorker) checkInterrupted(ctx context.Context) error { - select { - case <-ctx.Done(): - err := ctx.Err() - worker.recordError(err) - return err - default: - } - return nil -} - // Run is mostly a wrapper to run the cleanup at the end. func (worker *SQLDiffWorker) Run(ctx context.Context) error { resetVars() err := worker.run(ctx) - worker.setState(sqlDiffCleanUp) + worker.SetState(WorkerStateCleanUp) cerr := worker.cleaner.CleanUp(worker.wr) if cerr != nil { if err != nil { @@ -160,10 +105,10 @@ func (worker *SQLDiffWorker) Run(ctx context.Context) error { } } if err != nil { - worker.recordError(err) + worker.SetState(WorkerStateError) return err } - worker.setState(sqlDiffDone) + worker.SetState(WorkerStateDone) return nil } @@ -172,7 +117,7 @@ func (worker *SQLDiffWorker) run(ctx context.Context) error { if err := worker.findTargets(ctx); err != nil { return err } - if err := worker.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -180,7 +125,7 @@ func (worker *SQLDiffWorker) run(ctx context.Context) error { if err := worker.synchronizeReplication(ctx); err != nil { return err } - if err := worker.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -197,7 +142,7 @@ func (worker *SQLDiffWorker) run(ctx context.Context) error { // - find one rdonly in subset // - mark them all as 'checker' pointing back to us func (worker *SQLDiffWorker) findTargets(ctx context.Context) error { - worker.setState(sqlDiffFindTargets) + worker.SetState(WorkerStateFindTargets) // find an appropriate endpoint in superset var err error @@ -221,7 +166,7 @@ func (worker *SQLDiffWorker) findTargets(ctx context.Context) error { // 3 - ask the superset slave to stop replication // Note this is not 100% correct, but good enough for now func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { - worker.setState(sqlDiffSynchronizeReplication) + worker.SetState(WorkerStateSyncReplication) // stop replication on subset slave worker.wr.Logger().Infof("Stopping replication on subset slave %v", worker.subset.alias) @@ -235,7 +180,7 @@ func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { if err != nil { return fmt.Errorf("Cannot stop slave %v: %v", worker.subset.alias, err) } - if err := worker.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -250,7 +195,7 @@ func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { // sleep for a few seconds time.Sleep(5 * time.Second) - if err := worker.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -285,7 +230,7 @@ func (worker *SQLDiffWorker) 
synchronizeReplication(ctx context.Context) error { // - for each table in destination, run a diff pipeline. func (worker *SQLDiffWorker) diff(ctx context.Context) error { - worker.setState(sqlDiffRunning) + worker.SetState(WorkerStateDiff) // run the diff worker.wr.Logger().Infof("Running the diffs...") diff --git a/go/vt/worker/sqldiffer_test.go b/go/vt/worker/sqldiffer_test.go index 1552345745..6ee8728f76 100644 --- a/go/vt/worker/sqldiffer_test.go +++ b/go/vt/worker/sqldiffer_test.go @@ -134,10 +134,10 @@ func TestSqlDiffer(t *testing.T) { rdonly.RPCServer.Register(gorpcqueryservice.New(&sqlDifferSqlQuery{t: t})) } - wrk.Run(ctx) + err := wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) - if wrk.err != nil || wrk.state != stateSCDone { + if err != nil || wrk.State != WorkerStateDone { t.Errorf("Worker run failed") } } diff --git a/go/vt/worker/status_worker.go b/go/vt/worker/status_worker.go new file mode 100644 index 0000000000..af36886cbd --- /dev/null +++ b/go/vt/worker/status_worker.go @@ -0,0 +1,73 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package worker + +import ( + "html/template" + "sync" +) + +// StatusWorkerState is the type for a StatusWorker's status +type StatusWorkerState string + +// All possible status strings (if your implementation needs more, +// just add them) + +const ( + WorkerStateNotSarted StatusWorkerState = "not started" + WorkerStateDone StatusWorkerState = "done" + WorkerStateError StatusWorkerState = "error" + WorkerStateInit StatusWorkerState = "initializing" + WorkerStateFindTargets StatusWorkerState = "finding target instances" + WorkerStateSyncReplication StatusWorkerState = "synchronizing replication" + WorkerStateCopy StatusWorkerState = "copying the data" + WorkerStateDiff StatusWorkerState = "running the diff" + WorkerStateCleanUp StatusWorkerState = "cleaning up" +) + +func (state StatusWorkerState) String() string { + return string(state) +} + +// StatusWorker is the base type for a worker which keeps a status. +// The status is protected by a mutex. Any other internal variable +// can also be protected by that mutex. +// StatusWorker also provides default implementations for StatusAsHTML +// and StatusAsText to make it easier on workers if they don't need to +// export more. +type StatusWorker struct { + Mu *sync.Mutex + State StatusWorkerState +} + +// NewStatusWorker returns a StatusWorker in state WorkerStateNotSarted +func NewStatusWorker() StatusWorker { + return StatusWorker{ + Mu: &sync.Mutex{}, + State: WorkerStateNotSarted, + } +} + +// SetState is a convenience function for workers +func (worker *StatusWorker) SetState(state StatusWorkerState) { + worker.Mu.Lock() + worker.State = state + statsState.Set(string(state)) + worker.Mu.Unlock() +} + +// StatusAsHTML is part of the Worker interface +func (worker *StatusWorker) StatusAsHTML() template.HTML { + worker.Mu.Lock() + defer worker.Mu.Unlock() + return template.HTML("State: " + worker.State.String() + "
\n") +} + +// StatusAsText is part of the Worker interface +func (worker *StatusWorker) StatusAsText() string { + worker.Mu.Lock() + defer worker.Mu.Unlock() + return "State: " + worker.State.String() + "\n" +} diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index d9238909cf..471e371bbb 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -24,21 +24,11 @@ import ( "github.com/youtube/vitess/go/vt/wrangler" ) -const ( - // all the states for the worker - stateVSCNotSarted = "not started" - stateVSCDone = "done" - stateVSCError = "error" - - stateVSCInit = "initializing" - stateVSCFindTargets = "finding target instances" - stateVSCCopy = "copying the data" - stateVSCCleanUp = "cleaning up" -) - // VerticalSplitCloneWorker will clone the data from a source keyspace/shard // to a destination keyspace/shard. type VerticalSplitCloneWorker struct { + StatusWorker + wr *wrangler.Wrangler cell string destinationKeyspace string @@ -51,21 +41,16 @@ type VerticalSplitCloneWorker struct { destinationWriterCount int cleaner *wrangler.Cleaner - // all subsequent fields are protected by the mutex - mu sync.Mutex - state string + // all subsequent fields are protected by the StatusWorker mutex - // populated if state == stateVSCError - err error - - // populated during stateVSCInit, read-only after that + // populated during WorkerStateInit, read-only after that sourceKeyspace string - // populated during stateVSCFindTargets, read-only after that + // populated during WorkerStateFindTargets, read-only after that sourceAlias topo.TabletAlias sourceTablet *topo.TabletInfo - // populated during stateVSCCopy + // populated during WorkerStateCopy tableStatus []*tableStatus startTime time.Time // aliases of tablets that need to have their schema reloaded. @@ -88,6 +73,7 @@ func NewVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, destinationKeyspac return nil, err } return &VerticalSplitCloneWorker{ + StatusWorker: NewStatusWorker(), wr: wr, cell: cell, destinationKeyspace: destinationKeyspace, @@ -100,7 +86,6 @@ func NewVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, destinationKeyspac destinationWriterCount: destinationWriterCount, cleaner: &wrangler.Cleaner{}, - state: stateVSCNotSarted, ev: &events.VerticalSplitClone{ Cell: cell, Keyspace: destinationKeyspace, @@ -111,41 +96,30 @@ func NewVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, destinationKeyspac }, nil } -func (vscw *VerticalSplitCloneWorker) setState(state string) { - vscw.mu.Lock() - vscw.state = state - statsState.Set(state) - vscw.mu.Unlock() - - event.DispatchUpdate(vscw.ev, state) +func (vscw *VerticalSplitCloneWorker) setState(state StatusWorkerState) { + vscw.SetState(state) + event.DispatchUpdate(vscw.ev, state.String()) } -func (vscw *VerticalSplitCloneWorker) recordError(err error) { - vscw.mu.Lock() - vscw.state = stateVSCError - statsState.Set(stateVSCError) - vscw.err = err - vscw.mu.Unlock() - +func (vscw *VerticalSplitCloneWorker) setErrorState(err error) { + vscw.SetState(WorkerStateError) event.DispatchUpdate(vscw.ev, "error: "+err.Error()) } // StatusAsHTML implements the Worker interface func (vscw *VerticalSplitCloneWorker) StatusAsHTML() template.HTML { - vscw.mu.Lock() - defer vscw.mu.Unlock() + vscw.Mu.Lock() + defer vscw.Mu.Unlock() result := "Working on: " + vscw.destinationKeyspace + "/" + vscw.destinationShard + "
\n" - result += "State: " + vscw.state + "
\n" - switch vscw.state { - case stateVSCError: - result += "Error: " + vscw.err.Error() + "
\n" - case stateVSCCopy: + result += "State: " + vscw.State.String() + "
\n" + switch vscw.State { + case WorkerStateCopy: result += "Running:
\n" result += "Copying from: " + vscw.sourceAlias.String() + "
\n" statuses, eta := formatTableStatuses(vscw.tableStatus, vscw.startTime) result += "ETA: " + eta.String() + "
\n" result += strings.Join(statuses, "
\n") - case stateVSCDone: + case WorkerStateDone: result += "Success:
\n" statuses, _ := formatTableStatuses(vscw.tableStatus, vscw.startTime) result += strings.Join(statuses, "
\n") @@ -156,20 +130,18 @@ func (vscw *VerticalSplitCloneWorker) StatusAsHTML() template.HTML { // StatusAsText implements the Worker interface func (vscw *VerticalSplitCloneWorker) StatusAsText() string { - vscw.mu.Lock() - defer vscw.mu.Unlock() + vscw.Mu.Lock() + defer vscw.Mu.Unlock() result := "Working on: " + vscw.destinationKeyspace + "/" + vscw.destinationShard + "\n" - result += "State: " + vscw.state + "\n" - switch vscw.state { - case stateVSCError: - result += "Error: " + vscw.err.Error() + "\n" - case stateVSCCopy: + result += "State: " + vscw.State.String() + "\n" + switch vscw.State { + case WorkerStateCopy: result += "Running:\n" result += "Copying from: " + vscw.sourceAlias.String() + "\n" statuses, eta := formatTableStatuses(vscw.tableStatus, vscw.startTime) result += "ETA: " + eta.String() + "\n" result += strings.Join(statuses, "\n") - case stateVSCDone: + case WorkerStateDone: result += "Success:\n" statuses, _ := formatTableStatuses(vscw.tableStatus, vscw.startTime) result += strings.Join(statuses, "\n") @@ -177,23 +149,12 @@ func (vscw *VerticalSplitCloneWorker) StatusAsText() string { return result } -func (vscw *VerticalSplitCloneWorker) checkInterrupted(ctx context.Context) error { - select { - case <-ctx.Done(): - err := ctx.Err() - vscw.recordError(err) - return err - default: - } - return nil -} - // Run implements the Worker interface func (vscw *VerticalSplitCloneWorker) Run(ctx context.Context) error { resetVars() err := vscw.run(ctx) - vscw.setState(stateVSCCleanUp) + vscw.setState(WorkerStateCleanUp) cerr := vscw.cleaner.CleanUp(vscw.wr) if cerr != nil { if err != nil { @@ -203,10 +164,10 @@ func (vscw *VerticalSplitCloneWorker) Run(ctx context.Context) error { } } if err != nil { - vscw.recordError(err) + vscw.setErrorState(err) return err } - vscw.setState(stateVSCDone) + vscw.setState(WorkerStateDone) return nil } @@ -215,7 +176,7 @@ func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { if err := vscw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if err := vscw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -223,7 +184,7 @@ func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { if err := vscw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if err := vscw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -231,7 +192,7 @@ func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { if err := vscw.copy(ctx); err != nil { return fmt.Errorf("copy() failed: %v", err) } - if err := vscw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -241,7 +202,7 @@ func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { // init phase: // - read the destination keyspace, make sure it has 'servedFrom' values func (vscw *VerticalSplitCloneWorker) init() error { - vscw.setState(stateVSCInit) + vscw.setState(WorkerStateInit) // read the keyspace and validate it destinationKeyspaceInfo, err := vscw.wr.TopoServer().GetKeyspace(vscw.destinationKeyspace) @@ -278,7 +239,7 @@ func (vscw *VerticalSplitCloneWorker) init() error { // - mark it as 'checker' pointing back to us // - get the aliases of all the targets func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error { - vscw.setState(stateVSCFindTargets) + vscw.setState(WorkerStateFindTargets) // find an appropriate endpoint in the source shard 
var err error @@ -366,7 +327,7 @@ func (vscw *VerticalSplitCloneWorker) findReloadTargets(ctx context.Context) err // Assumes that the schema has already been created on each destination tablet // (probably from vtctl's CopySchemaShard) func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { - vscw.setState(stateVSCCopy) + vscw.setState(WorkerStateCopy) // get source schema shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) @@ -379,7 +340,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { return fmt.Errorf("no tables matching the table filter") } vscw.wr.Logger().Infof("Source tablet has %v tables to copy", len(sourceSchemaDefinition.TableDefinitions)) - vscw.mu.Lock() + vscw.Mu.Lock() vscw.tableStatus = make([]*tableStatus, len(sourceSchemaDefinition.TableDefinitions)) for i, td := range sourceSchemaDefinition.TableDefinitions { vscw.tableStatus[i] = &tableStatus{ @@ -388,7 +349,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { } } vscw.startTime = time.Now() - vscw.mu.Unlock() + vscw.Mu.Unlock() // Count rows for i, td := range sourceSchemaDefinition.TableDefinitions { diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index 569711b6b5..46fae441fc 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -316,10 +316,10 @@ func testVerticalSplitClone(t *testing.T, strategy string) { // Only wait 1 ms between retries, so that the test passes faster *executeFetchRetryTime = (1 * time.Millisecond) - wrk.Run(ctx) + err = wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) - if wrk.err != nil || wrk.state != stateSCDone { + if err != nil || wrk.State != WorkerStateDone { t.Errorf("Worker run failed") } diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index 2d4fb2b8c5..76b842cef5 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -21,22 +21,11 @@ import ( "github.com/youtube/vitess/go/vt/wrangler" ) -const ( - // all the states for the worker - stateVSDNotSarted = "not started" - stateVSDDone = "done" - stateVSDError = "error" - - stateVSDInit = "initializing" - stateVSDFindTargets = "finding target instances" - stateVSDSynchronizeReplication = "synchronizing replication" - stateVSDDiff = "running the diff" - stateVSDCleanUp = "cleaning up" -) - // VerticalSplitDiffWorker executes a diff between a destination shard and its // source shards in a shard split case. 
type VerticalSplitDiffWorker struct { + StatusWorker + wr *wrangler.Wrangler cell string keyspace string @@ -45,21 +34,16 @@ type VerticalSplitDiffWorker struct { cleaner *wrangler.Cleaner // all subsequent fields are protected by the mutex - mu sync.Mutex - state string - // populated if state == stateVSDError - err error - - // populated during stateVSDInit, read-only after that + // populated during WorkerStateInit, read-only after that keyspaceInfo *topo.KeyspaceInfo shardInfo *topo.ShardInfo - // populated during stateVSDFindTargets, read-only after that + // populated during WorkerStateFindTargets, read-only after that sourceAlias topo.TabletAlias destinationAlias topo.TabletAlias - // populated during stateVSDDiff + // populated during WorkerStateDiff sourceSchemaDefinition *myproto.SchemaDefinition destinationSchemaDefinition *myproto.SchemaDefinition } @@ -67,44 +51,26 @@ type VerticalSplitDiffWorker struct { // NewVerticalSplitDiffWorker returns a new VerticalSplitDiffWorker object. func NewVerticalSplitDiffWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string) Worker { return &VerticalSplitDiffWorker{ + StatusWorker: NewStatusWorker(), wr: wr, cell: cell, keyspace: keyspace, shard: shard, excludeTables: excludeTables, cleaner: &wrangler.Cleaner{}, - - state: stateVSDNotSarted, } } -func (vsdw *VerticalSplitDiffWorker) setState(state string) { - vsdw.mu.Lock() - vsdw.state = state - statsState.Set(state) - vsdw.mu.Unlock() -} - -func (vsdw *VerticalSplitDiffWorker) recordError(err error) { - vsdw.mu.Lock() - vsdw.state = stateVSDError - statsState.Set(stateVSDError) - vsdw.err = err - vsdw.mu.Unlock() -} - // StatusAsHTML is part of the Worker interface. func (vsdw *VerticalSplitDiffWorker) StatusAsHTML() template.HTML { - vsdw.mu.Lock() - defer vsdw.mu.Unlock() + vsdw.Mu.Lock() + defer vsdw.Mu.Unlock() result := "Working on: " + vsdw.keyspace + "/" + vsdw.shard + "
\n" - result += "State: " + vsdw.state + "
\n" - switch vsdw.state { - case stateVSDError: - result += "Error: " + vsdw.err.Error() + "
\n" - case stateVSDDiff: + result += "State: " + vsdw.State.String() + "
\n" + switch vsdw.State { + case WorkerStateDiff: result += "Running:
\n" - case stateVSDDone: + case WorkerStateDone: result += "Success:
\n" } @@ -113,38 +79,25 @@ func (vsdw *VerticalSplitDiffWorker) StatusAsHTML() template.HTML { // StatusAsText is part of the Worker interface. func (vsdw *VerticalSplitDiffWorker) StatusAsText() string { - vsdw.mu.Lock() - defer vsdw.mu.Unlock() + vsdw.Mu.Lock() + defer vsdw.Mu.Unlock() result := "Working on: " + vsdw.keyspace + "/" + vsdw.shard + "\n" - result += "State: " + vsdw.state + "\n" - switch vsdw.state { - case stateVSDError: - result += "Error: " + vsdw.err.Error() + "\n" - case stateVSDDiff: + result += "State: " + vsdw.State.String() + "\n" + switch vsdw.State { + case WorkerStateDiff: result += "Running...\n" - case stateVSDDone: + case WorkerStateDone: result += "Success.\n" } return result } -func (vsdw *VerticalSplitDiffWorker) checkInterrupted(ctx context.Context) error { - select { - case <-ctx.Done(): - err := ctx.Err() - vsdw.recordError(err) - return err - default: - } - return nil -} - // Run is mostly a wrapper to run the cleanup at the end. func (vsdw *VerticalSplitDiffWorker) Run(ctx context.Context) error { resetVars() err := vsdw.run(ctx) - vsdw.setState(stateVSDCleanUp) + vsdw.SetState(WorkerStateCleanUp) cerr := vsdw.cleaner.CleanUp(vsdw.wr) if cerr != nil { if err != nil { @@ -154,10 +107,10 @@ func (vsdw *VerticalSplitDiffWorker) Run(ctx context.Context) error { } } if err != nil { - vsdw.recordError(err) + vsdw.SetState(WorkerStateError) return err } - vsdw.setState(stateVSDDone) + vsdw.SetState(WorkerStateDone) return nil } @@ -166,7 +119,7 @@ func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { if err := vsdw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if err := vsdw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -174,7 +127,7 @@ func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { if err := vsdw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if err := vsdw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -182,7 +135,7 @@ func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { if err := vsdw.synchronizeReplication(ctx); err != nil { return fmt.Errorf("synchronizeReplication() failed: %v", err) } - if err := vsdw.checkInterrupted(ctx); err != nil { + if err := checkInterrupted(ctx); err != nil { return err } @@ -197,7 +150,7 @@ func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { // init phase: // - read the shard info, make sure it has sources func (vsdw *VerticalSplitDiffWorker) init() error { - vsdw.setState(stateVSDInit) + vsdw.SetState(WorkerStateInit) var err error @@ -233,7 +186,7 @@ func (vsdw *VerticalSplitDiffWorker) init() error { // - find one rdonly in destination shard // - mark them all as 'checker' pointing back to us func (vsdw *VerticalSplitDiffWorker) findTargets(ctx context.Context) error { - vsdw.setState(stateVSDFindTargets) + vsdw.SetState(WorkerStateFindTargets) // find an appropriate endpoint in destination shard var err error @@ -270,7 +223,7 @@ func (vsdw *VerticalSplitDiffWorker) findTargets(ctx context.Context) error { // At this point, all checker instances are stopped at the same point. 
func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) error { - vsdw.setState(stateVSDSynchronizeReplication) + vsdw.SetState(WorkerStateSyncReplication) masterInfo, err := vsdw.wr.TopoServer().GetTablet(vsdw.shardInfo.MasterAlias) if err != nil { @@ -374,7 +327,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // - for each table in destination, run a diff pipeline. func (vsdw *VerticalSplitDiffWorker) diff(ctx context.Context) error { - vsdw.setState(stateVSDDiff) + vsdw.SetState(WorkerStateDiff) vsdw.wr.Logger().Infof("Gathering schema information...") wg := sync.WaitGroup{} diff --git a/go/vt/worker/vertical_split_diff_test.go b/go/vt/worker/vertical_split_diff_test.go index 99d7e53063..15b8efada2 100644 --- a/go/vt/worker/vertical_split_diff_test.go +++ b/go/vt/worker/vertical_split_diff_test.go @@ -159,10 +159,10 @@ func TestVerticalSplitDiff(t *testing.T) { rdonly.RPCServer.Register(gorpcqueryservice.New(&verticalDiffSqlQuery{t: t, excludedTable: excludedTable})) } - wrk.Run(ctx) + err := wrk.Run(ctx) status := wrk.StatusAsText() t.Logf("Got status: %v", status) - if wrk.err != nil || wrk.state != stateSCDone { + if err != nil || wrk.State != WorkerStateDone { t.Errorf("Worker run failed") } } diff --git a/go/vt/worker/worker.go b/go/vt/worker/worker.go index 77d860298a..3bc675cae2 100644 --- a/go/vt/worker/worker.go +++ b/go/vt/worker/worker.go @@ -65,3 +65,13 @@ func resetVars() { statsDestinationActualResolves.Set(0) statsRetryCounters.Reset() } + +// checkInterrupted returns ctx.Err() iff ctx.Done() +func checkInterrupted(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return nil +} From e8bd9ebfbb8e42a9f243c27d5ac92bad9f23e70b Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Tue, 19 May 2015 10:43:51 -0700 Subject: [PATCH 043/128] Add Percona Live 2015 slides. 
---
 doc/slides/Percona2015.pptx     | Bin 0 -> 546098 bytes
 doc/{ => slides}/Vitess2014.pdf | Bin
 2 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 doc/slides/Percona2015.pptx
 rename doc/{ => slides}/Vitess2014.pdf (100%)

diff --git a/doc/slides/Percona2015.pptx b/doc/slides/Percona2015.pptx
new file mode 100644
index 0000000000000000000000000000000000000000..b38e77df5ee6521201afab86ec1ecedabb199fc7
GIT binary patch
literal 546098
[546098 bytes of base85-encoded binary data for doc/slides/Percona2015.pptx omitted]
z_Q2NFFmlxf%{hT0{fDtFFk5OQpblZz7yM3!CN(mGzZ4ilUn=;0p)SMRJhyZMDA3yA zL!ID~>E^@>KuC)#j+eCQ)lMW)2Lx{BVJt-p&iY;zTY-9mkdNg9I6-DFv!)&9fc8n< zr{nym@SGle@X6`EE|#|O!`9rp9_gIej>q4jJDJ|yaiaA|_;fc3XLSf2G0YE28nS|< z@)+5y+m*8&QZSC8INEHM>fVL6s)OO8mPR4StQJwNOHl<7H|?rB_g*DWBd;0s;r{CrkxZN#==lW>g z4UWj^v`Kp=9EP_OprQTFcb`8wlyKZa&iQ1$RKkU2fbqTB0Q9Eo@2io zL|VT>^<Z`OJj?#sM!e z<9kRt7x;c71v_I}$UO%E@ozIFpr>fthxmK2QRD2A7-*BkA?$s}^IBcZMotXSX-IfS zqdQ!xkdT$}LBiw}GdtrtOZDLSqr_c7!cAyG2)Br4#*|=}6?t$%pqOhO>TXJFG5kbY z@Y-;?IRj**!Vu9+@7Op^Ga*38w@1)quskXwx**YenGT3eX0GrOvfU!M*SO9af{)k} zW;x1qS-+^iBD04v*cA;A65HSlFxY8Rf)B+cLFxYn1C9#R_$SHHQB|) zJ?FIlvkYe)ErT|%=Xmx+U{+1X=@{Q zTiRucsNl!Bs~1DIPHF?=>WB8WC>Sg|rxw)B zS{qB<7q9irI#(MufW=hz#bbT**5$?}V6oMG@c~#|ZRGH^v>ajm-mEvtRIu=xgrI}I zxfdZI(}oGB9e%t#-STQp%icF#q=TFD&orn;uINxL^*KqL8oatNIrm|rGphIKOwS9- z|32Hb2c3Nac#fjS^sEsB#=Rg*nZ%vhkG;A@Bs=#*Yvv<;FTES_@XGi;nT%_Dg5g*^ z0`EDjeR6Z1!=UEjHS%Y>_WsZIw)e31i*kYWwTffODeG4Zd0jp%>$%=irbm+~o&pjUBn>L##LRs{rhlSbiq z0}RaMG;_-agnYTO3IMH9R(4pzfBA~ikcHQ6Z z`HZz~Me96xh+P<&^5&=zuX06On7Gl=u%RPO+&Z`E&baJevpPCE?aY*^&q!Gs*>iEw zk>*n?Qkw{$WaQG2K|5neqB9(~wXylu`0{CG`c(PSVHJ*YchFRo(bK7A1xOcRi6m&v z_~zTb-JNNzx)CwBWY|uQhgE=+9y)R3UElo$^GY>RqkhfWX1rr^$bz%eO`Yau*IGJx z!?3$D<7ZQvk&tpH20kx%;x9GtWmZ6rt{HDfL}ZMv`+Mj?eV*^%dNk$KRAAiL zcV~ZFa8Qc!c@kpq{at(bR=M%`AawJwaWZ7nsIBeWqfVO%KXkF=I$?NEU;Ui-q5g>_ z_tTe)AXcXC8r1xvB{$-w%cs@nIe*n>b9q}8_T^+(h(S|ZFUFB&_}i9D>z%@bJFr~u zU1E#Vohj06`%vpbBpusO#fR37{{4Z2wt3LVtFxlf9o>Q1azG;wXXjhGyNk<{*x?D3 zD*cHHv~8$IY2eS3S^!zoxl~!O%9g6so>jQGI}LAn??2M`=A!XS5Q_j4`YOqe^t@}T ziM8EYLOk1qe;<;_r56m4ZmjQrqTc)HD;Iy^-m^RIPD6a*o}*!||0}2-VyfoVt%u4t z%{Jp{iMaF4UTtqfL8Q2?HhO%<*L?W1#;~|7?Ye!>pML+fed*BP!+ar$JQLic+dQfc zlX9Jpp|eMG@`L`f__K?}8vxcw{{d@8=m4;$Nuks&C%g_`MO+1On$lGEj4HMHzIKjjnzg9CQB`RAw*V`(4A z`ozPv*TB{Hi7A_(f)B=*Kbeue1j#<^Hy*w3^q7`PCoZ;7sB`EswIE_chih%b-#dOT z=GL1>t8VzqQuX{caC}$ib5eP)M)~E$)474Noh-t{upyVaKknZ4y5ONU7GaB+$6)R0FIe07{{w4#EJEMENatIJD+~BW z=|9g0*e85}Vf80AmQD`CHTT#lz=rJ2jJ$2oMnP*MzobSJQLzM0ZoWOsN^XbOPLgi7 zk)g_ZgQkrDM-#!;SO^%Frv*rqHToRcC-C0#bI;MSb*nxPTWPJ&dJ_(k(pqN03-v(=T*xVS>kf>Xk#XcG- zobc4nJ}Jd56%9)b$r!fus&%c4{h2UYR2COHU_fTy9FCWJ;}Io)_F zd+}fFDTXwRq?in#qH~XR*To)K3P^2$nYKY~)yvofkPhJ15GaN__P8Qi!j4Yh02oO5 zeUyB_{|3Z@UWng{0T~|dSB9QjkNM$Ib-H(6`nyKP8dSdYWvqqin#DeA_Q~|hq;tZl z<9<_aZCX0U7Y54HRc;x7%3<@;6Yj98A-d6+(C}Cz3QH2n17-kib=<810yHn1Pz5Mk zW*AqGjRC8tu~ol}H^*kVCa%9yCgSu$qwCoGjzJ1!>wyl4H6 zTDp=(AE(fv#}ZKas+_x_Cn zHWW$tsjOuygj}&4I3yR9P%($dM%gP05-T0~bQX__WCWcG*}*H0r!K>$b&MIJaI$F8 zZ~i-py%U$h)W5h|4iC+L=V}Su7Oc==-5L0O7+@p8{`B3MS}}|C06c_KUrcG+d%Jzrs*vr#K>ETm8>zGKjpLqDbk6)_sKvh=J%T z{^4q%+n5X&FkdsDi@&Boi7-OTR{CGsh6nBs_d&)e|3!LYfC3h%N?mRMfNU2Uilk*g z+IoroB&&(COkV7O_`~F&9Q59J$MuFHht+P&G2!zjQNb9b2~Z(Sh|(oML#h*cOeN65 zB2YOwdv%B(jsl<|)y3>jL#ib~GJ$=O+^(_%kBOKL66mNBRT^YzA_#qszoLK&z~3CdS8VL(GFGYgZwogr)v z84GCXQXaOHBm|2eoB7_4G3;Q{)xitD-I(PsuOdfK-t=b96ayrk%+9=9Gj??*OvJ?u zUSb9$uT;xznDW;r4$FPJLNcyqu66-vekRI?9Et;gkF_!?F3pfTHs06?I+%nxBoBD6 zlAcef|&cjkxDQ&`YzISz=2uk+t8}Za20P$Fd%1U|Aguc`>7jpteNPN89Le zYL!59IvDy%QwAPRYT#|s7Tm`A(SsM?wvvHF4}SKtno&{Or+mxpl>%|d?DW_%jRuf4 zjz45=@r9`$WbI~00x-#G3BjMEUD}}_2OJe!Z$`Na~+FEzVD9+Zgno3W+hpS)-*WIv0!dcl;U}B6&uowMm=rK4EP-zXh+`>*G1unpqA#NdnS;$N-2GzNr zFK&k+44NUxo917KN|09lZ7!$?{;;}ipjHDVH#$|YVqbBVE%QUr+QZJgXj>2S1trs_ zOWrvEq#sWRr#O(fzyVLy_W~v!l&sSyilOzABkVK0F9q-e@*J?D z73tjv5&}d-PSp93%mmyJ0(;)i@4{ZP_jp&cQ9_PbaWQaWU_sPaYix4zGBFFE=p}_u z0t6X$f?py)Sp@=-F*)ExV?i>Xv#AcmxvsxZQ9AkL(= zz`r2N<_SRof_6H2vJVO%hGiUP{tKzueqBuci`1U}Ahj$O4)E-?-(8P;YGX5MDPc;k z$K0YB>JpQtI}P6_Ww2-;hPg#Z(+#JikIGkm@6xFHiw#i_2_v(fL|o2C;6quntKwX9 
ztx)3u%CxuZ_lKfJ!bTdNFp?-ez>RE_-iw12m6C9soh1G>{HDg? zjGvQj$e&o(KjtBQo&iZD8;iQ*lNh7~c06B(|7fr_I_Tflm0p@^mJ@)7aDDXd2S94$ z0wOnuZTQ!mJ`9!*RvsNfB*c3F|b0x3eeD<&@N$^c4$=Pz2KxKjgtN-vxj zUmY6EmV5!p-0;wQuruQLJQ#GGZ(ovI82GqGuP^way<3!Ddb((vq}WjYoE-3)tS01A8%Bmyq%WRD zzajdvo*Vr8*nS-yk%nU5kO@v!98%#jC#;X7hF;9XI$sm}P8WDj?as*+Rg^_RvnaZ& zjn%uWry8owj>1b@HS~%$Mq%JrK78-jTTMFb@myxvU^O4EQf1X#S6dJ!##V^SjK_f@I-IGu(BiW67vglY5)t-%<#%}8 zOWgL3&*zCyjWu4vjtQSqw3WL|NmbEKy^M{rJfM;9mbq@9BLJSp1-tQwr`i7DX_bF@ zTFyT_t^F@g11$dVwD!L|4X^<4v=>K9&u-gJUmNQuEwAlHlMM+6FY4!f1UX;wUqO)s zb3?0Jdfu8z6E7S#Ax@fOvr~1;P{B5$2S#Dr>l5U@EObWgA8pzBe^}bbAS&m=8w9!m z^P}3WIHy`FI~n7rOje@WWHM0e5vrp}Z@$NnHiq4jE`u6)R_~wfr`tc<_TGQC!}S={ zx)U08xM~MCz-?JM3tFFAQ&;%5Y7>53Y@(mxtCVFwwnJ@N7dl4%Y`HyUi#?ed-;?-o zk$ERH<|0Uj)4l%exhS@>=h0Jh zP)#I19hP2cP2GR#K+U*)=llHl{Ct_|`82`bu{D(+oV%vs|9q$qqv~1`^oOTxtaQ}#CSW+PQF6`L#z0|z|j6Zd-*?y zp~)CHI+-~9D=GA!#lMn5-yQbb;sK!p5XrYsRbjUC%{hytu9+JZ=cX3l@4hq%Z&WO) zDKxp7Dyb!}ba-ZQkKGhz8Vb=s)sb<{1t|eiCXr{qU`1bwvVHM>e*-iW{c0zFnz%T0 zq3o6{Ni<~IrcKEV-8j+neSMVS%s#L5Y8+MDkg?}_S3fW9@qNS7yRvz#9C~``rW}`$ zF>LG^vn`EPY3@v)db}Gk%)aVbzl@N1Uwd!SK2mxA{z9*n^g_pS@?b%lI5@rh;=mOA zY9bB@2Yq+Y{e9%}(L~FtB31;L?5pDH``Na65x?$%D`=CQ%Ex!{bF%eJM`q8aT~ifL ztAHo{XyZhwy&VYCc6q|RearT$vSfOSGk2T2R!!XvZT?pc^`rC32CkzO5Th^o9ShQ0j^~99!VcOp`$QQ*;_1)fB^n?JT$a+fdOuYcEMQKPjg@nl=w zI`bfJ^Br~?7GM=z9LakuFInT6;wD3QP1^Eqrk)+Tz856Iw=Q_B`&#)-V^%%O%N*jc z^X1)*^w66e#eNPY+lD!5Qr!{30rtiKAl>-N*PE@jBUWVo_u26_k9X&m^2L{}qdSi{pUt1vl{7xjLe|pj zBO>;nMqBqED;wn&l&$aOWao#%ON4wkeO|tMf$t+MZ(}f6SKxX3Ky^Ys9xl(eUMr(_ zyFb3Kesw<%e)akxAF6viv+~vZIQYfu2YsmSDq!Jx9Q@|>!!A^}D7O~vBp+?<=uD(V zXp3RCxN{mNA2>IEj?4O$n#G%S_MQff28Ohb0lBu`FCBTx2wTuN9QgmsW+r-T5s)Hm(TmsnR+=iYy6MSd2c1iI4x2f=C@a4(=)k4`7+h-?r zgsa=rdR>bB{h|^@UY7EjukYppw(?SBu!Kdoku;HV!*V>$*&SIL7SkiwGDK3&j zgA_ERU^n8SD#C*Pt-OdUPlUjLI*9GL0(4-ZreA7IV4$X1(W|&9ZJ@yqw%fExlk2cY z3rC;s-J`oD7dhVV0X7 zDTd}6^_539xur)RsQR9oFdK3x-9b1;Pz5Qt{-}cHZ7pEa6+!f1 z!~6=CpaYO*X`I5f!HP&;je;XResxe2(3Tu64|brR^>Cv?*w{;vHd=s@%={SVQo5O?+8)f|)Q0zhS?9USKkT z+e4&gkRg6SJ*rte&1{f^KFkIDt6oJbB8h>PenV8)y2TV?07B`CD?b4+?xBK{3bWV| z4B)RT{_6^Q@xvZM09?U>zMMaA+xVeOfb8u>(d_es#Ou3I=`1zo@3! 
zKU7n}@}tvS=D=L$fwH>1cru|XF|+R*f^x=qh5xcKtDkVC%!YB9<&X5WB$cf6;UIsQ z`Rw%5@dbRB41Xn&-?b>(u!y`*SxFHq;g0Rr5)`$!CWdK?5^!1M{ zIBxV@DJV?2ABKgBOVlr062#dRo19=2m5j*2JBrJkAP%nLNIxzu{gvEX6-lfWZ7B(c zR?-V5He+qUKC?yNJ~Zk;lbvIbj0r#l;|L@IO$+^wD{qjp1nkt&A@@7W55%)W3pLFI zm0`;7>{kESf{>UZ09#OK7XOyf8M~OCy#{PNf}Xt!EY31l83B7KfGp17Iymp(rs_U0 zWrRs1kO`AB(K*3Ri4_JeGM+eXb_E;oNoggaINl*0)6h$B%_PuEjWDUoV4l)micpN1 z8G2r!AGm>EIL5IT^KAr|G5&JR>*BA=HzJIXF_r+X2^Bc-musS9uK=5fp##1Yz%}pe zw7+s1Nnq510enH2R8C*H1sJ?(b{SWad@^vmovaq>aun7pO-4k|YQ*@$LC&9XA$|5V z#8pT<@S)9q<`%Jlt6vEiiNSvk@aAe!NxjHyjvJ3-pGSM#F>>!P=?j3U&W0GMppIU1 z3bqwk!HDyGlnx2r6pDIa&=`(qq`)+?1qg$8%%*1n=rO|B*l9@g)aCf>6<~7213P8k zxL4REZ*5O*OpPS0kO-+@}Nt7M~?mnug%ZyR)Fl(@};aSz;ooYslO*t|jqfDmatf1?mP z1f8dWXI7XBlFl#Brh9Q6g`@5y?+bbtCC}m$9#ZW&YqG%F~%#<`b z_L|^uA}7R1^B=P5Ndq98{XfP~f`>Q$lFb$X*(^o8(w{w543Tm%KW%X18|X`s$xRx* z#tBYFuTdY+uYN~t(g=3ACRhO0ChaKGN zyF{VRCR~MJFJ62da5aejmO(cxS~~AgqzF(3v1C25a1gy+Ba9$qg=5{>OO1wsmIB#| zew0hAPq|8_ltuHlwC;xtR!Z$mgR22+cVO|mwT?x|Ib~aEdVT^w9-J+bXZAZC*FxBZ zFS?cWVuo)}y`BL@Niuh zGNFKFW5U98d@xk=b)kNZcnbk)t13eT5$N!c5WzMTp)z+v=%HFDL=*Y9-SomKVjg4G z>5f$^-|=&d(cTXGLJY@>rbeqpoNqg*SgM#qgc=sih5qr96UHd%R zQlUOJ8Jeq6G}H!%BHi21uT-vmpX*C(-Z0kvBMD;tl?3&mw`__+hKy$G%%=|biNVux zB1Nqb{mFS?ZG!R?ew7md*ZS3H6R4UEG*gJ8om_(0eJ6)pi&00DrtjV|r>!0X&cyjJqv(|1yXz zKoTTM;7%+3D+y9#{mBa-$}5!l6twPeMtDKWv|-H82m%k{%euDipbzgoBQBZ@ zLTe6ZI~{l0jn#y;SJi=>0oUe)1Ea#|==Ntzzz7v(KT{7b!L&0tzrYLO*7=5<*$Z3y zGv8^`)K*=6u!_4L&e^Wcz&5gr{~gC3BysS8{zn`+Bpfy3W&(CDO_)yj#x>^UkJM!@y9{q+POia z0mg85DmfoWoAK8l9zP(JXzjLP-Txt)KF7^*nYFSB*PQ-y%>E~xXLa8 z2>Fe+KWxDWv~8Ps4+W@$c|6ivot4p^n6}XU6=^E*$s1_TSr1*`sz6a*p$qV$QjV|M zeQ&>XTCc=A9$*O~0xZF9_xT%uC0P3hY2Li$e$TlU%&-I?%{0iLCeOiXWGuo1lgLuX zb!t}3a-Tkj5+12T6DvC3+sp>pFr%{=|CgD%W*qLZZG5a!$wH5OG2S_5r?q#kIrj#FdG>&oT4Ga10+G$ zG6V!wJj8H0<FuMF~KOt`+lp7YqP zy*Xg>A`*a`rIjBY=kc(@pvTjxv6~;4b3A5zN7+t`%n|9)5_hjBP#3!^ zn(kMaX27102n1mvCAH>(g7zO!q$77pQbxyK7W9P+A&m{#>HS9EF8ZXL6Oedp&%PK> zO>t$|fEy$3uV9G-5lege&`~xZ6#hL+}#C4_IkS25-xw1)zD&hU0gpLwv9ee>n<$oTB``CQtZkGm1d={AWfP6$- zj@g_m&)!OG`h1kwq+2C)^4#CmWINxM3bJv0rFOP6J*H}Y;5~o(!a3@se-8uhcjFBT} z%pN0qWXo*5HC%KO>1a`ejj$%$oRp>V(-RFI)Sab(uIUN94NYZ|s^TnN@1Bf<#tgNs zM&yBx!4Q&wVzZBjDR1Xu?YA?SH#X(;uXLRwlzW|xt`0A~#e^cWFZE_VEtxG{=s!+e zTZ>H=j;S}d_IgPZIT6P-&sPhe-q|oa*Ok0q#oEVIqFL~E`BUKRD#wBF=K{GfdrqEj z=yA3c0o^Lo)n@2U5x`v-~vnj?zIro$8@l(v$mTy zu)Ah|TWxiFX>hyA?xx1-c1`bg_IKmV;I{tnTZ}L)FagjV8P<>LiH4;HD18iz^l;lh zEdD5h+rqGD-|a@_1ykX6GA!B+1+oQWR=eE}%MYN!fA?tJ@4FC!9XRc`Y85Nr(0}yq zt_i=t`ubZ^(%Y)32;yv$vi*b#(cYbUU9XTkO!p`B6^w+50@L8?a))Bri?(7xiWUv(wC81ESi;Mhdz`O4t* z27l5L3`4Q$@A98Hdpnoq+k@0@)H9Y;N{=;j7;hw6B`7sZjNyJNq5Y(*mflIqVHqoR z;31> z?caO0U8>*0hwKO+0UzLkGZMsY(YB?Eg=@{^tkzqc9sGFa;mx5%;y=a5Tza~N6zWk| z#G?SFBHS`K9H;HX&mS|aDI}))psMQ0yCezwQ5V|eFQ{Li_dbq@36~X;l*&+N@^M<` zGBSE}Gt!bc1Wb)hnIoW1CaI%osN5v3l32C()+L1`aNrDZPWag%G{@ayi3?f_1x2d( zG7oChsQUiwq^ppq(fR?TEE3pjK^Ss1#%BSmDgNnUOp#6jBS;p6YO&;hrguhD$u*%F zHm3MtR-rciHYqu7;!MG1=qM`=yh->Nd{@StIrK&zRhhfX>F1@x3rvXa1lxGY|DaSq zK-wsCa%YtPD$)Omu`6oJ{)1*w7oC=ujOC+h%TYX}hyO}LJ5Nf>^v)kNzer-W6pPxk zIkwHV${k?ZTW$%jmwFOA{b=S9J9PApn#5cfh30OS8p?wG4%yyv!DFzm=WI|+{GwXQ zbOSZ9Tg1jFL&Qzg^%LujquV*?TRJ?|cqj$Axzf$GpYs}Muq#(rAKz7SZC8Cwzh*K6 zxy8GJL9${6`k{&cLLM3A8}KQ-w(YJ#Qg>CRZdkH#=stR(GjvNrC!$5VoO*3=*HJ|? 
zn``o-GH&xjZF?o}!P1rmboE{!CGq1kw;Dmz^>IfvZaE3r7vxCQBX0Fw;O{dudRZk7fGp>6R-c-_+d8Y&Y;NH|7h$il-|xWq_;(fd5ffo`c0L~Akkr}7!~ zk0iH`c)5kVexPAegyT#41sqf~5Cb z`S+0unvN5P?`P+i0o+K9yJAiviE#7mIT%V^1n;f>-obslClCqf+5t)$H8ru_B`%yT zvq2dfV=!^KpgMJAQK6~fU=(w~X=><2ag6)0;s|$Y&~-R)n)d>CdZ!70i$Qrp%eR9# z{CPd`D>T=Wx7`EMmNhA@DTvE7u?oWY>q|WBX_WQognx#)unwK!H10-AH z17J>{W9TimzhXnTx|28k{m-WW|5I%Kr|k8&e(jRVmCXV@iZ{@9-~HE=#Kt3$ zCTkraGLjQ2&bup4OWkdX6`AD@E|=`O{J3&(&|uD=i*3i7Hs0>G?ky-d3kzx`#*i~( zAi%#Y-fbNIJe~~n~E@`C=E^jqRg%1_X1{MJ)+$GGVE6C-9ONUmjN6g0utX*Mh(fHG0W&*FfnE9h( z>m)YX$A8__{!B+RD^Cawmqun^WXo+WfrPH#>@gvadD7@Z|3Q&X+L5~k9jvy$r^pD% zf3Tk~-8hT|)Bw1NwF*4^a|#59z`C7cy4jMb|Ma`w5c`#_9a<0N>2w(oiEYdvB{7D~ zO5)`+h~*q8S9g}5p42sb?ICr|X@)u#xTQ;pR}$(OWS~^)X4?w5WntU1C>-aq6dY?2 z-b}6mZ)#ow$-F4AVvL@Ha?pqoo3ufLI5iqOb(T3Lrdl={NJY&2vEA{2obnCCJM46xq`K(q zV=Tu-&0gYOeO(SM#p%Et%TAd=GpGer5N8K>3FZdFxyesq=0I8IW=$T3k+U5LQ+f}px77s79NK7;wktn_#G&D8ICP0dp2ZKa|7*lO-wy>#< z@l4+ORy9;OFaVm10jqIt22I_460o$U7yfdkC+P;c~BN!2r4coCZr&P`Y}UBJz;4f(FolZ zG@4=Vf)Yjyj3hLimX0orZe#*%NV(u6azJhnhtl?gVyieOwv{S5xMwggH-w>0T;@+gT1K^@nMP}&S}$K|}X zYTFmxKBnszgV3%=g`XSs0rb>uEj)a+m5&UuKBSJ1>ER$}8U6(3g)kqPlhckWs#O)I zBW3J{ndn)!PFMaKybP8m4c)k^+$%}JcE9Z7nbRTM^Nk4R?l*b?cHwr4UwR>|(ZT%w zoTInBid1<~yzypJGWVFATt3eKSL)jIjoGOwo>Y z(R|Z)N&Rim>Z@`jlC<~mdSF~QPWhA{PRov5CTNj8 zhZqLXKN63qo08?nBeGr!7b*Ak(lSSzQ*2aiCTgTa%U{*x-$uSFd@V*V87|387zu4R z^>upKMVq^Gh9I*02wIY5hAGK;6pkao8T@ILFCc}p(0Qxf)gZT|aQtIWHI)y#Z`v}*C!v;WPH{BWdOdy8pn_&MH zh(vC-(B7Sn|1?BmHxJ1#a}{4PR+r6Tv5&8sqece$c6$2^gy*xE zug{MKye}<6>q6l3DRnBr`6~P79pOyhRQ8P<4b~^g1H%m$1lByGj_RwWUO$_T#5*Nf{1)xzx3$RPD;iTjIMl+H*qTd$_O(7 zpHYosp?x4yyBMP!QX#0Vv6eceaHaNenirnbBix9b#ZkiL5i06J)Lz7zP+{tg6VP%^Ctm6~3arf0^@~u55 zeX9baZ(=LRL;{3*H-m;QTh$z$`I(lpi&nS6WkY z0qaNZsy7|1C^(y1d!N6V)yUC8DTGlJk;BE40Ye-qp|J>yiv&3Q2$@svZ}XDxmr?pg z{9WY`D`%ZMf=ZNvXu|C%=kZXCxcAXWP*D5%P@_gPuYXl-`E(b-&- z1Cu~u(7Cp|P_MENHmEdfZ{ALAHKF06^&12cRf6t1V@!rDMwm#Y?Q22dH??nj;c4A2 zA#kwUv(-#zk{x#fzF%59PydayS+aFldx~E`8qGD8J}XkQJ%K&5K9zVGAaquk>iSuV*?op*ri6Yg4R1#qz}x8H5P}stYxL* znaN1PJi8b=hC|t%8Z{m@HN+neL~Z7svo|vDSb`NmT==tKjdh2xvWLiKl_}) zk%kih-GFY24+Opfq>U|omx+JcG^fsyTJIme=0k5BVe6XC6`A#jGn{iZ$MV}*v0VWm zLPj;O>$%*6mObL)H>xEBFRUI;4kyc|TE!iT2rpnekqVs+8g53{ng-2m3aH67Q5zh$Uye;%3@Tn%WZ$8Wij&P)|X3Iu@TV1xXGC zk-W;TrTj(a_|Xk8SrH5oPcimc$Dc;vH#Oi%NL;!IoO%lI)3UnmWF=X+URl5UuvlYx zigh!)g-7h`4|prDEao<&F4*77MBp9ReeFeO?=oxi&@N1?aJQXU_V-oA(9ct;D3wBs zX^TS5vVGI4GlNP-Oa)4)^e&7Zkj=#0YkkPJ)UOqoQ+lY3Xnifn_mKh?pBwUP?neY< z!^JQPr-~#il>zTPhJK+!)4HQw5UuSuzhC8-QC84dCVC6k8q>X5JAIYSoU8h{k>P3P6xn+;RBO&JJ`V_2%-+3M{ZibsNAYfeAf$+PKvejULbSDg04&?=KyyJTh92XX;e*Q z%hqH=DxE_wrODymEc10dBe_K1_kQ&~XXI%^sIt2L>+hAiy?EW;#rI-#>wC2>F9i&O z0ssL40U)8=Ao_jr|9t)@Ao~yPdL7GuE=K)}KC)r~01=#`n2uJ*tjb9-)H1Wam?0%H z<%xm&!@IP!SXS0~rZbHIZwa706p*q;8M49$O%EQuqrPdA9}mAjsOyd+(e0D7V|t}b zjp~>u^Vzd9V?JNbuAn*Vp%4odVvQ)c@OWx}KK!oe`AEz`l`tAhS=gs$$CLRrI%CxV zQDTLLLf4R4bid7h(7->c#9K#%>y;#E8U~9Kvf6+zE4K93Iq|mUh8=QA34LQijaq~- z9nU`+iN3&2XJOt@8_}xK%eQkx*Kd_9h1g<`6ah}th{(i;w{`bW8GrW^Ergq@zE1uo zb1LB0idX79#NNu+pZqHz4o#a z%bXAFUiwU$o@*UXwe#UgMDv!)Y3Ud~_d`uY?Co2ye^^g7>^)4Smt1iIb|jTj>Zi3i zlN6YYnJLvb4QY;QYvD_K%QdG>m&-8#*tJ>3BCY)XV4SIzf^&kWkZMm#j-(6~EdJE6%u#e% zO#`+&Fl(-ynYYc_sOxF5$9Z4braIuKW)X+Nh;f;_ON!{L*?7RI45kKTLc9AgVOHfQ z8hl&jhjRJhJr3faL<*U;3fvaC$P)4Xr!yK4IY!I9RRJDR)c2WX4S^!y#0&joO=Ufj+ipEVorOOmK#z;Ryp6E z@@cQqc{bP&GqCSt_7Uu3z%~jtnwgxs(+}93z2B% z?+H{2ME7AHQr8VD_7ku6mF~s7PM4@4hpQ7CV=c3F*g@|Vdj`Ng@pMj7Lm5c$3{mfo zZBOzAtG^wbKb7s57j9KVaJTrxtf^69P1yE@=YtmlAXDTLDejBw1hZTmv7ZcVS=mm? 
zM}~>%z+xT}m@oeT!xBwoK3{bmvtM`foec`$gI$e0;#Xoh>&ZI50rjJ0*;!ndx3c^jw&ld0@0cy$m*d|0{_puqNvJ^3@S8b<=kTizS! z1v3j5#Uxu_lVVZ%hgbY2;IJjro(Jw~yQ>BDv&h%^E*W>JhfWK6?xbnC-{HC1Hp}kj z^9sTkbc~n=*^4@f8$E%)7I(4MQgMJM8WTcE#eaQV1mdk%KUfIRY_a?@#r1IrmWdXr z&y5n=d#XX+MIc1sVBFWy{s?aIkAE4WxMh*P*R&h_13xK`yzN8o?44ADSMCUZOwMw< zS~jLmtpxfex(@z54?1ku7=H?xDEd|X4q6ERATjJ28JvKZ9|L6wMXyNo$q!mF0BwHD zSAbK4Q+Xm@z}i51T5l>vkf%J;VMz}124_WH%gy|-wsw=l0D zoO8^kIS1x85W#POMD(OO-N8=+Y@=YiS$l#^)#*53Kk&Qbd_!q|_ntsP?-UD>^v%#n zaAcQMTyM`8Fg;yZkB2Z1KUT_#HK99_vwxVjgEyv4www8Po0xCo z%XSVNQZM9{CMfUeK)Z-3wlt+j=}f& zUkUJ+JU|{cDg{-P%ee?RG_%zZj0X%qu#0{+#dP*Z!C4C}{K57*oY9=~dCSw%<8`zO zPPmQREBuurWFfczI$UdX>j0ztGOXMNE6m>XIcmdIPtqxOA@EVeseKfQVcH+TuJk*2 zvMQD=Vl)x%Q7zCeSv114wjI2@)mLkr@mVv4O@p2G0XvkEE$pBGm9C)hyOe(mlBljlI zYfPA|yt`S^_K2g;5VF~b=H(JC-bH9Ua!S7uTG0-=@ZFHWI4XhtD};5$jg(n&eBH)i zD#-=z&LE8C(jztBxUwxe!Yqmq(VAg*dph+m#WSE718c6H3Tao08k*phJHPY%&UJA| zKmdfaz)RM72=lowa1l-7m}*A*b9E*nzE!rm=6#zrkc6mRzg~i9wI?gyCoe}zw_o21 zd+iNUR3PyCW6I3E09oE!;15cN`vLhaTbOV5njQ&wMO))qUgm=^+Oge0=m-ceF{=L1 z?w6QVP~`l%Chtt}+7H)a>jt_y1|Cf&L#bPuag8g230Vn}p%~lv5x2t%36il9IwKXs zdWtc*sK}xk{Y}qk9FOM(%auqou7~X5$`g>KJJRVkGj$qNMqlnEgAY0h2VN^!zkMjD zFIr9&KVhdY@D|;>jIV0F_1!{Jt1TQh;~{2B5SCE1vE8$^HI0P_hgq@ueACfzBP8#7jnY-$|szWu_q*V<%O(WINxxg@Kftz^g zQ6222RezJm7C>dpk@biKrFY2BtX1D<8{l3K!k#O$&orJg^GtD4?Clc6U+J9+3)h}~1o$mJeRIS7okoL@iQG~qy;$9fIZ2MRYp@kNd>!&+Rn-;ujtx^HI za6T>Y#lKi`!torrDr8iz0cP^Cx>$l?a+@#+9;jOR7O<>RYcK_bOMbv0Me$w0k>6Y(pDVMA% zmJyXs>*0|ZfvXrUVEJqSBx_Z?)thTNnk)Y>HzmKoCHHneZOD=WL$@qZ31b_3a6 z-Lp4E)N9&7Xpg;PJGl=FO-_5?Np$H_-Jf3!oI{KK=cB5>e{l(!i+K{8UuWRH4E^Lq zGsO0nzHdb>{tzy&9mj-{_NK(tDu~WnX_2K;%etxsYCzm?lnoc)g>0jZbOQ8t%aKIu z4!BZCw5}=;TU*-Pm6P-g=u#4tC^Z0fxJze4l=o4rg3M*fTUtw2*TAtz`PnSbd@Bvm zZLZXF9hck-A+ziH1`tvxFIRvCNHem9K)IMT!*CGC`(w0CMZAg!W}C4e(Hmjo$+ykTJB!#Yb_VktW`W`tCBU$i2hF3S}je%-HR zs06C;onn)|d&hpUt_)E=_jE0$+_PsXqv&xHig9p@N2psPSfaWY`q5`+J~(WT2~9c|CvvF-*}66&9k8}vi2#_w5?{}8 zV63>n9>%-05A`$z)u61H*`RDT6hH3K17UppNVI~BSDmE0*d^k zz&u&_wM`IXQsFUmQq)KUsS{p`BI&XI{jhF7OQ@)M9E;Tc&&!lsWvrm<#YtQBkdD!UO^NolS(BVjlnZF#fek#$YaaO_ zHysp534b1J%aP9XvB^e#+a+YfgXT6S4Z1W`YG~5_sOxxQvT<3oQ{XB5{jzR;iqTo} z+kDYvew$JTQnT#&(ih-gpc52W8RG>80AT3Xf5J}q-ze(eC=CC0K>9Cdo~m!q3C!?c zpi@E(G0wyjF>hDm0vKk@iq-jd2r=sOmCRi|vDdLm?of~Y5QsmE?s++c zqQch{ReE0tb(1%L4};rlSUSp7Ip98rJ8~~2tq|&=I{T(==`2jXG*>1ikR9BzrNrYU z@OXwUu{`ko6|kl(iMoUDaH#xWgUx?<_vlP6tn25p6;ZeUTzE<^Yj_qJrk|M^mP%6{;s-mb$6dxv~8`p#zjI; znp9#?Hry<`E|X1=GC3FiWhA?dy1BjXKP5TB*(|3>V6l?}BIQN;fk6Y!lDW&;&cp zUm9eEt!3*nWQ{4bLs76xw#2PF0&W^!#wr@bFZH?zt#S(w9WEMR!fe7cQg=x4jqDAo zMF8fbHk{|Ey}X=KhW1qxzrz{qQ|Pu;k2GHD;`{K>@TiEw{G|XHqqryim`xjMje_0u zo9x$7R1Own7R;W0C%4XBI!Reo zO!S@`Q&O&?*l11FYBF_JriI$vPON|)&M&Je%WF@h_0>p<4M`=(x$NviMGfPa?2|m<$oxgrDr~drxJQ>Y!-k zx6{Ow%6O6d)a^wrM?apcERA+2!K#08G1E;Q9_r}iWd;_mGsHCil3M!e_tXno?PO2? 
z{WHGtmze`1p!w9yAC++<5vi9skVWH%beZ*zm`+wSgZA@jmD8bj;L@vR8=3iIV2ik5 z!I6eebJ5Rp^6B`^KP(1%Q9psyrq4AC4;Y=7Mq^Dv9gdVB{_N zfe)ra++9o;|3n{n@W$2V;lg@}5kw1lqvsMD2muk|KjhYixji!bG5G`PIi2xcJ>7uz z2T|f}efd_k(}Zcj;ke>le*XFB@%=b>{;G;)MTBzUt5iU>bb__SI)6gDY~{7*ObWCb zc~Z^_w7%q$x&>$*rGMauN14^Njb~2M8-y5&UMN2+N<65}d99n@3>8fEm)2)A!qgMt z;@6mxF<*N4NpaCc_GyhwCh3T@+`u|h)@o~^gZi60*_8=xxWwJ65Qa1gBf_}(e%;9f zZq@vaO?qDEr)ECx+iVCQf_tPBLD)}aZC$aIezxZnsxQ{JiFid!QWYyGr3pmk=7RZA z3}K>6P}a6E(LreL8mxAfDCVecSj;xcs7(WADC``>yN3f)#?o0z^3dc&{90 zr~(Mp25}_`r87;+53saLapJc7PJ524GN=1t4O!$%d^{E^kN`w~iL8FA4d<+Awb5aS zH(?EvH^54oPEnA$vK#mF@Hb9^mC9~gTR1&yIN!|U)@n)}OOPvl+H*wtk!`bxZO*LL zAD*z{uhw-sgSalC{MLc`vB6hKnd=bl?Te;S6Z$D*gML53h#DbLWCW1 za9Yz~Bm1IRbe~uKZ+?nv)E|`4=Y-5!9=*dQ#PKpk2PPD-b(;(u8U!O2FLYA$ddC^{ zxx2+Nb|cGFZo7GG^oCm(&YKW7c<-+DIp{yeeYkj-a)sim zvoK{p_O9dow9SW8DPx)x5~>Z+nK94m$E7|*y~f`VGn2r@Zf$Y@m2$`-c{JOcSRH6#})&w52 zJU7t$%bycLOuQ?nB4R=gqy1$UJAfU1bnGbVcf^@Sv)t4yY`isE8&LbzxM&oYr3RmG0b?ZGm@Z%jOh{AAV*>;MmMG z&Q{=Wi@utk)%1!ue4&vG*m9;bsG{Ydc(4t-mMgW(xjWI8Kgw7=nJ-U+TwpCtxbxeF zysz2>%X@&@4qz(f%w|v(Y0k}ZMRedRHNCyKfGaHU#k&9bs$>ZLO{wr4zGO#ahgP6f3_EeprdI1Tc_{lB72LB|YgS^;-PH~- ze?OCUZ@*Mx%Q_ksbRzs;V>hC8+RvekIFki=o1Bc@JpJvF#k1@l+(Ar;WYd_pfmNycH)y{kt=o zbL9?K(NYMmI+hE<62I(&X9+V?_VeE_JWrR5@(Pqjn`$*jmrM;`7mX|wELYdo388)H z&D;`!b$;v|wCaQ#`YQV|gSC3k`fKXbF~ijcV37wt1TGNymjK`qOU-VI3xTwbQ-;A) zH29ZyRH-*Y(fSD_h9CJJfhW5#;zrAv_Zhhyk%gRa*^{LpIDRdNgJ zRhO<`)=*hk&Pq?AZ2Ce|69Mg%H9s$zyFxk&Ak_}4UX4+G7{GC@6!UZ57{`a+FI#9p zQ2=ejGo`5I`XTV$K-^?I=<9Bo2Wy^i(tY1gc8+Z#q8BFUrSyNrquMjJF{Y!6f4CNa zaWH7}HB__*bvA!!r(8$Xc^)|b@Xk(ICQ}VO-fq4SV0&4W`3y@k0EIR8V)M@{P_I3= zZQsJn-I_U+GYU6)al*dJ&v;EQ_Q|16jU!nwx!Cg13Y@Jvt2FL)R_rmPE1`=DAB!U# zE7>q>SaCtlzox+_%337~#eZQHZ0m~SOf9bg^pft#4LcVm9mpK@KyPGq<6{kcX8-FF z8|eVt?U~MOI)z*^g{vcYSqJQ>MN1z|a-B_y$(M0=A#B5QQ)C(4Z|}X?sFW9^{Z!BZy#@*RBNY=C)13*$LJv>d|yODq31DNFAU92LQ&Nr~$UKt$J z_@Hjq4S{F?HZ5|u#%QHfW>|4t%wpO7(PxiGi5;v3GuU1qJ3|(mz6O!;viW&GNk*g%#KcXZ^uJbw&vGot4wc)5+x`$?qL>8I~tU zrsvg>2&#ObCUVdfebAQu`VC)7rSHwTSoA~y^Hfq-!fL&KI76aT0QMH6FrUagsK8+G zjq%5JP8?0PaFY)N7oW5t&g6LpS?b3hLYnd~6Plb%eOnMIqW~<0FJb=yq}PX5Wzm?5 zXXd>bT)K=vMCF*AqB=7`ND$9GPyqR5c-i1?t*a>X&@kCg%x9?Ad^ksS4?=EdHXY8K ze5c=~bZCzS8;*#@AT?0Yl$q|L5yPR}*;TWs>s`Pc#+xHc`ZEIXlt`7yCo|ZYW|xF~ zf!o8CuoG*mrQtYaabz_pA43@5ATuRip`f5^dc?$ zVdpDAfiP9i+iN-6yHA_=`5~bMvJVLo4e*;@@{5bgo`}-2W9cnOdI6squfg5GLm@9{+VUJ=?uK= zpN#dlPMy!RMJdswR*s?k$CV{c^d&bQ4HAt|w@nk0)ZM zX${6IbwBX_!ZX)z$Xik00PsBfe+K~n|H9AM9VXHlh!0~CiLrn&7u$;#_D63M+zAc;1a}AA&7iKKMEP+s~ogI@-~&#Dy>RAh~_?6B2U&}``yuslzPne z4bzKp*UfR5e@B;@)4-jPkW43P(3G;sp!#zb*TPrt$680cV-gKM&5xn8_l(og%lWXi z^(vyc7;QNjQ9L3`rdPz}5QSyQk8>ZPhPP(lki$w1Sgi29GWSAm%(qMAa55g9(!43J zay*2I`?F(APJ-#1k~_*P+r)hrRWZ4qV@%M-$(3Nxh$NK2)m!t% zk6O#t5F&=fX}?S&C3#2;{cMq$$n!t?JS{)sSoQ*LZNFZ#I7_`!uRfV_Kfya{#n;QF zj$4%#8z#qRHsEVI*C{r=%i~G=Q`hs2;VBY%SpULVy&(&CC~Qa}|Ix-ydnP zn^t@~a8+)W+8Hd`5A;BMlRi+OOpn9X!=F4$1Pgsdkwj-FY78sYtgC=*@&84#&8^Zv zj;g9kFl=Fe-cD0Ex4H(TPO{KOAkT(R_f3&0TEhQN5HKW8BUP3IiUqhXEr# z1C{N!yX80I=ki8fxKn<#<6n$ra!~O4WrNmCE=&Qt$$0y5`@!e?I`I6JPGH?@^rWC( zke4-*9{NaGNpX=k%BnMf1a;WfE~LN_LLkGU+@gq|Ab4pGH3;O1JH$XoBzq!DzCSr& zFX{qpEG2S|!_M8*Jm$u zaaOh%)p5LP2Pp#^jQ1e2tZqI1{*##Tj(84=9jk<32aAC3a_*Bv^Z`pOKv0)_2dYY( z;>Wn%n?k4Tv{-|Isj};M2=77_236rcytM10^L`OcBrgtONhb%m4}V5uMwER}?E~Ca z;_0JW5!g`q;y$ZJcBNeiNJZD@4e#N!*c4VN4T6>}l9n%0owX!5m>)&WRPm)}x`DDG zctSMJ1B<&iSR$AtaM4O`<6L3m9A=TuI>iO9JSO>6a0arh9wc%%XgD9<&^JwRs+qhR zt6VJ|I2?G$f1ggozCyyjf+l?7X1lq%gWo=vUH+Yf!iJ5)rbX0VhpE&0<$CLJT6S9J z`-T@fIc|3kgKg&yj4xKFt^fX1P7_TDP54E0l?I#_I{5d9dX4C|(>Jx7&Ez%~>6T#~ zuKyUZhd@8QGaTCsXO@lwd%%-uKd3wKX%|4V){j> 
zE7>R^OUc4wYDzSP;AJnsQyv4^oKeFU*rM(tOp_!g#DrLut)~7QMcp3XJG(hjbhi26 zY->Z?%Ibx%l~hz*-t}~-sDda=;2?~s4#x2TgS8e}F6u0l8E7Mr zCSaA{@>Cw`a049!Ry_?fpVvKosY-?Y=M3NH{SM#f#|Yoom9OvX=Sa^d*3XY{!>Wty zinVM@E`(z>>|jBIrigxr(%1qic}SBVNfKs=&jGKovXA;rBX44x38h*=R`NX?YROHz`kF z@_Ji@HrxPrN;SeYBYRkIaOx-mQD{MLo%2?jP?X} zBz1KVZvx(jkRuM@z3yhqjmeF<6(;k?o%dM>N_lj49E3`Ny_%z5a)7ybSs7;Rc(9$r2Vb6AGmh?@ zc6O}xu=UuoJVT#H~mr-5`a$>%AQnUh5k|6wJ zqb**zWAenS-2Jgm)$$X}9>wbv7VL5abDUloe?ICXU;kNqm4?{t9(v4l>d zB*A3v5|&9V(#wyos&YlTjpC)Cr50kw;f0=n&d70d+k0cG&(Lx*zJPKx2V+}I+--g( zQTt;XOx*5%B{KVCYYm^(XLP$|#@ttHZk_*Vop}b6;oLSVaASNofNvY;-sC)4acy7U z)VXKxJ$&+3el67Z^-M1Mm^u@1Rg=!Eq zAg84Ih$Y|}c}%0hZ;d#6kuD@!1Qhp2aDM)MX5@s{7zg}~u~g>%cNptmKFt5`QX+_d zRUrm3H;Y`w%}W2zr9`!M(nf!k5{dKv5)b?pGjVZ%OpYejClfZ?9{}@LDN%4jg2#a2 zWF=+LYCm~=PLu)P{-t_Jq%xZ~WkUjIa-Y!&-}Z(N4k z#Nh%wL(B_K5nEXp_ziD4$wpdQTljNr`gQzzmT&Cd@$rd=&{wOn#ANm%z|u|!StnjQ z41AiO*Kcps&Nc_-vf*Br{jR;v``py|#5qx0zTREk5ZWbZ+%ayNhj**qp_b2Y&%Q6! zos|otacRF_<72kk5RQ(_ltX^AA3S#CM>@V(X)0r&-`A%{<%jQ#2{t$d0o;mV$T_r3 z>ER^wq{=4Hf2s#XP)~~}iKCWij|Q*&F0AaBufI**XVm?uC#UNtq%uZKAeI>nq;=Am zyjs;!elgY>7D*0LQ9xJ9yVo}1?(CeJr?q#NPBKPasXuEU>ImK|b-^0=G3`#$4wSF) z6iJvLG((x~{0rr+eM?%<^VH!Ir}R-E{zME#7tPgP1x=#+GR5mpW}ZgQx-l018l}Zr zxGSSnM8jM;YCSVVdYA{D0p50D9Ov@U3T5Ul%66h2<;S0etCVqx{JIEo=d4WA zZ}z66?r?NFgT+E4hp3E?HzT8Dx^v0@my5W%T+>I%(w6!f$L*=BAkZpm54kU4T5B!EC5)JB1o$5Viewq?@Fbbjp_%Zdg-Dt=9NwZGb^24E+u}V zH-6FL_YeMA`1snfSdS;4J{sf!YuoYt_|o(JdTc?RFCK5@g|-BwuSN3{QpdsxAq?k+ z(HsB{>`ht8APRO*0PfvM?NO3w(q^VmRZ zk2zE>B)S_=Yi{o|1brl&A%K(5IhI&N!nTDGd(!O4LcSPRD`0M*^l2_KKR6QrkJ14@ z41H7bF>3AaB)NHYcD*7!EC7L3VT5h|StWa5#DL!4nD9dgM5H#X(r^&F1gp{u``41* zjqkDHt>bN>J<1|KRvvk&mwq`tL-D-kr^{5&m#6trJ>3mB&rbDw)HAyZ>Xt*b4NhfQ z@ZPSsZIa7M%^UmA(V3~4m$Trx^VlMtiA}L94-v*ZtZ^UD#ke09+Vi?Ju+Jx*8xmGU{6z8ml_e+57hWC4lleL>pHi;9U8Ob1Ynnv8{+e{2WASw z*sZx|fM;-oBo43;mg0hwgz3*LtUe|hcW#(^rvNvpT3Zs=@`G>CjTY{utJtvf+VUfY z_tU*Vdp-u)CmaB#N0vDS4WzRL5;z1RHz^}2L%cHG3)=*?XYlb6z=V}J4e(r4O$MN_ zY-V^ws@7cnibk`WwhnGbH5C)iEC*c?Qg8%m@%!eIS>gbYm*(pZ_7ic@7zJd(I+QBZ z;_9VVq*3JKD8#ekd(@o&+1ASR=m@-N?R~~tnfj5?R(NG)n2(8FQ&NuJavY&I^Go;2 zIk2m`^M|q@{cLUHxr-ZSKKb=;Q?vK1$B1qIr3c;2-*J<#oXc#~ort1?=C!Pv#};*9 z&C-`=KZdMBqX2bUFdNK2{GPC#4NwfF!M}}q_U~(Jb4baSNOypL48g86yL&#k_#vPG zYTs+#-95ZzS|E@ z0a?Uz^+|@LTh`FZk-q}V*F&mk6(nzM3}!)^35{o&*}QH+|CmUjz)Hz+=1;b7Q7km>s||EEbD zGsD03%sP;1K775DmNWaWot;J@Clw$ro;xZ3wKrIN#tckc@2;Q8UnkB;Jb<~rm;0et zI&a&Q!pPhbmP+e1!UpY8>lq&J{lac#86#e9-r}|N;8LF;yHvTUN1mmOD%AO?-QBmQ zn@@O$c`JEd1hMIPTL#Z?r4hoTPxJu*OYTUQbnVtheODTvBPSftf#~+W2CpJ)gEq zE{vY}0kl*2Pv6fQQTZb*LW77V{d55gO;6z6;k$T?Jjm+Wf#ZrPuIZ zM=^mblHc!{0D%XD@rh#K!OdIGK(`N?qNn0M(wSPU>k1oOH@T+jcJvQZ^2KZ}>04py zT3|K0rWPrm0GhtJAHCBl9{Y_?4=5ax z7tdoEinOo-G_5<{>poWoXE^F=lUfnXqos>-)xU01C56lzQ8H^eNegNx1kYrLhQ}O~ zw!lpbNNc-*DVFX~5kSeUrA@%|^z(4(b8)$%(V-XVktXB;*k4mM^IO+${rZ;^iheh? 
zLyBB+tN|OAPSkw1Wn60Q)HWlnV#>h^)0yK6`6K!F zEUNk3!NFPtP^A$zjqp)VJcX7O_j6OJg$^KXL=t}D_$^AGzQH%(WnKTzH&lb_qv_i_ zxc(PZgCOJggLW@gQPIl)u=xqCc4K{@TM3!b@9MXCQy2J;_Cqw?eQ*s3q%>aT+NjT0 z?B@y20aPN3s$65oL7KGiF%L6fAA18E5NoOhRkBm(dq2;hQ{V49FcJ`4#9J zHYhAv{_c)~;QlQMwks6Si^-Rkz$ZVF!a|=MqJpR@O$e#Jk@8!lK%5t_>=SPy)LUvw za9gkM63mZuc5Sd%&yK?^NFS{{9QZHg0~m>M$Pb)!=fnJ$Eux_S4*tI#dzBpZst4Z# zl=uH3TjRg7AO4Sqxb9zU4PDlMusmyVGcr=Y-YEV%mggz>oTUt(3P-7-W)_wrjP3>D zC$#?Y^D!<3S_IxO%4nYqOz@NCmn5-N=Psd%+IPfeA3>acq#^rEK!tLV2D>g5vJ|eg zJyVN+`i{aTN6#W zhXk+)oTDgBYD_H~=dQf9F2}s@UGbb)@|lc@Fg355j0w{a5bH>=G_&d8ntnl>LXi_5 z5#I$Nw`rp{rQuM-tQHpm7<)cT2IuI3J?_pltf z4m-ZOMrax(ejIXr44U*oe8|S|6ooR$kQ{3C4%Aah*N17#EE%F3d!!a?iwscKU@%WD z~k?|8CnEY`0C z(UDGF9MbrtpJNDL0$hx8!awp#(~}goB}~AXdS+#f<-NGOQXxW%t2Li%QuH#?q}@}> zQO=>dN-96q$bRJ+G;@fmBEladW1e$Qz)p&zRm=up%N9v(O8c@9H412ZFUE) zRe{xD`&$7{9@xcKEGJc{OBapCrgbATgjp{Fo&DqzwTcy@A4DluBdxbpX|L;m*vtPVjpK=jDf}k_-+mome15Wtbs|CSpWLSkvWmd*bMbynQ1tqPN#Ymk&6Ew*io92VHw@*lSvsP-MC&(#d_&F$Y z`}`5yySXQl{RXIeS&Nf*`WjdbYQx#TYG?+25LWTdgTNe!yNm4Z5HpzPy@T6mhBJT(#4~~0;FHhGTID*V}{WWB2cz-_2I|dyxe`A0Ws8yICWo zz{#F6)lxM(s+A<#i)plb?dg|7X!Tioxe_@cR49zBlf$Aa^$4v%*>=+ye;y{H0%Z4Q zvMBR7c46}7wBO&u6SJ12NJOgPtt2WQw^O)Lh+9lNwkDIrzVn zqOn>&ytgSVGVpyYU~w;{f;<+VlmQLa$#|ww616?rGG((feXs2$i1lbmEVv-$YX{5; zcJA5Em15GH-g>X@%$4rE`aQaFr{2<`jz4nNU4WcY0D9kd#5k_cnS+=zo(V*M>8P!Q zjArY{blv9U$h=PpLW%weBojIlHiOiNoH-^s5hM}}781$G>-4nWlqYI|XLle&Z16?3 zfzaUHes)GHQ=A8cc=11P`Qb%>qEf69&2Q_z)Z({7DK+sjtSo4Q$!O1f>%QSG2hE;U zuNZ71$IiQa-;sC>O%Hc!DJMY5fh8x5a!6AkY;s305CwbM;3}sbn4GtpCLx7MY1lI& zOVesg)5@9zc5GzV@J}y0~`qs<5#sk|ZCbaEECGbHKEe6livxyP(`5JE~QD=-h)2>beYm4YtamMJdxD2D`MpSOid~-^f3sIoH4nadKChElfZWtF6w<|EGRV{2 zK2YDGct+MlSgvse<`t?cB?zHevt^6S!_@D; zri!BEtUvs~007>J|FbUP?@QS~+`WIZm8{wcs-#_t2KbRz!Xw13+BnxC$KaG9G;1=JJk@_doxSoe)1|_na zAqpCN;q21H`&`!OL0;1>6&!Es(P_)fvCcr18)J0-(T+{Fv>DTg>g+gDbvx|O#PycQ zt%XEUZ6EC_OhSx3wL-!^$*qr^(-=Lo~4i@7FQi?m$2YT&n&+TC zq_GVH3kM=mExTkkX=)yV6uo4;hDx})buciK$R_iCI`SvgeyW+)U(wqR&Vqk%&aL~s z_1%?4ZW)!ce{>va*uNT1&gLHhJnKz*^o2u=pxFk1%gvF~WgLS*H!5|#5+%Fedn1UX z?R2ml@1eHo+|2yqr7sgs7al`8bn$SueFyhC$b3~>bV9aI8!Hz|Vz1L`Z@9KOzcP-~ zUjs6bu%SMy8o831)tohpf7q;$7&g=mIivCvPe9iX3#Oo;PD{jJjN-Z90^j#MZ5pX; z#iqqOHoD%ti2t$fvRS5F4zC(MvdG6LQMRtp@!_Tc;$asU`q+O#7bBUY_UIw%(NtlR zz+Ti!m=8v}UehOAal>1S^yYygZsMu>u>Z%mGC7#t<@$v0TOFxUgw9Vy`kJcuOA6dygTgw_` z%tQ6*WPz8zV*aS1lwfedz2DUxx()h@D3%7bV~f4vaLzq)45g#UA4peN=)J;9qa8j| zViksc+B)SMB>rixa{kh6uZon9PWoLgZ{J|ESMn4N5HFmA9-ITb_{ACwLH28BrYCpa z(V{evH;whKiOeMF^+Q+yNEWKs8wDtZ7>XJSk|c*121*bFq{^6U(F(Z3Uud7=!?Un` zj=ybmB9hPCm!|#nZNBWMarMAsJN8z%`Z&$wZ_V*JQ?11IsT5Ut^y9RSx($ms6;Stz zuKA9X7SS4XRk2J2S92Qa7)e~`G!3=Dl!_}E;E+}gNYh$}HM>}Z08TSAKqLw*XaGS$ zwKy1T8yv)8qkFm?Z#Wa-+4V8v0YhbnD8#d6a-6gs^hJVWCRzn7h)@n$y7>&otL^ti;+fZ z5XX(CaKkh#{h{TSNKKcqIf+tOK@t1j8V9h)_zvQy2F-bnlm77dYbx&&=IJY62%rwA zp#V|UkW$ZH^yK;qZ>z9K%UZ@f#e+eDloTSML1b%l{PeG0*hZM@%5h+$==eyPgEeV( z97+JP z>XGm6nzL0I##>gFG!|lYh zpn`rHx*#gXIP^$>-pi?}h>kei0tp5Ph*AxckdM&de~BOf_x)X6H8{MSea5&rcV)&%OoMK^3D@kykWG+Ktz+?ZH|)D6~KTx z^HQ!AGB;7>`MM3`6maanuaFp`+ho?cQ`KKB9zGcg6uAx@l(=kz? 
zC`bYw%jjoNnLw@@n()aOXQ)eh3DA*iwj8YbJ?dqcgD&;B$c4=o%Vd~R5X-8wMtN~# zUE&pUzpZxQC>@KG&oM6ZfGoscuBAM>)=VEXcD`C`%a4!Lzg`DteJm zBgw6C0#~;i0HupWOS==}*)HK`VhB{h8i25X%w#|WcqJ|seTKktaKGiiKqG_X{WTyK zDY=uDk~1Y1%kIZ0Xd&=srw4fmawWcPKsoWA(F9$L4RkpN*bIZtY0fqK#I3L$RC7^^ zL!U)YxFaZQuoi@&$_!<&`eG1t4<-lBi$00EU6Y zW1|T}vK9tC7yw`)Py_HiXuI?G4uTPz(>W&P0K;*ji8S`Wi`7<8lzG=CD79!i(y3D_ z$q#)XBc9np*yC)g4`kDs$4M*p@z-cg0{jZ?=9Da|)iIe>KU$M;y-~H17$7Vg@FHMC zI024o-_wZ+u6l1|ogm*+f6x0PQ^}l(1sVL3Z5-49Kw@~<0 zF&1y@0i3}{S;3!AQ=e??sL)&XPzYujv06e$Akg3#NOuhBhBx)orY$aqdGI(6o%4cD z%I&-D4cM}CvTL&E9r&swAQA!^_gLkqCU;mCd+Y;SnOxioBnQHK9}LxYXPm!0uw(atNiifN}_Q^y6*QGI8%+80O*Mf%|MNIS+fn-HDYYfdL z96h<92ZET*iYC9ldVcwTGrLXIvNbrA>i%MOlgSen&Grqy9DaT~5~O=46K49tV1_@h zZi4E+r)8ACr)5ZL10_lLG5fvy25XBVO6|a?!oR0wW=)Ri?L_8Tza0tMU=Q3yTz^f= za48(KemfFaU}pE3U(Wp`?L<;U{NDM7eG<>cLcW>ZksXZZbetksjHp2UtoWAR!JY;9DgYcQ~x8i`~R^~_#0;aZxRDLxtDIfDKa(#VXxY3 zpWCt{Hgxx>Yu(fKH;DItDGck!?Z;P~{GEE+cA(&_tVxuq{SQpOK2mv5WbB;pZSM|_ zpuR2|Xerc^PA&OpTU#BiOR0yTH@$Oa9KnT7T*4!9aZon&iA8hUm5GQeFZruEeLLpx zR2*0&qactIBsTk#oVQk;W&vHw1xPAw5JnWkQ|NZmMYh3YBb*LE>DP%}1X338wMH4O zy1;G0rA``{sf`%ueu()MTI~q&kTP{zH&dF~@k-cMUZ=8y#cK{~o8q=4q-<5d6l{Cn z3PV4(zbJT~ULG!fu5MchI`*P{+;%o#)gm*i7Gvx2A-(>5+UNY0wS@S0-avWF17YRV zOfFP(64`C8UB|_!AG%?qrzHg}w%# z-`MVxlvRsqnwGz@-O_(yyUV2Uw^Dr9E%7^*2H{*6zT-y78;Vz>%}!ZD8|Hf5xn!cS z7&GC5M{Up`i}HIHNnTRPrW$Ime5nBY$by5A!2NNTvn#@@GNqN{ZKXv>L@J{+*xR>&$|Kb8g0zTGTJcuE%oPofP>N zZGzlbqaJXbM{~5YL+_aIAx3b*z&2|bhh)cR{Gr0m;Z4A8b%h=OG|$uZV|vgZi-s2Z zE3e0jo_2iE31>1f{0cleW&Zf(Z+TMCMV@8-TUj{$AC-mw;osx`h4`=jP5l4=gYC{; z{FWA?yh8q&wZlt5iNGU|GBG6dHGXz*&xe%mjAbNIL`53x(~$CH6Wfqr4m%R5wey@} zPU1T1C2`K?h*@R*H*B}tw?3KIUDJB)pO5akoNCmHY)FJ2_e|+trZNGH3awRr&{$~e z0t?{Ot^StXc|d+00j)n?v?8^g^7I$;ANC*Sf9WvK^MCm7@i))>QfK`~J_puc_=>R3 zD=^3bLe{I&D;%?StD<(dxXqtHb(3txXm#-mOKOwWmwZz^qf!tINyr3E$ity+WyS(? 
z)$`BUg8U!9(#h74;+dxEL*cg4+k)yDqW5grX1^R=Zl~rOJ}M6G5jgSDm=a9#bq*TWiPx4`B@<#OxG} zB#$E{mcTaTz-t~YUEzy`))T#~UE;zMk@h#?py_R-%AkJIb_kj{^5&hF?zmq9XChD&B!AmN%idXR|ak`5SovvS;>J{#MmYaTOR=l*ZlPnc~bS`_&H#d(L%$E>#9 z_xG$dv3cLw(*$`17&|AkCd;bn2!0E*p>peoC*>(mKu>PLPwfd-PxTr#mLkZdbk2LB z{B>oIuLj!#A>|41v=O)2S0HzU*CV?bU^fETfPRO!@ z_-x@MJ4|@yMwq9;)JP_#hzMl)4cm;P=n0O<_7desA_%2wMVg5DzLNMFJ}_tuNHmTE z_`;oHu*w|zXs7vx`-BHAS*p83uD094NXG>0P8`)gHZ-~pQ;zQU$O;ZPVwg>_vh7d1 zCttU@0cf>f=ik4kt=9J-%x(7Z`ZlwFzdu+1{=Dt}p0(5IvkPXWFovx%v=!chGXVFP zY1n|`>iH21OL_t@51cay7tb%uWS(}wUaVvOTZV#6Q{a|8F1UpT9}HzZA%k{TG=pRj zHdQy9OvGkA_4}X`R!>#hw4)6S6z;)*^ZD#HC0(OF-I7t%f*VK?UHxw*x3d|bhdL@{ zVFN-8l(C|Vi#Xu39%z=3X09e-`DOEG>OBO;7G-WcA@Pi?uzcaklL}gO6)EDxD(_&U zG|0g2RFt?Vfoev5GEclAX3}Qp*5t0WzYbE+5Z-~jPW~hf(9ZyM>E4H55*tJxCU5)M zAd=`q%TT>}aJzCoPV?!xhv{x1w9}kMK(6xEgwfWA@tO=qm|$5DPXEX;z%t;DvRQd$ zv$Boxsn^m!E>9Ry|fVN2aEDS^27@bZ<67?0h7wyz(@`d$3~ z1KCd^h`@?)rOCiQ_rlB+PF2jAIH=x2)ojr;r>Ue}WeIS@;~gN#cVDB1F#+LxIT41` z0{EbLY8An+A9UV+)-uZ!W{k5|&Eg2XULw6nl3{jL%wi%>ymA&@?nS=3XS+mPasnZ3gZeHKE{saPi=4;sUSXLlBjY20JSEe{!y6WC*iCmj z86?eK3l{Acc4`(WC@ohOUwp)j3l68ZBY!9@5oT2f#FZSHg89M3G$>tysfYQ-1U+cp zuYiwx&hq!bgScB^hW_r52}3fPU_zzvv?7B@<(v?`Wv-F%eYrQ z(V+tpTfN5>8V_20W6T_AQ|@TfK48m`Mb|wPVvJl=E*j0RO#2s<58I2CGvZCaJ>iR$ z0#ir;%BO0if(dr08-&H8019oYjziS*l{d3(E(|b02RQH=hwgX~{u;c72=>v22qs)j z=bYsXA^W3-2+R71;W4nmccRfD0R$F5lfnXQ#Drc0YBB%-+ernuUW6<>ijK>F8xekh$i<5Oz8uj_~9u?jpa`nvL^_G9+oaC zAsh{$@l~3Z^<}FO;gUOv7UUg<^sB%^ANmXBCwa*ob`oDk*x@c6ewv?$8MqH%Cid76 zOzfug=3R+8TfxUs7qG*dpjn_cgz>!a0@|<6ON<{+*ysQCW!qxdikpb5vxog5ue zgj_MWe;`4>3VW_!o*jYneaA@+%YoA_vg0?tg8{VR#es4>(6kSR-=8ewqUwnC4aUWD zByj>Hcey`Y3HDXRT@L`Y!gt;l`93HDF{skqkP=8uIcNxDMs-c=WWgx*XSV`?Q%H2e zNQ|b!SX0E$p0TVT>uu>3^O}IJ%EUH$PJ$F!+jR_Hs@Q_`@MLwZ5ckM9xvb5{17BD) z^Fbyaxuh9D3D}Vs0sb-?pV)#ulMZ^xpwdXDP zCe3&^`8~AocOE^w(fJY}2c+Ku(W4NQAF)pY&xkrq>2k&61bflr1bWV+9&;8P5h`3r zac!PfX~&ieinv&}Ln1l+9cp=pvCo2DpR7I2Ycp+%%F1P|VROEDPVH_HJKBdb+=#c^ zEd$r11K7TWu(;(jUiq7cv7I=wtlF9`>0;D8ovP+(^Ps?Rm2blD(uw`!mBD_JD>!q! 
z=&7PEmwR`8$5Y`UW%zg+1)?+70mG#`0!zrBeO$8pQ>m_ZkH-Af+&TzCk*Wc?AULpVCdc?m4 z0m=Qn9?^qKbS=3?haeO`1x6S-02KtR5xOomKctu;`0x|}YgMROV{m$X{oH-fe`}q( zNNx&=L)*{>C^}mU@7QqT=-v7ogcLM?IF(paLvm+j8+XfMNXgy=N>U|Iq?REuE-M0& zebz6JQ?Vl5_uj8C%7X=R6I7f@cb+Gu!&IQ#`0os{E?^fu>H-Z$<GgqI~CJU}N{a6caZA~T8HQ5>s13l|!$0Nz4 z0=gv2;W%7UM;kB;+wcWM`t6q~PM!kq*B3i9Z0Iy}#WbCW9&Brq84`(pid3D5&p)1C zM^O=eFOnz2mO}Jm&kYzK?@#vprpKZN9e9;p> zv~G&V(d@B;^e)wc7*k$3%RvL;SMiAYMzRk+E70QX&i_gT689Qj^0k6T`q~n9Px{R9 zND`Brr`Kzlo@pp0I1Zu{4>XpR_nN-74cEWB zAbZOg--`~PZbb}4C&qT9A^nV?o}65mE+M2#QO!JqtMAbFW<-^Ye=Yy*%|2zm7vDVD z(vIPX+ll^_zK7LmET*6blcer{S`6#re;N$iIQa@_cAlH(Z@SO+IHCCSh=mt<5!UsY z?@%5vzsP@&IsI;Z(3FE|#cRm^80XHB4|}l_eR1N6IeZd$$4CYTeY9#d>atY>6UL@} z;C_sc2p-Y8G5^ye3H<^+Z3^c0&?sTvCvw2$_pNsCSeXmzYC)b=%!!8kcf{bG{FX95 zWRa!!=nu}K!|HIwp1WAbH)|!G2p$7r9;Usg=#CHnMsJ8-$yd9nIY$-N{9jQNA<(Y< zSH@I7(?-9>{D`2p<{cj|nW-ZJ@#&9UH9fh%MmRgpbUn^f;#*SYerqpoWBQD!eU^WH zm~+}HN+9z&3W-dK6FvpD=7e@A6mnGy`Z{x^El*VnE1BB*{9pKS zTa+lZ^la;VVl@oH@`&5$i{9S0AnCflUXS!HTshn3O63lW|*W- zv0nF1kSD;IC{@%*`EsPpC;JLe)Q&5z-)TM%Yi7iut z>xsLV^^mmoJXMyj{}8j)S^t%c*MllvIDtyKYII1&_eZK*q;CrL-Bj0?GCx!nb81}_=BClo_2i;Kma7vE{L+{_*fk})R^090NAM|g zQOEo&_Z|cM-Y&bc(~HE$^}d{OJJ$cZ;>e5vM~d6|Pc`d)VWlPNXFlQg#-iLCD=e$8 zn{C3Z71}14uVox1+|TZ&8~zq)EAmUFU%J8ZY5Be5II>RQ_Kf@dN#kHYkZmKQNOY1s zR8vK=Td7P@BPz$?cMu)Ar;r_x0g(U)To*t=cYumlM^-?G45s#bZkS||qQ+Rnr47R9 z<7Y6YjlPTq$famXTPOEI@IRqd;>y?76*l6>)ia8$YcE|On4-z=a`%j>6RQ`EJq8TX z>`9#J1M}=SN>}M3+IJ3YeUUK`$PZqVu&d|Yv(g~nZSD+$Iy^^g+)g?yG9?>wZDm!eBs zN{&sat_1L*>lRp`TwjT-fxngD)G&7l5A9nJ-1b0n2=IR7q=nquhM;0wyw)`F-ppVg zPtSv%qYfooL|_CVb>6Yv)f&!G$1}JWWH9HqRqcA0qLXNv2TWQp4MxR!gcWQE8#q6> z-o;u%Y>hZ_)Wd)pjDTO;4z)+8{hx93`Aac@&EunC*@ogvg(5S z2|I|$bdiQ*54|9O21g}(i#V&%)lU1k(GHlog&gX?>uHtmRHq&92FX*t#Z z`@>@WEwCY;!e3ixr!nmn|I$Bj*lUrQS`$<0c}3S}SI^<=Q(yk!-wAosBCEyjxex$w z`jXf6C-OYqXB>GmqUCQ(a|ccS- z?wg?d3GJ4>-MqlE4WQ>nh#{$qleL;vGO*V2qSBMlnY^k%?ws(j4m`(=cq}H0DcxMp zYS2Cd+xc=>iG4c2aY1m?(J0I6~bMw}zN# z>SQ#!!$KM&@G)KWZZK6g_?&)`= z34LVZjQiHGxR|K?Z;yg77wqY>lDL`vT!Or@9Q4sKjwqz=OvkSUmazWn#uh(sc0ZQg zCaDAVLk3cuvTL9%4yo=)ykAK(#4Fwf?({FPA6>hcm1NPJO!Q*&EO4fsLJB$>s{4}= zkugJ`A7k-jj+&J(8n;VbO4=W=9Ijnn7}0^ASa*@H7fes#LIg4w$rSV-o}@5hALnM? 
z+CNerPo5kt@AlqKjZhJOf#Ea46eW-#vK!*>#hrG^VGLVmdQ|ynp}$2O;&Ude-JNIK zL+4)EqbVwO{?Xf5Kxz8TGF{o;H&?g#!r$r<&qZIzV&8*yXvPfWjWjuCiSTiRHA0() z|094-RdUDjVa6i$gS*|PVAb1Bb0#g*6JZieEYso@vG(-T7cvVeWrxAQQ>XTuj-*83 zECm$+=M#BN^mJfw#u`-0M|tjhi36oFnG%zDbS-Vu{UvL$gO*f2*AcWev<9 z;_Ig8@9W0Gj|<2jOT1rPg%My8hUJ&%ui%p++C4FcB!Pr>+{Vc9*TFe8Rl*miuRRr$Lp-cKKs)zT@}?a zk&0q_2Ub*q%aXqQ?3jyCvUi6{RSgtrVhBADH(rr#AUIE;T$+p-^HUmT zN{hGx{;S2?NU!@>i>Dy>SBuB|5ttc7$l)n1E?#BU_^UIM zXMVs{vvh6fRY^1J@}ovqeDf?5Qb4vO`vJ0EyK-e*U?55H*Qwb2Cgmp-MKWdl3CP;3MKk(}HA@>v%RO&O`lDE;Hoea_ZJw`)l53}6Xm?CO~dD=N^(k4gF(Npu#F;)a{D$GV?g*iDC zUuP<=rHj*E*(6F)uzbq8qY6`NQQKn3V9!{|93d*6x9%H!7x7UrMY2xjy87MTeGDuG zS+ODU+QoWg@e*a2D{>`=lU>cTOjczs18PufPFmka@hLeL(OL;Q0eh}0s-wK^(7~;m z$8H-3tSEB51>Cp4Lm!Pc*VAi+_W4?IMwJzK3nd=e?Hx-PqBopnok3Rq%EGr0lG}fs z>HdYa8{jq9Mkx=Rcy_}4R#|I(5=tTH!Kk4FOP0O7fi1&V)v8?%J%bg<1UNoc89#wa z>U11eyN-JIK$u?$;6g|6HbD;MF?Fg;Ld2+KovN9)L{q9k-vo-gR;z<29GnH5jNQWu zGrdIJYXjbv)l8E4J9Xdhmyl*&jlXZ0qJWC)X7-0#%-zx}nqGch8GJn1FEcm38F{L2 z^j$m*{5SeeGYfVBT-DCtfbl}-VljD6{9l?gRI?Qca&qwH1pgrrG{D(`SJiI*@a zE2Wed^Js)m0y_L-TczeeHQ+i}Wch&s>rhFQy73^bTcO~zb;~;=104w4VOCDYBx4Uc z6lXB4Dp|_ift0M#6Axu0AU}EkR7Bi!&E!w*>Q7yO{)Pbk$FDD%Hd-h}DQ-Jt$KM|> z+udJ}$6sg#ktw#D;jHTD8cvU-OKLQibjHG$(zdofvW#<}u0B^OxP8Lh1o@LL$b*dh zhQ`QPLoF8tDHty7tk+@6q&KG<#v;XnWp@(Pho=d*iyJq846~NOgFh<9qZW_W=*d)5 zja_x1BI_-&?(!k;`lBsN214Y}kx}l9Ud&gIDMgFV^!UTpH%b{fqqKh}xDT<4_7=Q9 z>^o|ycO$&RRs~HuJkW!IF zNxh5k_?$WfR?H_d>9Q6cS+)|kNozEX9_?)HuY;4a;A=J^Yc|g8Z0AXG=D14fg>2Hc zTbyj#x)GB_C6i|6Yck0<2rW?yW~Gx#>CM*li?cH8NR6v!a_PMNK6`AL|2(lg(GaX_ z!Hm*~=~^_2=76KTA(;4m5>2c6lF9z4rOBp}$tT}6o3t5E3%COHcsmS>$ z)zB%|{=xu55zqOtT7M*c6 zDtSL^?Wj{IvMsYt4N%nC!^mYqtC!c@jE3uA7boISJwk|l2g@OO>FYKc+pJ@1h+EV{N0ggDJ9TPw`3J$DM}$XfJPSJZ zbz{y_meS=L&8l0G@h2dU6}lY0v>clDyszm1?nxMP6r9|AQzFk9;PAUJU2&^SxZ|!Q za>L((0RG*DE{Cl)7`A=az7RUC!TBqOK)U_?DCm#tiqI+4Y0d!F`$|pb6Hn>wv9!{k zx^^Ve3r}LQ&eHDOn@H;l^`7gS%)a{)c~QO;v#h(g2WdW?h@d&f62yz=VtU4PT74jo z{au!40_#hM?6Rg2n~t{fct0a_*|%8&xeyv(YV6b48a2J=Rjle4j;6XT+pOEbU4$1- z1IsT5w*dLJ+}JkPPjLYdI1IWLmsa_(516Z)(@#9MLS-IY`%S_aE}dCzx9Si3C(C;p zT&LCgoO#N@qz5cse!2#?e9`uUn5Hn*;K)A@FdP=Wb%$dHJAb(`5ZU*=GEBd)0p&^% zT!aYp*Wl!X#N?fv?WLFjo}3`{(CG~{N@-elRaGG}8x9sSg@1$Y+!TufHFG}VcE4Lz zQtuA;+Vx^f6jxMXI2Y{#t6+F5#%w0FpkX*7Mk#xM2DOjFs!HMVsHFOi#MER{x~Ekb zPipsBcgu#O$BxV-1ve1`SB^Neo)c@IQ=YjW;9M%_xg^JxaU~?{|?d3VY}vyM*0|iVi9dcYA0SJvG^Ah?=C7!gzI0mnJcX?PJ9S z9bR_e-FyblarlLv!#}zwK9CmPlpu>8rNZVAqVf`}NXe_zLoX$B6~-ptd{y9bZBB>kFcwAX#U8gFxbD5AF{3DB z%&y=T#L+_DyZrUhRGMck0wqEXzT8R+>a>%uxS2h?wzaTt!5n+PF80j^iV&f`A^?pr z-av7iW)+#c@6T8qNES_9=0lJ;KXzVU%o7s}>tE%sOO9R$FH%8&0<-*%ujAvDG8tb1 z=yVQFV0ghu89MToB2Rgk@&q+XtC(b(O*c+~h=VmQspKlCnyQ2NRuw4uSR>^$f#bv#;wCh8W_tc3rKO77|Tih0t5b*2ujgWhvfu#H9lIo_4&SZaI1CX z<5U!8E$N}~^^u*Do`CAcmY%T8JkG}`+o{%v=VrmhEuQtb51EB2D|dTe#{IzfaurwH zM8(Kqn&>dpRF3;r;b>UKjiM&)+8!rOX0 zm_S~w6)?gwl6&R~pOFMtIYMP+Kg^w(hA)(;db_|NYkCBZc$mcPG%=rcp&4;2k;Qp* zGVyMa*U@=>PaJ62xCwm~e9qHf>ux7E0@2MAXVH$kMWxXBOirTULA2;RT&h~&z6zEo zb`hqeNhVn>_OSO7FLn{4WXUI)E%vbg9-n(LCv1C-^wiOpsB(~``2+XPQAb~b>K{ky z=mYrP`Pb3if;uZl)}dvKyJ1Q{neXlB1XU>AHpXKeM8~-Nk}f1jNfkt+l-2=CbmC4) zDW!=T$rQ-0fikjy=KneoCmic{5A}dDO_xXj%B}9|G01Yl-lNHyD zp;EbRSU6rLukr*bSE-4IEg%wuJ|Nd9P$eab;j$=@=@tHUWU)>OM(IJbA5E0$h-)t= z=;;NtZ1|IUQY!Aryo$*U8R#A%&%qTo=5rBsI%5&dD^IM932qV}Xiib+P7i-CwQ)aY z#*%)|!z4QY!+F@hxKaK0d052%ALe1y>$2JZ;XLebO8Z}}zv~3wbdfjs&?_2e9qe-J zMrTVFbIbgqbr*l}$f4f=?W5iJ7+g1X6=R&{b6pthms#z{uJ)}RTM%#-7SxK2At!D? 
zfJs8AQdJJm4=|&#O#l_V3>E<=)G5NnE70zUSj}3tP14N=s8x<`(O_UdJg(bS$nsIV zxfmPm?YE|EcPXfmnd^m$MG@RzV1RQ@s z)-0wTe{#+o$-FS&#xWI#*?X#>lSdgSP#O-F6b_85icb?= zn=7U0$GQrN8S45DCgl@%ElD9`^As~~6qziv{~?*V1N+^HSNXrR{;JAUs{L2%@8N&7 z{>~`>qxJXcs;ygF1^$0){eAtf)?e*INnsb!hcJc@g)@O>Tx@`!Mi{l?_ zF$#T?=oR>9V8gOTqLWsJbKV)ls+X;H?qT(<_Oo7Bhz_sZ|4@~H=h)Q5cqCugy^?S##9y$RpOOg78ZaV!5XKC*37 z*FO`>Azizg5)Sp}w|J%jbTcQUVH6#|`B=Ym{VKHLgpge1a(iS-JtpxzxOiP6)NiWU zL__G{-yDiX3(4`?3x#Ugre!~k%J>Hp?Mza1P9(ksMn)PqXq;FS#2Q!}_0|*m;iJ1m zhlMq3#uA04fb*S$Y`rxJo0|0T97|0~2xKQbN&_%tSjB}%Zok)Gh){~{2P*lk{6Utv z$W?FsElAVUUO+RS{7f9oRaqO_mX#_iRze6tr~4XFb1D}n&`%j=vZP$kr$joJ65FV1 z*_SNxhSprNX@{_CT``2LT$4d$c5~AvYV$@|0)wO&h$e>LB5WlU6?FFc$J1msF@FiB z^aI?}ZZbGp-Omxlv&B16rDbmtNFMQR5#S~x2Vy61f;;S6-&QnUs9PSUA&WlViuF+o zmpja4F5(2gnHZ%&iO3pvz(_1-O0{t0wywTeFN8ciSmWMn$~OcFHm_m|frGdDWa_lf zTj)F>;Zs_jGbB;iqxbVG8UQaxqmvCC zjKLxC?Cm5)*pOM`H$T(dB+nl~%tTc+w44DF)HMy7N%|~iYNu%>RK1s}M|}E$l_|B$ zm~!@_OW1*wW|GisXyAaTmTXe5)^7eN1$8FQ&<_%Nqr@~Z-i0{8egL#d@A~zm8#L#S z{Q_9Cv(i~Z4x)I6mgpk6_;@pZcyNr2a`Ci5e+GW0B@BC%*~{%e*}Kun1-tg<#LV&9 z1op6-)8*zBNh55-`2<+6><_75@D}9H5uawFl&|P+2V8JF<=aLMwGJeLkb>0YHnK-i zTol}l74Pu2bHNxR9wJZ!iGGy_qy!SEb0B1+Y!_-uCJCEDRX-vqz`)RGQ z2f(G;(*cmRj~lVy*JGhIm0Urc zBVYX;tJ1l+-HH6`Q;QD!TCAr}+fb*s`RVHO<@KZ8XIbiZE79dg%hKwRhN)=PVUval zhnLuF{bnTG>#VXj?AiseD5Y$T4j&f4DlbKEkuF$gRY=j|Bh)a33Tj~9B`Z&x3#9Nx zNYaqU=1*iONw?S4LE#WK)FC##A&p5tdwMkPA_!ns@fFcXFTDTj1Y2xI|iSVv_@ z2IUOI>|caHisoXmrQgJ8-OEOn1?5GL)(L|G*JS!QVD7;KZpGZ9A%Rm5P}9^4V#@xc zk>TPku4Kktl*1oVdaM~Uuh}xni_{RN0H};<3JS}MZ{b-f2aB#shU&1%`^WpQ=04B! z-bBK4HA=-Sbmd?8C5vhpQtv6DX4O1+n`fZP`JP;3-I6O_f$ZYU(!5HB-IjHDR%BFNXi&G#6m`430x{~c-lFCslfjK6Gy{+ViPzEiEizq1We*p1B#Qen#> zZ#xjgq4vzsy~O`nt9Q>{D29e+!0zl4!j>*~3L8UMUlu!pCKC5<1k)a8VwA}^*mk0r zCV#31mN2VrOHgn5Ur1{==)(h-?^Ij#7X}bqbb3_VG~AxJZ-8XrO7U8$R1FO2A2OTG zvwumoRJ7krYlCm5wfpnGq*@i4Tj~EW)&7HOt@hm($BN?poob6eMtuk3uh17L;vG-I zoy6%~)@{P`sPj<7V-x8x!R}!n9j5DeWU`Dx5{L?8-(||~5oYt~ z9^g!tuu6s7&HNGq%<~@!3_D@}b~=8UCEy=2h|{A5#RZqN@IM~b94;fI*q zwqR?#Ls^oM!67;fFIf8IOx4oNc}KBDV>p;psViEQD&ShWJZ6bu4o}+kK$1x3Y+_q_ zYUhGQUR48rSE{!8K)l_I6)Z4`Egan>Nq;!eGNKCOVq{Im71eHXk-7F@zHpg?YTw$7 z0ZtEBo8$D%!)%an!8ys9PQ3PIEa|EXS3&<8e~bPChZ?PQi0Osdc)fGO?L-)HJA0B| zA!Y@RW8qk=Hf5@D+97j^mvg{w5o!JUg-kQN-@|7^-et{pCu~p7BGzNxSglSs7Y3Y- zTZpIx_FMcIPU{%^OyWwdcmh1V@g=eZ_sG zC8O2|#!0I&=BEM}NlzDH+>ps`{i&MmbLV`n)cIf$w(zWE&d1V{ejy;u8FqFHwzsKq z)D%5F&rYyY`3cVbt(w_+jIu(l=`YINBrz?is_IZ0;q%g-zG>5OL>H%ME2Kl>H0eVuv$72T4c&XZ8E8wfpzK5yud{s1|aed zg2b0ylUY=ZEJpWTU)(w#s)fcP1|t+5V=vjCHz7Av=lR_<0uWL zU=D1M7Jc!E#9(7{m>JAyA2iv(P8fsw0}IWjel^`>e)LVl`+1$X;P+t?>K1VLwJ{jj#Jp;h7sn9i0AdMzJ0iR<*o7>Ye8{5^V4ilYI3S`W_j>m70i?zASeJ35D)<3O7$Y&lbHYd{SR8s|HhjA zZ=Kfwx#Fe)u_6Yb{eTq%R>W&0;XI*RSvCCsTjw<@9EFCN6L5H*B=5j%-`mT}BOnU2 zNQ^_ckv?g#p^t-WA!gy$H40-nRM;+8?_a+ zG8vEidJij++2m$;f}eJnyh3xq3fkT$ZW!m1s}e~`3B0p{%O-^zlho!sh-_)Bd1wRo zXk9ISn$mradi1hr8>#-vzmnRtVXF5K{DS@K_I&y0L;Q#3puhQ+}dbKd<<& z`En0P>w+kMHlu*H6^Oe7q<<%v4gR_C&H0oF60Rz2jEfi{IMb~TH>NzD?O&kaxZgT( z_1<^JfNveR#gApw_ZwG_THhW{YRieF0@Mw8XqzhC&ZWXbmTw(+a*KbZLz~b@eDXhZ z;Mpu=5a;hp74lZs3E-$W(sk!h-#YN~zjWYb<`}tGsH+f^+CU76hR0#+$>v-+koIsH zJx9kPx(K8VA!-a$8?^q>^xB|oBFQ{ML3#t^2xjwLnbWl#@@w$J>z>&XY;2cYb@L$5|{Y%j} zx2yPa#1WGCbJLh>_19)@$yiPo!&%(doE??k%!$EC=e$@SIagBLpKF_#J*QAp#h@Ta z&|kCM4Qh%AFeOhX#B$*=(2ju(c*!jrtRjKU$6!sU_uGGHx#eqSD}hNcN4F%_Y`$gS zZ@YnPSV%bm1#JtF_vaWd^-Y6zRuQ#Lf8gI6*cLX1=^FB%7V2~xWS=_@5WNS}x{Ncx zi}Yco#0SG)9gh@wf_fuct#8nkx9hBk@ZS6UoPs&$f{phHFDdLs^||>&AO+rJB)CRX z)EQ*+4(kU!({M+*sj7b5RixO&qr1iNpd+TNsXfJgtH%2hp0to&F6%GUVQG7*m3L4Q zY+ojP<3VA6&90!7CxjOlS-Wkd9j>J9z->HM-Uc1K$qZOG^Kz&EL!|xy7tO=?VIcO5 
z{u2I+_iXVu`n=&qxB=&6@_2zZ?{Dg=PuX^P4DH~&Q|%0#uSaDAaSh`w4rEiF3revJ zKfMR>H7HshKMs(0TSGXuOTC1(Ta$gIes?Si+4FQKc802_=`WZ^3KNPwkLfv_q*Vn2ngsKh1S! zKBolUEF*32K+kNnC-!UvMJL|b>rFgJNvx8FntV4(}F>A|NlUCoxGR6^76eU9AJqx1ikvME8Y7Q z5?=o&ke&PAAiLqeKz0?>;nK@IVgCTx&5DhP)&n|gAbp154$qo--Wn;AGH6Ee2RbYYt`26zl%&C{-40Xbke4jaf#$%b3!tn;@sjFYEc`` zuMmE*BMcA(0AdjzuX_R#;Y3`w##+WjbR@Lx5j+v~&`8zW)DA!r)J8*pR-o7Y!0Cml zNBsGV&3QQ7&s*}wlK^U@jUnQAlqz^0ydf2`hty577r9$c z3Be^@h6iOsxRys94P1fz+@97K>D16Y&!0ZJgV|*Y4#!bEjD(QNnTsB0-mj}o&i9%) zl-oe*2vVDB3{w|jR94TYVme%*0>NEm)o=?2N%&P2IYk~~rgT(*c=Sd+3Yxs;baqok ze}4F+f*}tTWC24G@s9W-9)=cV0-N}pcf}g{R5jnuCWkekVBG`b_#6#-im|P)kPII= zeX>&f1wmvxRl{v|iIQddy%Bv)1EVF1bFM*DF+R2VU>V48KJTz~N0ArN`Bjkvkw9`t zhwjwfw@-^~k~s(mm}zI3u`&4S-l~7QuE8wbwjm>8qodJAkK@rXTIJ~EH$#UKK-$-H91u7aHn@OFdKj=xYkC|`S36TV=x0kNEitEPbvP-*bI8a<(;nu zyWAD-@=V+ER7j#lC1nuJoDJTMz=J;^(`QRU;&K9YTMG~870(3`E>V-EZ6P8*b4u@1 z+vv;kr`75N2&kiK>L~jt*36jYud)3%L=(1HrQP9k#fx*ps7;pBh@L5w!tx0)?z@g8 znz08ngQ@g4&fuJmg-7#rLaF)5C1I%ei*Z;oevaAwfbr<1RAeR1iqYl&%wG=on{eo zkeyFE7|g*Rlgc31R&J2*fKl=IGz%M%~2f8dCv4bBzd{p}LcZ z2&3~6gdA=RM1^pR(*a5ri_q)1$@6(guW$hQvp`HqD~+5}V}<_mXo%S0iR!t8g1k4W zb+r)E)hQFLPna=KH|X}4DtR~0{KtcEW_MS+ppce;x3b2UJvk~`v2&=U-lg6a!mNnX zYI0g@Gidx-HK6!PWjhRYBC@Vz3(EuN7tmiJk7KAWvKj;cKok9c3VHv-IQ3s>r~m4q zY)Mv6KL72BAff-ahq69ZSlYn9^F%1jY?M7Tv8 z8@=MsOp)@@FYbbm&Pr&XLOB!(3=gx-l!QC7d8(!h?f8;%d0lAd`|m(x^{jD{k~uU7py`%EJH|h z4#(PW)9C2#Jdh=VM$`gM&Zruc2d~}SsPsBi2frX1DDXU_w$m!fLGz02if1g*Ar3@! zyHKbk6$w2V$^3-@7Z#(kg+7=sL@yKRu9H{X#3I3$fJKkVE~vJikT8Rd{{yRxlpSOX zmRB{9mykD-<;K;2D%Fby%z1HSWR|>$-|Bry38HK*Ka=*2R9462Nf#!<^DA8bp~nu0 z8F$Oq48{?L{pTNz^<%=w@q!kt1!FPuZh*^^oYe$72i;OvN#>ONOrAt|AG!Jn<@m@i zsm~U%hB|h53G1%EN%~2MDi65dC@8M)LCU{Rx5)pzw~qP-zl;?f-F_MWeRnTXwk_7_ zF$S~st_U2fDeZn8!`n%0tT@s;&sQ`HHXW!DNQx-@4ytp>CJJJuqB&^#fl)C1himv# z{m3`D*@qJ_D~(la6eik61c6?iYV43Upu34Q{ihyvTp|RiIvD&!&-!9Bsrrfp_ey{s zUnJ<1P&(+So~2cH&&mSLS!k;~Pl9!&d>ir7<|%Gb`gx`snJqI`VGFTVUxJQqo~m@e zWt04a;&1`%k90@_TwilH8-kpQ(H%)n@jW|@A7=5>3MkWeK$AbtFxN=rb))bl>M!!b|)>baK|!E4GGl@Kd667zrwIyJ`k>$ z+GYubxzJ14YS{16L~bd&2n3iCX-V335a`g{v^VKsw-Xk@dU>C$h@ffkh@X}xBWj32 zfjIo$WH`k?HTC#|8|dhP!<=kuCr-wz5O@Mo_)>+7w@HNWSC(Of$z9_LQe4dv)s;8E&SS=bBz*hDXMLRlS)i zmERDW7qjQ(8TDk@;lt7Ly|L>1-r!w5cL$7|{ykA{)R(^>_pgmucmIzN@+&La1E5ejr}nZ21sC+BOK^-~U*+NGUm?lfA6bEc zsaS*y#lm|OO~)Cdu0xQB3GGk*9N0vtsN4<#n%hng!$ifNNv%G*cs9XR`OJG92L1^X zn@ERS(%sTyb`)VmTEwuKa5c6pB5CbQZ8#oTeU=IYPE-@sd$&%rt@P6e$*4@N$fh?R0>B(h=RM#Aipz+npZUMsbK!g5_WzDcB zStbdsNeBDM`$aiwP^8Iimzd}fF>Ih`;o}b0=A14Q!S#+lb%@n*3%EbKD7*OM6dU48 z{)i2fT3rd4(P@2KBHPrm_1?nkL0bDw*)0WX1H}i=|R7< zk}H|t0CdC_q{~-OVHj2E@=+lb#VApFP2Z6f+yj%$8<+_mEearhkhcUk0xT`PN|99K6-#7dV)H7eA4uf(vS+51lh(7c*e5wb!Z!Kx*A`+{3jz z7C?5gs3Z&1j5nm=_hjEP=Rk;C_z1DJ=qZJThjlgUCkDn-x81|G-C1|l+w+SG`GL3g z+%o{qikIeUv|k%G%xdk9zH&rb|b<#(ZL@ru6oSy<$g`@jChB3t1$ zyMqU}VPb>J`*|Lq_ny_7PY~|ejkWSy%aw;26+qmIus4S}Zr{0*L$vQxK^?mzmFid3l`qlnxw@ zKh;XkgCExKlNRdTm@HU}K=NORjVsbd7We!{Rl-`^1L|Jye7W2;1uiilu1Gs?mzgxI`O-tR;0B_?8d)3#6{kR)dwX3b>n*W3QYQ+a;Yq$Go{_n=6Yn}QM zzV8U^{XdQX@c;P%{sUI{SDyRV&+uD4HG4LP)=VJsDh2+fb$R-v3!yWHPz zH3oE6Kxv$hYE%I}?))3Obj_Xmzh5bx4W^#U@449Xpy7x>!_Hd)pJ?2XaTL^m838qN#+i=rxKxXsr?+p{5OgYIglsZ{bBw6L_t z_#EfcXpraSD&xUf$x2g8yw5@S%TgdQZkHm@&KE|L^+2rNGoRGA40#ZV@E%GZl= zEux>Fv0x@_Lc;IfAgakfTp$O-;Adc59*TniyN?Tq9UPswQjpilh$IXvtZ8n&qGjoDzPIk z`cU=Em^rxf#^A{Kgm2T+BuNoz#w1a^E*8&QrbM5=m$P)rF^n9(1E>=0{}e!>zn|Yf zp55QiDr$NXs*fH9{F>|a{|C1FUI%v*PRk)B3A8~7e_@#4dCTqw2J94pobP}8(qxF|(}WJFP*p~Sv8 z2L)mc@gVp=w?@~qV~OKtuCb>-U~je6FWugmbJWu+=79`~sq{XOuJPC5`wOlQ7B}-G z`D8pv>!|a4NeBW51ck;XUD`$%uZxb#>3*aCHCP3Z;)<>Nz7U%4%l-Gc0`;G-s+F;k 
zxjvn_mA;c3C?B zzz(mR_$Mp?KL8kjb?8=CkPXAB-{o$%b*Hj?BDj{$$NR65sNUucw_>B6Xneo6S1peh z>QU@%u|I%HiUb+0cG><~6Eud)kN|oB0Efnnon)*tfH!Md$0=MT^I5Q2vY5S-^SwQK z7wdXj=@11V8lAuf@Wm9A?GwOU$Ct;0->A9+A!Vb=%faf( z1VNl91ZbE8dTe{mcLBgY8|UG=aYA5D zS-%~+`N-`U2HrUg_+@(bgVyoAD{AyAlBS8sF>^ysEZKxqW8wB|E~iZ!I9>l#Uy=<-;5GLgwV0dUvQR#bm#@g#v(vmLd8FwdkvZ73D;RS6zy&crh*aQbs!YCE*5=Plfj~E$K z_(ST3r_ma4>)2JM*m)jX$#2JgA;)C6E^Wft~aTgGL(#5pN|N zLH~o&p@|E4+U73I9V{g3?yjJcaJ&6s*r?M?KDT}#Sq^$Ssc=qCsUoy&59fu_)&r)) z4RFgr^w)%Qm!zfXkwx06P-kmq7M+Ra9u9sFOwg|ARk8W|lroK@uPYrb_S#n$guBFeM^(Q2`wz`jVu$=}ozhQnbS`mwZ2uPXA zn;Xw>Gx^m-AaQS1r|>)1<*Is4?>g3P%4)fsQ|`O{UpL4L>7cz|sPz$OG#_S(y%=&< z8IS$M9FlyeCh5xv)dd%WT7CkNBfEeQHWY@}OP!pd%w%sa5p_gKl%Y&FeXi+BFYV3t zBxl8K{E8*u@geUxfEg6zN9GeKUfV7<0Y!>M42rK9(t>u|ufB?@efV~<>?7^mJe|#b zp60jhC6`B2BI0>^X;2QnoVO?a*5c5X^Lqx-35Mrk?!Pi3(I)FFH~pB>Q~9V~g~7xq z+8Rf_lIT(bj?wushxYN1*YHQ~KH^wYIP!k`#vu7F*4dByFP*b8Sf;2E#=VT?qdsd4 z?fU&u0xFt`@<2E$J`ih2)l?Wrkmb8Rt?+j39M1pQ=w-7N-Yh7pD0@~x-#9xVslUtR zY(36y$GK;_fpx^l{mRadLaK@qRl~ilqf+V9SZ-H?&9m#TiLpJ{hgW!N=$uoE^-G29 zQGQ#5AIP~G{RjqXbn#vkYP<4J-F^=x&RL!4aQg8LX8RR(_n#AQoAaM+JdJmk-OVa@ z?Ry4f+%k*FcBdM5zuHkSYyli~A1_B$?9NWkRusWy7)gt)^_!{j$`opIX&Ya6qh_Bf zE!>qe_g^N6Vxo!HAAEw}cvtnBB9yK66vheZg#2UR=c!1aw&CB2?%Unve=nrQ7aLK| z?;JtWG#K2$YsFqjV-zjr3)XXQ;A!K>I`55Iow)m*`)%fJR>| z=HLqFM9X)@uy&Xkk$Z<4v4wm7h7`Vp^Dv;BlC~x)+f?x0fPA@maorq}ep*w`SR)X0 z&2F^yIBihY-<<=MQSdbjQZ9~{t*T;^7wT3mxG3&VB?+VDIFB?I!ombMq%-Q>oX}oi zI^tk*%OgMrvBZRo%Wf^(q==PXUiPVTPfTw`FMl>#L!v4ZE4jGr(q*zBWO)bni#Wz! zR=iv)BglwZUdFzQmin`9aZIq3FC>=8aNd2o|B78(pcm@=s^PP9fZLV-6!UWRX`M_Y z%hr?bZBZY}fJ8k-f-RB$T8pO!5Phbs&(W*CFFY;molhYWDu~Ey5={xk`QHXS-v^z zuoy~IhQ`JH^xo}8Z(E zeIJRPBA4t0ou3Y30QuAi@GQt#x}?r+{_oKffO^4t3FRJh`#-&Z=?GH2_s8)O3r3?Krar zO$k*cp-)uT#^?Esua3$)4bqJe;sI@}fSE#gvNKt2h*lhBrS|V(9D?_N3h(_5dE&>0 zX=X>J;S#V$l})fCnWS;T_VRTE?2swflly=^A zmPaO-G|TQLxP3uen~<~ywoZ>^WHoK<)T82UL~V!%_MUoRvYh#n9n3xI_V#e0O)hB~ z6;9jA_B%-v(?4J?W+CUux^Iudwr!@mToX8N2&M}=TSTzp31vD_^HT{}g|{)Ypf=ol z(a|;2GPlR1ZQAYPitt+E`yS%>oY@xpJThpphhQvXle=s|Er)aNPyN?g!bN?f$cCIi z3O`(0Jcc6T)bF#HyM5%T&r(claEog26=Gn{VkfzezJ%p!o5DHw`1rJ8?!(+K$_@+a z^J9_X`+k`gvi*tRyqwAxaU@q58XWQ+0RmOnEazjV z+E6brAorG(eP;G~8{K;%1xDPjAl@tPFc<=(KA5PKcRG?d8u0vTNXt>TGRft@0iXA| z#F`I|@JFo!2lLApyoGYx;V&3-iClO*4rX8=hQ4EJO+<*Dh3;oFCAoxp0^iVSI530C zdWNEDle^#xs-v9Y5qfn!B@cBkNM8JQ3JpXx2|S;8n#i0>4ut@~C%>=pg{_2a%du&R zfcX=8Fa$;OprQ%O1TsIhCr)N#%;(dQauv1#gF!ZQ;_$G#{5Ne%5^HyZI;*rwEQW`21gBvP>h_nq6+&3s*mso6JEE>7Rv>9D`tXhIy0~O07^_eA&+))HT{;9LOrh3ANhXp6FntOeN80 zPN>8p^GRavihDU5G1Qi6p2h1SF-n95Rkjz<(Oi@+*7Id|qMh~`bX+#}(TjD&pd2;^ zYWM}JuqMusyCjiucpJ~i^ad-RhvTCg6|l%ZNk0V(1y2q(ylR)CNfhclE!b_Y(VMfT znfqFv_dm%~xKN9{dlFvpw=kO6P?9K5$w*W&DSZ8%0RDh!!i|ZZ9WT#^W&I&!F zG)y0w7GZ`m}sE7$N?2&cR-kskP8V4{8MHRnx_; z`fOeA`tihey)!7dP{=BCZi$yoRY+236j{#FVdP+GuT1N5z~uF;)fB)JQsA@DBxI!n z&8%bA#L+&L`137q?(ZYcN;j1gJ3KD8hnO*kw74|pZQT|NOEy|{$UWzqp6FJ#{8aGTGXOlj(VwEs(>mso!Au@Hj+ z5W~mCJBMDZC2D>9x-MDNy4HA{T7)uEw1C~QM@syna&vYDTLB_(tSVeU88C0z2%n2D zCsD0U+Kx$@yJx?)q{U;#v+s1=<>yx~lr3J1x#c(9U{Y?!?dHVEF=jgYAAsGNGV-JcW+EPpQXSk)4 znRa|aI$Isn4x3`C-)Xg^WP}B2?gr3>-7(d<_xKsDhoJWB3AWiv^mB%8r!u{p+8T27 zH?2wS;s3Yjp&SYcmaYl|y#C=l<>f~A6P1eFo(`tugo}_M%42rHRS}yW=a+jiH4FAs{*A`v zMjBj?U*?SNGhS$!4-vDRuj_60K8@o33NlVKx9C5mbrP;?Th)>2nneUn45Odo1E^zk z$=@7W*{HsaRV<<&v>?ey#nD%*7b{BB7Z?h72F-9wC6R%HAVd1-=+D*F2$?My<;8_= zYp0DF!CqJGFYt__278g8FuBNjH_(~@CxyD+39(8fc|UAL&)dv=XBkY;V|0F2^lh5L z%Yh3@?oG=s)mKzOg`)*fA@&qi$rFj2v>4cWc>BTK;UswCV0)_jmV zg>AOb>;&YeBP#_Aa_M)=zTsa7^D`wTk{DsXREip~tQ~7*9oP5-{n#z&{2`&UBh#t( zrCV9mq9DBJwX1zIwr3*f5B~EL&)4v0PmC>$?937|1A}0$w5H%#_mGA;=Qi9(>~v$r 
zKEQ(VLC`^}$|-WdsX<@3w56FeWim5y#7cdssLFq~V%*OCFA`~~lqx^NcT+OR8Xxhp zBkq4E-$#X&1g#RZWLinFS30$5wS39dWzkIPF8DDOt!>NL>-BQ>%CbR<26|T3BI4=R z#CA)^W&-92fui-+5$fD%#Z6u3B`I>|E=gH^r0dx@&W_uK8@aNDF2$PQ%IY4oi>EJ< z2be4K+FgL0D=Y8<1+_A_rW9Jor~zL=e<52cWh8**icJCR1$3~!><@Q(r7gshi8>e@ zEM~#B%xul4x^dE`%#?cJtslwv`AqPE8~}*lq{)xha7{UBiJjI|2^8rDg8lCyH<7=VIJVozN!w!)YmfY%J_ul5!F6$c+FbHUHaIs> zl@hArO~cmRuoddnr+SHb(J#_n+*<7$=&#C{^GEys>uN1bcmd{4TO0Jz3|{khGMxOH z=-6&Xident^M;WBS<=X6kj*U|ML*OAPgq%WS)+zx87`_pm%gyk2jM&@-|{;;!-6v2 zu42N%`L#1b<}uj>u}W9q1}>}Z$(HW7$al@7=hfzP**}lA67a%z zd-9YQwUeLYJHIr9KF z_g~gIm29#)!Ot4u*m0;Dv3BBrsF*`$P07}bf68@BU(33Eyb&A|SZi40 zx%J$|%R-cZs$@;EKLt=5|01cdCGGO^^BltVxEdT2n&YQypy6%3%sMq7+Eo`BW@C^1 zkdVCD>j%>FAmAll;#F29*3iYVYQl1~#<`meHo_d zmVpU6-#>HTZe(*^JrhO(%R;mr_ ziqxy7FczK?E?4&#k)(;&Z?ZIXSai5w6oAreUr<#AQ(O3ba-hC7o&ZC zR(xk$`n`MQ`Xy!*wiA>sHcACH>kiyAYG7cHY6fO<+8y$X#S15U>uUY-f@?fOj+rkE~jV7d99OtTT?|1S0@S*Z)tH8=r$G= zmy!Z%DiY*V!f%F-^F!5Nzh-_<{Lu|EIbT2n^bwcyJ z8nvQO6^hTp7=lQNx>Q2IHN=p zOHf;vSm*PQm9Q5%GI^k~RdN0Bhx0QB{PJfcDJp{;*bw?j`s-%iI@69k)?qUN8!w-P z@ywD5>-7~PppmW@|J6`av$qw0UFqj_8RZR3q{pB3;(?Fh(5u%i78LN02^GqLx(-S< zF!n3+>g&64D5Ha}QEGdYBK;W=^0Sj%YcO-4X}1Rz`fZ9T+l$P|q9%zC`?AWTgOhkD z*5E$C%0Q|VW>OME0_Q)TNFzLVV(!wCWeM6~ak&DG%HxwDhw@lFp9d#UK*rFhurEj0 zU$2{+)$UH7&ws=(I#MbSH4|;{!SZ*%l4vFz9vB+x)?>dd{{EX~{5~E%o6%%Nu$Zbi z;U?plC`cWeJXN2?znn%acvr_(~;1P z;P~z_PnKgI`;WaR&CDfeHZUvfZf4UQx%PSBS>Z23Nf@%e>>&rvs?UW;m$u>524$_E z9#)qyd7m`7?IS;9D*4eag$xbrerEKt`x=f~e)Zza{4M8f#0K~9 zEihMNWhY29;uL+duFVP-pZjn1gvx}~bS}fRz$8Sx2?;Nfm1u)|!tbJn(2?t!Gc}h} z<>CJeMgqD0Hs}py(dO$w7AktVD8&dz1@`&K6ZP@ZS3z{@l9pn1{Abhjz|Zd_>e!SK}Z`RaNahS6o7GYn%eP!?_4O{T1wqBV(Fq}(KA3~K!^Z<&MRXDW-3 zQcYbvP`?NnhG7^+Pq45|!x&&(S>7$W(*dlMh|^e@jAo5R*T9`6@NO&{bY1l>byieve_hO7=~dC1#HiqVGJn4uH|a* zJF}2YQsr?$bLAa>+@xjl6tOxR?MXzVJ~TIVP|D9FfM9vXOwznP+ZRw?cx#~mf&uS1$Vc_ zVHXP?EV#312n2`Vun-8&F79q2xD&$S7Bn~nCqU%o_y6$feSLGQy6RR}^>p8wd-|N2 z`Mdmg3qYc&rlAHvLqh}D|9b#`*8s`@Y)mX{EKF={ENmPcY+QU2e0)4Sd^*%@hQS?MS#nRuC4IXFSwAZkWF0X{APb}kUte?ic2aB%Q(@yYS=$+>7K zX}JEs$KO5xDK6j<%?AUG1%OV9hCzzbT61&h$TG$5<6h!WJNxUs2!*48(Il1)_E z0Qb3}U3!U%U&g>TyI5p1>hfPaS~LJU#(&@D{|-#7e-Q!LIRBhNqyY4PvteMOp<`iU z{O3eN|Ch|axdgEkA>=GVHWb)OKvwT}Y0%=vi~iZmzpDU(f2pIBVvqvl0jD^NCFwcr z9}*>53@U7*Jj&^qK^Q%``)|PCWu%a!+iK~50RojMw8Q>CnAg@R+}C~NL$_SfYxqKr zN4TONX$cS0Am(s!?s zK^tGRnqP-iU49XsPa9|LgBUdIe%RkclE>79wdnA@R!+C__MGsA^lH;cLl_wj<#^f__57WMIs0v6MmQm(223wdylY_4V&y7y~M3)<4q+)}s6vS&`uU z^Jx6N0ELyCcr`?SE;(pg^D>>@<2S!dx#6WNF1vm8VPZnq-ti*;8$EhG;`20N_FEVx zlZ)i#2sh=Es0r=-O2Yorq;=2Y1EDv(P@eTJy5&!clNC2M9lMdi2bZ-*ep@k+)h2sH|JMJz#p|*)iR4!wQpoO{{ZWMD?dS9 zdUa%S6O(65jX2H=bH2Twl!})*0n;n1&mW`St(GQK)@{qqn3<(Z(<QqEJv z?XE;=UKzte$=8%W>+9n|M3F8FRG=(kS%ZmL&*V;=lgBs)Zh0iwdRJ3Ba01RL%hQkV z{8^R8OPAR6yK(=D{u|S4r?7{pKL%Rd*7H@~eaUlWsFJSaeHCXsS4}kl6~KVUbBgK- zy`Xwtj$&ff7hUoXsVQer^b)#6NdnJH3a*lbqpy{JsJ6{h$XIO8`swM^CX;K2k1*V0 zLM|a$)o$HA`#a{#<})2$nQX6XdX%WQUA>zI(#t(`=?w~;9ToceoIE3wdmqU@`QVmo zKP5(l;OQAFH3ThkmAq&4u~7x)9?eleRQ(PgK4<8Km&E+hkwZ^7a(3^TDtwz|^;yfX z2~HLRE}0B-pi7BI_%lq@bNkggYI)|*Jh;@{v~zxX`QtCZnDh}vRufbRDTYP^heb1n zhsu4RSfarpd{ibz`zoM|vHG&XDok+*@mV>uXy1 zApRGnuRyMkNlDURNeR(mkGCh9KOqHzR@6rXR@tP8qez3;TUb4z##S`mo z*(rw>LClAsG7?GlHt>$_ZSU)_(fZFjb5Di^0%g2b)vnb>P>nwyHEP~o1V_^b^SL8;h2sOrsAkVHhXn0CCq|STvK*>laM|UY%F7XGEQ^gAWHii&Jh2eeIv5U8eV*B30*9o>KK=h)a?! 
znHiC=MKkvp{W&g8lC&tKGyr5YeV2ihjO&^?7HOGoEjyJIPFJ~Nb%zeN^CV{HV*r?( z)n{a&>A&JkEx`9dS2-pS3H`c?n~td$T31xg)086e5w{a}IPvt9SBv#+5cr};GrU1~b|d*x6jlV^pV*`9%|V$wBIVjwfz zde?a{$;DaQUjU)^IVAN^zt2jbC`7T8x2qXq>q2mlA;~WgIyW+JG&u1bb7EVo!9l7p z?(%0gVEvv(cdgGP2hww8=hUu1TlwIqFzU4*3O|bePN`^MJo3E8PV&Q*r+0^WN33P9 z=5C9bbzZH+N`p{HbK8Px))hh%c7UIWtf;8CmK2ru>ria#_g}F!%Rll@6I90fDwhD( z|K-wd)UN-<)s8wH`yrab!0~%A^2Qluc@W`+6C`3PbUDqm*Jdukqakl3T?k`~$zc7C zV4s^QRU8QO)m?LLiybZoT>x_`r17f0+D&;L5b^AlN`)*$C72P!ryYp0^9g3%+Hjl7 zj7WoAc!c&6b+Y21xI*A3urE&F#eBh3g@J4Bj^X|;Qvd5S-B_V);XI6X{zp$${a5}U zvuy>m<;Zl?8Iqqud`F7!pZ1E^vsP^+Bff&9m1aquaawj{?3@5koAl; ztK6L#jz!lC^GbI}S%EGhBV*IzMPdj4(ZI}KK!~S>LvTH(Um*=^nWg?)y`G1^fFV@A zs*Ep>h%r}r#!KVhWP_H>N?mehjjbA{H9mFkxz!JCOTXK{uxARqyEpoF8K^TD#d>k` zQb)}uT~wowCjiF*ZC=mHNvhrKB@4mBW420gA`shAz&u_9kUvwE)+vXMU@_gFFfR8S zc5FFsl8$9QtBex#XckA&<3j!d1RZ%yPR=Vn#*#@KaFr)_ZyMYgP|4*Dp-5ggmRkAnGt-<)v@_rB%W zi)J=@dfCM-iK&Zgp{1X@%Bv`k4+<(r^?iKhzP8 zr7JpRT!4$n%Atbm+P$Ej{H1Y+rZuZjwI%3H;vU?6%L6+g?wmk9cOVT7j+IRWEse~> z*tGOn>9df(N$lI=DMT#v^shG{lgQ-D+wn4t8 z0TGdRNDy5(MSteRXLWM{dJ9)yO;E5|XaMeh%wK>}NTusq9k^*Gs1WWRUCG(Q_T#h~ z3m#)X}#p4&5@SXV(&E)-$ zQ`;3E_|25uQJv8Zc}xYvtZ6u#`aL?sORjnhFL}LriFMj7a7MnUdCiXFr1tf#@7Dv5 z%~mt`BTC5-4eJw0b+(Auob*-Gd}>SDGe;(#4@|ca;@E`-(rZfM)QUlk*UQNw=aLRh z6zO5dECnVBz3sn(r?tKL?wKXZq-2K4e{HmX3lH{VR-g+PXE-vYtR zN`tvgaR4gAvzU<%BvLZ!;Nv3QKrAJE59g4>VcH45&vpJFy^yJP9nvH5a*DB%iqaCYWn2TRQhHE&5(4)Sp;4Wh6C8-xXQ*PFW4 z&nuU8jv(i=-6S7g9lJARFG}aaRDsFAx0%p}{!y45fq!JCT&cBs2`fmv+grs`O3_0^ zxXHp=G!Va8cu#GIvyGvk|9zo`^5Uoi4)jm7kYU?}e@7W2A|U$*i$8X)3^jwPJB$5# zSL<#l+o_D8iFs20`w(VNxPzmzdy$KI)iEC3IFd=#1(WL1bft`zu>AC0!YHna_Rud` zJ{pAjJS#?rL*5qJw;C6Yp5LwAF#Np(r+nmP#~zGC(vic-<9MC zuPmEt>e|KYzktDUwP?nsAf9EHm4mdN>uH(odMUg`(?!vvvk~6}wI6Qr0WlSGxmrmD z;Pmw11U-R}Ve#6pNAN$UOjEZ+J8A~fwQZuS7!CVv+0Bt|?fgyyMP$20mNuk4uW3~3QLnJLxlvct+K8o5CgL){+w-i8le@J3Fs?U2JMR?hhh0;`UaMTV zhU|Z07?+4Ius01Rx(SFNRSfAA=O8M{qkCw+v0E%%#tRm=f)$!ED-w>V>eN{lUp#16 z!<5#$bfgkwA61V;Ns9`SIeA}wPi7sq7Kq*zWCb_U18Zf}Z(2_?BDKc1cD8FcsHb{n z-x5AHdjm||FJt(=OVpNbgv>6mLv6Pk&hAJIDV(!hz5ezpEd91v?FAJHSl&XhyY)PP zwe?LNt)NQG|&nsf1#%ZHikA?p zEd`}9#-oZ;K`L86gI&0^@QvW%=h$wuE+<+nzU>7Sd?^Td2U)vhI| z&wPpP7bA6ptFu~6QU3cN`8Wh_YtE~Cq$I<1k3K>?sYHD~K?=-1T#9gMP#*b(KdWpHJMoyme_vz_|PfE`Q3qH z3h$6VT@XOAHU_T{fssU{a=A3ZMZnE4iQ9m%dy+07Nw*ASg-2OZ*y0JD$F5V0WzUR?bwjE&}C`kRsMHlx=Gkha1`%JBuqEuM=p{H_ZV#;&Dk$YMzUeG>s&z0$;y%&Ol zlxPr-+T8?PGZfA#2R|;lb7Uukzie*q5)q`>`HVv1X|Pxz6FV%!A^IB;!_=l^>9l1rki6w8U<;yfi@|z^}behLq;lPQbI?1>S zswIJ%NVVG7n%ya`@kJQHdBR#A$w{|;puxZJW&j6nLJgkw$rx>7s$+1QmPn6mjYn$c z63F@s48>|e%?Iv>wZ=zNM*)SzU5;EJZk#5HMZ`#1M3FY%Lqx{BIU+k5cmZ(fHu~hK zWb>Nhh?g%rJR<~+Zk-4axn2JS`zjcZcvXlTe_>1 z(?>l(4>Y`e7R`ksOZvG!Gu3~7Xq8aC>(%~)FK$`J2@$teowbHYt0lU!{-wSCi2bXwUK;wZ9nk&6}=oI)LhyWgH*!|PA zw(u22uml zj6$hg+#-T*x)Bi{#U;-c{TofFp}Z`*CSgRDu@J@cqRl&{NGTf^o>X;vs!=i=)9ql; zw7x*V7crJizKKQI7imhMhEyVII1Nqpe%fP^(*+^lzKkMCb7vq?EU1)#M%4CSh!|7O zlv6%_iun^t6ce1OLK`xyQyqhvuCef&YPD&Ys@^|_Hy;gU4&9f@`F18PRt?_wgR{RR z_ebW_v6?GevQCwLf9@og%X8YlM~mkx7C9F#cyh0}K3Xfo(k5vav};RpvJpx%q|?Ls zy%~uqDY(kcD|K2shf3G}3&_Z~&7x|37BpIdV|`f6PWkah?-c@7zTeXc=gKguP@L;* zrf)|A1>RvH9a=AdELKB#Z8) zs$H)+Y2OX&e?fYCGTU#270RUS+Ll^-MUXtQv)5 z;wQ<8M>j&~EEY9vIS@fVaLb=C$LFX-?R&qkl*XZHnFd+|Q{C8ZlAhcF>wOo3DE5P6 zsk4Ujh?^v%D20awJj(-!8anV^#mxw%>~WHJV6)%w%=?XnN&O^ zW{br$TkpqZ_V&}eX$qSImD?K#(3{UgexeSW4L#2JDZZKb;EKL&Nh{Uhv zyU~A|L-_~)XmXO6!#^2mJmAc%h>Q+nJd_c`1yG)JExf}XGEd8$px>K3n{W*S!W@$Z zMM8O2d0VM1^u}LTbt|Iz{8mDlsZ#To3&R%Tl7ajJ=(o{wnDVc$#}2_Rdi`-z-1Y1Z zXR_#qIP7ZfvTKjW7P?}jW<--4_P9A_U?2J^}K!85NS$yzq4i++H4TcGZ$mpTja`71n{jBHtD*Rv@B 
z_XVb$f%mTsm4&~=^2XJ#nQBt5z%`5mX;LpJK99nmES=^B;wyB$(NAUGG1K4+FMxxT zoY#pNTz>Zw_H{LFlZpC6eE2?CSRo(garV3Kp{%&L}Xc!)LdO6!aAJY(SXgm zUN5E&b9#Sfdsb(wR9Qhdc;XP!jVL|L8uArOA%tQJQNu+_TpW$RHEt$gRY`9?5pqi% zrUgDtWSR6{zLl!=UGEpWyY(-l7>UQoy< zb?5Z>i7GW4ER$TO82p0QBkr?q+or7|L~cazWFQSZuAoL{(K`2t~_EPFpgz z^brUFTu79eb=No|zJlo#F<$wMWEKTvy?a;6TH9~9#}p|kmR;Lo0m#(;E8$}q6?Ky} zS3!eTd46T%gHCOMZ6a{5UAca&`UTVQ*8##ujee)a$C4deEt0pmQFvzv#tI5?N)C>3 z;RPatT5zN1?*;{Ixp|f&T3!*1v4DpvAF=8Ia459$l{aYmJ>x)~Wfq7JYq-;U$R0GZ zCP(yl`h*@_np6?poXVeQia2Kxhn&%|DDIwA#MZ^Z0*ZZ|r%ip_fCZ^_-J+^)AsKH7 zRlx|KKO~%?Z}+!(Pfsq+igDJt7+LaIbzjnXo$0i=mB9ipYb^TpM7xs(ym#=1?T1I# zrRob(`*c>`hD0T(o6jU6g{yd$Z9u#cxSP^m*1U0|tQul11E+sKTb8MtkW5=brtcf& zmUG!F+hgCpU^#^8EP059seZpBxAYDwx>zGOV*ctCgIcih0%e8fKzwtta_f#=ArIU! zQpw6mTjpE6nCESUjG|&59&&cEqQ+@+ZR}0G?X+UnWI9}_3&*zTi<{Jrl}f$NM&PtW z4^b5>Cgp?0>bTbNc~G(@`0c*s-U@f`V@htuE;Sk+M-lC4U3YfTD$NQfgxH-C^G%n( zdzu~Z$lNie;L20Ts40?x&P zY|pp|8#{Zc=1p)=sGR@W0Gh%!!l5p(D;?iphnXyZhCf+;@i z?9>j`lx0D(RWW^O*pEtSaNR=p)Gc8|r>)f5(39yrv(&ya_<$%vfGBYpk9NXBfryz> zJv0m15^J-ZA(}Lj=f;H%;IEh4h|H#lu%D9>6&Ix}8g7XUq@Q=W{4g(?IzP^@Q*=rC z+%Oi8$$aQ2D+U4TgZ$AMr1dd;<^LpiadDSarY0(Wz`{d))YqVK%i5B`CQp5H9b@IK zeBA9stFu+^(V;ooqykBjNNstc54?WD=)@#*`4592L43%#FgC{*2I= z3?Dn^9~l@%PZY=Hg%?b4&jBSQxv5D2UQT8j>`pj7i&7McS3-DC4eGlXB|yLsBp;Iv zZ{RvgV!K%NK0O`N`TT=KDN%C?aHzPXV%>?3H)U~GI34In&m>J&YyOCun`^&`%%Ebv zk@*tr5#hC+5#{vS>G$XlN_@b07e_Lh+4coa5Pd^VbbZa(QuKc+89_{uXV7%bz2s6} z6OT^&=d$TAi>rlHbf;|x62^um8tPJ>XFNwIJxuvs?bnq8^EozQL$}aa_cUi1>}u*4 zezR|^%Og1t)^D;YSCj;BfxbcX)U=kISC~TQoqxBYjwlZkFSdz)3X>%*g<2&l5EoTV zFk75st&`{j!IMo_X3|NK#l(wdtUXU8sF<%~5RX6k$(G$%X1)yh+nkLddWOzK+N<); zY?%kts$ee!fku)|>b8+In;r24AKWF+;@l~2z;sk(@Q7=OW8#s1kNbL5zC0_x{X$hzZK~Eqbg8M?@-Te$+LL>!T)5YKNFN=azG9= z7;99#6t`1`{TfI>u7SPN|2C-LQ-4ca`Pq@n+Ny|+(G{NDd9XGrB30>9)sqohFD=*) z@q$9MTbF5m#H40)la#J+N_mBCdk_5;6T#}OzcqH(~)&^^=p8w%efD6_+vaI!+oaoLy}ybuo|)5GW0u zGt68KjzCZ3^!_bejDI+inwc-yq-3=sf#R;ENino{7rlJjS6*SA9CP`LUGqS6)#MHD zV`p>#Y8tBSJz6B_B0Go5m8U7oHlZklfV-vZcHLMRviy}hZskK02r$!Zje!EyK*oDt zAB_MR+lJ(#!C9fhxadgNBR~d?nFLd~6Wvx8Oo5q~O`m8K0^#pnv*GIUDDji&K=d)9 zT_sdXlSIKwu*9PCkZq3a9&i18>V|Re>1)_G6B^+L6l5zK$svZ;i(4Ok!@BE?RBW2| zyW@D7>SOeiuWA>BhtPeN@ghNaO*S4{2KHL68$4?!^H$;&XHaY)WaqM~B;ftaKAQ_) z)$p0=P80T(-J&SP2A{89f!HMuxOp;3aoI&#dxM~9O5z}oCm$wPWY`w@Kim}B0o0tQJ7o?(8Os>m^roSS&@~`i3Um(4mFrQghH)S4cP#{ zw9X`^phN$iJHigFTRWiQ^yRSzlEuS@7_X)yAMF;%WO(#gX1A4#ZGau^OXtuMGl88? 
zWrT`#!$_-S62Tizq~M$+Id$_$%<83>I{hB9W1dCNM2zUl=Y{X;$DTHIwNMTi9awfSI$-T7m%ol?dem$AYhf~TzT^Kfn{c+(z2Txdp+bZ?F_bI zXnCE9CP8J=$`kU~d`1*u)V)}i+1Ei4CBCc8gSW57dQ5wF3y@1q$`xX`C9odx?p{Tz z;*0wjkntyz0#3Kk9>`=CK!|>)gLdX1bRFVMn^8|>n0?I`FlFSX02K{_WX!zh7A!POKL^^D9dV* z=lsqnvZzWKnX?*^*#^cBb}Xm&f@X9>u!>yFfZJ87r`ob4u|3F&kTZFMT%45fMZ?9< zU%apN(9Oi^mDgf5@7FW+{@*p$rQ0cY*!i0)j^)C6O@GPhWBSJBck5e z0J5!*eEjm9Jm(saG~9A$^?y@>0LSgG+t~CJvBsPW$dK*nZ>j|iQX842B{84wt zeZ3#xMEzXr7;jj_o7>8?gGP7F*zicFZ0YT6lzaPU4V2jrDV{ef3+o-5krJ`LZi2?$ zG&N+QX?zJQ3c+da+;NiK6K~MrF@q!*e-RQ(+9ucN4={XguH(OsKkUO+%(uXvG{gNF zAY0(E!(OWJ2%T2N9Ppe@lmfjYdDT2^bZ)Hc2aJg#?msG(EdvC@k$!xzA-(fdosNCj zCjeKM#T`ro>ll6i3RnApf|Vhh2)eW;o*22G$Sucd7-$i)6>S(OI=ptv|FFVg|z3BnfnC$t>b9~OpzukD*<++G7dN)eLYH#}|0EL^= zY^-0xUA~l)d}Lc+Y##zzP05wbiBI7Y5Wzf)+Ve1sl&oyCP3UIZxjgS4eTqwsPQ0?B z2$60rdYnu|S!c|mMOY7(=Ij|;v6?`qGmZ=Iay!CKnW#vfSnPSjrQ`M`*$9RO>)hC$ z#v%jHP<4_e!i?i!b~c2hmQh~GOA2&rRuJnpy39N)*Ty0Zwr8nb`Q(n@)XhlHF$J|I zpvUa^L9A-p%PC}$+dWvjBOSlvU9n{GXVkZtvkfPvxGFxwAEU=^HI4gdkajZ9`ii2A za9S!)gN-Zq{#M{JRw+C`TD%ZVNPTC2CE&oE$a zWEB6Mw8aGca$Ci54N~<)d30S#S|V$k5A$#j*^`0WGgN!j&zlgmu zK=T(Ic`<;rvV5&hRMVFbMBr>1WGcwZy$M=2Ix_o|YA&hxc;7@_+H@|NByQnCw%y?p z`jXnIl_@csoak9`h*51{$^39%uyv=h5DCKHMcD7X#ymbku=<5)zoV%_VCqEONs z@fQj}-DvRu1%mv&o+EeV^U{4j1p7FVVzO4nt%!%sw$DpR=)>a;`IBczv~@(?x~Syv zxr&}AGZ2|Wk3+(yQ06ZUH2-gkG>u>ZQ6Z61_+ur94Bmz!4#}!M5oB!v!@NbYli`)2 z{_>~L%IrNubA4bxJ@QOKV9CjdMe55^IzCXS$dAECre7y*bw0nfpd#)vSzI$b$9FQF z+{&5{8`~?n%0(S)TSdy~@`C-*_EA zi|`P?-O(JN-(dU$ywoY@x&z!NmR-C^%)Q}N%Y#4q7epq1Me20$sMFya*)lz+Kv`vp z#iA11jTOC$EWEcBA~C8{D+Vxj)h*i1X_9oEWMc(2F}odmG`=|+2qq*nOC4gEjJE!w zQSfBZIl~ot%*^`1Jlh-*Yfw^*T}y7XU#edJbo12E{GqUMl~5NM_8uDSunR4%PCP=8 zId-LU$JY0Iy~4QkXt%ZsHIERWga}FesCZxI#S#Fq7WvDLl(!#mzy5@1rueaY#g;IB z7HZM))Vwd6&)0hRsP!GImP0X9=vyq@YPttiLX7H98OtyYq8QpZ-|)I>IRmvNwl&ff z4$uJ?guqNZ_Z*A*y>oGXt~EDDVB8!`)Ljp=ck}S`T1N#2c##4cLY??J`nC!#ttol0s5n74)DN?5pvYGSJ6DJ8(ar zIyk6qbelx}FYOmHe zk{O2i5f&k5rvFO9{OoIaYg>!2YT3;UeGScW@cs}?=W33E_%oBR~nisP443xzw<9dVhuNAr8LWNm~@10Q&hUu`r@f{ zUQ+t#s00CF?Ku1-P98+IQ-v3^A8z0$H;~SK>R>^5t%z=30rZ9tF6D{YBE+YOGyduX zLKO%qpbsgAjtNHf3bhC00kH{Q!4lK_(c?B~C;g^@dJ%J2WW@M3V-fZ&wwc{`qbLO965NtxYj{!BKfX^@RM;b3W8y# zR04D+aWCx1WR2#($zh#>WgIe~*$K@$%L#O|xmxMs;)wC5|5i@s$Z!GbF8?~LZrC)YG4VK zD)bM3;9UC5=TkzxV|O+tmlo?MRPS%fK6FHR0gvxyRBuctHu8A+B9_KtdN)(zMt&j? 
z*qoXiB(a#(zOlVe*p$9&NpF|dV!D1Ywr>H;R+`lj6GAk6!G{;MnXLGxx}p$O=0!P+ z4xJk6axlI0^SP%ZjBg^8ls-2}rZoSEGU#G51rU+E`9`u^D8A)gK?#1t*k4ef;amjQ zx@l&RoT#ndKaGyO=;S?Tnq;e6mvHBKWj5*)*N2<2R)0K;*mn}Wt10thm0#?4mp zsa)iGMsnMU@bz2^13!q_YKR$NeZ*Ea)}{aJ=m>+~@M~eM{7qhxnnqxNPg%@9K;f41 znDhA&bFz-G!$o$PNv)8d%xr@6GLoM|5C(#l)mWUH(2JtR^$D4?2v^+Ms+ehs|qFOn)Y@|@aZoAbE=ObTgQ@CsK2x!KCGTOYd{j(SuQ;sQtKRNXkrsh zHsKv9{EE%vgyot`U(3h{kBqsOnwzRDzV;~6?3NYpe&OvtLxCtJcXaK`)fN;%BGtAQ z`=ZhhoVR|xSRd|=dWQ#nG||*F-9cGUK~`x(DOeb}M}vniZfosvKEhC;M(K#2!i6F) zJvnS4A;NTZG=l5eI-HSNcu(QY6Et?Q&Mbl|@u}7hdAShRlSH#NW;B!e_+0l&Bk=w6 z*+HFb>ZtqUT^Xj!P54Xm7!$(LwkbCFLZ%940_{uZ@0d z)x1fwW{evq^^e%*2@Reo!6B+9=9wD%UP7apt^B3Jli@fC`|UGr}`@E-57 z-ycxj${Wbllp0N^j6jP~5g68ol;P#LfGj*uI9PgL?B1#YiS^ozGc>%@%GGR==6&m> z;y->)kTqYXi4Yx7_akpClGnB&a7omzq-Xdp(?+Pdna=ihoM5FXhn0BtXlV@}_)>>ordzB8JCt8tG9c!xmPJo#Oq>Xd|$R zm5x8tXvxtp2kzCCQqu~wY7%KJdQif#g1>x5ZzP|GO5Pj!5eeyV@y49quVR9U_Xua@$_E9D#GpzO|1k*=p;8<3^$wU(r*bSGiD}3 zqF$`VYdTQi(jq7c9))z@<)<)K_B7r+vPA_ivrH>Sj0nB>Y-JG zYIp)Ta%cSdM(G@r`m|LMB<3+(Ylz3<(n&9F<*L{RAy@lVYh6dBZgsulEKlJJY04*~ zVeF0RqwsS~^|?eatbHlpxT(HgTJ)yKdY-M}X5UooUE`YR;^HFM=1&#J_eUk34A+-P zR^G{^$xQc(S1V9tl%VXwSHrHd`j(HXTCkzOhho$I%Z{M=e8EEJr!3e{KL; z(}Oa6TOcoJf2%p~$Tbh#5)GfZE^TsH<0>(sUbLgp(jIzxdJ3Pqr3a|Q^N^ycYPd+0+SS9J5yN|FC2SL6 z?RW{V>WK^aY1239M1SuL8L!r@+Iq!S)*J$R5gsw3juX~s-$c!A>KjD3nd88O+B86 ze#Kf7NFXv&3=0!|Fz!idc`Z!>>@2Lv6jleN63 zNb`_7IV|Y|wYXuT1XAV?kqpJ|B%MF0POtomniQXfW+-k22HA5Yfg*Y zlpegi|2E0_Xn-t*;X;r%4hGgrp^jx=Tvg7v7_fFA7vkSb_m9anlhay(d6DR{s`^Zk zruu4&uQ7(V4a9(T(=5{S^(v1M>O5DmDSv`5os-^)^ynv2vOZ#gD%cgbuVGHa>E6(m zlwW+*pRGqqR=HmzaO~va&hTn@9FUBY5$KtvxSM7IM9^B;=nS}QErqc~_@V=1IW?BN+&L@(1O2WH0rMmBM^2+ z1Vy>T9kcC6X3eIznFEFfq{LUE>MQqvEKWO~z&2oQcy%5WIRKMCvRdV;guyRP>Ba&xKtIoEqNy_l{ zHs!MB<^<=@lrDNnM;#w{rF_4VS*(In#`R!M-6{pO3I!XOG2{>~fA;gzdflO>LtY`V zp&j{B^2B0I1)X1-^%f~F{@_fl-5!Rw*? 
zR!e;Nc2FuQ`JMKR{eT*&4T_+(ln;k!wr=^+q-JTEb?5jDxeh^89W)~`s$WF613ycqdX(8L*Oek( zS!k1Flzf3?LA*HNAtum*={_N?Ycq4COnpE3uPD3|cLphp@`@W(!-_$N*~Yx)%z)7B zK{Ar;v{T+pbOb3&jw-NbT#K(BPCI*aKU`kt;Bk+qcw%wVRB ze&rjKRwL%aNYvL;|+Q%S#eVXV-lts~;rGQbw9 z*54*nR$Ft^ME3LxeYW4oosN2{{ z*a$hD*pf-tT`o$W|7#Rr5QT+cKt0iDZv7@&v}FgHRJLW)u^~>pA;fok^Kw2EGOA4R z71>PG$DPDhW+DOkwRb=Aa(!zA!70UnqL+v;y~;>PNEX|ezG~g0+yqTK_Al(nxw^6~ zWfpSp%}0%)sQc}W^-wJx3=*WOXu^A_q6$4v3(6uIOX)vZFFj3bdp~7#N6&uAyR{oS z((=;o^E^69s)71!8OtZjnOJ;4e9k6%tZCaClkGauUn0fn-96}iKEjLF8b}J0IvtgI ziIC?cgV?ysUM85<-|(#Z*4>{(mzyJ`Svoz&u;rbCl`$e!^T>Dz`76wL9}{;_($TESC~^XyjZ)SC;isIhXm`k2 z`C%=2BP(^uDo0esw4bLU`0xSr+Gz&fEHo^v#F*G7dObCFCI&VFwRo<&eo5;ltD5#D z)t+_5qny%zOqVzEtY;=+TqF0|BSzg7TVLM=NS3I(#zGkcxhBbdKaVrfkC<>B-9@lt zzx|n$HzqK^!yP5T-6qXPpT0jMLgs72jePPjF>w5NYHqt&og2_oTOh0n&V0ln-{*q( ziCBiHkNeDI0Q|oD(cP-9 zhEsom=Ug9dMf^XNRnw#l7+`hlc1a%5spn7owK;bwpa{qE1E^oFU#svK>F#FFm-Sv2 z;L*^U6*qAI@D2uHBwPz}JOUK(MQ_pVDOD-Me4RadaYSFC(`UuzDu5VhAi zb+MhR6Io#;JipiMatKs?EmKJwhcoVAH(U|)cR%33e?IqiUzQAOoTEL38J;5D$!x5g z9n3J!Qy&)hMCs+dm>deQM&dU`;54Of3LHCP?^jN+j*1t^6Mq!{Q_h44j-2Caq){Dj z;YNDK=N8AUBp;wBOudbnbpPv+iZg!_jL#JiN*Eue(3op^N>A$IT76a`M4#pDaf#yU zG2@zwcncG7$o@uw(*WtLQng4~O4YEH^fA9F=-Sz!bREp6KB_^^o$DWdtrit@Rlx?OUXjRSK zpOils`9eSxzNjh`D?dua6|grJAe=Uo3&uMJdt?&7W939hiY;N-)4-dKjIm?F%fYm3 z6Xp+jn4Qje19<;E@d0EyEHw=_ zve-nFl#=3xQ1FkWj@IyBY}(mqI>n6lVUO47ION?mAIJ>l-Hd^yzaL>qZ0jw#71E?s z^vD4l*`p?&V`o4Q#f9#-B6LTMg8nNm&AXOYu39H`9wqhxIb-DucKK;a;RXSwocV8C z6+|?h2V&W-cB;O3SA{I%a;?Ay4`ft~v6&m!lHOEO56PyiuSsfCG!-{}MrBX8mF*Da z{fFZW(Di7+l}4DT-h#2&3!awO_b920ku3%s)&&s}{S(Gim+=kNTwj?}8@D-0wGAa7 zv?R2wcq~BJc@Ap18E_P=?KkZWOu8*UB{}$F99!qhK!Q_*QMp?vQ$8zWFRRF9b1|K*wvcJ`vl~I zJ|^T3z#&k}>V%Lrg1}QIc2Q$FE^#mx1I(<-qV}c85L;|DyGI$Xgjut2ZI?CmHV)0h zN@0pYw8Zg=9@*sBYI#Q7mpR1e9a25+X=KgIKowvf%jAG-KYdqP*>9S3$C2~r2*Z00x*#81aN_@$%8bWT+ zVG&F=C;WI&zQI$>X!69j*P%k5Z5npu*J4Ij3940rfc@RiVzEVIIg zCRD%v3!UGn`PpC92x^fwFOm#kbvW5j1c?%-T&FmZYe>vQBHhL(zJRjfN7xKfO0Bdc zAkvJJpztHVKatbF!U@dKBcLhfqD5q^LVOfDk9NX2yshErCpD%;ZXvp~>gMTT>eVTx z+DVX2Qh50jMGSgO27_ ze%u#~7hyy$=h-oPuJonZyt!?=H^H>PYE+4*l?m6&qGZDfkF>&(`_nLJxwIj-pefcBxed-g|5Ia=Mb*Ax(^3kg5>Zln?n?UN|Cv)2ZZ6Vr5?LRmuJe1hKD@!SH%Go-J3MKX^g&$O#w@+2sD`?#TQy5Y1zd@tQL#i%Xk_$lz z_KdhI3=S~m*S)?1EVI}=N=f0LatBl&er&gBg>chWzit_O3J6Et5yG=e9a9Vxts z?Mr@INb{eV|DvA$H(#%Y-&cl^U!Kr)-$`BE)Hh3fcL zz%zJ9hb0VZ?xw_`U*wS)wuw9W^Fpt1(U7t>XgkB8b1F(5DI?sxERP|V+`huKR%YAw z=D#?B|S2N&7b0De*o(vr9Bp$#-?D)xh7|C zCC8Ul)m_Xmn=DoUId)kKd~Z`u+zwT;X74}|-n+yXo(!eYBv4jU#1f1hO^I--6By-UZ<$Lgh*No7&L7o7V8?l`gBw;N`WzYXu1P8&oKCG0|Fo=c$U$=$ z0c6X@E(K)OWtl;zL$V zgTG{yAPYPdTuHa3UCb*+W9c5_&dJ}&S9~xlAlWjOITWPjCCfXZn+&sM6SJ)nr7k!_ zVyIkU56Zk3^KJg6W8&tpPx3`Ysi_MLSEbp!v$CJBV756MMptp_aYzb}$Sv3VG~N{h znqnKOK$XI(2$)RL%=DoP@H~MP;Iag1upxc7s0GRjn*7Gp_BN|RNXZr)4zWX(=S06i z_T|!xUR_MAn48w;KE&)3KNTZ;KPgex1&mmBMv}1;Vw%dVL~9l-$9OW$|6rWVw3c+p z5^xpNWlRG@C&IACXeAUYQas@2Yyw|{}Sx~9E24_sJ_Ujiu?P2XnH`YT4!=<3HLN9*-}n&+sJ?}p~0aw08L zg6SAmx2xGPe)6fxrV^*vXT+1wEK*05OR)cl*TzTs2ViNiKTQpdQx8NRWyq4WU|}XV zI|8?-92^~=*bN}@5`x-Go2o2vuGBm(uS@CGTy}TiCfuU}{3d!co(`Y8? 
zKlo>5xokvE)P!(b9)CzFd|m(4jZ=!$rYRWKq@X-0hTyE4E>mtffi5Wf9f7QhNtkmq zy<#M{eyV??p}T{>qOsoOlkld(%D`*qBq|%65?os5t}3xf$fE}M+y|ktLo=W*5%0ptzDX#y!cywXBHe|m*^rz>F=g&!pvTBF$ zD!lIs&2T?iICv_YJVTKe+~5LxpQ``t$K=`)w0kxmsBYr-eu3InB_14pf!0(1qZs`H zy-*hB5n?GF5P#0E`yH=YY&ov@S4>hjP1sM0o4XSHVJ<=GfndU(TIG0%;+mzSzmCan zhWD7`hUHqF@1Jg-*5`k1Qn+psv+_y4bB zv;T`y4&LQRGNviQ>6-im#8j}8Rd&~-(naY_$SX+WBG_U3e0iWE=VX;^i$DY=c_rQ> zGHjvJXWz@uIo%O%*&Xh??>`Gw#2#j)namM}h5zDd7}mkRG-H5TrJ6tFow*>D1O2bf zH&D6Oe%dy-!_N5^2s-U6#(oE7`QnuMwr1%x=Jytt#%%R&_8BZ^;-E-OkUVB0-FE7gI%ck zUqtVGrXYf1v8WOsuk7(BN%57+AY_pBbXye9Q@~;B@s9+jSevpU5M5m&BXo5jobExb zwl73qkgXy@?2StXS2)ffjwK0dBzdI}!G9jKR9J9Z98$6ld?0e`ky)9aIpTNye}KLH zfJ`b}IUpeM|Hl6xV2_E`%)<1lO+{-d?)CdSx7dJR)9_4GQgW^!;bU%FywpEDnTa`2 zyp`VhP(1tz9(X|#Y9YqoU;HoU@9nL^7TGJOdf2DFo>%pyr%lZ0MT6si9}d#&Ap+U@ zU2g`D;CT4Lrzm;+E+I`GuqUAseXfb?>^fhqH`ZQo7~KJJJg)Kd} zMVbwp_+Hj|SVkxMnL37IA}8v+TS~Iz3@x)O+}d9kpFcO>4ahtwvl{lcvUG3>Ov;dP z^VED6%*Z8}l#ea$>U&31T8k#aH8~w<1-PX%zGIBl{3O#TSG=J_Bnvtg-;33WP z=w~?o_sR;szu(f@qOhpoeVIc}k!5&hqJXlNJVKRU&8WAxcE>PqV{=Ku$<95a2vc5v zXl*QPVJrPIp4B?+zKp+7Bzy3d!@c-HWWkG|O0K}9rYWiX6*dTw) z^oRY5`(sJ(&_rKNIgmyx2NP3AqBo&Xb}A$y0ju;F>aHc7(%2ZA!7L`J`5Dy|J4P-o zd6?4ZL_}7EvcM6GHCO4AC@0d`2)a~%EzyK%5~H38&el?cpNwlgt2M2s{%Fg(^t;yV z{gde$n_BvssY0dne+55`j;D+KUL0(FeqOoSjeb6IzLf6j@G<_!2oLk`eUnwbx#|?^UW}8?5I%_a_Ty^(d#B_l?ATj|TNRqbj_YjDxU;7I$1c4%O?Q=24a^HMxs#JqPmp16xkcv&y!{yGK`RTHGWy0WMZqxqHz1P+z?1h1u z7hbcj&P73jTVG7S^sP?z5dK|*rYcv^pur*bAnh)hj7X4J4yzF&^21w0N?u~Cr@>!> z@^#yaqZ!Cdxamp^7vJ-kdW=4HjSLk2u;-X46iep>6`Ip5CE-29l)hH5m3Dgb;7|Ow zfQc^IvgiDq2`dSE*Sk+F(_h3yhZ}MzanTs*fO;7bZV6DRW-}%_^NAKZ+SCYpRc86K z{>7}DPj@A2q`C;daa7~Fz)XB&g64ctOswe4}xV$GCPEirOC2s%s*;HXSG}871^-G-R7i zT=XL|3JZdqNcgEz?#3d(;5{0Y2;3t{yxdstelT4*l+R7f1a{92*O#mt!dJv~@HnF= ze37oik?HGTzPH?ZmQo2zMAGyyzV``)F4^$u*+}5R6na#8;MRXkTG%0AT>2hJ1EQ?@ z=cFHE*J6p$ajDrd-N~KbihA-ayE!8BZKhDAB&YXp|IVPSmKh*O_uTcoUd`UpAyrPD z+)apjh~%l)11N|!a7vkod}_HKG|7wulZ%BB)>J@fhAA+J-+Zl)Bx;CNhIFq)`*?Fx zUQBe^w?|ufd9$Xr4RWzqd;Fce^F|ax)9j~;P|A7Oa1hWsb%apiE{NPwfBq5$N+NAqDOfD`)fW6i{JPD=5WYb|Lxh; z#yx{y*K+rsqW>JQul>6r!hcDf{260Tm<*a@7wZTAvUC09-E$M3H%!VmV6tk zDmMaR!Md`Oz7l*vlugb`CEB*sdz4k#x&E8_31=zW32GQT-w6x(s55f{{oRp5At~M; zTF7#Pu1oS*9_w_ebg|s{%E{9w4^r@q8AzE412kCp$-3H{93rU< zMJ%;dNc$B5rttcaFs{W#()pXvFharc8fiY))ZQ{U|IG81+wJb7J6KFi#Z%Bt9{mcp zWj()XM#*Jh$({Et13b(CxQwibQ$y=(TAp#4Hsk7Jvl`9pgqc)6@y}QM?%f&f!sR@V ztM%?;jqXm(RHm!i@!ZSp{>@~Po)^7(qrCt5>IDsDX#x7K)(b7@7&L!rw1#7MnEz*H zQf;y$?Z`(<&383k$@jFmS(!5DGBI@ZLPkQ|pZS_J^Uj}ewFr$f3P|Rk1%^fZuGO~s zGoSxZ0;ix@UuZ$%ud7G38kcjyK`yWFuR4+mO&m4+s zyC_qw{><(MwQnpdm04{-fkT(eqe*CUxbF2wg*=O{5s2TNXHt% zIRBA^I3RDHyk3f`nJ`XagU;Ws!$4C|VDgkmT3q6C$xdCjrlO$1uJ)eNUFP&~(dLqS zBzx*aAw62JrEYJ3*!=@v^?*fj6%=Fpx}G5b#1B%XWD`k3>;S$Ot1{7zgTfNCT@Fot zMt%!zA~Et0zgKbZ4~>=4SyuDFQE79Mr%h9jo~-L}`TfSs@XrG~fnOH>{%E35obl*c z__7cYSgO(IdbpDV5~P)Ala<**XifyCi%5W#PpA(`9RSHPbXT2@?_~Ayt2YlAx~&)n z#dy&GAz}%^n1{y9e_MlQH}#R&?#jPIuOobA@$*;p^i}x8%X{=8QIAfBuC_>v*l-@b0Kt4jp zceUWppSbx-kD5)hubkQM=a%^nwfI`T>hWBcl@~rs^}IQJoqYbewfmy6 zmGuY5Ja_9|6!>FmEs%B>oYitAH@!^8ef%RHky0=L8D9Dx_T0K{%GoDCCb@+boxk;0 z@l;Gb`DuX)h}3whow5oI6jg@xGaWelCX8BZoNTGfhw71Hv9QpKEq;PfsG)y*hQv_U zvG1z8(R1b*IQ(B#=%d>WGQySI{Pom}i7;&T+V#|%?(<4Q7+VJZ&qB{bDoTF@n-}2^ zETWw-J#=ray8HHa@4uH{J?%ns8G>tmvWpqd68snK9n z$=HKsLv_?>vZwV~PeLfCKz7p5wK-WOgy@$4J!O3pmiN@6w7!pmf+^9@DbZkCT4>U) zY*dr8*Pyd+I*@u9>F^|S+wlply$AR`b<}e`L3@z>mb0U}$i%j=5I%}N1)xW|z}LW{ zYoItM5bJ3u5j>_hJ4}%;@ZoO2PgAfmBG5uJ{A+8BZE(0_PWThqSQqoymF!rLkFj10 z;Y&lYzQf^9KZXm~!=MgvaB)QCVAw;mKoePy68d-qm>jn->Fk8`tGM*|7aR*Kd_wudx->v0F+lOAFcNs6 
zf`h{@t#H$7{5WT3l<4tJXypft`VOFqgGpdt-{8MBF#II>T=vb+1!{-j zSg)#Z@0?ho_j<(CDS%uG*dhfAQh_GKi8rT^e@mehphmb;(mk zH#e0{E_`%1j`ccqiYS)5I%a|)mV@b)>hm7E*8 zkxKm{1zJt*o%oL90R z>%kvaBNtcUo>zOFS1ORlfQbvdO8M|2|6^``yL&#hQVxRvR5=z(4uD1@LC7_$r4m18 zATzpO4uYhnIMtJt4Zef)t#^H*aN$RSgMDuU3#~z?H30*HSQ1wiSoP$EDhl|I#Lss> zGb9(#!@XV=C|aq`J`->Wua*%)K~>iv16M`=9zz!`qDfBtmKVTG!5-&vFaNl{{8i~+ z3N_i#2sJP(zIa~z_DS&%hG#b#S~SdBcLeRPcOwQW<1;@}lUAp6w3P@mt8`tLbX(+z zd!&A}NO7-v^NlZDIXfn$Ei7d?A>TZ#D;vF>mF}c$^QJ|Zp3b^6)0$PRaPu*g6bn4m zFE^0zX zWR8W6M2*IS2jamJCq=iPi`dsPd|za%+Jh{#pkf4j5cmYDn-XF_UuB;>6Oy;;4?Z6HnKWU770oPg2_I2m%^A*vyccQ2c% z?mnn)CVGSqanF-~9`fPIQ5IYX1Zw{zOlRT;3`JE1K;2KM-BZdL{6L(zdWJO^=^AWN z5DZ3w#c=gj2j$-^8^Cy(Z(0?{2aw%!c);fhsD8BPKvcnQlF@a@_;8X}NZ~C>_5n$@ zx+X4O3kTK0!Q`uK7t3Fd$RNldqR97R_HbHHxF`xDhJ=Y+*0MdQV^2hLdWN>=xhF9O z6vl>?*Mx;Lrb(N>Dlv~y)ToIH4tP!Y=-!bX(Hc@qvG^W&)1>R{eoNF`v{*|n-$>-v zteN<-&dnWtNiC2G4+(;aV^tT^M8?vduDF3JB>$ta2Zc&DP=tcSrpmV#8(`Q9v{5-J zzG{)Q!e*rW>5p<(OTae;%##8>`|yIm`*fr4#a1ME#}tqWRN3p3wHZj_)y(A3%rt&% z0b7HJ`GLUtZA)4WIaUpMiEW#UZDP0vtW_J>{(V@~dnqI!Y7cTpLN{8Q8)}6&u4gd)bW6#iP&Wn&X*e4<-ZA#Z#qgMFK2W4nCGSVyhK-Ht1b4o>E- ziJQ*-=UR=~81^qd>ZBkf0mh4%g4=U~4D-{qV`&ij7)Ct6PYtEV0*L&27a`!ARuAb3 z=<-4Nf_xi=RXOYgq>Aftswp@9Ba@T>cm`TOx$wJ zUFJuq!Cr?dc^RrEEJF5elVDTWf_t=EXEJ|Tt7p9NSm>xHdLh3VrSR#stbI>>7wI2= zySk2U{u(&rpp*5WodV#J1}72&$k)KG`3T0mkAB5?xp_XA-zWo)$Y zw>M78CopRQ8qh$yj&K^xY*_vLC~~+5I=q5zYBl7t^lgN+yr2JNiJ3^E5j>js?_23B+i z4dN#%uR2bXCMsM4yBS|8PeDcTePt{SC7x~hLSX+T+^u#w`_$0Of_iq`R0J2qGZYle z0`^UThn&EQ1fVoi!!$Vj&4w@|5-yF0S)b(c01#WCVdocd$R4B`|BXflj z;Z=kG4;ksV)3)-{a=O#xHq&A!&?c`QXwp!$_3#hoYC|y?9Ouj`29>9kQQ8Uo^*n;G zZPx__d7$CqQ*cKl+~j1I8U+W5!Of8HCnzFqbZ8!96G!fw>CdT+Au)=J9iLmrhvz3; zZpT)`BGeSZ&RGUQ^a}@v6CsL&r8x^G@vpAFFNk#nD2$ZK4lQJO2fiBl@H?@MH>@pB zyN573_0tlNK(tk1y1BIqQG?8$^-&u7O+)4!QzH6&nL8y}hoL zw1@jY>S0;7D_IRNeeAvpp5_S_9)N`-fVC$mIFtT?-7XMD6BMkmkO-In zxbe_Ocxd|sk;>HfZrdbcYz*(?@P)Xj+1*s1@R;Dgi7&htT01%zedaUx%Qz?#1Z=-8 z6fW>QoE%~JCOWaXVhg_dTCr0NmOqCIAvbMsDu?_Ha(|FLcC_WGy?m|RN_zs2Ktk#9eYrXxHZ0p13VIt9rjNDCiM-0g!%mrdPKj=Z z{##4FhY-1SKNg(I{42LO*fkaL{rl2lTj^+D*Yej5!a+~E)vqU>ps-(HmB9V?jERl> ziHeV)id10L|IT;3_a9^T9|JHh0QMM9M6fvsM8N{}Ax2a`aJC0>?!32MAQTQ{PX!G{ zUU-XLps)vzP9R}8c-OxlK6bzxrf<^0hZ0PI=^w{gS3;)>=9xr~V#dSP*(NJ;j!woF z_(YE|MOTkp4=4Gc6j+%xl0q^B%qm4?$7k0+7W`Dy)i)~Xc>kCd+Zt_MPJA>~@n-vF zfL?0H@}HZwK+p2ikP~0Ao96#+1Q1|XJSYGQF$aJ)!VL>>i+@Da{t`|n2Dy_zl-hAC z6RmQ%067{?J`(L+O-m}^%KY={d9)Q!OE~;?S=&bUpgl~bAxyHB-Vd}}^ly&}B4oev zknjulYv%M`q7|I*{v&+*^}hZTOidrk27rNHmwvdo9QRg=qgY%H26CAKG;%nq($I*2 zzd@kHccdRF{4;M-Q^%z8I42o9fWQ!9_vj4%UM}9TTq*aF=G*|hUZLW1y)}|^SN#%| z*OZ7?GrM04??lVS{4uUEDpanqU3NDSngf|O*)B7G__09qWZY6D@!}BO>^1)(X6gQG z7-L`pEk6RPMno4za?3*#g93viNt#gxc#I>_>nNxI%0b+ABvZgTxu6XXgQ<$zMf{RG z&j%L^WK^I+0uPt~z%M6?N18xM$uhAp7;YuwwdBfnwR3WJC=n`@M5JDbYCGdMh)1B< z)YK9OU(?x986XGt!KAc)@c*EHr1pdE2sh~uxGn@&WyjyQD6#htlB66lh90!#+3$ZHIea9$@Sz>F8~RmK= zx^mN~HDahTakD;(P!9YezQ^#l(SRf}#Yj&!>PjO1!6UV&x!b^*Vhv!QzfS2B0(cL%z2sGU|(AD8vExj>GKdh+qXV z7Z&cNujd}nb#|cRUGtw^JZSH{(Zh&y?nQkk)*(hl5?7$Wk>YcERPg}OAvPw&3NN!7 z^79#B7eaDA*~9-Q)GwYaU{B&T7flu?1qqG1W`7K~sHSQH+8*MRq5&yW~8R`ud#^&v!PP?b7du8rr4b)x&SROg}dqeOP5f-+)-QDsC`q zeoZ$?OlM=$hFKACmZ)1`w|VmR>v=8@cT!ae6+Hr>oWLEc$q8kP)&%{cmEH~e#jAL4 z6CkCfyaOf^!=p$b8F(xSgSG53n8Hz`i)6-XXgO%v%D3clg~CtqYCeu{C5F^mc0KxT zhHJz%Q_jl>_9~g@xHd90dAPT`ZqB}^4FU` z8DQ5(tE0AH5S!x~=Ev*YEZ{y~L|mAHczP;^b^(OcM3HJDji5uc5Ef$u1%s-}mko3* zfuaON#5T~&qAW{9la?Z*Bk4f$-$&(&w+MF;FUzkv7s|{NQ?c5cc#@5vD5mdgAjvBS z#EZTDk91$+0}}In*LLzHrM@KS4(NKj0G~|D>pjh+GU=K=eiHOQHMY?)fjMELgsO^@ zoChpAko*{B4#3qt9z-&UhLA7p8y2-2zdatNk@>n~{1W;E2}w{>uBrEW@*sb~HZz~U 
zaXhuP`t`D*vy2va*SptY1G0mC+_sNTFM@9Ky+>%VE^v@N8wUTG;sbG9aljhJtU(Mx z(TFk*Xmc}a7?QLY{^X$k4+c;Ha1f{lKby@Bff|L zbwhy!Vjdlu-fz2vT+$SVa%-*?x0)^1c7D+M4;+%=fmzr?)CsN`Sf)4Q%V zO1@|=|F1ssk(Jie%VRXm)Mi(H)Iv}2&|cfoeAndDE0gzsnn-2TY8AZO=U(tiRb5AW zT2IrNwa?8qT&pTS)J0%Ak6Ri7yglrNv}RJJXR5i>4jo|m=A|pYDhY8h!vDp1d`WhZ z!ZEevJ_%i17!I0iV)hm`RJ)nUp+B!XR`3Q&7XQOE+P4_~<#S(U(yw#55Bbn?WLlkq zVSa)py?<3tA%QCos*o7`S$m? zZQ!!&r#D0U1nW<>fnCL)&d^J2arH z46ZH@7k9VvoSmCn7<6IvG;L{*TDr&?QkWC*ePI=~Len;^{(*pzxr>6`6_YDi4LgH= zwRfcLzf=D?lTSZk!#2jTbfT5DIN=Vytp zJAY~Y%58Yy+dN7AF#rpyZ|Krpq)(NAAmLM{epzq}Sy@ zLDmFw{fb8z?t-XJlt?^??|Ts176ofR)XWb){otGAeK&o!#$Z{AE>Pk4>1x(@`HQFR zF`Y{*|FVXUrTsfQ&%a_tKnoK_h*FRBoHNm(SFGkr^i6NR5&n1rM_?oepHpG&hEO>->EHXB}9BD(+$6miW<` zGBl9@zsjyGLlszz>>i&09&9TH5gTxTi}C&Wk{JQk9$McXO9oX~RC++{u?Kbi0Es4^ zSGtX=pKG8jup&Y0^G#GpyDfx)SR7yqm18AoQ|h z1@`7Lc=Yb)-`&qQsC#t%CLWw_HY;^=75(($M4kO*3&vC7uE(_lz$J-Mug!_VoyrThbR=(ss()BLsEyD z{ql%P7DRClPz-~(QzAn=6?`~*sik_oOIin<6ljH&21_G-JQ`g+6hcFLU2Bx+I$N1$ zdikutY+FiqaSGvN%20eK=U68ft?UFucEU-Smt1i&v=4;rLgYL=Oz*&#b}WBYUft*u znNjA7YW3li5QjE!+=wtH$pnUU5rm>;f;RhttXg{|CA>M6B%~V2HI;a{l=^D>AC~ni zKvf^n4xpf_ECapFjJ-Z-VDc$6B?>gWkUWFzkyyhN2t6v$#*7Zr2;|7v3gTe)(quF8 zRzJjO{2rM0qfJMpTuP;=01zZtjRL^HrkYKx-|95U*N!&pOalkYh5Os(m3+%H#b*Jb``g8w&1`QM_S&Qvr~2q#_h(;Ew7X4sB$7 z(n%Aj$wba4uRk19rb!sK(u`@=eA+kss&n|+xn|5vYxM2#QlaHvpFIQ4RJ90g+zL9UZ zQ=$L(fqj6OWzv0crWYOGiR`$u}T+OcF6RzwNOiY^~IclP?{0Hk6 z^ThjPpmfm}72X9yoMY-9VH!xOq3-k6ZgS-pw922!`q*!^-D$@Lw+7vH`o>lIc)gUv zXEa8jni0``Xk$|BiB1_mD5qE>l1o>1O-6zE^Jk;+VQt+vTQbk0$NLXk!>0#>&ou)E z#@T2!7KF#1-}VxoMr+ckejb(77&Y$WhRSk!X|SJQENc`t4!(58P7_ktyh3GMk&U*e_`zV?&!6I$LE^BrQ|KZ}o{|^vAxBp~*GJ zp$3!FI#fIcWr*QP7E)S@v_ce3EAe>cTI)+G-H4k=PAT2#+t#~0AIZDMKMPH;{B4aW zo#Hx|VY}bQIitWb-suG$%Lp6u8tWuy{VFi4FWB;x;?tC{V&6;Jk8ajIlDTrWBMgo| zTEa-cL|A|zQd}WS4KAe}vC8|;uAPVCfzg5Nw4^urz*S4H8@>mb^vE04x9}fSZOSbF zK9P_y@!>Q*4hTyy`b-Qa!=?-Oj%iUCh-OwykbTjm`!ZfoF?BOT$zwnhPFlu4I~h%C z^n1*3o4SZU@olgPL*^`f`K*O=A`2#=;Y5S_ug1$Q4bg4macGxr%Z&Z+Sv^I7iNe%* zS;5(4PRCo>KE~9|S=s)`#8G4}V#ZWgZO&(VDExfR{r;SoDyeSpeaWk{0>z%X;}`S;hgf;!_o+ktYbgWz@H46!n+vhRIy@?b~nf(URqD zc6rH!5IJ(9zz6EWC^FfkpkA~<3YY_nh6KN{JqH-j=m~8T<0Bmt8w1X^v?}bYcPSQT z+X?30vw5E^%*m4@O(K@ehy4!VHp- zVg*8|NG>dz>m)xmll5EisHM`Vj*~Uc+6HHB?IaGsPhPz@fAtZSXkDIQ)kV#SNhPFM zFs-QNmeJX!7%Xn^SeEnHmYG=QTiLEV+x}{b9Di&%k+8DY()J@}W&QVZ&WX*gF6hT> z+XSYZx7Wt%_3W?vtLOEzzgtY+c3E7+*!_NM@?B(=zJ9qCZ`U$qcm3LK$tI2P+m0|x zn)8$qu77{d0z$-uKZ3PxAB&SAT+Mkv`HCm(1#kH9W*Lks@%ng!z<1^W$(QNek?6w$o)8$X&%}6}U zk@w2Zrxo6;m6O@FUmdpm6^`a1JQGjLg}WU^b~Z#VHbmi@_vkmpjvWQ1Hwkyc5Il%nPlL$#}u{?E;bb}HjmXTQR<5iMYmM+Y&X?xOKqO0 z(c5bHIR5Hi8SPDj1L?PTDGLs~Rw!Us&;rV(Jn~D`>|P-R!L04n`#*TXO|Gp*#jeJ` zmCy#;u({oF(5mFc?o0R{n%+(FgQEo7W|Z_^wECW;bPtI-K(g0N(Brx@V-RVUq&AXC z2R_ZTJH;UP_FPW)B!6QHe}U-yvOL?eD6=rB4M`%%NIo%2D+A2HFeTMLx;;P2T}^-! zN4H%?S0qXavB8L?(9L*jH#GyCZsb#%uVpkrl@U=ge(1Cv_d-!hGEFoRh4y1Lv+FW_ z(qg>XIVCtg7d~U`b9B&rZ2aEj+ho#Wkk2b#0XK~4Uc2p{M375qCbf0uZ2q_lhr8z; z9)lK26Nhl+DadSpgmT}`fzGn3SZ;3YH$9oO0wC-WQN{g!B$)FA6t~zf%mA=Fw~+J?4qxaYmK+-q|PqY(VW9yg-NaxcV(~hInxHw zxf8hAV5}eYvckO<-pT$7E)@9wLeF1Mdxwn7Z^g9tFn7rI$UUEWyR4I+qCwOMfd2U@ zo!u#Y-082iB-d+n=5bqLI*3#ggIIH?z&n^|qv>UhXo<59#QeGL`O~UnzF%ZQ0iU8l zkGhq^nw`V(Sd4HFz_@TG5(ffZ`7;JbiV+16KnOrmHUQ)XCLzZ7=pHuc9m-zLe5weL zd$;(2;ZVNS@}I3$Ppog=(Q5F+w1RNYfPX)6Ha}J0ob=I~)Y=3px}H#MU;58p))*9sV z^TNmOsb}0%`uQ{Amywb$!IEN8q*%-*22lGr4o>mhsYFdM8hC*uNUU*$blI{Izs62f)8T50?e`@DU zc>cI~@=Q$QSM#G^?QZB-yJJZ`!3wQcaYHu~3RX#GOHC$LNP|@pwm?sHyGz(rf85Vj zU8_%VS8YYXb=v`Haj()`7e~xyKgN8k-n$ty3ma!o#I1#mt-h)QXsa||NWS|c@#u}; zn`>qQ;0|KA`|^ki3C@4+o&WAPVWBAN2oDimi=;umMaqhm^9@#J_tZ22)F2_? 
z{;7R_TF3DG-J9zLBC6^C_1Y=;DEkEyaR~FzvmbnKgk=Jb9^Qy%KRfR^4wQi~2eXo; z0E!GJuO}_1v`=32EMC&5dCV=dP>214#e;aDG%T{{HKiybG7004Cu`9$)ErtF{M;MS zi4ZZLL)mq%;S|mXhS9;N-0B$&YA#;Y5mGf(a z(j78g`yA@0N*H30VR>FEsS28yk)crV3X6}6#65hNl*!Rgl!RlS`y*zxc4Wp5F!JDINo~erC?>Y3qgfvBebJEY3S8HfYB6?DCvv=UFi-?#+UrAqXJG$R znB~ieQ#~ykAX8L0?Yr||-3d5ZNMSvPq3ko(!lJ3`{+0#I?So~iZ>~um6-Q@kB)h;N zEY3FKsR5Y5PwM>=!PwlPWx+UEE|bwY(9 zb*Y2TgpnZ7okysYD8^L`5gQFR%FnZ2^AH0Kb1c({rS9RxsMbDKeZOTzdsWHaba|@Z z*Q9X1>(vCI47QJ=%PdQcWImAM_{gR>Y|y&s*kKea_%{Qo#b3f-(m`@<%-Qv8ag-gb z>Q#J?TF|RlnO2Aq3j&E~`avNilE5YzT=x4(4;ir+noP(m6%U5j;VdnR=n!sczE=rl zX)z6-DhbxUs_vg`?n^nuQqf+W%s4nGSM0$oPp>oG-ky^0O)5bY1Ff9|>`}B(1hpD1 z2_h9WtvbB^%_2sk-O9uwz#Tqs{bO*+8*F|5!n;*bso;*gZ+p-p9z+yLj~rH%`Toq) z0Zng-DsH`FQwPF8pCC~~G||&fgCTM0Nz1_xYo7UM)O2cg+v3K*1u){CYD!AJ${7d^+2#GJN9B3Kuy4>#VK4`PrQx9M&aF z+ERodjz;&9>Ip9g(#p!e1uhuhp~0j&u2lC)7qk!kU;#`_rd}^2tbcno{e7Hw?Nse< zL9jN_eU_P%?raOsjARq-DmGn;$r&jp;*W^XYZSqY9jv-44Kpi zG_(%z5F`e{P?`*ndMkh=W}}Hlf4Vx1h>$2PDwi}}lF1ei7BUBjW3m0LlpJviw+>WM zIVx?Rbjcp`n@}gE%Sgw&LiA`=w(aTE1XK;k^}hGRHPLOt{sxJtE0*1e69@u7C{mU` zzW*>1O)}Lg(pxer_6C-e-r!YGD2x% zq3{%xgI^22ext?=erV!Ab0q}f#-q7VD0G0VFaTx50}{V-;bC}+kJn(ng{mlUC-dPHK!-B+8j6I|<}dP`lE?vOb1skd8W&I^{$9|{hZ-tC``vep{h^AC6#v0hWQNJdGCTJOx(x5r%iiy<{lg$)p zy7)uhG8#$)n69^05QxrVH<%bhq!3)HbDFrCy?ANy$9e8Y)3(^5q9_c?;a6P zd1QsIOTouH~2R7^NE&OoKR*D$*~SH&q^5 zM)?dPf(3Mi)C9!LweFbWXc;~N7>}Gt63(f(hg6YFqbEYo+aGoMP+_J(1R~jwOH}`)S-Y2yq%=7?_f76;F6Tr3ebjtSb>YLz z@_h}isYH(HlCg_(b_jpxK}!4Yc?`$e&v2^RF-_d}oL%PmHqr|miG%OHdUo?Ayp`Ai}#+~NSuN{un;`K|LA(eNfaeFX}u%L_aZf&1;xhj z4b6vdqU4^6W?Xw=?(wnD_Ax3_YCaPLziG~17JSx9D%}_9j3%TYr9ho+WXVsEy4L-aj9~lte zps2cHzaw`_g^HrYAzrZR6AI7LS{1gBQep(26jPWns?eLi0unx~5zG0ZD3kU7%oMGF zG&K2IZSI9GOc!4K@ak>Kt-YZixT)*Dls;i+`MJJ}q>yt%SzM<__M%PdMu+A>VK3AE zkYeft_9h=Q-`a;{cfgra`Yt7r5ht$pL_3cj{TZK({PoW6?}Ms7%DKtN>&Zv|PAeb% z+hqsnp;&_B-b>)NhycPu&+VX4{QrOhbiq!zj|72{VD&W+uRWC69;S?hTdqO1kVKaD z@CYQ}vj&eu0`kcJF&4=&JLxeyS=!pa7P~*iMYr3lWIL<3_4cGYSh8azi6N5e!ju5w zfWP|RG=SQJfFz=E@V!P-DI*EuSTaK-b@CdFZ5{Y$2iYiMJYlE*y85@Cohhg2Dxv6d z7Ef7V0-r%s=qo{_@H9v~%>w=|CknEFx;ux0%%Cq{J1}23FkKX1eJlDGV^2e0Os7{& z&0E6B%Sr1}Oma{}sa(vhi4%ACBTDEk;Bl1;A?p?N|gG zmYT5_+Fo*}aqW)a8suUX{B?tOXM?JQ^WK3YJgbBszDWb;5>j6iOD+-PbrOV^-jgm7 zhTBUx&jVxo!xcus`SX(Uqvo5FAi?yQi%Ts=LasXS6H5I z!Y1bqGmWsL;vAQvpraJ$I)}d_(eWmwVF{5-35}rBJqa$<@uq~tIuSiqMH#8`kVA!+ zTP<%x3G9S8**O0TEZ={s( z45?u2tYBLFpRF^8X_+}?F;o=UqYLU0FZZi=28gp!9*&}-o1&3VB|C#jOdOO^g#{$ZPHRXuy z|9E-aCY;|z$Op~1;* z3*I$vUlY;7gI2gm#6Tn{W_TS_BM&)93>8>m(I>@fBftu+!UTD^-rbtTHE&anZ2N(? 
zgeM$@UvZOJ@>0<3*w10_(^Me)xTX8b96d^l4(NNyxNdA8-5mXW>of|&Ea%md|J)-Z*KGaET=nb~D7TYL z`F$hS1kQL39o`;kr$v?_PnUh(L}*0 zoC`T|9R_rc-g}Y;hY?M9f2ZlAf{{X^g@+}ZEzEAD5*)1VvnY&G`S0G2T# z=SEG83DQcP*>(ck5gQfbKF=`nx;Q8sc6 zE#d+s{AvagYCr(}!v{+v5V||Fxt$dd>kd#j#s~V~K4bOZ3^djU&%* z*q?}_JzqUAb)wtz3yHw~a9kt&^@D>6ylWF1**lKe?e;%L_QaXD4%61C1NJF{s@q36 z9sZdRmJjI$@Z^C?WX3|UuuMjoRntri?IE7JGmYF!iMSY#a8sh9JtB!;CPyo=u8K3F zds!~gY&Ug8{7N*kokR+P((bAQ*yxy~M}n07aA!q8c!`4*?$%c#20j$%?>-?P z#RcL9G(Pr%5N^&j4H(;skXOBwmB{vF&k$SQm6`^<4R5;|t`19ad&>9Ht#+~%rIC51 z4DIaei|iX`M{5W5+Gzr7c?9>BYy^@Ofn!uBGl9R4$jra{8t}bR#Xj}`7!ACBOPnxZPv3-?1xhbq+ zlk6+s*4et;WDBGGv16V;-1p2;+0Qn;(a!87eB{}+0G62_biR?M_>%6|eY@Z5s>E9k z6q=weabSw3p2(hnBE!z+jr;!5s?792_X9_huYy#~q*4qU;YnZlXMHt&kMNB9c>O>x zvt}=iW>7Go^d@NX_>g2ElTlNewDpX2MSx_p_`jRxr`ab@f1US^NQnH9xYu9I(#Jve zv+>yy{+ZonzqHrAyUz@7&i49x1E>7N`g@@{GMcDo%vC<{PHsKH2J=nVKi;KVi!DU+ ztvbmkoAeF#X%f2sWZ-cUf^q&4F2Kt_#kNs6-NX~YI5F+b^(Z4Bou)u$!{8?}vN~^0 zq67SpaG=b&nb^knxBu)}X0Ta|xG3m3g(d)#MRT(@Ul+bhW7|)yFh@d!rogo{=We)_ zAotM^b8tf2tm)A#{R{8O4_@zb-t5Z0N&k>esH3@k8o-I6mCey7ocjws&&u~BwY%Pt zZY>;KrwP37J8R5+BgdNDmhbL=jHKphTqTGTY4ap6r0y?-&`6tL5pOlwC(UIj{^(W7S_`F10JM@>o>ajBXg+I6$No zDM=kAjkF-)NI_D%9o-6&QVwZp6#c2_zVAP{_de&G`yN#qfUWiIm=@W7Yf;eQ!>o@F z1+uc&zBaYC{K)+3wZ!C|6|0QaihTZ9xupr%ejB&)RCP73|NmdId8(t`-9HoTUD(jZ zFvs1qE=}?3Pw7DP3(q^D70r|1S|nEfj(_N#><#aC>^$h{KbQ))$T_1-{dp*$R&rNw zkp4#RFudz~cj*|Zg^Cz6GzsAfI% zZ<>CZnPE}b-rsk9#U6Y1PhAXjp^iq`@njOwaZp;G<`{g8<}+$`9*jajxi&l5i+c=R zy8yimxDxe=!WG&ehr^^g-aXJ`5+P>E6{cvpACCy|+b<^GAHtQYM^m$4++vZb0r@EF zue8sp*r`f&6TSwMzAo@Hzg6mgvn0G+=hz##xrRF#dB)7HX(HM&NY_sJYRFnN?_{AT z=yHp{_r3KRyp`FqX<7`;$-ncNyg(~R3nLec6g`Z*nJ90FFSCw zM#%9>oy^?<=kT9Utq%6{Ms8sE&Ow6F1h+?sb{3c3(2HB@eER9vm!DsdT=)jT|Ne>! zHxt|fYYAWqKZEL*!9J3LrLX{AA$&wOrlw3%%(bSR8aMT}Qmw75wt49HR9&hX!=W}k zbL0%FY0ZK3wt;msQ1s34?pNR2uQdbH27XB^O^?ttWlv8`a+Qls@~=3Uk205SOh^fz z?wIi;^Jt>UVb3P~WY!E98WK3}2LWOh_#95Bq=k@Q-fil(`gbUL@65=*cbI5}f|O9$ zF7+(-#=a{8`(mHwyPXG$SNV?IK)uPh#W{o7Z}iajlB$}qiSj&?3qUh+YXWAQol)Oh z)uh%6?ETU}u-Gs5y1Mw@=FwoWz)brPMPyH=?OizrkyT0=F&|J&gbG@_{w16A=&hXD zL+#d?FGg?1Grvml0_*s)D+f6o&7H>AvahWE;{OV5W+|!ud^Z%8#=K^JP&!3a9|gp#Pxc2rq{+>{tHRsfy4id`MGdl2CjuO!cYH zfIlzzUizJ_ATtVCY_|eDGxMv)^!B8bZZXwF46$&}y-(FP- zIxknNb0oNZsFInyBrBzd*tF>zhQ0!d2~WJM-oE?c$`2Wd%{gXY5WaWv$eNxG>Bl6y~r^me4l>E<*J6=fZYC$EAinsx2q_;GVEDHOg zc1Hk}lAgS<`u&T-zGVob5+(+)qNNwTYiJ&-`elL7tSs%$YHnD2Hd@--v*b~>feq}b zDaj?gEP=Ym%RQP$X{Dm};o9#incN7W)4cq(Kj-}i+&6S1x9`Halo+49{$d+p(A+X^ zYWQD^`^$%CQ0w1p)jxmP$~`Tb+*gB~ifjBVTECzyAavJ?1`NBRDKv=y2Bc>It*9mZ zyEa?$%)K}d764R$$Tp{qbQ2FFLPM_4OV@=FYF2JY!aV8MwE~ z@7soq7d?#rGS?srjgJqc+b zsQQ~5Bjxs$o z{bI;5L?wO}0{AvYlg-C%RK&tusQ=BD;%(Wn+toQGn&t=1HP5)kJF-gs#BTk%2sew~ zx6JsPR>4Sw&)os#;#Gu%(CKjf5{Ql_Rr;J3=hu-}{8!JrvXKeJ6ZCuD!R00K_jE!$ zbRHQlYN)wxnVI@nB)`e6GTqq8gOGu?>s!DxM;7;$#sMI)2d>RgduC>>BxCu0U@|@L zc9lSA?bBalW@$a9B9;f5j5R2@1|S`R0ss*Nxd(VT#&3f_8ubMj;-bF_<*55;FRTzJS7xhg&AddsqZktoOEHWL^&l;&~bN}FQL z*u+x!L_a3g^~Q~Ld^PE8cWXg1Sas6{-Vt`Xr5Hrvw0@O85w>*cvJ`F_RxUGn$ieh5 zZ;Z%c7CyNW85rQtLkp99(LLsR7QjRV9;irmYXvk0r(TVIH5>tIuf{$$Z+1Qn#Zs*9 zKCtLEl459k5RAAmwVxNk0xLqG6hhxCWQjsr?&BZQNg5nJ7+;0LTM}}q<|RJXbqZq*rI&TioW6D+97xNJjh8=@uHBsvUanZ~Y_Wx( z?wp1+m5$!uX+BQAH=tJViKw;1@>%iZB)n;5CNv;!uh3tnTB_7T8_mXt7CL9hf! 
z0GH>$;9|!R2qE1U#r!aQ-5O4f0n&cqI5>A}1#lle24+t6D72bFqu<+svfY>em4?hw zB~n*??g{uRhH_6(3$Cch1n_9b^g`^~MyW0!`v2!SO*^?aGLeCKD=(MDJ%3GGH(r^) zXv!$s`u=klZYP?HJx~>VHTlQKugu8*l{Sr-S|as!1-oOEuG#N}apvs`Bj>LQdP=YT zO;?8pXal|V;~4m`9Q=MVzzQP}3m0nt#{hNgE0W@?N!wz3Jo_G?bn<=)J=XJVIlNa0 z@E^f({2=|-#)li{;1IDbs9_WO(lZ)Zb?d#2IM9NXL%2Gcna z|4T}LD77h}Z^!(f8JJHq<H3@MeSq7k9| zKgtiHVjm6#WB$u*A`5E%&3D`)HjNiZg-15S)lK02x6cNe5G5CgSIGTJ%=a{Xo}qL| zAa*(%pfuMsZBa{-9|OjsAkA=KMuMCO*(2mQSuBCfia^FdAj6=kqX{2uu+U;6^OEg@FpAnw3 z|C&)tipjiL!^syGQ$8L`ah4(_xJSbp9{5k+>{rTdA1? zeRBTsim+<3^P?&G04I&vkZdefHnCx@Nl9DKy#os^#X@%hT#RVw;yh^0oO;n5xN!wy zKuc^|0M$^`q`i5LQ*#(Op6gPosM;s7TBHcDa0~GuQ9c`W-fS-0JRhtN?I4ix1W~8~ zc-3*z!ryo}J-Cz6T*krA6+sV6JFU0}3* z#HiPTMwbH#8L9w1@=9E{tl77YfK9&|1u+zSF;o0D8nR0d(+0pK2ow@%iM?z!C|>QG zCFi*`*ZCFbM22(cEyv#rs+MG=zn7${-iKxc^7T}Ryp+30FiCn0BzqRJYb>q0cqIf^yZ%crZh1woU{a35n z@d8C3nv+JB`tmK^>o@cN?Ycp9V-)eKeniA*br5IG;aZYxLc?`W}FG~v5$H>1E^DUs3dh6HLvE(&H!0I#)^$u=qO z<^eKrs(p^L9^b^1IE#Sf3Wp*m24jSf9b3@fKH zMb*Y&q%L9?b8Q>BY#0)oqlvkbT<=wEd|kQE6d5ZN$A3rtxJ|e2d3>nTL86n^d{)Gm zXZRA79pcq3@8>EN7Ajzp2>|D!s7*GiIrD`}u4!~sJR!b=0Q_BPF&9^F1 zcB+%lI+v@$+LH4;Q zIN|Pitrv0M=+&jV(a?6xUF!tL;9P5l(7WGt)Xh^|j-EAiJuXXfh%9i>d(-AcA(YY? z85?$|POLLcAwMKR-}hs9poTUpUl8>qJna*_Dw9A8WOKbT$=J+;z~?zZR~S(p)R{5~9*2 zWZ%W97Rk_u6a#pOwid_%+x<~w4$xCXl08Sjda@ONHPS{jFsc>ZqVO;M1 zERFv5k!LTveyQ;G-#Ll%0?-x!c!8#w;-IO)!+h|-8Z?a$x_9Y!>3R1-7!l5P{ho$N z{Z~Yp-hXDDYxTg*`Xcrj;AY)Y;k#Kj0^;w7BIS}#UFporT+`-U&>!Gk6lEzELclJh zVj<-OL^+XKO_jWZ2*)e7J}i%BLsPEdsjvbgVC=9*G|fF{$_ebScRy7ofIma7I<>Xb zIC9d7W88&f?6Ge^ zLLjNkK{;o-ozMv9Z4x7PAuFRW@-Dvv*Fs_3Y4+yn)f^*w!<46s;2K=cq0x>!gL zV9)vqM0ab~?8&YHXDe^5mWmTqjr6iy@$gUVK@e&r9X(u!rc5JHF9HyUc-RAH%C@h0 z5wp*e(@SIj+P|h8bN3yOw|(6pR~2jHRoy?HaPz5GZKD6l$!OBoLgP;_>(f5gLt+V_ z2SlhB5kXRtwjq1+cyf(q{t=FpqjqPjn?knaHsF5M23`lfScZp3 zp!N*5c70#%J$edJ`ny|3+>L(qQv(m(*ab9VsrQLw1ptJm3fX;CfC2|-jYz(RgJj~C zgQ97!aFl5{%C5V=3W;PL0TkrKyeG3Oxfx^wLWiNJ6B*X_b)|{5s>iO1g|7rpUUPOh zH#N?;L=z@{~{> z62pI)mxFp!d1?+!LUUJOLnyi0#P>_OYRWw3sCYbK*o<7_ZW7yGY#Ri^d>^9V|NA?L z^0lQ-QdhvzbV36&L-1ci8Nb=Lw}zhJ-VW{LFe1to?B8M&CM3;f3d$a>7$=bP-nPR@ zF+Ova!Bvspe{<*d7P)ar3i85EyFl5rIE@A9q5WbyU-9{2lwzCLjDw+;L3tGvq+m6I z(r6JJf=K~KlDOR?wK7!oQQsecU7u%(Sl2K##X5rM=+s{}vGVXndFFgoe%kB?dCsO zWv3Q@?O}g6!#mqR8<_=oY0gKu4>I-pZ`DRI`@5A(+@>OnOBgwN)iMSF7^cTt>mGVN z^MS=Op@Or#Dyj-e zQ>V`NHX-<98IGX&#lu$f@#pIv(a>F;iP9%y{}MmHz2Te|YXLNNy2Q|telZghXkAkM z6BrDWO54|6gtU=Deq|uqod?_7snU%}JSkjG5O-ZiB$hzV&59mjLWngJ=(QJvbF$>O z88!`|7+kX6YSU#GlV+a*11krfQx;}%O{|<<;aZ#~ZvJQ;Up)VA8G08h6P}O!- zAa%L=kzBYXp^^NEF0-1$<5Vn%5*h|Qw9J716|^=7J_~lmbzguOP*Jd0dK4vX$dc7v zRXMoB!-m5OhhkVLvq8Kej5WB}*?UbMqd~@4_2qt%@6;Zi9U0j+FMM{mVpyG{f+Xi0h!|* zgH@CZ4eLEg{g4<|nc_1(&Mwvr7&0XQ14wOP1PV|9v(3%vQ|kw2a};Rt zeh7$TPpj0I{6u@p`D2CY7w!9Z!hk7TWG}3IZ~zEe15S>FFxCK|)L{r{)Us3qhh$7F z0_FO9+=#^vCpx@gCI3gQgHLfrgrFP65^8zod8aA>f(ToP z502ZWh(X5&^#qhi-_xw~nIJRR!zdN!#*nk%IbdiMRf@Y4GS0f3M?ydO%6>Ug;%B{^ z{}~?aZH5o_*ahBb0D(C7U-8i0695kTAS-a zG;)(%5(pQfG~ znt(+7%9Kt+9YUurN@kM}n1a8`IBwG;ZXi65|9$G3pjxzt?D=ad+M0{9d8N`X04VnmF#?E z=oqut5X0}eL?y3Yn&PdsC641As$lHK&X!d7Ia1DuGpQU}4U({+)_9m@Z zlcBM>q%lRBe)(csbec%zxRo*3pXNv>+^0Rlb65z4Tng~yrUd0579IF`D*8G(YNQn- zsR>HUS!_UW{Wt}!0yDmXfQ6~cUInp$jY%s2P z?DafJhAO7=m>$G}!@-Krf?=8H1>|Ad{L@k_UQx~21R3|0BJ}Z zT$?DzHU|JJ1LA`^@!+C!mF_$0XG}?IW3=msAr1hN;`T}o=R0v&-x1}C39|GV!i~+%7!9QX+ldxV$ zus#x`fmB}#5jeUE^>YKRdq}h~KvE7sf<%xZ79fGa{l5<4bM=$kcs1sDMZP_Ldhpmu z6Y?YB?wNff$)35gJS+a`dN1ig7Ssf(t)Z0} zv1%L0&kTBa_{;(qtv45Ov45{_6ov+P_|8Rk@JGccM8&RptX}HemF04)j=C~egb&?< z#Q^A0Kpz}9??3%VNHVDakRUd;NC8|N26~kQ!z+NjaNtKcuuLW?C9f+a?>~UkPXIC$ 
z)+As6vPsRAHRw>l^1x$x6=M6~Y1~Pv!}@8t46);OY0iwk(=2LDhiT4LY42HlWdJc( zdLjCEo)|L6SlGrGtpp>Hkj%vxvxMXu+h`@*py=uh&A|-exeQHnSW~W-9@>_gh-{YK+P~H_n)d~a&o)ra zJ}@{b@eh+0XE0JzGReR`iS{xHo*BKC2yui1MQcgf=Rcz%s1QEv6N32@M>xl?L^COT zNS>8}gJ*tze7+yd5dbn+2{w5RJA0jH_#)T`U+9B}6n_dXtPT_>JT+nlgkxWZ z&jC~dAj#R0Fe$J8OIA?8-6#|T5t-xO#bci! zh+4>$)`_gGw^ih1Xw5@ye(X+1Ss9}CJV)oSq4Duv1G!E zFuu9)TEU#}EIHpf; z!mw(7T?xeRd+&Efudf8M_B`pIXGdc5e2`!zBv=XutFcRV(o1dX$oDkJ_p;CT`&2Id zGCv6M!g3G$@fRR$vLfcF0u&1Syzt`ATuJ9dMf_3fU{YoOL}j>lrC9=4psUaa1^p5a zpaTHuGBbp+K)Qfv9^L1Vc@P@`aPJL}?;`jfIS_)bW-b61hk}%QDhCQGbJ_}0y0PJF z*yu^?=!n-(3=GnKr6FOiVG-7&@X*IS1x^XClWS{;ao{h>(Cjzh9HrXr@Y+*FsQnzw zRuX251ZbidGzs`nuf)I*66D7rz#IQ2PQa^)&5Ku{t|jFa)mo17WDXx$F8NDv<+DJc zL}1)hLA+AEb0W4i$;bI;`E*bD%$;3Pf)Jg^XsS#Y6$&bcf?_E;+(bJpGJ#&H9jwh@ z|Md>PKOO##P-Oxr#J5un-5FBU`B)SthG7R1JLv#jamSrc*a3;DUCEJMv2|TQLRTU? zFpC+Wj|$Hg?A8zHE{yCdVDBzw56^e(&Svkf5baJc>dK<%F0bpZD(Xsz>}>ke6Ytv- zaoo{<+!5y68(h~*7tk9y4Wc9Vymjn)>f4om++8!>Gf~u2J>56y+cWOiT^89_PSJy} z>zO_7TTSh&obF$q?*BC1*QUWfM9~$U+PPoXtDFh6&IFzw4=7{WDwvYFCBIvpMfhj-QH*88hxhU)5vohe6NZ;Y9#jJDp4c#93S8IRRRj>+^5`=(8CQqBx+4D%Y#c+X6C zr_Cfqj!oCkj5rOCbA(&7%(`++Pa03#&P;zw8(SBfQ4yP=6&o2T8Vs%L4Ksp;MvY4e z0#6;mti*|Ur-=-|j#J+OG0E|VM&rt8Sa?xa-pPl&Kckh#{f>ShTFjg!c24kQ_L1Mn zT2VJTY6vJk|Jtt~wb4zJK7{g{Zx-ukIh|VKn68Z+?RFZra2gey?YFM^=yo#888z=V zGsPy}-IX>fYtmzB{82D^l)qs?u3?N8H7gvwAVc+WCFW1qp_+4}W9B;|N~Bxx|j z5lY_oAt>_wK{4<&Z5)n*9se0P+!#1vpJaFXfLG~DG-7{IH#z_3!#m1(NXZ;jJRBZ9 zx0Sj~H9MGYG#gnm-@dUTRWkIH>SL7u$7SP1lfK!B>6x|q)mgu3yS_zKTDP>ySoCb4 z=f>)D=P_dMYVzj7>dmLb^ii$Z1>L_h8&O@94Qnk$%b$x!0&aTt`oO^_OCdAw?PohG znE=20z~oT_ea;`KI6q}l%=tQRI7MybQ?7)}ZqTAv-j~dA&VFo2o3H#cFI7JW`uj14 zYIWsw-lu-xRGV;G9rTVg@k5=T7tM>%JQd=lAn=^G{QH`Plw1h$qfpO7|RP>#KN z^7Ti<=ep9*b0;G?CSNc5qb-qn?WWxvzuCl9pyDc@UAVqDP_I?Zel?`t|6DZITf7S~ z-QRZF_dYxEIh&l5IDIg%{YYZ_B=yv<2xymnkSz9rC1amsV+;3SqvXL*dFh7Slf!+k zeyJyO1^>?Yh0ovjALai0^8LY4mfF!D>KzG}v-k&d7iV9f;)kC&ck#ZEwwb;+8GHXq zb|)T;aTRyxt_ve^)y8`N=k#j!ht+(p`dllK7<+h0YO=^u9_3J;v;Klza>ap7|JP|)R~ z#B@ufZ~lw;+KuW*7vsH3mnWO!&sp$5Ure~)8qm%@{Qgw6$CTmK3vg?AHNG5jQxNQr zKF%%q{HOm)b@pfJ=U>C(Bh5wo_x+cvUVUTFAgpm69H)N3_Me_jpKd;wIQN?{ybI=I z28O3E-*P#@sm)1q9hI5h>~DP$fAHmF>S3FU`2}EDW^rya8;@fcOuFoX!9Fk=rZl0+Q89MSc$9rk#eZ zlK^Q1lG=g8JF5Z;reev)V&eET0ZGVo?P8%U9&ILCu+>J`{1?4+Zk-H1Vd$N?#Z*{g#q%L8T8%ad=4xOtt2|Jf_)6M?x^DEh*&rb~jM_S%hFR&t*>Ovp4nCfEO*CUNG zo?MI&{6!%7o9gnR3t@Jj<}#7G`f{)+gw1Z~89h51l@7n47glS-q&^xO&_6!b?e(iP z8gCnGnI8XWNH5kp#v*g2KbvBe{dNkZMK>vs?w5U=qi)8$Q#f;uP8xoaG?*cZTDX#5 z;d(hMmf5zLeTS04K`V}%X%3qX&AuR-s^ob+C_8$F#mB`tcn$VlP0`mtk0gm_y-cwz z`hC2VC6);O|1jm;a7l6`p-GlglJM*I`gcnUjlWI&B&$Dhlhws`*3C;rc1yKO_K&|F z36H{xDt%Qt!~vft9%A7d?iu!Y zEd&nWaSo{}?7LKZW(rRDm9&${QcaZQH-~Efwt4|G{(WSrm&Z0(;MCq@W7kw2a_q>z zaqU|vO(`#etmTpaL|3GPv+pQu-?nF}Lt68aq`np%I`oY$*+}&N-0u}z8(IsUil8t{ zYYqFUkR31v{q|=Zo|aTBHaVW%clD84i)UIai=~3d;^w80!9_SMBcmZ)5Q8NtS$N0~ z(@M(`%vdi?BLoWjsRua~86K1Uq9`avPBPOnj#?k{%Oj7K^ZUFeubkOjo*WJUKIdRe z!e3-yzwdAhrE7WdH^R{%3wyRapC9WF)qRWI(ix8(!oRFNfs20cS*b4gYOMvxZ}77X zFKSr}1$1y@VvNuzP)BUMSqQ{kdWfTa;;M^1eK*$n_pg-kBfQm)t489lnwr?^MT5~D({aBfp%Pkyko#g7O$UK`% zcmNyz4uAtf;`Of^AII|GLL@KZB?&~ZqYzrI?h}w97?1E&(0FCF3}R3~6>lRxeCJ3& zin~dbqzq-u;BArTNW3x|Y2p+5-Ve@m>}&9{c_BoJgN`pE>)J+@=<-p*?CvYzvF9w+yG67)x8RRi&nDVP(k23H;K`5wj>Wxf1 zcQa7QA(wgzAlDkZ`VIBzm%YqpvKh>ZO!+8^6~RQv%(ZI}h%G+U<~3x{`T;2~F3Hn0 z@zr0n85HWCIwUogT7ZSUYJ}e=r09*ycf3J+i11F48+^EL(cv5zBgTIwV9O9A5=O`G z0^Dv#!!_4(rFq;|bAxqwm|hJeb<^c~Ki*kza_KnW`J66t+gnD%TiC?A)Q9aiPi#Ck z|9yc2zbH+7CWwo82l{7EgN+Xz;Y^l^M14Hays@_idO z^vm^myF-axF%Lo?6~lQr^;Bj5Iz9TYFXw^Xil)Gi%pi828V>eJ`z*essK`8^X`ubE 
zcw=s@-yVVhoCfNj<#*N#2N;>mNYlm{hGNbFn;v%aJ~HnGOP+n*3ZAWrI1F^neO17q z|5>VR^8cm9LkV`nSF|%b1mOgcV{mhBv*%^k%FDcBN?^Sj0Vtg$2RIBDYG_yTHEDr^ zM3W@esHmbr^B+!$29u(wyzym*TuwCxq5(BeD-YeRtQqTJOMRib^+6)_TVvmkr*1~O zjgKi$mY-2ox`~Eb<%=`EcHcX)^bB3%5C51iaxp2gbg#Yk=2|$hWNJ#PcjS`MBKjsQApx;XqoJl z(!rd{;(m(+8C*i^pii4tOQ)PP6jrT!++#U}|9 zL&|SujTX2gvH{$VJy%7;+(CS=d$p8R)bCX&@fB6p+r{!9H*|8Nhz>;pBxTeeBkcyq zTK+kCJ&3YGvvi9V4$%B6@9<8XVdj%wWs_V4I9CWAm>Ewu`rjj36K`Zf1oFQu#?hVn zmvH2;)+oYU$NFB(-$|)uO{K*Msg#d8N!gjqxd1*42#I89_#)EO({y8@J4X-l+(=o? z1_|rO$r5nFZ8>rQ0P!}SKtli1l+t`zr%nNS{~^cgAC}!fiV+ z5kL(lbY%|HA32tAb(OIy0Qs>1zD(UK^0N_X9U zcC5j@YRE0r@1@<3#AIM(a6wkNJFc3dzgcs_)v^>>4*@aKV#7JE1Cle-(j2T|Vyta5 z3Is|;qJDSO;`owqH!2o@ND^1`GairXc^hs#c`}R?rge{*P|wwOe>={2(E{*3eZuy$oO2T z$Cdsaarf0NM^iV)_vbsaYcIuDuhgpH?`jAGaboW&Ntrk}2J`~)x_(B3yZQDNgDAJY z0yi8On;WaI0SB||eJRXgh}0CbNP4$HFB~kX`fK_zA8T0K^y4(6ticbDAu#}Gx3ASO zVYlnhNKwrdfY6Uaw~ zI)sDb5CPyjECy1|x1<3Y#l}6at&}LXZ&grC>Hf}JV$&%7Zq7ArUSUy>(b+1EUfpj*#Ta1AagvKQwGiHSv6Cijqd8k6q1W2y=jT0$5+n3tkMC|8p7^ zmL30Nr8S&36vco6Qk91F=?N7NWe%Hl@nH^0b~I87pi~a+pE*j@?MhV9R#PhMXzl!I zV`*}0T4T9d&9ryzM^?Wo_~=Z`|H#ZTIScu05TKlN4>2N0?(R7Gjyv^ zVY0rNsZrS}zRgkL$Nb}<&b6meFb1avTN%V!CHWhBE7RU9tMqQ@c}6umzoBHRGlH=m zVH97|3!c}H2eT}_L!fa^3rny6TXH52?(*#=>1(1*{BLT^i&HxYyTxO&lK@4L|XAsdmbrr&OsV3}Mi&$RuD@}&nu0hD$~j=< zP$La<aZ()9gd}in;uKmB~glA{bHPzs(l{g$7+^*S_D%8$MG3g0|^>` zMB>2u-~p4kdu+T9Z>qU7sgFskl5`7-`0cg6|0gHw458At^Ljcg&9N(beGl)rwex|P zwQ}9BdW_T;wlhYITItTN8kgw*70+6hW6~?JDlb{giP%V0AsSyV3OW+&-HDUd<~Lwv z1hul?x)W;&t~oaro44uCx_L!mBf^r;&rS>Yf#LVv{KUC!D7T%ZnKi?)mPx*>9_-4Z z=8z?_G1jJaGW`BHE#=Q9@XJzRwd&TEfxyQKT+_)L#w^S;YXL1qI0*U0iv77D*MJBc zDCgJEAZYmd+V&p4)z4f;6cOT;26-}+?f?Z8EKmQxu znHq7|5l5w`zY`E0fsq>3HJ`MYq)2Pt7uIg*rjT;5T6zw<#w8+NvsU;ge1*RIxm zZ66BzFH`D%tPH5}VP&+M3&F~TSp2Do7L(wi+>noG0-L?{j@6xV>E#Zv>1 zSZU%U;oc9{G>^Q$DdawJGa9%Wla|N`pyqmLdN%0FQr~6q~&F}O8UAZG0+XE-$|vH z+xr^CK$VBzFuV1#=HDHUQN4MOfNZ(Uf%oea{kK^`{RLh zfy=$Pvz|Y?hw)n*e(r}NkzT|58zZKEcUUfx9{AHxsgL(uID;N9XEKezeiQ zcWqQ;_;KQKHJ?cX-)vOTeC2k|1G}MiD`di@$>+Ua_s=anj~TrId|uTT@R-kAmpcM9 zyU{LW4z!TNzIfiiYR>1>M!X|R5Aq(JLH?`r_S6%;i&IEGlel$!wtuzv?d&!waNH;N zKKWaw0^P@r-^i%FCOPl*+l(l_JC3g#?VHvRsTCJc8wKteJg z?iNm$85FO60mFgmUz}iI+Nmxf4xafxcV_54iWr!+1#;BQPkU&e2N0)>nGa^nqBq9V z4?IcRp19CUmRFw2x~GtohYFp(Mel;vZD$W{0?o~cL`*E{kFI)D{N0mpvRTl?u+(P( zx&N!!tiPIYyg$5VYy(DzsG~=q{K&8cjeWmo2znF8l}4k1DKbgY zCG;u8Z2D>1s z_~|{w6SYV15u5?I^xB;})RUq}r~S+(hCyTTEAmGld%TvOkfId6eR@H22$P5&IoSkf z^4)2Tj-cJ1eXQ{KCJA!B_n}dDy!GmHG!I-^#G6BUkPfoNJdz}%t~UNyjHk7m@!H?1 z3>E#H=(gZn=A{x|PZk$$Q_mAB2oT=g3f(_EqZRRnAA2cgQr7%XS}y01LRag*Uq0M( zNV&Hd3s1nj26*LX+t>w#XfIjbJwIoCfOY>sM3i{2L4nG=f*g*wJnJ-Quq1#55*s`R z1YX^3um)v`lhmEeqm&vb!Gd(%nd&}5btXxe7YN(Z!zk9uC1#SdB5(M(rzg6(b+Noo<0Gg>v`VNJG^OiocZX!YjH0dWY`z3 zT9X9=v4npB0dMO`TKKxuHD2*{dz13j-=EjAemIDCuxw<8ma801uc^BJioq|eVKhZ> zDt=5gxR5Q%pK!<+ejE|GGEsOM)-)xUHh9`7T#c%nmU)-%71yyizEULOH*h*D7bRQ8 za@Ae(^d{a{>-X;Xt<&fY&4eP2PW}BHFjWUHz*sN5rGK z0}8fM{7WZ_m+f}m{N+?8F&GXu>@B*~ua7oe8R$6RjtVRFI(nwFdf2=A$zGSQV(nCC z@SU;iaYsgf)JfM~^nsVme=k($Cg0Va`ShyaQ9Ye^x}*BzpV~BG-u%0E_q_hX1uGCG z324zKw6!g!Jju5DmKOQt-jcl?Pm0rg2AyCLez__mcKE3QW$GjeY$E{wS+bhPMj=CG z)eTAkZ=%4zlT@3W}}?8n>xUMe(w}4Sp@MSeG*L z{L@s`?r6D-Ut0QBA;;V9Q5lK1ydDkdd7Yi?Rr*<)|A7lGpCkboX)6uAE4gf%E%<8^ z1#ajeHFCIQ5(cGQ;mN~nxuA*GArdrH75l1SafB7QJnhRMO919kaI|J$^+hGuHtgrX zuYsBP*TrkYMnxK5igpz{yeq|&Xa?rhjgsDPEK9h#zq-FES=6+;F=nG{gc%Vn7B;CH zqbkb}(e?pLc+!tT7vQZ~bmYN)y`W7#6 z8|)>yk`sr9u>5%oG}g2bsjM5PHNgOH^!@)|e37^}P@fcnwLi6#rHLa{-Q`_PN@WEh zG;nn238FUQ)CP&if%}JS4Hfha1Zv)cVgBj{X{*-s10)fs?Xj^BXT#A&XW`IHrTHW4 
z^?hTx;@kJ@#`>Dpzqu+5njO6Xg09RmRTVbbh#T8x@7nik$fQ{1yIRb;j}GY2|Stg|I==s4crqn0N5FY9lJv8aowprX{e-spbuv)qa=Xy(}TmG8YwX3_sITSf% z7i7BJ2R<7U@W2z|ul}jV((%yhOX6;Jn8|{YYkQb6?uX5etaRz}-C7{=wCeUm9-msh z$BFIf4123IW|Ptqtf#qitlPZ3-L)n;Qo?E!;+3)mz>Vb2oF?%v(3*{)C%-)^vm)=; zrE(6=XayBd`+N=TvVIz1ukd{9kVfI9t)X%TJ$qG9y)pxU`g#BUyNGktZqz#^N}Axw zuyMH3lyLJrObzoX|XzXg;)m&x^zQ zyCJSw+sn(EPhS)pe%N}?M4nS7Y78rW>~HyW0h*pOq49V@pZKpxNU0sUaNI0ht>$BF ztg>(NHAJ z&HsyjcYh(^k@1tCH_Q9)M1H3E{B9k1XXK^%=z+DM(9>ts^_X>05=H zs3kh}Z&)A-Psr7`BJVaap2)_JJh00p+i~8uzA0Pz)BaO^3AwMu7 zc}PJ=6@OixfEIGZ)hQrj67pU$L@#AGU?DLC$8iJcevzKd zZ~eRsml*AGtA)yCgx0#)rz$t4ivJCper!1xz;miThi$$= ztov7B9HhLmDR7O_Qu*2A$zSntp(5uI0_=b4<#11G8q^fvSg}>3 zj9GecG=Nd^EA!?x#=&i2>AT25XO}02Cp=f*kefRwlUnEA_DzvU!j zQ|b)Lt8H`47W+$K6xFkk41&+iw4HI!*tzljY#|0=D{SK3R z##$fA`1pzGIU)N@_3hM;H!L5Mtd+8Tu=9-bmVSV}ui;gJyoj8bE7ClFLqcvuFI`I!5FDmc3?PZFluT}O#sl1=A?cr+GoGdpn7hoIGSIIehsm! zY>RR0czW3WRID(Gr%R-%YecL|!)vzcepn3=Vr2s50n!4N8aQ#qaV0=^+{fi)r$jxbNa%5JW zbY~^Fac30_65R+HSjH=>p?BXWWH%Ie67b${x>N7)!Z+yV92QPL74Zl%VF6VKfHA{Gu%owh zz;ST`^)BSW`zithgvfB>d;GJNAt3{b1vnwTLD9q$W1rzE98mJx(K?XG)X=|3K^=ek zb_6H#Hb_Xo7Ak#GB5%|(0nW_Gz>K!0hcEP8Tl|A-Vme>ilEOU`4dw6vx?id_(yiE7C^hIMZ@6=sK$?N|n z*Ke~jf44aL_LRVY9zL1!M_utlt)863kl$b8fMPe! zr2QDoztJVp>Dq4-xB?t^9hvG|Zi^f`H4v;=aPS$7CNzLv#`SW)YKdtW!7qj?Ql~P@ zs{bk2e|yqK*#5>@o%-+J>kW|PU*pH=z9OZv$7#o|#%iLF&r*J?uj&JtaGmfpAipL7m|jBD3^Z19eSMqa(KMc2F|_B@0B&5jf`v z&~4lt2GZL8o$}8|u`8f4GyNw%H{9>ZqzU|uLqtik0J!39(RIX2bxz)i}n7X(!bQ!Gf3*~CLkcgy?F;W znA9azXr-K7nU-Y9W=CujfMfN_tXeO<2VD#^r*dvx$nXwi5fR`m1JNTYgfad-J$fB@ zNUfrAh*GeQ_kKwZC@Bl6=6Pb?gI0U_%y#-*X24hynt%5x&)4~l^^MhlO+j|@Zc`Go z#-n}9LJ{gBikdlPg2B=05pX$rop64pB4Gi?z&Qzkla$V-9-y0>crrqOs}kV}7w&~U z?8!aJuLuBAkMQ$8;Rit+WE=;FpTWx?7iPF+ma+A_lUYRv*}Fe`?VOkI3KBa-9pDNM zRs=@FeZo_}5$XTr+6ZZGLGqA0b)wsRK_6;7sAEETx^Ih!X5bd|`W`qP|+fOYBuH2j1{%cnIdr?p)Q zJDsJr^#N4XJ+@A;cUl-;LYLQ)9VipMGi%55+{=fd{r`F{>1Cuuy$5dUbTOBXRCS6N zhUMRSmLC<+^Wc}fS3jq0^ObKteQEuDG>;va71gVM**dKFH}?W>aR*VJiMM3lT~G;K z4jH=tWa!I%U%`^tdUn!ep>3s#b?I-1-$&H5vLGd)XlkcaqrF|CEKVB@w|OrB`2(mA z8gPj!%X7yqa;rZI>pMYwqpv13KMs|D-1qdBx_p%URGVT-uA=^d^0z0E;!!&2i5l>F z>o?)Kzq>2#rCxKt2Xbu*OlQFZI#DCiUrj0k^DCnBUkR4yfqrwi18ab55Y7>mMj!wVCJGGJEZhI)1S=sqaNjr1!wPEtD&WJOf-U=tG1{3<$S zdQkYC>bHR&1_Fyi5eu`UUaQ*s>*{x@9Z0q8`Wr-q!VhH;C;G!?6}1+(vnHpo6U(6! zzkbe(*_UOe__m;>YE#vG#;t_E@P4L$rnh#=%3kn&!c2ev`PjRxMyZ6|=AxdpfvfKo znC>C^cnWjDQXKL5IrRrhg0igj8tF{m;JfK}NsE0!Lzx8)(p>^xXY+aRTe*Y4V~I!v zi%0^G29bdiDuzwg(*mMy+(Ad`FV9thc&|%bO76vLAo7JZ8uYU8KqES&D-(uPl?bNc zG1(2ClYt{x)svp$0)FhWv1(;r;`U#GvXPQc{>O$}o)*ovT{f)vF;S}yS39!gSwd2p zAZo#g0Nx=d^aXuctc_aorP>dM3;j6%&F+sMju%2-1e zh`||MtVZQy(C=TReH7J$w!WM2L6mkFW)v3MZ5Y z?>AzrGDXP5zu094D6a*2gNM)TEeYkDzl?HB1i(Y$ki&*j%shGl2TI{^EZRum^o4(e2uR`y zK)Ux{yg+2!a%&1Jd2_cq2|od^Ow^NZBtZT<1M3X*FRw^6KI4FF(b5( z`mzAD_jYlwFo6h-dV0+w`2>&#iDbhPU4;0+KJ*Z7$K{ruZO7d|vM-ttDGS!)itEOK zAd7kbweZo&pV1{i7*RSoojIUJo6{G3M0E7gBAPSt^|XxT=TM4S(?PLM6RD!wyUE$Sm1+#M)X zChN@ucMlsVFijZ2e+MxRlJEGvcz{w^I6obz8#IJtzw*sO*DNJ#=Puboh2f(6Q(k|N)05-K|ApmS!! 
zP~-F7p)Yjt5{N<=a!LKwvxaz(VoB!mJq_kdxE`x}9JDTIQ1cgkkd$-Ij{CAwV&a%# zfs~a4E~qXsMdx;^{#6gFasHePBPUkAnV?U3g=9!usD5`DDkJ%I{zpMN>$OY>1`32> zh-ng6l21DewWZ@sL0s-5jNRspk80ZP89%@NTkI5Vcb@{jv`*G}&(`F|dPnFSK>5^>QLra$mUoB4VZy#;fzwH@UbY!tK^Jo-S z5|WZTnLwkxK?e(K&tsQ+JaL=ftScN$GCVC9ml4(*PM}DXWi~-%DIpPi zShhiC>JJDoS*F~CZSZwk+TS_ywTs*Rqov?QlC6L z_w?>e-Gd%li`G!~!9eS#?$ zS)8C&`-zv{L&UbpUlRhd!QpKJ?aqSKHv)L zz2kEAE@Ql%fKcJ2QO^1p z{lPdtqh*{Umeo}N*V{<3_b0UeA*?`rp?Xjf8Tf(>g(y|J?Yf|gmShFuT6`i#29heCq*1t%M57~$V-KV_ZDJGz96lY9x9VCp0$anecs721lE=~=zz>>cBGk1Bo-iKZ6+Twq=Z7VTM> z+Kj6iA@*fSs0iG(Kx(UO+?GkcC$ik$1!=zFQzxl0hU%QvGP*&R|FQRnB~zoAkC0y+}=-hE%2ZxY^Ff2RJsgF{{d zdG(xaQN&bzEdJz%^H;d~?sXs=fjFTNI)W}BGf^$@dri9;-BEmdu5RynV?KTNFgWNN zQ!~DP$rAiJECik=9qY(8d+&!|H>)*Up$0n+hj`X%9uhAURK79>qiawH7tH;>|8kPf zAF%u;QSH$2Oj1VR!o} zA{c=wv_K|qH_?~H(x3igbbs-^*^jDP;}KH$0C=oWgT@dLh!zN%6w6OAUnHMkyt4HG zz%{ZL2L{FM_fB>%2g{JF@hwLB?b)TFhw7azdGu$q8y-)R$j2h!-=6n1MBW%T>n_A4 z(qk^Xx2XSa(B;*a*lo#s;u+op=$_Qmtxzcle*n!Po?!(*`~;+Le;ZqN2^rwOtb{pXL%l{f+V02_Ffc7U2RVBrxfZ>uM!+!#}CJ9ha%oLj| zvR3j}wC{3q#stCpR{_I>AT=KOti2&{;_7_+T5I!IT&e*n6LqDoaR7Zi)+lJ~m~N>< zYOF=7lOIw-ICo6AX=ccVgrpHSGpGZQFIbx4Gl_;}!peE}PF6 z+HEpz832_^Lh6vy6vM2})0fNy+WnloMd9msaL^?@q z6+jb1;mjtdmlz=*i7ksbEmzI$WEvn!PY~Kd696X35ADb;#?n9^S|9iQ?u?IL{Uw2c z2Y@pv9?fV8DjC!s$Hy}AYH(AhNT;m?Mmn8%14=-j8Cm?JQ^!*1!T}5t*>W2Mq}@9? z-5exG0!=1>3eh^Ua_+*uJ;k+voy-K)as|KW2?i{%6esZmv=sF>XSHNv%5-9SM#Wgh z>%2u!Wdsp*j7a%{@N*Je2M;+0sHTx<5~L6&0M2h!z)%x9%oonu0ESP}H{Y6}CP?&i zc>4EvhC>`(mDJCMEJJdKMA~C+Vk_d7rs3TDFM>XDDNCJ(S*j5g){Hn9OMm`C_)m<_ zx+$_^OyByB7DuMlA=3`v zRCMtwrA(?r-A2PQ!uLqxNNqf0)TIS z+1)k6G2EL-ZINh`49-Gn(MTibPb32xQ;oi&&KUFp?S<=STyK=?4y5a#ux|bup9FAWBC2<>e})PD1FzWrLmLL97G=jK z9{ue2;^3k8cYa^S7)f5%GjHB1bC~2N*+PR)#?12ZPo_fB>{HM`2pK6YbI)>0L5;zh zX5YoS2Q;VQb%RWkj04ym>5`J(g1#UB6Metmnh8nZ4~5vxxdcEc%FoFv4*(*uj0h4% z<@pEU?`(ekXDZZCze)fc>xTG>p^u@^L<`ac;u(HCGtMSOJf-f~A3UQYbs*a3;S-pf zJOtVc3~eL^CAdkAJ2puqOK=v@u&{E2WgQC*%yU~CPXfk83X^A~y_*(u>FsgJLJ{JNAI=QBhqAe7i=+ z+?!rh_I!AK-XKP}pT#dCK)B+R{p}B=<`6?ygKc-G-!LXL#BJjNn}y0dojt!XTI^ z(Cr!bC-2Z;QwzM5#kt?DfnM!fWE%?&%dm4M1`@4}SDov=$A9Qk`L?rdcIy-h?Qs~? 
[GIT binary patch data (base85-encoded payload) omitted; not recoverable as text.]
zBFH{+rViG$LFCwHznGx5^ZH#&OQ!t-^80}P`<<@gbRGj>fPpnOs>XBc&$Gb{z5&1& z#VWmg>6N>pd15y9oBnm>QEXO@X0ob4BeZiM&B=`=(0V9?@9^XVj0k)dwo+VWs$vE6 z$d^aGG^1^SfnMI;9ag+B)2;~67>Ql25=5SAK8I57h-HMx@v%rGt@*i@0{G=OSpL!| zNqFsM{Y}zG5*fvvs#KEm{}&%=lxyA6!nojGenujUL+K6alfs6+KxC) zR_345t5`9q zAdgRAt*lCDVPOH5VuRg`!HiF>{S3_i<{Yf(qWqWnYYg*89<$kT-@MM%Zzc^44y@|# z?UB!5iEPZYo$N-MyDF2!Y2lNLqs`GP$&kla(nh3hpFluX5JXcli6)h7Bd;mdLjHkn zBX33RK)mngMCL<6i*Q19;r#>TUT9)WnyuHR-AR6LpT^ zUmc?vWKtFT()L)SdNTOT1tv|;8wPX(0;3$Ouv~wOdU}h};3Py^?)$eH^IGn=Z1^wa zj9JRlK~n&Dw6Kf5x+YWUq~aYXuHvlCmxKJR4wN=mX(D`6G~p)(-!uZyL%|! zRhop|pdE})Ra9^`pH1rjZSk#}PGW*6=ZH~(Szpi!wD`D}7`2eIXqWK{6GKd1E)c%* ze#h`V@DRFxkoG>&MA%}wDPH_{jRRK}Ve1`th2I=;bslb#LVxOWa}VTW7e!9UgFy2kJ5T9#vllLob z)7@ayA1qyKE}d&>NH(DYcz9R;MUI6tqMdPmalw9_b6>(0gn>#3Z{pGTyj3|H&@YCw zownj{-{b#$n_QfKPRdmv%%Cdq#lSI%0S^s3TcTWkVZ8Kv*HX}&!&E6X9@)Et`IZ9R z>hIHBeQ1OpxJ%id^BtR`dG34ml`j+Dq7=7&3c^>Vdo$ktcUe)EF$OVY*Te4RTqup! zZ3N1||a6j-#7{woIxGass13 z{AcE!BVS$<31+cd6-7Y26#=~qkV%%geYY;FJX` ze0b|Y$Q-u0X$2NtXcZGBGb{ntw)eYo=3u=02b+naEu)iTJnx53SM+k?^EfbCCv{w+ zm?E1#4yP}Y+00g?#zbc}S{(@klV-i2d8_k?`qfoL-PbO_ma`!tA-GgjaUWQ;JC~d! z2xX=3hmr#O1_ysOxNaN{2`rcZ9?P<~p)9_XJlDK9!fJwn8pb3w#=A-)I211{Bcs7k znlaurC0;7);Za-WKAdAr=Dqr|_y+R$95G5Hf&z50BIQurIx<1Er$W2f1$tb>vH5Yb zMy=A3R;~1ceepJ>Xghi-z&H9S+1c6UCj4Y@<+T|{cWgN$2E&&^?%M`em*8Y106RXV z6dXBx3RY08^u%Gh>q~O(KX*BJ&33fTgS6z zpQXIrp)Ltj}{f{u{xpaU){qC&#U9txjqv&Zzp}3 zbVbN*f~dMIe@bM*m~drPr!r(LvZ~$=Kt)C8c1aOR0nAS-3b913he0KiXcQFn#N`0^)RRHsCk@)z=)utxR)NtgRHzU_WXlPf z7`W{#DtH9i`_0)lO>3j$!lghkLF4$XJ>(tiY!{7UVHiCkv14*RnWvu~tUr3G={ zl>wHJ(tf5^*1*6ZX&V5eXm>XkQ_m-U^t>kv6`1$Rds8>p>Mx9skN zU$scavmY8AL~C*pOfTrnjyBxNHM;wf`sj2_MH&}*yxe$URi{WBDOwqBo0+Nd>@89)O*sHVkk%|ML{mjz@lDN9RRMXex1Q{te7BA8J zp%`f)`|h-MVuevflJ`(Y3xl!>CY44sUZRHZd}wEJB=Fth&^uIUgSBCG9+}PU$%+0X(aTpj%n6-tjiC{-Xq|4G)~Dyx(==(nmS1Z+ zSmp#gH%h8joDJQ7%my1px{#`-G`6NA^=wRdm69O|9L$QWCWx!)S?RSpXg^mXPLP`7 zS6*6ABb)nc$6rm4J8Hj(*ZWz(b^R4s>AAFYZA7W?`n8;*VoVv|fsB+CVgxpojgEhD zH)1f7^H+L+>ZcJ|Y^<-$$Fm(x@`Q3)@A6po9)AR`F;^*_ee=&Z%+G=M-jGEK+2;2% zdvbsu0RIj4)&~fb{6HETmgwik0y9|NVJE%+NSi)OA3GXh_g_=6Z=)lOiMdFdIdXWO z+FIi5q_022rw!qMm(sxkK0u*fcC#FEY8u|s;@}Y`={ARYjV44Ps`>Z`u^ZHzc)Sm) z^4QZ8Ic!IYijU|2darHlAAWtdL!s7Gfh-Al1%ZQq3$qDwn|3K$Fq zz8EqA%+cQl*Zk=t2j&I^L)Ppgj;H%`2vsFPDn9^XhubjAZKECARX=q`6EiTBO+H_- z6nPVgH74c8@f!YOUx2t82^UbGChoVqtE*45Q!|_D84~mK=YBbgqogZM0)8xsL$|^N z;rQfaC?1a!93GR#rceGa&*E1yNGjCS)v@+mS->vmUlmfG7Pgplql)+$%ZG&{yMD=j znrlr?$u6)xQ4scTV);#;3(BiccLaapzh_bnnFFjU()(mRSF>0P9TQW!+H&Y{HASU! 
z&%?MuSwkZ=`icKSI3@|0(FA1B?|LggkzuR~{yV~;IRH)AWSlJ^<)S6`_Dy9!RI+r8G}GwSA%5kyrts!=ZI95i!+^cr zhV4PnNpY%C6)zXTpDVk`JqnRRz|Q+MSpT1uwVPxFq9eHk>b)c*0emvD=%)Qf%&o1S zr?}ttg`#5C1|&4Sk!i@}wFuD>M@rq1cuTcWDAX=Y#@pIBh129qb*oV!6;`8A@b#0n zze8u(6DFf(LEs9(coe~(tIuaUQ!9r{T}}-Kq}&!!fPdgK8+pj}ZCbA{wji9XLe#{X z-OvH9E8Uu$Hm3b1FTxD?==K=nx^1nS@xr&b-SrBK@JAeYWREr1sN~!-EO#pjr2!3I z&ZTAUkvN<2ik`hvs)U9+*BA&v>ez$g!gz-GeMlXjiD_sO{(Mu)r0UB1-z0i+HQ(*o zoY8W9mioEbW5u37HA=mPvhoqRwln`(&6QBEvl_$_?F97hBgxY#Knb65qVeSTY!|Y$ zsVFLj09q#_gA8B+R0wdZn44@aWMpJcLfL{|hqTmzx6U$ha%OkeXO_KxG>a{x(Q%%1 z7W7_2c?Ho!XxqCF27twDt7>BLXQHeak;Mx=?{X`(ue3-d6Xd*UDQ}JOQk{Clmw}JB z7aUJ*c6Peadhq_EEI+M%m|Ed9e zG1ID&@}Keuz!5;^)wqEti`RhSpz~7+Nl8XdT@$OQh_=t6>+n8%ZwW!9}q_c-76o7>sHTEnb2bU@yIO9A|3p=-LytcL%CLvI32;6x8D z^w^=YvQp6d`0F&+?fgit^wHHuj_=p0qm^FORzGh;+xe};uCONrhk&!%$ylqJ%JIQ% z5&jXD8T!`(QI(JmJxHDSa8iD((xf5K{+>H|!bUb4|5+J1p-0nPfl-T)M`H}ehpu?-mP@kS4Q%o;U4f-+eRr-Q5XjRPUIEt zB%+s(=e47KO^AXHUC5DJuNN77n}XbYzIvDVmMm7=p8LfIN2jK_mntf4>nmVeO#=f; z2<@|HDE9wq8n)I=C2_wnVE|h1zh(|lj48WsO#}eaZ4XBJDCBOFd$8gd>XiN~CZz5& zRhZ?OZLVFRnN#B|z-$~|aMsxYE|4g^sD89WviY%tgN#fM{GnA6LVqfm*4=Ew55r*0 z#>o#vL?{2XAZp`rq>i377_d`!y~J>?p#6>y3il2iUMRb3foFMFkDHd&C5EdSl3^YQa*bBvW|w(E<{=a_d{= zXLQlnb?qr9HKh3FUTB;7%a({Nr`=yphe&!Tx2lZ|^PfL|+)8yDT-Km-ZK`>41={7G zUt8bj8`$aU*lS_er)ZDxVga1xQadt}D!kw&!_l9CtQBJ_>{y1QA$DNrLh2eQSS!WQ zN{`Xc?+js3LBU4JgZW!;cmtarxD`!`k9azYOEztRx(Ipm#rLja1eXb8L8Q<+J-%v8 zz0P5d)=3iwFUk7)e6QHHCHa{Wl$*P%kI!bZq);99u(j~xS&Y2A#99Ta_Y>u@MYqPI zeN5@WcqzADLe1`(VR7XAu_05CejybUMKlehN%@_{2x$z2!d7`7&h_cEBF*5^GWEZ; zR8^fnjgPYzg9zHcyfeLU4;x^kP@B?8Lp&W6HZcEP`GEiOZk)lje*^#qC|FN~=zJqTelYL!yD&Br>% ze@mftU7*8ysme~3S%kxAP}XJYdvQMMY4|v64lWg6Gt=0H#R5(@ce&*z%DuE}RKEdu zOr-3~7YLhPO*;UtunHfxi_5pQSeTg=>-1#if3KyD{!k`(bv4DaIk81uy@EUUr_G2w z#yh7uCb>JW9$a#t2{}Z0g7Y@Gta02`@SK+CWJ=y=21RDPri0=~_7_wSL3 z{349*M^l|+yxf_Mgett7)~$3pyW!zZc^kP}liSvx#z_hhFHW)hOLM!KTIH!mnfd-D zw_Iy8EZ(T{nE#^PE43?_wPm87`s8$Akgq{h~x9aglio z)MfK+$b-}cT`YcwUd-nCS9p{zju*md9f}~F-)!X>XiZMa(9V3^&cJBW4C(q6D*|a( zPXsx*CMFy^t^#4B>M z-fJnJ2bpYkM%+K}!2uT5zU3yhL<`DXZ(&gkvuC=jTGzkJvkP&z!Y!BV^2WDHW2TTx z6LU^7%nmS`)l_iI`^r7>M>lrtCkc- z6{vs!2_PDc6TI6W#|Pe%F5j%B8^0-3dp5U&@6Dr(1i%~#$=C>Y2txfvz6u1P;w3(D z*=!h<@7Nb>@^uc~cOC}E=AON2a(O-zoa?{PAXt3^9Gt>{fU35u@nL;alA#ykHmJ*R z@D#GEHKPfBG`e32;O9o9y7w)wlH>zgfcv~6jO1~)gSNJZn%&5L>N=Z^{zfgKWHkRI@)VbIOLss!4l zF$XIy*<*o3L&o!DXH2^u6olav%j{#k$`@@os)X*P@-nL`05r)RSUd!99H&V;gbLr= z$jXWdoVXcBAwb-a#)Qtf8;q3Haev>^&NiSOuu$QkX>i}~u&_I4yFYc>^{(=!U3Jj7 z6~Q&&w)^tSYW2T5yP)H@F?mMWdM_OekTSkYs`C*FW?9P9gCH8PVdNkBcMSNbK7B&- z{lF%?JeI;JE|%-+4Ek*hPIw-VIpmpAm`rA=3w`@rorD5h7LE)G*5}Wk0FD-PK}p)a z0GZ+PIknO#*4j&}6Aio<5xd^ZW9b?IK;e>;$!xu|TraZ8Ab%bBT#;HtCq1~{{2h<# zVybBpJC%aMDh{D+%htwQS;-%Ym0oK{0R={tB1RMfNI`yMH*Xe6-Hbim9LR9$O1>C; z!e_!Gc%rMd=y75j23&#u^X(y;hq+I;Ht&Y-v!0rKN9YC_A5#!Kj8cHUy!@SA*zeM` zYnxG^G}eO+KPmlyvjh!6RaF%o3yTlF`pg%u|5^S%jkCos)vx1UW!i0S7BSrRlyeRcx$M zrdbS0EFe>I=vcM$lkMe2%!wZ!CY%N1d_jAO=gm=fgU+mRY1K2DNAJJ#ixK`sHn^D2 z<$9m+o^BC1Y&M8W9~^YVN>bzEuy5c7R*UF*wNRsGWFH~W3rtz*f})TI0z z5=4w;D~(iPb;e&ZQ;#y;a3$!rn;Na)Z=AjDOnPskqEY;&YQ(Wo73`8k6sd_1NW<|l z6>2^Mphgn;MF2oRzrPzvDFm#avf(G_M{$7#0LZ!E&CT;Q42;(_u7dXvlfm%a4eG5( z1W`RhcVs*S7}N6Mk@}|CDc_uprG*9UfB(sWEiTBct*r|T8mgwb`Y=m%DiQ5xYWcp_ z8X0>6sK;#mGTn<;XcQ|729XGo#W$_gaauf7*qu#BZ5oQvX7r1@HCUTJ#jB7qcIbSFVWt6B3fya|M#h;RNOY_*Y&z~`GFLInPIxatIy z@nf7eq}UKMXsBrtL%q9D#*lFoV&aDsM~|n_W5jQYAEXkfMGsdJ#51VEn_tF}^Wl<^ zywmYlNCtzyu_PSCh4D-=LeHSbD(Mjwy!9GHfTcY4{2*aqr+^+3BDtt;~Xg$SQ4TpQgE@ z`Y^-7!sxw^hCHFH4#Q_e84aDHqUB?= z>Zl4_h{PPZ^X27PFKrAoKBqNE{G2MR8@l8s$KDgbJ6BWUC@w&Xx0G9<$7Y3VOl8_p 
zTP(&AgIIg;B24`q4=u)naI@qDZz?-pXqesy$Hs1DtyBu^LB8nnsINcs5g;)zfPGnw z&<^++f$tw%889U!B`YIwIQ-DsLoM^+LpK-Tb>Xm?)U1+j`+wtoW~&kg7GCJgA2{M} z5hG1S{gQ@kI-0wSlbpE*c9~>WpMe#i04QIY^IYH)#_*1QuKt~LKF@BtO17-hdn!lg zdV$SqLXf(uQ<4Za&D|)%q72c-j_#H}RLqo7 zYY-qWO1(zVDw*?}=vCL_lawi-!j%zuQ9%*1%Ma`z8X7R;PD)X?`!lpf4VTIx^t%l@ zMil_4B#;O-TlPo$_?SOP4>7Nm-&GgtC4ENfi$T?0QXK@VVF1{Li$@l>TLQBmZcTds zUiokB-WbB!PTh4IJu{*!<#Y(d$2E z#}e(R4aG~z7_Coe8+Y%|HGvIuKT(Chnk+LE2JWA1ej^x-x;+R1DMmxuxhctbv7VP# zT1NJJ5&3wVy>_i`sX(*+9r@a5!Fkt|p8+ni#X8{^Pes?4FmwqdWCZsjZ7CTmTwdkS z88+#g8UKBSl9*N03BV4s=fwEr?b!$B7ce{RWWs&HGk}m&T69%Q-{X|WWm@XwV3P1l zq-7sKHm!9C;G(PaD7wnM@ybIeRakqf)r#Sn)G35f$OwG{o+c9!dtiEzgMgCqJsOQ) zB9e2MJdHo$WaiTjugtLxb}223YIEGHZv*mfqHMcJzNLY_8J_EwbYXE>aR~y1D=McrB{T(?Z)NavT0Qi9i#%Rh2 zOooudHsfR_XrF_$RvD7M2X*_TIJViwyfpfgDrrI#r`O4#Wo!Oa!n8SFgaCkK^4aHT zLANp=n9If9Il&8QkLg9YKSV`s^8fr=h7+W$Ga~4tp2|4e^`a-4jhOV z?g~A57oL(bUQDdX$ml`eZM655;Lilv0JJ=Dn9uzh((AN8+bECVXW}LRUk%RV)nbvb z@0KHHaS6*w;LGCqVpH?LsXoMHf3_hiA%VjsTlXYTt~!j7j)l_WmETjH zRr9R8oGNYx9xd-%vC!@4qw}wDxzpDGk zc;^u#@ivL!)q_I=U@fEiVy&sO&|QSG8^>9`ER-!{73dxHu15QF&f|;Kpbr(3~ zB_$EUcCs9qqY!@^_jO*gH60Y>OxSd&I|g7t3h+r{^)#6!?q<>ibyY{0DzX0wl*iI$ z67grERF%w5)4=n9o7h$v5q)opBO47Rv?goihB9rrcBU{%M8@F-Xnurn41@8Um-l?B zxp_{cdW-?a)DGQxn0ok0ph63UN`WosR=tTabC#1^0ik5VsL(AHbms@8=q`E=O=WSQ zAr|Xk&jWzi4;xDamnVnOwhkvJ+e2P=(u?!#_9G3N&}8y)Imuuo=Be(-sYX^1snAd- z<0%WjYNsDdcU9Ga-M>=QCm~GNjSleJwQbe|R7L~+1GB%*{^qDk2I7l|i)G6M+h|zO zU8Ks&E4kYJADMi%JqQ?3aj-SyfLd0wv=3;-}y39rZ-w$6xHq@~CZDA-F1 z7K8}8+f!P5eiNe_1OFO)`O5c*hv1PPX;Xn(TlE^0>CH=sV3FB z6|M(#yR(6WO7~4fnRVB{FH+fraTY7Na=vIQ;-j9vjObd6fE7@SzJ?dGA;#)!jWl{g zi?rT%2TM;m@J;GFIoS>Iyc46qTD0$aG(E%cNZcsfQ_@5px8C@RjJ??xlZQqZyk)UA zL<4jSOPM5RaSRH{(|A#R1G2KRV0s%Ix>Mn}AXdGS7?#bXVq(&NK74l5z6{t_fK@RvtV8*3Q9COVm4OFCW;Qj=fl*Fk#EO znImu%B^@0CJXweB{RS{IgYi!Cla6x<_Dy4nI>k_dpFSS}vD9M~^x1}{l}(3z5-*dH zi*Us5FdSZ5$v?U*rpdF9y|x--MerB;Arsv7M0TwQUKsEvy`;curk>cx-68xcc8YYC zjZ}gm=xOK;I2taAgW%C}?@38#@*=~q4o;3JRKA!ssJ+oJdddi8jwvW$+MTJxdGh25 z;74$-mSZ{IGW)1}82>er`U*aWSYljRXh z_xtJYg`{Wm8Vnv3!ErMebOH+q#o?KrpmBLAp7EWo&p|elLF{qT(*gWtCrtlM>Ta$? zS%+Z>V5#Y5tIV8et{JJj6<2e1J%qK&jpBecRp%w5ooU_~>!|w8qXNX3x znsK_8V*gBf!qww<7(SgO0``W)X}hE|%Hc%H^rVtLohqF&)FLu_~6g zVebb;9Ae=vXHL-2fwx%yN!+#;PkqW4vROF(_IspV4p1)V4w+20*^%NfWqg~(GPUk6 zMSfHpJ;+_P8_oCEJM~T-;t4P5p@0oTH1CZEC@k6PFUB01vG2;tTXQg3O!ezOdr1s< z$qY~y0h$Bl&ylSv@4G#fCO=UPxP{P~ET7UG4bG%g9*jKsTk>!2`_=!|0$fufzzspL z@+0mfj@u^v*K1aJY)A#S59*JuDY{fYQp>&J{WPtIH%0Fl&RJO37Wdakaa?4C7gs2k z9@GDUH9-m6b4>yzTEzg3f#8T723hyUz0TnMmD;Ky2&1KC=!?ILd~NneTRd0xAfL;A zAHUk%8%w$ARPMcf(|O+1p+E)W9uVj(E8%plrC}8wsLVQ$WvY7uHZ#gkW1>hCkAlM1wyntiZR>&%PV3=phi* z)i>}A9g%WCFcMA6IqCZsI9<`Vw*(b4JA*u5Md17txU$Ebq)S+FJY3|sT1%tCUU^|Z@j>W*>H}%79i+>Km+;Z8c#xWlCNHez}hVJYx zn(N@dyPT2|)=**Gq-nYLq^2Ioe-ymD1SiXl69CDt&imqXw%s^>CF+j$|A_kPu&TRY z>jMZ#3j)%hfHWvAtu%;qw{&+m2uOE#cgLYqICO`!ba(gt9o~Dt?+>2`d4Mx}_ROrc zW|xAZ|0(&uCqZoCzoodGsf?-3i6PS4^xb+D-~Y&4Js>_OHI5xP^OTykl8+fZ%Iv_HEOi#@ zSKk*%KO3t&k~QOebtGiXDQJE0p8omun>$1IUi(QGO2G8T5634Ra2SciKK9U~D!F-i zUoWEHrxm2TO0@q>^=~ecO!+6I!hrS7*Ux%w>dQl=JKS1+1Vrc&xLPBbS;pt|v}zP@%aHC2gtcyRAFeH-Nj2u3Wv#|!xyy)ok8-@gS$ z(|N0OyAfikWxEbIm7&*=&tX0!Pyh58HuRs|6uC<}SE**l;SFdFU#)QeHGVAJovoNM z6utxIOFd(7<-7Yh5uRs9{X&B*Q?d&Sm20XIxeQ_%G+p~0F<4*JE$Pely=+3cboOf3 zvOUFTE|M_h?!MKpU;n9*WD7vAXnby2Rk?sWc$e}ADJCbZY^@aU+dkC@@CoQGucA?q%WLqPal}i z7oAVjq_e1*8TGdkiJAW8W%LUabmp86&K~FJSG_KzQ=(hi+4Rdbll=%?X&3?E_^~e~T9STZVfe#qG1XUr%

`D zWf5uIL8GBE2f=!v^6oid0ydfg27v@_&k9?Z?cnzu3>*TUT*B`b`9|B#u^J<3=!rzY zafEohuB^S`#6Zbo_6BuWg2@k6xY%=#Iu~Nv*txNACN@<0vuypVcm6VCR%a&?znpY) z_s%!@jf(O0+Hn|__&1y{Oze!eAi~wl%d*=4MU)q{Y_?NtOG|g!`S~Q;{&<$~2XiGP z%cOU2Tfbf(Ee(eY2$5XPn^oA5jYu-6*P|ZJ*8$Q6Um#jYg&wzs&4hiX;HWoBua|e` zgWmVVr?*^G9H9R$g%$GOh{vJeC<>`OdA+fvPxv&!+~E!b8-{vhm~dB3RRu!tFZ~{qw9e{)*+-`tlS8X!$6(4UhRUfwOS$Z?LfhQ z`UWBk`ta@Bx0A;SZP}b*!JE549HM}8St{xDKUG=2fT7>+5+=NGw z)22P4_$<~JJF-Mv_OGE2@WwN!>(tEgq#H~(NqLicw!W;m+UN^^?QT*WZ_sk^<7$Ax z21R~AGktmWk1yb5+2lDg#;%awx^s-gh~+%fWD!$YC7YUzievNuPiP|$sO0`0_gA`m zQ$?>aFgUn>bl@}Vzs6Sy<>G^nP%}GS^e9f|cuU&cHAuG0s_1~=Knh<>R zJ=jV5xdF?8=)K5;uN@seaJ;Fn`vyif*HvB!Izx47ri-5%f@7b^ z?U^_w0*2$`V8_Q7YC--VwZPfcBI}0pz{a>ZAuTRmn^Kj2@Oqyupp++p1AXK4?Z@w* zxP!GrLPLd2O=%I(2=pDXE20w;jwc~OYV=sXe&#oxyRJOUt{>W-Jn@-pY=^=(->t2! zS>D}lZ0KW9HvTb<`gE{~^ctz!t(NJfg0iCtL=FddE`_>1xE}lk-<(OR|I$T!ks3xX z%Nh@2k81)l?GkZX_q8fjBR?_~7~iCz2yH8u-t38kG|OggUI*c)51ZxVIf=(mvoyG`{e2P0dU^)igXw@~6#G`-Z=2Em=Oi0RL!*>Uu2`)7`42N&VaM|y8*O|@Q;vIeb`N4OUjzt(2q&R7P z@dy8c_>?_x^?R-L>Nk3o%8uCO_PnFscwW)>r=J;KDqD2E2{rCp17nN=7Pxmzj2qA$ zpqR4W>m~8P0BVYr`LEz2>tOq;;Z(scR=1?>JOwtbdxh#bAFzcljkPoKO{_UJ0^-1G zE{pFlmr|*N6M0;iq5Jd=2IG^6Aq9VWd)HE|+P>=R=g!HtBv+XKtro)qeD~R6nc3cR z2Pc&)Mb|OcJ3c+#()5|5S8BDY2 zp8;ZoQE)4Y0ROjX8xJ6@FBvKO-UK+@eKBA(z#0to)aVeh-PO+9_^pgcZ}tBRPO&NqgXEg{=*q`1MnY2Qg`pr zP*nGu)j&hYDKZ=)`k(nav(GvmpudNXHS3Y+fN$y#?%qD8yCZ&Sga>{`RonBHl2yu& zesE_ZPcrKm?0RVmSh!vsvAMCKv(XpjD%wIG7@i^G>GRf?ERop%e`y4aZ~p434{s!% z)l`1N`AlS_92_d`%a&C>Zzx1Gd(KtqO%iTxCAXuk|7blCBmET!FSvgDQ%J~@ z^5aJY=y3w*9YL7nA{{@g5{diQgWoZQFEu;qoo)0(pL(9N5u;)+frqwzV=C-eG-RRY zE&LGBU_oNhzbe}9_UeyTvIG$?4j0~`2f~#om3@P5b#Xa2go|EotMJkYn%rjWFpVmA z|NiEc3jM@%JH_rt#g`M6k%h?tWF_84Bzu-JfjkXM@1hAfW_-en|p&UP><@`~1AZf9M*vrEI znW+!B9!Ns>Z|7SCKQhv%X&EdOu*kw*AqU@|eIf$=>4_IwGIi=+wFe-WiYD0Ct+^g@ z#hZ}?M8Hn`Kpb7*YIorJ)BSPa#$(~cT{ZbFX#)%z)1b3O@*Na zBkG|meo1t^)>61H_*U?1Hg(sgt=__sA71CMweZzny@({@PMPZLf94{%Bwm+#Q^i;) z08L0Z?g*G#QeOHINpCz`%J`w{+sVP0TLnQdpfW#?M@7DZsiyCK@Pg-{0#&L|U~hTk z2N!=+yfwso(Zx1Ri(;jk@YA%tKcBPSHqG+8*z%~=Po1nh-$7Djp^;E{@99A@i?s&E^^jNdFxK$YdN6rUTkn)EwV%m~QWrA`)ar!=5Ma2nW zHr~QJJ1n9;oRu7t$RA6=ZmtX!u^8Fx4b2gHYRT)i(4l|jbF zhLhobhD48x0=;7YEtXc9*=)JgoQhN z0THtg!^&o0OtP*I}&KV|XQS}=ZQlk0*dlmphVg?)-~Ei#8b1icYD9qfcOMr1j3qlyZ zGn?P<-`{bF)0lVHdRpUPI*~W(M~0XDB-K|R2D?4vWq3YfF}~CGYLrh#_#+gs-l=u? 
zN$&|+(=5cRnOZ_fA0V)>Aw9mh3~HIeA-EuKAbZHUxd{O&`Q{Zs&z$K;Gr#(cfvvP7 zt|-obXa9x`d@CLC7}{;kmBQ=Bp6>N@Tf*mfbD|qF;oP3)g_itSZHW?qal;=Qitb-* z5@~?XkUS+ieMXsO`s@}HlwAmRpA4c1ip-T<5mmOD_sfAuYq&fZ)rlou9U>F)+UynI zA4sM6MdT0BoMmh~->DKw^M^p5Zr)y+DhpoMOJ!0sD<}#VVNs4Xsq&cPB3Xh7ixEgF zpm!9&NOV6t7?$?J;4LjIqB0U1qNhTec_(ScM#oKPju7W!^i6t$a)07 z=k$80c+AJ7#6Bl?_Vd7!C%3ayN`S9x-loTE%Xlv@$&WpfrYt^Dp4CJ0m^`wB^+4H^Ld1T8HYg=B>Q zLaAFbHx!QjCjUoTaF$>oZsU*Vqq5WaJ{W3iP6shX=LE#NkHc!dqehR`A?lN965`@t zfnUeO#{R|s{BT|jqmy4xM5`-6C5jWybG`b8b)rVB50 zIgYyHW&1ad?50YGjkfWQ=CReX20F-kYv`Sq$B2OzefENW)rl@3ed?hQNdAg~H((W{ zGe{a(b!^SGHV}XEH!8?61?OFj?dP4_Yny*#ND&vP3IMS70IXy<{43u-zka*Z&W*;z z$J3HRY<1YqPRBypKxqGf`qQ5{Is<`e;OoL$Gsi2h7)uZJ7l?b;=2ee^iJ6ow{I@gLRLx>+mk0 z-Vp_mpM852ZSkM|IP^bbjO=T!M3M*1 zMo;lcRw2<}@409-uIGPjmq%6`v=gPK@j3?`z3h|gqkB1ej$G7+^OwW$a_@abZh`?U$qcS|&1L1)Msf4w$B=b(5PnVuewM##RSR3R0^RjAs?gRMATwzU%E3&Q!3 zj(?Bz)Es1GN_Ix6*DHkxvNu*G)m*3(I{#x@sYcmX{dt868qP!6CIqa0;UVqLHnLWN z$WTv3DX&EVQovpkhc@(vh%*J|S9^4PuWkePYHVXqRw1Cn$Z!&ej@`LGQ-Atu%O+>Umoc4rz^O^(kw{=rpaec4K~F z@|zpE*#?6{fkPuN_lBo;LSGQH7i@Js^nC2_nTt4FdA30M#|i?^MUe%=PPMm6R(iU# zy;5a(Ly4kA>hhdNY3`N69YcW$rJajCW>IM=5sxMiUQz;q%FBrW7Fq%yuOI*tD9_H$ z?8|0rB-Q(Z+ZNJGl~B-0#34r7Ouf-;DfVT=(3e-jTUvO4;kha6e9j+8p>E{BK@Zc zkuNR|kd0UhTmy&?JuYjBHer;mqj54qf_f()5WRN)7Qb(*GYhWGDX853Cu=_6MtY80 zc4}@3h&z5DA()kHZL7;KO?%@eEE0v^fJ%CN1tCAO?#DhiO`)>(_xD53B#afwQ*8j$ zzdxFCX8~L>SM~czq^F<;cWqJ?<@5*zBg^t|uTZ1>RVpKX@xbBK%dHr>)L$B$Pup(| zw_iGmqc2TOP1)8vx5FQ)Di`LSPgVMg!__czhN7^bKeNTb6Q5V{<}2O~#M6dXj**FF zzd|Fx78dsDc9^^`DTW1?W}5G=QES&zsIirY7?B?NXDD0s-j%tXF4TMU4Gy!>ds#;k z8w|#J_lBWC7p{oeN)#GK#GcbIe@QOb%U_c~)^nC8i%kFF^SZJE5C8{yje#MW!RG;e z?qO>!uWOiyKQQKY ztk`59xr`};sqjjVgMCT`OZ@=TEu=SqAap!K_ATL6@CSmZY-~&#!6e&}xy?toOSWKS zmQg;Vf$JEQ3XQczGn&JL)>*C5HV;yX`ILir3h|nc5i{Q93{%;vdTiwhB`Q3OvzKl6 zjyoJ88nbcxP00SR$o{^`pB*FfNj{5_dNlobw|EjcQ>K30-R`eY;mCltKdX2jEaBc? 
zq4d`t22|+#BZ))2DC+l`<1NX4^rLbx*v2`zioJ_tKn} zWk=I_W5#f-bw-?03ft!|>e_&R;ara_QiR+>M#LPdCPh)CZKNPjCbKn>@>|i1>9cLE zb4suM8LB#meLGC;E*KCfwq8!!Sc zgJp{8KCzf>7i0uKWCS={O=ZeFSv|JvQ3V(N7K^`)Qg3-37T~$Ai<>VZwue{DtB9rR z5AjCJ9C>K)P>T(f>aPwMzdLe(KzMJ#7A~8MnC~j-dw$lBG~ZeBUhlaDaFwLP`gAaP zYDbXpV*N#uVIO?DB`wpc`O2Z;UcIMZFld7x3V=9*46zTB-RoyTgj2>J*J zbn3{YgS{e$j&nTCA#vMW$!wO9Ot%HY8T1fcvho&Q0BKPddXF5 zvF*oOl7LlI=(`@Zu9th-4^KAhy_A)NndW@A$6RUlt5SCdgBB7<=X~@aP$AEnje09L zn{tBVaLf`i2n3@2jIajEoHxB&ygzozpgJsMIuw&Y|CW_0k@LB+ zoN9F8nIVd3R&As1pE>|FYRM2Bum>N&UHJBgc;@f)?Y*-L_`0gkT%pmRgtXh2D zgz(57)h-0<1+$8TJg1HYJqNW+kh#`AnHl zi3g5=S?Iz0F2!jnWkKZkRLwZRHxz!N{(%u-n{2G%;O2k!jmT~7&a;Z1=*ASpn5w|%*_Le9;sK|l?(Iy!(Q0`L~IR{-h$Z_o~mNyzo z6J;TFyVBx$8F2(kqoL8}XJGBfgWeCGQ zJ~oz`jcwYQ5tO+{O+zz&3L&jPBysLAK5Bk$P+hg&iPXY+?eBYMdd?n}ds`6)5 z!+5%PaJg~<``+K;C?aKoFO?~#`nmm+osR_7wO-9@lT2;ogV8gkE7{5`s+)F@{% zvtLjOOlJQ21ox~V-f9=822TFpUH~NEi!bzp6X_np1fLG=yZc?DZS%#<(mxTQ0>ecP zsObb{2JbH~P2OGR98uG2u#7ld&qQ5)to1Y$X*x4C*lkGCo2qbApB@=j5;vjG;w7S& zmgBJ`9Bq9%qGC0)mPS}ZwILEkL)p7yeq$83-p2(JxIDSt&LAWWNa1&zc#qKy4;$aM zTrXdsVOv7+88whLPs?)z#!KqYy3OV8v}X5xEl|*kqms(iRFlqlr0dsrx@zL5rtPCgHIL{d z1U-Jg3Fio&5*Xd{>f|dg&eV9!?K8j)nFD%c=%C`lqa|L8CD);e>5}Gj6}f8F2{2L_ zzo`ty9S;YnZOyTy1n-OuuTNGcwd+wG#r$; zPWH>$_ymtv1NvZCP+>rm{q9em8qtLSWO%qhR-TMxtnqg3djYc1ncJY+L zdi76_cM|aQyWP|KwQEb#0~#DjjBTqh7IU(tpW(QQ9BL47?(TruRImcdPovB2eAUv1 z$Kg-^?kw&#n z=y`04ygaw#p|~R*$lG+|YQgH7lEacau=arr3w^hLdu0p<%yS^gFObb@rlOpbw42QN z@QJNFqarzJH~5#J!TgnyR{Hkt{Vn&b1I6R9cd>yuIf8v0rKCqw{dm=+$j)KJjByV) zso8yb>-^o}$geAP5+|OMU+E9+&pTU^o&{HB1T5HyKoKMwJy)m^X~G>QD3Y{dZp>;{ z>FH6YgAAJj44xPg`J5*teyZb#X~Acvv)t zhl43|c8VK60O}4k_L>I_sAJS(pEL2llT4j;Q}eBN4g;*s-) zYT3PQ$f>8d5FSTqL@&l6v`m_;o$wW>CFD(s2}RRhpiq?W_d96%cfZfe6Pl_v@ex5k zC|4RLD70J1pRaSvs6Q?xVlu~$i#45NsWAT2K>`%p%kCLoAoPPj=%@}1iA@JP@sA(1 z3M0hS2+AGJ%B|ib2tpC9^W@JvP_g$%sg`-S>t*M^`BJU%Zp%N!pV~HXkrDK{%jfHj zbJZESRaM!TvC;i$hd(xG>v4yN<1H#GGq5$Y@nh>FEK$rl7%Z4qgFqA`*}@_2{bdOm zIU?|NigEkbAW!4v1_6ijn&}ERLl+CT$b>KpwZEd@Z^wwi!oWlhtF$f31JZ=~WLUwyU*zt zPfVCz4h|BIyRY{y%o3m?fItH9T>?ZP1vwWN7Q}+Hr7<)MpL&ODINde%ru(kB%R!HCOcf5*-b+B0YPOQcr zT^32A_0*zNaVuxWt}vt*-mcOv4qklD-TFb0e4qqm*;C-YYjH|Kj@^&cEILbj(9L6=D874O^5eNSb{oLW>5StVIBxd&=9Y1$ijyw!V zz<;eN-PLJ7*LveP7&6qNzuFY8;WR27-HVU(3hc0J{i6{-3^eL@<%eYTNdWm5eih>Lk6JRJ1h?!sg5c(~@ z#v7Pix5>4FZ5;sO=V@rYLelhb4Jov`b}UXxDpy&Ihs>p%QMjaIhxI5{IEDiWSa^;* zJT{z=UW!D12Lw>Ym&@$^x{5*t9}pM?FuiBBp?sc$NoIwF7%ksYo?=4B`g}`^)GrI_ zXMu)pcu<_k;bhzG)*N>pacgcFQi@Zur zNtHD^K!D74#3ZNX#?(eQWGvVyV>&nEqru% zXiRoOquO1QkiDf{Y~JPYXsJSsRzhsNNDpD}*hwSbBnMsou;swiX)0>H;Y`C0<6Por zxQ3q#Kxe89N7K`$?y_`b_Bq_+rbxU=Lvjs_XKuP3x}x;3Yc=MGYFx%NW@CsWid4H@ zxwe1NxLz$wI!wr}PnFn}=!5fTSJcbYNj$Zb$jC(209%AtIm=xgO!Fpjzq7>ShqM-? zaX-7aC{T5)P(#TzDwJHqnk_od+R_~QhY@b)OV%G>%6-r^m>rs02W#clyc>}j6sc1r z2xI9p=+GN&D<}4Mo2%7%gP{on3bG&i4527<{De2gP3lp8nNm+#hWHNxV7>jhFvMhF z$CH*Z$d<0%QP z;jZ@x;iv<{{c@lBNSpIH%S%YoqT9d1fn+?UisVHlOxgWG}O^7pfwSu@wZFp^3gi>LXW-jqKi*-BUmrsKAW z5)Rk_JH?n078nL6mOUudY=>LTQ-mk%O;^+sQax+$9L-9M=|?0-DQ!SDJ|GNM zv)Tx?L-ko9g5&XW+2&vZ;b>T|=Ogu$7-z28vFoC<=06>K^I0_%!%&68RC@&Gm_8c~ zczQ_(y9uB)+fA3sKK`2VI?Cg?faFCOJIQ|k>Ze`{@F4WKZI|L0b$Nhvu%{EFvYSs8 z#c(V;e}c}sFEHz3(Q0)fBPE({zEn8F3GA$5`iJY=-=}i3mNs1&A;iSqR7E0TL;7GJ zzki(NT=ka8FS9LRV^fvxcJyVvvF2k~v>vJRSYI{$;hb`5!y#S;%0qm*Cil*i%X(Z! 
z+{YB%hTL~iSFK1`(b$!= z_?J*?wIFd`5GGTWTx9$Aqh?k^Ih~~3@kp6N&zEzyT(5{<(^^kMUiR13UNqzkl)7wy zS3L(DH0w+(p4u!74XLI}IB1M{@Sr%3DFI-qOe-sW!_AuEvC7OctchD!3C7W+DpU2_lIDs_UA)z#8am^Ep+bvsUAU*GOO2l*|4LM51$q@m(%Po z8zG-jog#Up?=x7ly6iNREs{B=`h0>G!hyKdT6RB|@_0Cf^x8@DIs%0!m)N?H0tRDa4<>QGN0Xy~BqPSpe8m1wO1|<+J5;>oV*<2DwG&wbi=fUD)OZ!iEwO6=t)E(nR z0LFuf&Sc3q*mrgBW8>?y0X zamMudiwEB|9eU|Cod<}9(^)m@OgC?oKPhF0lzRyj6=6O@_0t2g^h7C=;Nx#-9^*i< zn?k@3;e0vXaVpO7W6PC)0n=D?PYpoJ0#>8@swu9hZXoA8;pVWQo9x0uu~|*0wA00_ zvJ}%XT}=a0oiRotx2w`OqnA853B6Nwn(|VllhVqm zF0r^nctr=v_{hXMC-p6S_iK_K4X$avUo9D6fx>r^JMPy+7>0_#0-MHd=Y5o!&)XI-!Iz@f9RJo`7s{a7oVsD$<0(~A#?Ac{OB z%343r!U*TpF_x{v%T;ZCPq|XSo+a1VRJ{P`{jUicwn0pd+f00NRoZx4o@?54?nOvMVxDEm$(*GH+2h95 z>ONlqY_#O2Wu#Gz@zv0D8L5&8N?gKry7zll5H~S@u6qkiC@fI10Z=#VI%(@=MnNGR zYjTih4uuDhnyd&Q>>$!mqFs~C0kbx>nyq<}s=s%?W{zeWTS=N$dFm|2Bg0e;^i|`L zuj8o+YKHyi^X|b^C#8Mw!uN}{6L2?2f`mRGG{Jxra0odpr(&e}b|oFP%1Se=0#ROF z^aL<8ZI2sy0A}|kL8KLjhwQboi@Mh8bJ&Lo_6F>>2uD2~dnEh?zpgg|erxYN=gG^< zr>4YWPb7$nb(!&nUp9G>w2>63e_*^_ib|9-PBsJ3yi9gn7O<39qvNweex#KwFly!+ zhV9Yda&QhQ4!;7@;qFSp<6(|VFDWJhueh|7VC_qeHqV#xf{$qXadZa#(UbvqfXVPy zEvEr|&kuI%jqTc;f)2Jdn@!L6on%DlyUESzpR_P;C|MwAav{C&)x=bDWgSK~3rdp+ z4i3|e=ZfSklPX8S94pGKh9Q)IU?m$TnpLdng-3WRNog^T!bHSxHAZH+1~^&45o)`Q z6#q)YX%^t#fM1G4iv=}Dt$VD)1WpYfR^MRM4)jOsFBl>sbAyX32zF`AmrsJW#c84{ zM>JVWG`^_Un}43d1Z7s+5s6ABFkVt}<5gm#J{@m1Q1&}AlsVs%Yn#IIv*|jM4ioTm z&RLN8tksvX4-@Y0?B;|SJl$VnT+E1SQ)9Eb5z&D_9adEqqBXr}?CI(%H^LA!!XJDy zFQAAO41nUNT!(#WL&-}HF*ia9H;*y{kL$a~oo&8vooDA$s7& zzq9SoSvYk4uEMWehIhtu^~0BfY;Qw)SWBhV>#XMbRJOY8o=aPM^7u6h0Pj%*PyyIh zWPy@XNaa;h&TvWv5Sw-o zOMR8L($&G1twGx83nRaXr!A2(poL`<9ES%}tK4sGTh9jQ5Sn2@3Q7@tcZE!i@wqb8 zxf$VPU()lk<@o42yfnG%CSzqiHs@@#{ubsFL8fm*jEW_j4SSgr_@DDX=+^&%0eSzq zefslT*bag}ss%oh#$1Q0G%;$ZvPk7}Y%#l?uQLEuK&rn=tF$xvsYNh2GBVQ`Vz@h{ zM>SZbS4|v59{hN)cw6ynYOrr; zDq}<|^UK>ncPAdxZv#r)@s$38TlIv_=~Sb3VVkQ7bKyDTey*`}dZ%kW0nj2~Xkuc5 zR0es2^uV4e?KGUi^_^|vTT3BKV>y*%e0Un)gxGS4m1kyHNVd(qy*0`5q4g+8X%=U12hqs#@3d(r&&=j&VG4XMcaG#%`6(UBM+8cx_(N zFl?jdiKG(Ww$!L@^l-}V@B8psbwjw z-oFg4*zxw_?|Xyf&gyUhN~^Ihu4efFQ0Q(aqS@)G@kiDMCsA@2=?axR9d9#0Bt)u& zulLyudG#{Tx(cbPX3(>cKxz4}e~dVw*sr7MbXE&3E^~i`ECS5t3k5i|U4yw;OA21v z9Mdxq>B0->>Pxg#(sTGUn-c&2Du4=?<>ci(2Kf$uHCB6_*$RHrF_~^EYnv9jWOvcm z269*?ujiigBM!ejL73TEUz&F(a$Z+=k36PT zU=~ksfLXNutyceFzx7Ug(bGMfy_`dKMBABsZYZ)N5wv`s5 zIfxF*&su2%sW#nc96QkK{QlyAGMQaAn&JoI9y5Rc-S;|&E-;OYO0gL*!0x(HZDE6tG@cxYXmsg>-EvPtCEVC zCFFr33W7-*$aiXO5%HibOAsh-*#Di@j^t4)O^!ml7O@MzOMhofY(m1mq?Cd-WSHw4 zOYeoUjd-js=R>g$S*EO?Te0CV!RE-_cKeC~b>E=~G~m*=i$n~YMzrj6F?PS^)n`<%lvzrC9Y?NJw4X=x4%kx9=RklLk3kKjFyavgr{;fY$ziQ}qqsBshFz4*ACr+!^X|i&VeGCP7gr95e z8W${LkBugqm5Mq_egwUf5(SnL86N8x4)F1vC;5fIV2tmkRADVa=E3OH%GG;x6~%Cn>8TuAPTusW;h<6=b|`opdXM@L3RaQQ=n zHeJ#u1N_sED~eer1n4SF3-e!~zpLn5}$ooJ-mqXk1@4s*Tr!_SSp>U9ds~JHU)G}~89)C8FfV(d-XbO4Web13(Miju$<@&` z?{9^Bn;j9mOJ``-W0M3HBzG+mNcqrO?8n`M^;pgD9QlwC$;%85C0i)p7o#3M$42+J zpNhrg%RQDcG%VR_b#sH6#r{Z9pr~YS{jBEVHbp-s*W&GlhL=&n!)g zmur7${bPw`+4nh~mSS7ZD|QzR>Ee_at*wgo(_X?l$3rzgX#K0+oh%?68%|-D z$kI9ShU7ja0&;BNyZoR?UD&kfrQKcE+}>1v(VF>3kuO4epNTolLIW@k6w6Z+Rsq;f7;yUlB{Gy@cr-7}-m&oKj)c@KZQ}XT9_PTdGuHc{aY3q--DCOL ziPUz9{QgHGGlj1J%VjIKx+-Lj&~3=I)0B2Nx3fP89XA+pKIufniGZL%NthV)R_@m4 zr~A#3B5e+7zkBNdx#O+`{n!8v%#o>CHBXmI>+DEwwvRtZPr1Ibvnm;g0 z<7VWSN?Tx;4}G74+&%fUN~6Gg+E9knjUn&qDw5RpG^1`coiCMw>IB&L8fxEg5(`93 z>5G&ZmDzHydJZqtFETwoJB5)?*6t&^td=7e(`*M-e5#5~Bi51I1HAADUBO4d zJw!yKc8W6G%A(TvmRzb*$=}gm+Rxt7&~=FC@zTxvV=yV>VN3=@+l72}M*oArr2(c61AR%{c#7~>%A?;(%a9m=xP8I$_SKJZ4 zZ(fc1RRmMR@}G20nGxNU+72z5+Md$c&6mq{A{Qb62ifpQ2Mc`zKBi`M!-^vBalsne 
zVs!5!rUBiM9;L`trf1;H*Ba-}tt0g5vJgeej2mmzP({$5&mRA+5`+@a&TtT@zh0*k z7WxLNiq>=b;bw!u%FJd%dBD}-&r(ta1|z1_{3=0LT5<-(@bz!@N%#NnUVxiBbe-Pv zF5bIy;)o(vf&>E_duaV5T5x17R7T7*h&`9&WiE!^VDDu10 zqD|mY^D&>E%X%Pj{>2n_+&+1F0W&E$xcG2Qe!I{s;)*t`chiR0M`Ghvd3j?0xXkc{ zjB-N4_p4d9@Q1%k5-K*UvRJjeaQYkHW?4{iYZQ5FC{w%p(^7V}w3(!vdl{S6yckd- z65baf(_1!+oFiQ9GD1G51XbH$&Qj4{B>=Szy5cxJe{-re(&V7*S>JLf0u zhrxNT+^tqGuYP08gc-Dr2Gl=_*ya1XXEosr%DtNj+LnccAOU7<92}F2opD-PTIS?^ zXRX`%ZVxIfbQ(5FxhFCOq(gOfrIn*YZ1ABz7&dmUmto;@u1N~{RRbClK&&7!*p<2B z-S>Edp^*02?J}+|6*6dmb*E8-re_jAYxzm@HC~*vnU$B)-Fm%RTlHuzvZz5YM=7bK zZ!9l~Q1UQkowoThQT*YQM_Lu1W?-mPb_;dZ0VhlZS}(+M+n+8g?vA~l%~)$()?uZH zm$d7BvM5L2ivIi-Jyoi!2|z-uL{1gdwn60-@LtFz;TLv*xlp76z_nQ3*yLbrLS3aW zzEkgbdmE#=j|N?hwxL26sAJXjEnsSEI9oy^v>b_Xq|Dtysu7_h*iIxTO9INuy^<80 zp>+haQ465QT<>-ZUhIh#C2Ju?wNTLNdm%}oJo=bQQ^a;%C(Oy^>5a=HYpv4rp$wi!@?L7)tL-9HXt)%BY$j$r|{h_bc39=;xw7hF$)d@Zr_}g=ARx)VgV@k z{^d3e?s^Ym;%5VUFHMgln}PDE_mIsy7{%`D0&O`~+=M?u&!R3C2+ha+E<%pF2Qc(_KpJS>}{Ga#8@P4j$ zJ>Zxn1gs86qA-#v&1h`u_49^6Qco496%TMHz!f~Xw^Qxh2hp8L7FJ~}a}cg4>Lh=R zwG?&7rpI@EA}UUe72}*^0J7Y)FD8}%otlN=C;fTNsO5wo#tb+N+WhI@g!+W8M0uM* zt;SH_lfIrtm(8mE?3%VPjeK_VvxzV+s+z0UbTS|oUY+pJ;Jx0d=T9p>d#G+XtY1)2 zQ86z64Dz-L=h~xwTqk`#p}bEOu9?af_t6mSmW-!_=ajEhOJaBY&3ON410X9Nn~98i z-Z&>DH(P@1et`RBXP!7_$O6~>FB62fHI!`JLpsoCM%9CQXT0sv0mXF2+{5mJ*Z%%e znV%Ya5!Herw}!BxIBig4aLuor`ATBtw0YL)%w<5qoL((hnJqThK(+jh?sT!D>%%F; zV~`XelvV3;43YqfnQbr2k-HQ+Pso!OMwtFuy4&Kqy2>XgX* zV9O(UFR@mQcLzWT($ymh)piE!g3KD9xQ!($w4bAc`Uj?veWtAWxscawIVQFt@H_mi zDsA6r^k6_BG6Np!-XVt4Zqc?9MKcb?Z-8R;7t0=@;m|3Sj;R`@82~KGo0QA(pm1&N zr*^e;DvOrO7q&2u859+mvU}Sa63;Uiz)BUvFw#=9_|(imD;{O(rCY>0i`0sIwEg~Y z2qy{7bXQ14YXLW@t3;Hw*buY>A#}cB@~4AjZ>%ZFhgJPPgM~H4hz#}rf{kspzy2CM zG8?HZIU|J}vbw5d@CFqjW(gLHo&@YLA^Ub9&2Gixdbb;cpLGlq)KTkp)Cd6|JS-B0 zeQ8PfX|te`zu#yIh+BCGI$M0uhS^GSkY&?_f)wGBw!>Mqq8x!D5c=y)=D!5%eyYY> zJ}&S(j8=QmV53qt16e6E+)!Qs?L#}xR0Uu;iA@}%`U6?n@d+A%Z-@DQL%@_RdtKxZ z>Jgiw=^5`f-c1*tjnYDmQF%{GrAE89$18P*!})0eQ4lDG#N$Gi*sZh(k(i%D?|`h% zTu#S6v;RDIsP=dD1U_~VZ7=$M0VUP2jNd%lhARo2b>U1KfAK-_0?9Pm^@C>Ar}kNB z$HtaOIc#rcVng;3EjLag*dZ>N)au1y0iWL>W)4V7?z>lQIL13twbb{Ay)SeR&*1xB zKP)&JJaEoqV3@nxB_HuQ8tnP#O8x}|V!>t7Ixu@0j|rq92q7E|gIEk_C_bU(i@6 z?)<;Yd+(oN<_=zGJm>6MYoFs@)f@!3p^GFQIu46lyJ`UDQ%c*RrNo*U_%0BijKvz$ zAQ|TwnBxRv1gwK41X6MQoHJ{9!1eREBShKpQk%i`YsybQ0-)U2``gJ-v=Z4=M!)7Y z9F#RjY=S^T`8he_-ej*$XdTFY&E31x5 z&k9%IQDG#+r7=8uVTGfkOfSz8(yiA14yvaD*xi?28%ePT7xm`ENYtF8*ct(9YV)2T zpQki7OhjO9Vhsnd8DZ87EgkwDKsH;oKuQ(3S#oUKs<&R2+X%i%IO1-b4q2@8;j#$%!Z3NtOfg*4vd#j|;h!9-$~p0(Dq2X~8g-i=ja z2<4@j5Fio%7Qm}TB}i`t-Nf#z(rXyOwTrc1v!ZO0haH|gmBPY~P6bn^Ta*PFncle3 zu_>x>W5@3#y!7uaSeH-lPZtlbVuLb9+HTiTh`lc-+Ge!A{fhq(D-6Tf<2Y$*a|l^v)LZQguf!*G98fY99r%gi={QqoJI8mj_o# zqsjX6KX9WROlp9YV#Z$z*cgWYJ;TPcGaez;O9-j3X=kS7Fcc4A}{2y*z{F`@KQS;e%X#K;5MzHpWT ze`qM#M8$cu0~2~?zMmgd01K*>9TeK?*O^6;Sg~_q(GI5aUR^vyLLNM<#XwSqjOF6> zzEDoCX)2v}Ld|>lo8y9!OEueLO-G_6b$7S4{68fGyz8g>pUyu;;AM7oiIQuB`~qA% z;c#ygTX%P90n8baRoP(2;UmA~5~qUuVzA`mG)bZ`WokY${YP3%ou0hJVq}{VPfa?} zbT1{fx5~RIFLV)ob{GW85Dn=ij>KPDw%FbtRXnKd*TEq~1Iky&2S_nZm?&M8)jV3b z>Xe|QB#}kp8`$@CwwjcijK&*CM1)9=oa2QRvlI0zSAT+&k(Xz5ok1Y+%GjICFRrPX z5;g~xd6v}vC{f;aU&K5giEL#(XGFZ|E=}ZhZY}@TXL7ew0Mgyq{q4!5{M6(QIIre} zmM&8_e?gaMr=Pz2g}@O#-5I6tSo}oTqV7i+kl!YN+~o;pCf&o$@JCBEPj+pM`@aED z3m_GlCR`v8mD4VUfa}F}_4xgE3c}9?Ns(5sATX(PGIvS+*I*l_guso4{rSd(WX^(Y z#rE5(CHHZmmzG4v&v}iGI|RGvpi}GX!})Ag1)kpeM;AuILa_rXI;PNjjg48YMC_dl zQ_<l*)eq6XS6t>3wB)H#w&X^uX*$T`S|F=)CZm92AWuR< z-%{Tv8YodKe(Xd4x8g0)Hhs$htm|{#$wV%fP*G~&Hs`M|?TxCE#R>{d`_qH!DQ0Lm z(fwNuO#PG43oliFSH6^414ZKs%w0C7=aj=_U}X#8p`f^MAonKdIzK@mqo<|(=$?Stt 
z??Hw_v(C;d?mKshs}(+a%U)+>IJil-fVchdhbmpH64P9{FW|IYijRL!=Wk1_Lqq>j z&tY+*#z8D0DS_?;M8C)&OBW`s$bV7g;dGSYB7eTw)ZVf%dXZC8gB|D()mWAv_fJLW zg~LyV(olP$&awXebFdIIs>DBQubUUGcAzBFq4zsaw~9G-7p5O_5&?hUC)NREOPQnW zXtPf_n_rJ^D=#mdw#;)28`vSwIpbrgX*r10d=!EvmVood9|hPmiO+3jB9q?!=jSO_ znCT)*v4l@=U*-W;WaVj8nW*cSuHo}&*>ynuc@~`!@|LQXSQ7GjUSSl~J&k(N!RPAs zUk&zN&wTb)({VX^xA*u3|9qWsg=0q8>0QZGxI%KF5)DN^c$NeND&W?gRbA$FC*Uba zdcHbegvV-E-ss~ie>@-7aYXw8fWr(_ybu_r!L8bPp}s2WLcPbWthYFkjyb4G^S$eJ zQE6uKm=ySbg^K;E5 zSVjnyE62b!Hy_p%&Oe#Q#g8nu!iRU_ohw7@ecs)2M|Yr8Kp;GPf#=6^|- z5;vWJa)+k)ME*U8aOD21V1Ya~h)xjjlX*)==i>xdf?==IP6X%62}#0X7{C3)KwSM( zM$ZnTWoK|<|A0c*c9s%N0dcde(;3>E-PVM3ik3EtTFZIyeg(qJTV~Wi1Q;mq8kget zUXL$6x1Lw~(~~Wt-;;?xk#X%XW?o>Rm_^r}EszMv0~mg4?&;;|{#Jmd>-wfr3)G z`gk%>nJX;u78TayY2mJCONpBKv$NH-nl$^itRM&^?A}mT{7!yfcHt#X*xg`?mcfg6 zE0U{Zo5}-XwoDX?W+)I%ajo^ZkmN#I%{`QCSdOWMVL3W#H~BFuDIC@_yLP@0Da`Y@scf3$_pXY!EV`tImIhN33H6M!yDUpN69G=k zfhe^W$J~(uEy3%RDgeBW+8A>11*dEbxe)cPU%y#huWNm=_BnSxRDh>THzdut5xNsy zbiuxl^PSoRhs_h&0OW*@X|c}v%u*76L5EoVCsq9zA$v$MIS}k75SEu)>wF*-=b=$_ zAnn__ua*%1{#J?>l%UCAYyIz3KbbqLd}686xPSk+`GhkW2{bL;A4Tjv_;djm&f(b= z^TUp@ia|Dt{^^YQ%|UJbilWqL{1p2r5tmlKJV{X>+}%)7tOG{M5=af#;I8 zjcjB7=O;9!>QByFO+-n^kOhwaOlB`IX0YoHQa#uAnP*D`P^Mo%KO`3w?RnAWerDtQ z#b4B}!%iD>a2|xKgp&~8=k$ECZFP7G^~rH(JiR4;n1=(A95{hrruSWds~FO6@CpXp}Q?bLnUhTn?$@IWy)mfte;QF?!WOZ2wf; zLlD)J0b<5M^p}TvOfC4jG}27*54SSp9R_f2>hr75*Ie=`oTF~Xm68fmXOqE$j`Bpo zjuu^+8Tv14AD3@#FOUlB?T=xX-M1`<#$_`r`!GYoweai6L22^^R_6O z&%3PqmIFYI8@mkP2l|Nwo3nB*^gw{oP(uPtW;EsHPNk5gP+%loP{koCsS8TivGWT& zWiev^ReZwxnJx1%)tj-S$gSg**3Tc!$2ND6Ktw;x)4equw?|mh_{{e)d429#azFWh zd;9oMX{t@f6uHNI!+2Brsh?ARs!7{h?s&h4iO z`a#&|_Z(?@Ko|i&5PM2q2qfkqy7%?Yo0}7_4O7ro{M3;QNJ+zb_Br!Yl~r)}hu)1k zE3>>|2losuoF8IlFWe(zazSQY-fd%(vy9mrP=0xU2M8p{b#11HAFFgoMJ4Xns)vzFOc& zRhH@~6~p>B`mCV}9s#u3Hm{x2OyWC{^hilfQc8tpMNhDCLkgf0o`$sIKIdvQDjoQ0 zVBPi=a{y-lhySt9nvv4Z)SchW<;ARl0<~K^G1qNr zX5;0i=+UKvcvH2q?+B#A3(ZD5x0#_W5n5# z5={nQ8MmJEdDYKvP6ejg$Z#-CyG;)@>&xnW7cchvL^xA}>I}9Q+OQqv$Xbaq8;Q0p zCnWc4*@}@nB&<0}Ogu*x?qAh`AWw)nXxd+?Oz?2B(i_VmUZ^(rHka;}yM+7y_&49^ zGgY=!RPj8Wt5V*}74KT8uGb1M`;%EO^jKc0>tm~ClUvyUx#F#3Bp?`WSr4OEQ&IWC z@CHOz{qLVjs@r;KGLK{4*&$Wr{sBNXG(}Q%fdJxa)L9V1xam-(zudgA!yC0#s!z0S zJfxT&9gZDM)B&}3R#7b0I`wzI-4?PJgq0F7(A+lFup8ef6Rjo9lSlwmrg*KTncK|P zNE4em`@EbyO#&oHy%K@k-zYKWK+dsTi@E(SHW`gyMswvnWaY8raYFdcy%Dun-63kD zNF?>^)ye$4)^->UJ-gefVM1ae!7!}f&^9FeX!HN9%U;J%z_1%hx%O^27@Ye?Qc5T0 z`nJ@lnCpEDKt^#T94=Gk)6`_rF7ar@uli{Z%iuTETgAXrR$Of7DaYJ;SINxrZtQ*W z{M%rfcRQwUfN*PcPs@+ERMT?>hr<7pp}=_(6ngpIj*s}da6`z#_uvAc0{uk-#ekp8 zbT1S-4i5P@3@_140C?2#0?W5uvpHY-<$O&O`O^EPZd-xq3;RXwtp21w97=>4w6N*f z=1;}_oSzEkl%D?ve0Y7&V(CKryar!50Q^QtnYFWWE1sn9TYvT#uK2rjCh{D6ry9X; z)lfT8MYXHxsnh&2olE0h!+if26kQ~h&n+IXus3x_1+F`s`+u84zt`A=eOl%JCk$yk zpo`K#{(qs8#&^Hm_<0%wV=A})+U z3$4=PhZsgGg#ZP&2LK&d(Gv`-I7ZbKp{%y7jQHQF^4E7hf-48T_G6AGXesllA9r@b zy(JaFg?haYFn!6yNFvXIYd#H|iqmQ)qrc!83Dx_T(r5NHT#uI;-R>`Gw?|VDe0_bF z_pvbDtL`p#vL#TAkCD`h)d)7=UHX1Dz}DN@<48a$Bt_WamiBM{mUSvk{a`~Q_>_V3 zFHdHWygqc-00~494x3bBxKuBM&xuNn>{kT!doo|d%fCD3J!B-raUNtSD2#Hm4DBCo z&%MY40Uv(+@89*9bLIJRhXPSVM^dOMAS1eP!9m<)GXCrC@3A|-#GbBAsG{OM2?a%=%J=NSx4pPp>~IY zU{TZA6>p3h2EGKIT8<}%cH1L!bz@G1Yf~U$z#a-+6H<+7neCc!tHbp9oWIxQr%MP4 z35ml`V#s#cl3L!Um61zK0chcWVpPg;af~S%>@>Sah}DiX$mZuGCY4g$o_9>i zP9&k}8g?9f=PK@rFQk!O?2a=%#bf(3VSz(qmE;wQB9SWH$slvHWP|TyZy!h3CY^9);49OGP`f% zb)+CF#cyK2kX>~enXmHs7fOT@jaE8AY?R0wsZ9xB>uP)4Uq}?vJ;&Lsw}t~R_Afpq z^A69Mj^PXKc7Y%4GKvU9Rc|}ZVk;hE+PLsFOLYPZ3TRBm{%FrO0VXgUP3C~edw!@5 z#~F(@YF#9CT!@cZzQqU_HR~;He)IPw{Dj43MP~T`0pZSA>BdY72dgLeW5}*z9E~W9 
zVLhMy<0IpU&LjObkzhD6f`ai%n0y27!vG@>fd+GUy|pInEz|E;1EC3Lv@`odKUQ9> zN$Xi5`v(QTDTiHcZ+DGif_t5gPQn1uNADF#)(<4}EyzOt`RI1nJgd6kesaME;dC^*eeLTA$FgS*nrvt$Nu8)$veE#rEfr~2 z&M295NTbKw2Swek$E^eTQI*24NLq|>Ws9ni!6E#C6w;>`wR;-lw(X&d_0i&{C zzkWIJInlpP=J#DkCM1(#<8_&;8afZK>QGg6^qMQgCt^w}#BYfu4V&{=e0I`lY>}S8AQvC zRM8+qNmV2+$c>v`T`XN(q+=PuK)Dk6e{TVzC+s%s^WJoa3PKgAkyqXL4|L^c*j zjX{;@H(SnR>2|gxg2T?(v33*AYXrYetc{Df*uhmyqPSUF@`|mKFCurA5h|eL;%p43 z2}49Z?arAO${s&0AC0;w7d)AV<}3~*Y;B1tvKqjC9~4H3vZwF4`!{oUwmkjVIW%O+ zX1U2Zi1;Tm$Qa&PtoB@Dxh_=-3r?#<5Rb?7z1fS&5{pdjxm~8|QEnp`&LQ0{w(!H)V;HQ9Em|Pcf)G?wX~2lN<`?W^hOI8R42PdM0xA@pspjx zjH>vN!LYEf+?9W|d09)f{N4nUC0``kRv|JI{|FJ*LujN6w9eEDPfzQ%Mn+=DWj(~- z4UHNbqEdk>q_9jRdjHinD-|CJd(|7(&R6P(4G~W3kM6}y2kr;eQyC0w9q7p z4Rn8!hRptMJ3?tK9mw?mY-yw zV}SN(pY5u6|C35yG!%=n!udd>+XDl0&l21fSfG*{mMK6mUEp`ho~g9N67vBO8e5_> z#$nay;W!}ZnoczfL+0J!aJEY5ZX>$|;$`)(nRdy=pkFKnZyEfD&=Mgd{&3jk1JcXU zw#cspK6HV(1BiIGk7jFWAmd>nBedd>!!4u#gnwE8u3nWzQMOOI9O@4;AR$zEbV15) z*AAi53tGD$Z8qyC^Ds>=%CsE)N2AW(p6N}eUi3pO7#)8hAY@nGX0?qN$eJH(BoBg= zcW)J(x&IvV&`q1w8tyH$YBl`3?c9u#FX|fa9};;>=J7!Uv=3A0f7Z(LFBeVsx8-3u%ngqqs ztV`=c3qDy1CST!wzZ0_KqJ{O(hr+6e2{W2BgkO|OCK(!5-8FD`EPWg<(W zKq=E7?431)6WB}EXlfycEuq`&{3g5ox%00ekO{hPj0HudhJ4iAV>pZYmIZWj^3@nwo{*h;eBvkpW z;3to2DYCRV953!eYleIG)_I2zC$Q6?g;qAvy|d5a>4$~0jebI%^pIWWuM0IuCoLCI zVDCKCgYR(?=fa6N-JVc>6v%TG8yFF`v&EBqi1PSriR6Z%G<%p5P;nHZ@eH>Pjh-Y_ z`~RxYdAZ6X8*X8l6d?jN<$wpvyc-LgDin1pBSP_gak6n{f{a$%oQ8}Bl@?V}O8ng+ zFnM3x@}ZDk1Y_-FC&0b^tHf`r){2)oJ2T0DfkTW;mwf8V%7sptCQ?ZYe+7X|AbxYa z>~MQ(B=&%T*;-;coS11fT@-YKe=EYLM*A7tC3x~wUwHk~BB3jZ(IpoO+5zStGQyiDTz1#vqG5{Q>3jd+X-j+ePar&?R!_7(m zxur4A3sR>`H47FM*pUXi;q{k9O+Dzex5xQEL4RvR!gt5B^1Z~(L-L@4n)Y< zpZ2d>>CekL;L;`hM*OG{5GadOxJpV%4GldXC7*ezqlD7B(*1_@vBl_3nxOao41c=! z9Q?CSo0lS_GQy&LpoKoYmF8FY(^_Dxhl_<(7Pxpsoxl5<$_K2qN6>}EFTXv?9sJ=` zN5&qAg>pc0nPj6t$fD_(?MKKTGmr>S2pEmZZY}|54#d);tskLRXU7D`n8HDDYxlls z<<}zxF;4-XM?89sk`93GfIzZIOcJF!wQC2~Lqx2Y?wJ56XfnksldF8hdvcJ}qQDGG zfnWcsL7!+h7hq~#hp!?;LxVx$WD%Ct_)%1&0Y7RW1<4BEn-OiO}CFPj3@T6PVa z6_or*Z!Qc%F53v11UjR=f4PWgXwIJv$j%jx2>>jB91Dl}E)E(!f0*$}T)Csv^Uc}T zr9{w`#bOS8*v@E=5jPn#6*4qCDEfUrcqo-xtL6)18byQs)jeZ%w$XWrAWaNk@uL2# z@>3kNwz3k10W1~z6Nm);Nv6ID38`OHG1W>5{&miGNXQBXC#^SW zpFQ8k-rU~pq${`kc-LPJ{o(nzz#iptpcFJ})#Br zo#2WKG%`pULx0t%aBuQ&tQJ+~s5YjMeka{?n0xT^iG>IRdn@(#)<$$ZcFjIRbFpb4 zmU1cnIhD_~?-#4F{YD>}>it{ga}MJ^)Oxh;Tp9e332&Xos}FF>81@Ixs)vFF{QPUi;6z*%hR+GTxVM1-`|`>qvErXQc?BQpo+)0Fz7X56Z5$^eQq~;YRZsm zL!=3Nfd5D#-8J}e$Kf!ZwZE0CO!-|=k;ujsOspm*9F#^lY_UYw2qocP&s(P+>bcRV ztBCR-7qNLV9#VSb)pdw8*%M0saNzw!?nJG#Uh2vs5{QX^I`dVtY<5GK2-Cgg^mJxR zd}7`B(@Cem=MRqe>`ILvjuR05{iq#WMs#b24ry#-`N-aW<)rVY+6&aJ^8dQ0YILnB z7DFq>JNXSbm$luAY$bKud(x~~N8sqyO0+QLli3|Uj{{{Q!wHPYt#{0qtcwBLvq-!v zxXvfGk2{izbN9_GsB6}kESf3|y5!w(x^L+Efo}v=D4Ew+jU~@pJbU2V7W z=6QeASOr}kPhp#Lj*7;5Ve(T!Xyh(s;~yp`&w9+J8p{%~+F+@lW}SG%$ghH*0^##e zfZq=XAW#c})*nZ9OAG1Pqj*}`q_G?x4_#w+(>|$l*am06Y&s3nBy+tri@HNp zW_(Ca%$ABJ!{c?L2|~sHq61GRlWe=v0stVHhAYJFF86BV_571=GHF|q?OG`^c1iya zwZremgRw=R>>C{hqC@o4017FERkjq-o0p1UR4EmM1D6~APB<+DHO7?30cOCr6%GRn z;VWa@5AuBG_1oWgL_9bF)vU8i=c^m0TjQz3+4pr~GZB_x`?EI`;vZ%YZ) z`U)>JD{N+;xr$tO6h3@V?mo>c#RND2KeuZErtKYKSoA?&XP715;MCf}$$~8XbLu5Qd9Ksek%KCC%tR@stN|brfto93dsMM_Ao?aUJEj3iAzCQ!K6yZDna-oyZU5zLC;j^Rq)%ESsl-sQ& z1G!;gacQw6COsDKd#Cyv#$Q5Q0QKFO$yRwGMTBM#fEW807$`dRCuoW)D~UX8Jw53p zEK78n9SDbq%ILF$L>R3$+u8Q#XOg5c2Qe9A_A+&(6V9kle(5-SlR5sAmcgve5Ivia zmhh|sDZ*rEazPJVMnSwQ_=NWzdtNal12eD<4dV5FuTTLZEwedMXID;y$_+ibo64&$ zGgYDjA6D{fLQonD4$9S-#Avdt+f34-c*C4IOQoj{f_`z?r7b?_P#S8Do1NZVeJ^Ra(UHT3 z(`|BiW6r$HmgBRt&9SzAMH%}Usa>&M8zOQCt`DEZd^km7N4qs0HX`D4-xi{y@XneJ 
znz<@o`+2h(q?(vnn7mkT2QeO{E0gx831}YH$Lu;hcH0^2SqE-~Teb4GyzYfsj{oQo zT_!_5-!mm3^Mg)%j#4R=g_C1IJ=EMwD}04jS`i9lf1K8?-dG!_hU26CwM=AGQAWzb)>LJ$-#y*0W`Krle%!rx)Ww**;JAc~>trnh5a& z>C^E>4MHJVtdg0U4B8lRG@`7#+!|Rq>(P-m&{!lzcN>o?^6DSxU!Hcvuwk`I-z|Bb z;`J-^3&*7iVN)YQA20caXv=c$CJGcHp`mqdjthx{|KO9-v!$;>3K-k3Jj>IIi_X9L zCs`iFKIM8l|8kE866FsZ&azmf``uo>H2~tR;ITd!OSwJ%JYUZN{;~aBL=d)muYWq% zZ{l8dAqKs0{iFTvJ+_l=0ZMtma>h+l7RwoI1fP<68cq_|#-94;+D-}K+iA2Z1hY=NYRF%+24 zes0Y$XJ_aISvafAx^T_QP!h^YkELA&N(|6L?Wnz>{(dQy4!>?D3Aq2Tjp;R=skF8_ z(Ce)H(Pnk9HwU-^0n_i3*VfRIgtA(WEKWpb>w@9#kNI+SF^u7K^$*|VptX`sH#=b`T1J&BedMazqPe{RE@ z$Y;d*L-+2v1=B!IZ|$1FW})VL?-!Aa?a}3xl#r3PxVXW+;kemApyrttXcxi?9$Ybj z7`30}Y&7cm5q1WP5&c0TAPQxFiqM89QuK#6>5w%08%49y_94^vG0oVV3H)$(Z!R5G zPWxi2by8QL+k_~p0xP`z3aerlEt7*1fk@qc;+9Kkz2xxmSx>^<$szO~QIY30Zy3?1SX=xj5>-ak+cXI6SB=f-e|=?oy(zfnD> z4kKfT3z?HtT(hUss(-?`CJ{i>hO%91Gk0k*>F+lS?B0V0u@~vTL$gvBX*=9oGp+@= zcH1!oq5P)lJQfcq>OC7?U~F)Bc(zuHv4Wo0fyUk0e-E(`&F@zP!`}Rq*L{e3dE(_= zGUt7Z%B0t97 z&TpmFGldw&o0mhPb@yM0qEuRvkFsgF@K*P`_=wOfkTkYYrI!8iKQtmCPfSV0-nEW1 zM>9#;Ya#_)vxqxHKfiNI7b zT*!A^kWtOj{jq}R8a9&;|b2SHuP3UPRF4$QndzLN4#b)S|vm!t3 z@0uQh6%gLMkLV0Q$adTr`^9F8OdPHdfdk*4sLoPl@D#Lt=l+}Q=x8iA)VVkc>+^1L3I(Xjrr9(};Hu^YTQAd*tqn-ae z{%$V-+lcsmqPp$H;Z@Xev_Ix_&vtuexHOC8ZT;f3AxPtQ_a95;%N7kp&f6j-^FExb zgn7EZ>ONj>mLF9v6-9FLtrz@eqJYrcZE2O*{#HUciMOa|y>*)-vtS8baxj1f8G}?9 zTSBW8i}R2_!n&U1%jisi+v6=O$R2OZFjZ$tV)|cTEGs{1X6JXJI*7ul4D=rb+@`vs zCjuCUXeihR==mUM+~EE62#Dw0LG(M_a{#)>Fk2RRzqUWE2!M%k@PTpnwt(c>b4NMw zYy4rO2|i>iL)yltwkOIBBo7_&L_i#Ox42)hn2!hF0^H-+X0@ zXu5(tI1Y!sOb-cMsM0}6ucQW7ROG&SK>ccppYWjl1O4V@*qB^Qzf6xUNX|ZohiEKD zJH0%AIRln2KS~Hx4u^zA(|w-;zj4=kUf|wh3NHo zo%P~MrSjGmYKQD@MdE*iH}HL`oo78Ydibb(WADPbIG<)IQ9}Bs z-)cQ2GBkR6SZC%&JGg5rh1(lkG(XWITJ4cA(PC@5IhckSRwSB;({m=J2I1Ef2UjFN z~%e)n+@=mlnUF!iG|Q`JPs4S8jAj zJKejr%qxeqL@^A93=l;bv^@}6yAt(nY76~N+$ z??0444@5HPZ~4XI?o`XW^>qQ)jBc{fImbtT+p8{>Kr&KNP;wudL%b7^W2Lb(kjOX> zd825l^c~&EizWPJHCReq`M8N(08K!$zbM@1Vqe8@FFmHXTE;6(q}%+3XYM<4ad;&l zSX5@_m7EGF%8el8m80@_oHNw-JOxsOa=Seet&sE>tAviM$C;Xp{(7gUGu3mm0Z)2f z>UDZ^!$ld+FkGNfN^pO<2a_~x3LtL}*8IuPf&!Xo{GfveebpLU?5&Azdl0D%R^1VA z!ROX5a8=}=``DG#(R*ScJ->J1uoBAlo0k$1cV?`?V7aw8oQVBdD23nvLaNuM0FwH2 z%tz06Qw6&BXhpEMpsr@$m+AH14f0@Au%`mVF6|pZ0F+iL1o3llh(J8=8*QH1DTP1j z+Df+D0NV@K2ezMCMSjTQ?*@YY8C>^vzTI&+Ugu!Yu129A|KsSH=CILcwAA1*S7Z6J zA9V%8z1eay4?L+T)C*AXktk@;?sO*Wina3MMKD+#Dye+6yHgd{xtLZsfC5>`?M|Eu z>xZxJfMlGw79|n@LOW8M-<%Hj32?g~ho$_RSlt?m#REz|S2f z@K|fG?XYUD3=V^S3tp|&G@FezaV(c3Hxc)@O+>(Y0RU6~MZBZ9yx1C9C!dXZ0`9gK zUjO^ITk?&=dW}zaFMju=eLHV%hCj)G3&+V=TX)h{&TX$;uTPp88^5TABtq}vY1iR+(&>7$p5-!`)*Y4H$ z7W7$LJJB2JzevRuk_kMbNcw#vO7JH;DTmD@&oH1tIMMH$>>8*FC)}!2@-p$)^P^&d^)A~zk1w>^;EJkluB4aNUxW_WkL zWhUwV>EYegM42=;th|H@_oX5>tZw^*_-rx<{kwp!EBj4&e^Oz9)PhUiA(*;zR?vft<|iMkB#rwOn5zZo9dw+mm1g0hol%Cz-vA0D8H z!pPrRCc~roO;UF6#VJ$R!G>*y`?aSt&m436qu?$(Ue3poOU7{8=rHIuV4xAO+si7F z**RbBtIH*`BfTnq4}D~4gSu@{g<&uLj31Uo@f*$ZUHA{dbj6w4yeyg+3gy{kabL<% zG64B)P@jd!R%{Fu;sJaCq_}a|K=?3l=yr+x=;h7guH|YXhYPK}P0ILc$ebcNj~eR; z6wMRE#k6i-O;^zAjy&4`dnbRw;_;s%g43%CTN-5=E(%QhL1Yu#^V?ut;%af>PNF@q zadpSm`VQJAkBP*0P{u>^m2$ZbPtBb@?D3N8c%%c6JA%*%vkcmO@B}<+K6_xfr=SsX zk^)Kb5bs5)#Zck$=pRwwH}mh4doo}8$1ik(`O`^3{-mNBV1%V;g~>jK6Y-s>OROO7nb$)Gj|L7zp)AA>P8jbCfhxs&`dlQRK8ph7bPP{}dRU zFZ)M-of3La97}>*muacZlSFcCxo6~#ek3YB=W=fn;r!%thlDb~1*t1+Qy-!deKAF_ zS**Vr#M8uwciX?x7-0=H?2!Z<%=zkd(Gkk(4G7Yt{QSBlr+lL+ z9EzvKvsyhGDH#gU7>%#Pa!| zE4~KlbqT@yU2=B26(XkDmp5Bh$?(O~Ve7r&LKn4_m6da~*0}pKr3c5q#(&+rE5k7uZS`9;;0}uV;m0+o!n`x^#x!8tH}TcgH{EIQYPNs+Ab>xz--14 z3FPfj$CLE=B;`gY%of#hx}tbGP;>X7QA7LL4S3PU!!eGAY~{GC&b$9?TF&;zLo*P1 
zU3*Sn24X&xKYEIZi7DqR5L+Y%i3GqK^@L!;Q#`unhHXn`^3OM_LzGzthJi1=F!5sr z9J#-gElpKc4edOKzTO@y?uHI*^z@TNyJ#}2PiCAI4Es`sktw9MM=*LuL3Hv~VzP<= z%3`yx*^xRY`K#U};qya(PY)>@8&?j*Ut}Be@!hTW1Nv(gDn01_Q&R#;ddHDUd>Gos#ZZLKY+p=cK82cbLBuM5`dA$27oCy zBB(!_BshzPMz^7Y=_=?ANfZ{P3}no3)0C&Sb5XpwlsyKKf^9J5^7x!8tcc7mYJ}zw zb8s1qL)I=zn<+%R#q$Nr;D2ucWL|q*+?AEGqfx@XACRHf*NGcEHlhF^B!y6vCxQic zc`^r*I5(4QAE#~<=w9m#V0Ak+JX~od%#}&_=IKkOSnsg$NnAqWH}Qrd+&6QHr(HIw z6e`pS({SiyXl><;A_CpA%}N}FIPpVX50TkquWf(*U{X*Vjfe&|5wAD?Mw_00zG6DG z-z<7Jht(7!;N7Ca!k~qEJ3FoP`>TTuaCh+SZgw<_aUW{;{o`~Q`3{D98O@_tiI-CN ztM%rH6)q2!un$~_C6_jWdDZ0n%F9j7gge5ZX;r2k9=qVcvFNV~W^oL!6k_mCBYm0f z9l#+k`G8<`xhDfcEjEf>J^%z)OWy)#X=omCZ-#li?7MbNlq;L`>1ux(0{ld=-5EYk zBr<+xaab2G(E{`IlSKhI$TVzAELn{JX2&znF5RyvpR-#{@n*;+;<@7?L9sP>cr0Wf@T7;z6 zBdf*2EQx>@cMDI<1QZS8oAOQb>O0LwFZWO9-T@HBq)Q@?bR;qHoZt__9ZIBQr=KRa z!j1m@wXNWD6CVwqD|xpWer(}_8qInA^G2yy1!<)xi(;_fth1Icy29p!)$5}x&hbjK zxl9yu`y&>xxOEVcdoBttli@;*B_5wk$-Hvf*;dDmQoWTWH<3cYpqC?ngqYOD&lXPh zRG6!z^e>O+@HC`1^0@CcfjW*2r$vF}8TF@kzZAh?_tws=pH1#$rB%5^i1t0ei{4mEJgSZdRn#~ z1gYol)gU1QuWCc$KKT@LB?Lmh%d?K$AkfTzWPS!VAev_Ge-7st^BP?3NZtzxnz~?% zt~(f@S!wj0Pp3e%z~Sw=O>@xDdiZLGvvnHD+-`fmTRLj4UAG3$`A&JiiMj@=K(+Ze zB)F9`grVxMDA{C1MWGea_(PSmKS9)(8@_FY&Bvy(HfSPD72t)!Hg_M=*k)P^Bl{NH zUU}JIL3=UUbL%xt=WO|%utS1%{>=C0z1$W=(h1WY?KMaWb=|kOuMSdN zy&iAXkgKbe*7vXB)x#Z0j}1cnK}>qNIdbY81w|3q-=C%MvHh5Z_I^kz9%;3?@O|iHb#jh~i-yc@`pzvk3Q+2LwJS0VbdmxSyg-Y(1A5fymRrBP` z`X_oD95&XzHL_U#qY&Fi@%ID&+$J+s0_}fFxK0)^dF4(ymINyB_eOso`rF6?X_NHFJwM2Xge)DLg2#PS(j$L;hZ%TbCK+0PzHyjK z*x>r|eUa_!d==cP_YJX1o?Ph2?~mQ9tE)Dxp%GZPxWP{kH`y`?bP%n1jJI!%U2=Br zzX;>Ds1_56ccMh!Px%dyxmSEW8slP0pcEsBr%il=TrV(T0I*337$)7u@2rzaU+xxC z8@@e7%~v}8WHtT-k)4OACfeIq%sV)#SDVSh!onIp-CqS=Y1KcG0_h|AkM^Wv#_MyE z;ezC#smx9qReqs&>0;o(F^F|RJy2fL5W+kcuDQ`X?Lo98#5r<scls!2fmkhT~3@e&%Ag z_zME|#KJe`XgVAR>0EA11>sv@=)nVl)?$av6bqzhBy`c(N1l-=(ZPsbGNZdDJ&Pq% zJPo@QYZoFh-8L_LfXZ_41%W?B5rw`Hdi+~)UwLz4^mlI0zR5)JD@2ZQ)0ACatY{6i z87*Zed}XNH#&7l$>OUKMgJZ!TJ4QThPmw@i4;O1HaIJ_)MU}l zPafdwvop`K->Csup;;jzBP)*C2kLt5&bWjw>7X4r+C~))wC6TKfV1F^**#zec+_!H zO3s$)fe#MC0sgV}H&ZOngw!LpShM^K0ERJOp~yc)bVL$zjYM6(IZD>(Ki?B`upMRq z*cQ_7u5LqDw-b$qc2y3vu=VAD?>Qvl(+H~DW!;-3i9j||a=+LP>|NLEM;rge3IiY# zwvy)s8w3`+Y4lrr8VCa(c)&Y{x*-1%939S{@t^$h+Ti0RG|1=wkySvWLrP5+zRAiL z)}PGp$ciliKKm5^6`+jT${077v@2!W=(73VQU30uJUT4AyabSdU2XGbH5y*G&oEbU z!J=U|XXb+4O&j*b1y&WJqz#mbplO2&FJdPBRygI&&tG1{C zqgrGL|B~v8^d&}e-UsdhlB=RbHoh>)MMr*kEIS=#w)_tc;VQ=^d#6vhBw30(+j*vU z=flhsKrKS__rs=DG2Hu?%k8lK!_s}g^KiZzVX@vWm=qB(!bV?Y_ux-z$U6b#Sh%x2 zgi;5KX(u_t6`1*txdB-POMct%3fh6og?_kID}z?C6>ih$a%r_AhcqS`WD;S957$RQ z(?W!fXe7}On9n|f|8t`CZ-Z{->M&wfLT)Sw!gbBUQHWv$5Mt$yw^9i>)H}?x%v5Ie zG=QB?WO(pg9xrtr(!96cs1t~x@EMPDjlqHAOu->uEe!f?a4k_c0?yk8$}CbcyQTl` zt_8dGOhih#lxr2kMt-`FXbPu|Qj?RVT7gpMq11SSfTM@kllz;9!^QWb2O9qC-#U)% zMZxKzFBHho;%B4%bJs+?fs;GTVw)V_XEEWR!VG)=m=Xd|!d9umPy|v7L_|b_ri5J9 z*)>WQ;ZmlXqjDhq<8uuHP>RWvrz@@>wUO{)*}wpj=0Z*5>Pp6^*IfKPee7+lWV1bZ zazWUt+W4;lO_;yN6@8{ecCwk|nUn^Kokpzd1Us5bGYlp2c27{F@x;s1HsTvabE~}1 z^|nUfuD}=yBmi^v%~zYtkBSX;=gFrS*6K_&I2Z#}sUOJ3h9XHCR~5(o6Oi2J5B&tT zZ!-LANk14xir9erfT(G^G@YB)4W7zHrzFlI?KpNa+I;b0;S2lh{t%mnL$AT^q`K6n z7ktLPGG_ei^;Y^v_g+_>#D+4roeNCSDhx)7*=n2qZ?kbG78qe!nB-TJ!Ja%%#f8NA zEoHKta+c8`Vz?WF695VxUXW=$oNO^u5j5`*G9We(7Dla@mSy@hq{Y8yjC#C`87py~ zOpHR}`^Z|PR_M=Zv#|DmJbh(YlwH^MfJiqCNT-5;NP|c-lyrA@NOwqgqe!=ON=TQq zw17wq-3SOs*LPjq@ALiO2M2R7d+$}}xz^fiAFmHs4CK-GuR5*(YR72M##g3MY2%(r zjE;s4i~QdF)oF>bZMl|bExTXzGt`PyYSkc=5xvQG9s8F(yjoV!z{HzaRQfhW+3If}j_v3$I6^z7%fGmG3^9?FCcE6^fUnmKuMc1Zt z8b!36QR9N4jp<6hL^PYnp&nRrda3fIt2-D4o^W`^RjLYIM`@4uGQ<+un;@LviX~fJ 
z3Q$iPi4&1Y04x-SR^CW8a9o!%5*G~S-j3J$wCb(!KC|ja0d^pM<99x1+4SMAB~LI3 z%ypZtOPa?0fY+Zwz@Wk$ukfh1=ut_)Vcb4obv+2}2ciYHQu}NbpWJMphK^l#JF}}z=uhC@x=Yl zTN8Kk_=z?HbKm(oD7gam0xlY2u60+il;WiM3M@0W*~0yc@aAv|4o?PFsY>DZ8snaX zSGqhQ;rkEWv}yY%o5P-fNrRbFz#aA2H?AMTTcvY^1S>T5U3g2hS&05DSC5CW(QW?7 zCfSVI7O%#Rr9oYBi@~<2_U}GSZm?@<6dZA3X;@I+c>N;5K zb{JWeW4c@uCy3ez_>R997g9z>E1VCDo6f7dQ^i)x9fE+kf;lb6cCx{KebFp_H`nS6 z!qKd^#FaV?KijM5(X_5dEh7d$VMqO#g+4lF8dSszA~eNy3a!h1g_sN;#o6oodvnTx zfg|)^kA2FCC@Tepu#L&{Kc$YSsJf8LcH8M9KB^2T;&!lMbbc8s23jw}5wAq0pA--@ zRyU}h;oT`r;pcQnQ;^2I?D005O+3B)H|#JP_Y}ehP|Y0whXI}x(O~2}g)DwX?b#Mp5Oft1fgB_!ao=zhc89?uy7q=pAA zJVKsj)o)n=h|}NG)2Rk~6+$R9^!Dm>ePifj%P)08tf}ql?uRhf{wcooZzYd~PD{YQ zDWt@R_)~>Ez1cGm7P@zaBs9NuG_U>dEo8Y~I@L%qigQRP}}s#pwoSFQGh7pRr04CV+61LV@u zo!lGEaxAku6oXi)-P;X_b+K<==>sz_K(8T9)?kueKtKn873QSH9>TqUX-WPVDrF_dExQKc|&BTOg+z< z6!^aI#^n#XDSETjh65WzZT`p*02xYzV^d(3-t8CZr#5@SneY4+9#d_>Wj4|jpMM1W z_?k3IbgM9$ej5@9W;lrL!SppT*-gOB+oc3qjf7XK{L`f>_{_Wq+;6e*yr|V@00ma+ zfFZz$hN2howAqeZL&2S^7Aw$bR_X1)TX_M)vHUIn-rJ9i$F2*2+F-r6uHadf|AmJ# zwGV&D7fi9S3X&D)p#&(l&hw|TzYays^2l+Y1_noi2zSjmGpM@5FwE4FwYl;r1dTwc z^Mjw+@MrlHZ(z4wfP4Wa`d=0TdZAFE8%}gY3Qiaz|`5@oS+vgaMPUAbGmHT#uKRvmh^@31WSq$NSv71^}WW0UB3dn+%O-ay`Xw8-6NsHOFc z-~CU7a)Bg?iXou{i$R-Kzxeb8;uG=4H(An_(oeuY5d3!p`RWT3b@CK^>T`b>Wxys| z_F703^kqb_R9pr{c!WS|(R2G~40)0W`^x=%m7$>2W-|&6`$bdtdg8t1PGLx{#b`RC zQZ9s0B5=k=qVvNYAN57p)<{}#Fbd9h6B%DyS{eNFLwIzhZex1ph+j*Cb%4$0vRHP6 zT!J}NaUFjo4h9=vHLWwbi|e^O-hLku>u2QI32Zn|yUt=Tox6Os{ln!dO54q0tApP5 z@2{r(Ut}l&TE_-35W?36dKdHIinn2O#jMm0aU)8E>KE6bzHewFiFU0s9BX}BDj6Tk z?y~Pp;peKium1hp@Wr$LQaP1i#~0GdVf#(Xdx^gm?!_2}|Ey!PbNphbwpr7_t!I|m z)V$3cOctc;$NCdv|DM#ScNU|@^a!u6pnYO*ru?h-*`Dyj>CR*U0)*RUx{D#l7ma1*%0IyiIkXVk@Ksm9{5V3x z^L&qQLEA_vVr0T6MBymJ=u7=@@YQP^&$o3&Q?JE)_>a}=_oM0DT6Gp!kX-#%cL=0- zDiRrsG#8MjW3%44Z~LePAmeqXe>=)rEJPq+>k9(>1+DHkJ0Hs*;i1rxCF_j4? 
zb9G2QF=tcRHd1!2albuY2pzTWRHDR zKDFqo1)IaS?Y|^yOzNlrC3dmZz^9L*{bl#oY==z`0J2}*4|GC8LQ;i9i16?NYiigQ zTRo0qZL!FBFvY_#Cu!7AaLLJIUg>`8vT4}Ux%izH&F8v@Z==rg1I772iEAq>7nnGW z=+z@{v+DDf44Xm%=P(GLv7PruD*IvCh=HDoDF464@8k7@*4{q&2P=1MZ?E(sQCuY7 zpOtUYr|cYZ#ykS_j@)-8Y7bUp%SqVD~ZFPP|n zY=dM!(=bN`w!VK^+Vt=j4PP&EfK25lZ4@n~P+U(YwBJtPaHgCXbVq;`>A1M2{~0$4 z1Heiyg}=^nTvARhhP5FH129F3x9-&DKR&hmy4yK@$>+<2OeFI^S3x8{wPIVB@6}_2 z?9qbY|G?#kX%FtkjPLPc0gZP*1+BL=unTo=-}wS4vj1=Fmq9_GZ+403w%w&0(x7j;y?_X4($PgC-Zq{lzE%(~65JFPV` zC%`)H6qo?w;7%zJN>_XacO)Vt3;`sN>wYlb@@qzSJX_GM^z{DbXkUe)StgC+`T4;@ zIITp~j)u!|^@8K!k5|(8hL-;sm--YgQ4%7jWUFalh8E_aMmHANNEv}?%>H=MYcTC} zJ$ic#ixr6vtJMF&Sb$PUCLkyNDQRQt&bukDlBgikO>{(Y;jVz!MLoufY;Qjsr%y)) zRWMo%P`~>OD&liGMVREqc=FTI;JdHJvv_}mPyGMDJ$%*Nej)$e{+uR*Lm!CiEp~*v z%S27BFH(bmPK1Pn(C4P!h4>rSE`pI-0_IvM1N{5DJUwn9!4sX`!_cW?e`^Hv>*2d_E`MmeYi^g>GY~jBubxr4L%~5f1 zW*e&B)>};?0wD_Q>eDYf9e&-p^J(xRBs7$Uh6cf+PP;KZ9)}`aaJabJF6lD;G2tEl z2Kp3gl3!yp)_Una$^S)fs5ed!1p$)JD58Gm&FaZb97xTP2lYlO7h4^jwMV8w((g{4ym%=%vf)%c~bM z_)a^X9M`e8A{X<=ZsjIBs&FL^mXLA0DE5ym>vHdy`sMD4-gs%i_jv4n;{tJ3R7)$> zo*NFv=^el@6076m;|Ka<$=XdI6Vir$XV5TgiZb2yMnlp7A)JPwnf}Td5Kna|5K5%V zbFdbguj>7fAQy5%{%FZE-7L!pVK4W+xfT_`B7a)GSh$Btk%f@M)_23vz+a zA%(X7J@L#u;z_+^J+d1fNTN+wmkXU2!3h$0;x`^rjz(Rvv4!c#im5EIWZI=KQ;@lw#!mEhBy@Hi&vJZMt(Mcvqg`4aD=HD8Kq}i z4&8SqXuupuX=!O_$2S1AXmx&QCsVm9vWK^Tlb>5=bcI|5m^2xC21r5AAxwjg2vZKB zMpsgY(hbP>^i!a?9;tr&qa>{|IPxPI<<@fH8$X$=g9vaIUpR+$ATZd|$-JH@W@6R~ z|G)DM%chGnrbpcco?rcOmiQbwi&eY*V)xfg{i!+49OV0cfCbhGErA&>k z*C($5%BTk8WBpRJx-toa?=Ku+FD`#!ZNwuMf1xe*^GdBv_)KbJ-;>-K;M`y(8Hjs3 z8dItOoIGA|=v?iMBnWL8v*TGGpz?yUI5Fn=}Lp`^NRwz z(wbI#aWJ(*yPS)l^l+<`!y+C?$*q`&^;5fgi)gd5;4E}6K11%-a zb09mS?faiOvH4`?Gges@^TzWz7kZM~-f&5epIlf;^lR<@IAW*$STS+pFrn|Ij0yC3 zGbY2v4`sWlg1gI-QnWazsOa4oif3K2r_Z&*V zRcHs81#Q~S_E}8m_W)ZB;H@6Bl?D%q>v+$MCMzi)BO^}uRHQ;BX@N_|3;R-s#$swt z7h6=ND?(h4YTd_d76>Ni>mM%PQhHI#De}Tt-0ZV!9M9yOh05svyYM}>ICiRtb zRN9~6MF>&(w7jozM#(e=%HA9mIEJZ*dH z7Fgwmf<+qg#%0TM)1s-<2CNiU4~>qxiQ_N780G+9BZcp!$#ML$zoU4e{j2iDf7>k1 z|6ba3%JWVrK#m~yUZ-wrVFvyij534IeLZ3#+9UxVNgScE+HY*|F%l;_JD-usM)PGf zZxERSI9HGT>y3Go5&F^kuDJ*9NNa*N#V1*jfoYQPExd8iy4L>4PGvX z+A9;mLcf&=U`c&|5BSq> z)=}(paoBX!aq}0@`)0l2RY#M%^E)fOPGlAf0^UNfCm@l`3~>b44rzbz}KC(Dk(Yx3OV8fzhae{Xfyxosrw-L`T1?F7f#Yx^O4XGEcA+BZTQ{Y zPH)_A3bX9(!GeRDRhy|I%%D&%ExQvjP;!8p;!P%cI!{TYH#ZX*MQDWYCsk#LyoA}1 z(yhF%bSh458aaumB2i6I$0e z;rN+Y_zhW}#!M6{23kP8VSlRFm{BQZtfXe;)bj^ww)Gevqsk-PL{We$N)3mF1~ z^s;{!bzivbj9H*TeGKHPfBrE|%o==7=gMM2pemIO97HV7}%V`4D>?ag2Wb;(DB zPL?Rs0pu8mK{4C4SwqL0$mE?=zNDU)&=|POE4=ad5v|?3BN6U=D*awxNWjz?&EG5; z+7qVk?h9-e{;A@5yMaX&f5+vjXWW1BHgw+u$KLn?f(yf=8rO?(Ag<=*kCQ#G%0$*q z@|=ns#6U%)MyDNlL88f7wca(UDlL!_rP!%$=2~xZQ1G<}*Ca7_^?_;|3F5``w2rt^ zd~Qc&XY!m9H7KkkX*Hr+_iQV-SykEI-e-NC9_mrcV!WJ4|Ih!54ia^EW~)&eFup_@ zl({GXx1Cx>3xa%Pq1p9iG9VS!Z)`Cgi0cbMBXBL(%p6T+e-ey>!#d3kUc&4AaJLA^ z{S7&`z;Q2uR;^j&aKm?5n9;$a6KZ|x)dk3M5_3>%QjZsHsZ4K%ZpHG0Zp;o1ggB?wA8zoaMe*FuW1&+KNmN85 z4qDFh$MJc~0_ykJP7k_wVFiA}$UC#G%uJuKL+8v|&dO1-YTjn?q9_KCi90N|yjvPE zOJYB0-Mn;Oa&DZax>ob3?`%X+6FPDw)Bc?BF(dvNC}Pu`Xp3gHg|;M2qKJ*bCN zJf%byzZ<%rzK#wlEgc;^VXzVWeWqMYm1y6ACp#8A~9%^@*uD37W-tcECpbUg!<(4p+3@3lr z?YS}eyE7gWr|Oy+On34wy{LP)?RYktMRo!rFjx1AUK!Rg6Gn;iErbzqD23yAY$L6) z(i9uL@1`JI30KYC=8yS}esqFv3=nt!(jT+SYbpvux#d>xbz zh%lM&f^OyCEnGxqxcHJ69=KT|5p!3!s-Gt|y6{6Wci;%ayaxkUr`vAT5AfPbtC@Oi zIgaq%vlpm{fU|f1^EU2PE4InaQbEhihuy-ITbpOt5S!U5{V)4MH-9)g=kkk-ZDz`h z`(sF+aakn{H?Zn;gz04T3Io}`!MnR%jS`&AHFJk=iQppr{)63iaxZ7M;5V_u7pjpb=EZVj zgGn#5GWvp+iWW71F$$L&@;!E-Y~iQlI^(49!;SsIBtVTB(f!L`MS_!pg3N7sdhA+` 
zuK+B3JP^tG``dO+ws3=gM71WxnXOuXD)G&Cdk?Xxdc-?{2;VdoSw2rLwAYgJNmOwcCuXab;`b+mnGkPgB>J%s#Kf_bgN8U8yO$$;YG$#W2M^#~9UPx*|FE2WaICfcS>pPE18$V`d}Bk~iT^o;gW`SvD?#wu z;kWk-Ers72f7hC0%-371?(Fl0sJo(}p;`T^NE>!*X?P!|xI`Y*=G!s$Sk^@eqVBS( zTi^7sG#WTM6m(j7cuLFHMCt4@E`+3Ku$xx|;)q6iQi_0x61J_;DO_@LTKL`Dp^VZp zt&BqTa!;(W`ot~AK(+))T6 zUj0iMJ#5nMv8fYRR>SVWgNK#bpyFVCm%VERFhWqytm*P`U4psL=qnawLYgSrZwFUT zRJT}N7TCxkom62@0)ACSe=G4hUhCs$IHrmv}}qD$^|Mf!m&OZ5GiF-Ow1Z=Uc-+Lg_7)?JusLc z;XRQP)EFkil1A%6GS37@&d5X$PgeY5C(2|58PVhU>3|cHNG~#$US_pgYUDd(JoZ9Uln+frzuJ$Z{2K9nJ2wh5tv$@%#I`B89BIZ}UqQ*Z+1yJ1n|`p4KdQ zk$Who%Ds@w+_^>x8T+CqnS*yEo+Q80g9(N-|C+bto^7_aF`iq-IPRk;jx$Il%n1Mf z8ks)cvRWM&j~pyFGi$56`7`J5p*geh%;Wqeg*u{JnfgXiriqw3HxkOzT+st2&o8HU zN`%lCA7i4-Wea@MN;>`2S#;4g!lT;SeB&)DbV^w$+g@isVnrc2Wp%Ym!r;w)kDYsu z4|_A1(-k@ho2L0qPUoyJJO=KBw!L1s{b>KD}f~nI5^>bCf#9JWO`Lla>9tPhV@In$96XcLUqyR9ma~e zZ{xv7X>TEVQfg|m1N+lyZ98jyR8`(3?BFmL`CBsm}Gz140dL* z6pDM^3zk>`3zPZm&?+dA1fwTc=)L+BjoU{Stt4L?^3ug$^5jabadYoPMAcWog3ZqT zK8*$tpSi`y7}Ln@NuNPJ{rw-cNC21rp}lTY_m;2XL>LJY#7NCX4bdgQUJtf@IMzB! zq?1~!8y7m&5_YJuo8$QRXRdBy_LWXOYP-*cyEyG~qxmpVz4cW8CjVBK5liL?{~8la zT=~kF5_F>eMoH%6iUQ$FuVXG8x04`ZTafLXGe=O4v|tU_fy@^7>MCNQeK7z4}5ew|85T7yZ7MyT4qzC+Kw;Weoh<&VJr)) zkFkr%sfEbS9{#m@kk8=hHskQBw{+61vycZbXYB6ohPE7nZBf+*?YI;a2SsKb<<6Tw z_*{2z%M2yw(S*ZO_0 zy6d=g>-KAX2|$n$v<`;A=B`=*XF_1;Lv*0S^RxXtp1ILzDd`Lqv)+*rJAfL%f}@x2 zL*AGqr%&UugtjN|DfnGQjEr)ne7vIp@=Unff{aDlBrv^MZSdjBTz`+8&l%rceZ%z8 z?x=?9=GpFoQzpy2#N*FLM+KvLd%fbl>tBMaZlVFg-6!f0D~)X4 zq%Z8+a?CY|)#{|w!b9rXBV_Yg<*!(3T1dd0Dpr)z)61l3k8X{EGSj|%i589x{qV#^ zObihj^BF{DZ9yeexm4vj5GfSh)TI+hV)r$}^Z))Es6#gvw6LL8Gyu?&jPs~W!{&xN1ayYKv*p9;-~cLuX$OtI?w_GiQu+APedVzh~jMKht3sCHB(*WzV03j5^$|= zmgAHl-q_~4HwI#CL~N{E^H{bxgVUQ=1hfw^V1fTCwzDISJHrgey(460rFg5i-{wy6 zH#q$3mKD-|d6c(Ot^;jJ+m)VehqeC$W3+_dGr@ zvA62VlzKPGTr)>uU)p(@a&URLE&qY{({ofrF0533OLq68O~k;p3grUHonMu@iB*6; zFm^oL`(zPZb*L1+GwupNV4a<7JB8F%Z(qaLF+e@&1aRj_+HKi@V*k+So%TD0s-Lq3 z^1zV{R)d%8+EkL6j$T1Sx;PIHId6)&rN;-usQ<41BrYin+k@sWYWN@YzgL1nGjjoq zaDoFEg>&Bt7%djOS#(;-=H7UHXy|*{W%3>Lr^BC@W8=i0{@qM5ToU+ESMx$H6(T?7 zs<3ebxXdEiEGiAVAQl%oesEpF%4*PtGPT|(ugJWcTv!8SDfVX@KkTpegYLALZxsNk z);I2}B}3CM7YN!V74L}xiRu`15*MtM)OeR2Pjh_0wD1ST8Lm&v(70MLC}egGCepb9>tZ3om~V7A za?XO1xr?udcY>Lf+o&RIR4nW+&Up7@M@y)P5HWYbDEQss`gTRMR-L`f=DfIU&=BGjas!9F>T=cKXIWrq#XpZfhWPb*}&0o#JOW z@q6)6NSChhP$**2w_H)^8_4S?58h;44AP!=|`WY<|zM?}lays#6|Snsl}P(bGyY7cM8 zGavpe(Q+Oyd_VCf_J{xp;rRHtW!n3oU=9@(b-Iw~zci@)6Eo!=&h~ku+u>~Ts5%5b z3>gmp!|tO0&U6kq)7NySqp~7#3pXtKH)JC(<_`P@+qvY$%{+0Pom~I}1p$(nvifvH ztmwz<8TkBweckHEG~v~*NDPBhQ7mOb0Evs>CL|^Y|EnN3Gux&<^=38jdmw8&8*02X zKRuEhXe$7p-=^rc1#=k^9qwzrk^C=?aj1O%2C}*-FchzZZ<00PWr4X_9Ba~q6bJEm z>{nl1b_wg>&rO)NR~y`685>t>dK`}6qyv65@xJZUbQ1>ipKNQ^P*6XW)bH#Vt@InQ zARwKs3`<(gn`lBCX+=X63lgt+wVF_6PGvJop8P{XT>WZ082FAOE7Bh+vHkuUcCHR& z>V4g&GK?g6fi}?S?NjVW{=`DWIcCrl1;2@3U5O-YyXm{o>i9tXxq3O&eBYhs6&=|l z!}HMudsX5o1;J}AuAfX^^Y9RTWKd-N*}%@uzB5^X5?+f=BOpLNG&IELds#Ewz;hrF zz~JL15+H$Wc=(9aiy?JrIQ3Qm8>gkx{2oJ3C{FS4RmycF`w#Et=Fk`fCO{5;8{y&m zB{O0Nn}tYETG3E=0q&#c_ z-Cd&nQF3BhsG{X<1c`7(Sdld&>#2AW{dc>h#I&s7S7`o5I~j&1BOA@BJpRzF*V?wO z0x3IgT!q!l`Yl{wTO?q#;Qg~l%5n0ZDEeuRf7S9!Re~a46fDH3A@To4!=>hz)uhU& zo{X`@pS8Ka#~Rt7`oGjk(+gYp5JbvLO zORfM{K&ZbXpklwdsrpKAf)FL<*)XvLUM)Qh5LdcQEo*!p*hh40DLd`%DYR!@j2qu# zN^ep+v>hXxY4h+)e$~tqd+K{K#30e)zWP0M=Vu|tk7VXz7p7$>Az=s@7ti))SZCkK z$%vnZaVvGm5vqNd2nTiZwKXYqB<;J;sC~bXW_j_5VHz6VN_Kxj7h__a-U>YCOKuUt zIL@HgdY-en-v6{Cmobedzz2EezV&Lq>uX(!b^`}fKa6OYuB^le@s$Mle!wb9A~jWh=1}H%%2O1;zFVed%3zc&JC<*t5(}z`*+Pv%MC6fZ8eZ9 zJjLOvc5uZ6KP~DW{^Vl1GV1H=^S-w6<$u!SGMq%Kr*DK!;oqL1qSuQg!KC?db}@n= 

+    vtctl Snapshot -server-mode ...
+  Use this command to exit this mode:
+    vtctl SnapshotSourceEnd ...
+
+  -- spare: A slaved copy of data that is ready but not serving query traffic.
+     The data could be a potential master tablet.
+*/
+
 package vtctl
 
 import (
@@ -51,16 +130,16 @@ var commands = []commandGroup{
 	commandGroup{
 		"Tablets", []command{
 			command{"InitTablet", commandInitTablet,
-				"[-force] [-parent] [-update] [-db-name-override=<db name>] [-hostname=<hostname>] [-mysql_port=<port>] [-port=<port>] [-vts_port=<port>] [-keyspace=<keyspace>] [-shard=<shard>] [-parent_alias=<parent alias>] <tablet alias> <tablet type>]",
+				"[-force] [-parent] [-update] [-db-name-override=<db name>] [-hostname=<hostname>] [-mysql_port=<port>] [-port=<port>] [-vts_port=<port>] [-keyspace=<keyspace>] [-shard=<shard>] [-parent_alias=<parent alias>] <tablet alias> <tablet type>",
 				"Initializes a tablet in the topology.\n" +
-					"Valid <tablet type>:\n" +
+					"Valid <tablet type> values are:\n" +
 					"  " + strings.Join(topo.MakeStringTypeList(topo.AllTabletTypes), " ")},
 			command{"GetTablet", commandGetTablet,
 				"<tablet alias>",
-				"Outputs the json version of Tablet to stdout."},
+				"Outputs a JSON structure that contains information about the Tablet."},
 			command{"UpdateTabletAddrs", commandUpdateTabletAddrs,
 				"[-hostname <hostname>] [-ip-addr <ip addr>] [-mysql-port <mysql port>] [-vt-port <vt port>] [-vts-port <vts port>] <tablet alias>",
-				"Updates the addresses of a tablet."},
+				"Updates the IP address and port numbers of a tablet."},
 			command{"ScrapTablet", commandScrapTablet,
 				"[-force] [-skip-rebuild] <tablet alias>",
 				"Scraps a tablet."},
@@ -68,149 +147,151 @@ var commands = []commandGroup{
 				"<tablet alias> ...",
 				"Deletes scrapped tablet(s) from the topology."},
 			command{"SetReadOnly", commandSetReadOnly,
-				"[<tablet alias>]",
-				"Sets the tablet as ReadOnly."},
+				"<tablet alias>",
+				"Sets the tablet as read-only."},
 			command{"SetReadWrite", commandSetReadWrite,
-				"[<tablet alias>]",
-				"Sets the tablet as ReadWrite."},
+				"<tablet alias>",
+				"Sets the tablet as read-write."},
 			command{"StartSlave", commandStartSlave,
-				"[<tablet alias>]",
-				"Starts replication on the slave."},
+				"<tablet alias>",
+				"Starts replication on the specified slave."},
 			command{"StopSlave", commandStopSlave,
-				"[<tablet alias>]",
-				"Stops replication on the slave."},
+				"<tablet alias>",
+				"Stops replication on the specified slave."},
 			command{"ChangeSlaveType", commandChangeSlaveType,
 				"[-force] [-dry-run] <tablet alias> <tablet type>",
-				"Change the db type for this tablet if possible. This is mostly for arranging replicas - it will not convert a master.\n" +
-					"NOTE: This will automatically update the serving graph.\n" +
-					"Valid <tablet type>:\n" +
+				"Changes the db type for the specified tablet, if possible. This command is used primarily to arrange replicas, and it will not convert a master.\n" +
+					"NOTE: This command automatically updates the serving graph.\n" +
+					"Valid <tablet type> values are:\n" +
 					"  " + strings.Join(topo.MakeStringTypeList(topo.SlaveTabletTypes), " ")},
 			command{"Ping", commandPing,
 				"<tablet alias>",
-				"Check that the agent is awake and responding to RPCs. Can be blocked by other in-flight operations."},
+				"Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations."},
 			command{"RefreshState", commandRefreshState,
 				"<tablet alias>",
-				"Asks a remote tablet to reload its tablet record."},
+				"Reloads the tablet record on the specified tablet."},
 			command{"RunHealthCheck", commandRunHealthCheck,
 				"<tablet alias> <target tablet type>",
-				"Asks a remote tablet to run a health check with the provided target type."},
+				"Runs a health check on a remote tablet with the specified target type."},
 			command{"HealthStream", commandHealthStream,
 				"<tablet alias>",
-				"Streams the health status out of a tablet."},
+				"Streams the health status of a tablet."},
 			command{"Sleep", commandSleep,
 				"<tablet alias> <duration>",
-				"Block the action queue for the specified duration (mostly for testing)."},
+				"Blocks the action queue on the specified tablet for the specified amount of time. This is typically used for testing."},
 			command{"Backup", commandBackup,
 				"[-concurrency=4] <tablet alias>",
-				"Stop mysqld and copy data to BackupStorage."},
+				"Stops mysqld and uses the BackupStorage service to store a new backup. This function also remembers if the tablet was replicating so that it can restore the same state after the backup completes."},
 			command{"ExecuteHook", commandExecuteHook,
 				"<tablet alias> <hook name> [<param1=value1> <param2=value2> ...]",
-				"This runs the specified hook on the given tablet."},
+				"Runs the specified hook on the given tablet. A hook is a script that resides in the $VTROOT/vthook directory. You can put any script into that directory and use this command to run that script.\n" +
+					"For this command, the param=value arguments are parameters that the command passes to the specified hook."},
 			command{"ExecuteFetchAsDba", commandExecuteFetchAsDba,
 				"[--max_rows=10000] [--want_fields] [--disable_binlogs] <tablet alias> <sql command>",
-				"Runs the given sql command as a DBA on the remote tablet."},
+				"Runs the given SQL command as a DBA on the remote tablet."},
 		},
 	},
 	commandGroup{
 		"Shards", []command{
 			command{"CreateShard", commandCreateShard,
 				"[-force] [-parent] <keyspace/shard>",
-				"Creates the given shard."},
+				"Creates the specified shard."},
 			command{"GetShard", commandGetShard,
 				"<keyspace/shard>",
-				"Outputs the json version of Shard to stdout."},
+				"Outputs a JSON structure that contains information about the Shard."},
 			command{"RebuildShardGraph", commandRebuildShardGraph,
 				"[-cells=a,b] <keyspace/shard> ... ",
-				"Rebuild the replication graph and shard serving data in zk. This may trigger an update to all connected clients."},
+				"Rebuilds the replication graph and shard serving data in ZooKeeper or etcd. This may trigger an update to all connected clients."},
 			command{"TabletExternallyReparented", commandTabletExternallyReparented,
 				"<tablet alias>",
-				"Changes metadata to acknowledge a shard master change performed by an external tool."},
+				"Changes metadata in the topology server to acknowledge a shard master change performed by an external tool. See the Reparenting guide for more information:" +
+					"https://github.com/youtube/vitess/blob/master/doc/Reparenting.md#external-reparents."},
 			command{"ValidateShard", commandValidateShard,
 				"[-ping-tablets] <keyspace/shard>",
-				"Validate all nodes reachable from this shard are consistent."},
+				"Validates that all nodes that are reachable from this shard are consistent."},
 			command{"ShardReplicationPositions", commandShardReplicationPositions,
 				"<keyspace/shard>",
-				"Show slave status on all machines in the shard graph."},
+				"Shows the replication status of each slave machine in the shard graph. In this case, the status refers to the replication lag between the master vttablet and the slave vttablet. In Vitess, data is always written to the master vttablet first and then replicated to all slave vttablets."},
 			command{"ListShardTablets", commandListShardTablets,
 				"<keyspace/shard>",
-				"List all tablets in a given shard."},
+				"Lists all tablets in the specified shard."},
 			command{"SetShardServedTypes", commandSetShardServedTypes,
-				"<keyspace/shard> [<served type1>,<served type2>,...]",
-				"Sets a given shard's served types. Does not rebuild any serving graph."},
+				"<keyspace/shard> [<served tablet type1>,<served tablet type2>,...]",
+				"Sets a given shard's served tablet types. Does not rebuild any serving graph."},
 			command{"SetShardTabletControl", commandSetShardTabletControl,
-				"[--cells=c1,c2,...] [--blacklisted_tables=t1,t2,...] [--remove] [--disable_query_service] <keyspace/shard> <tablet type>",
-				"Sets the TabletControl record for a shard and type. Only use this for an emergency fix, or after a finished vertical split. MigrateServedFrom and MigrateServedType will set this field appropriately already. Always specify blacklisted_tables for vertical splits, never for horizontal splits."},
+				"[--cells=c1,c2,...] [--blacklisted_tables=t1,t2,...] [--remove] [--disable_query_service] <keyspace/shard> <tablet type>",
+				"Sets the TabletControl record for a shard and type. Only use this for an emergency fix or after a finished vertical split. The *MigrateServedFrom* and *MigrateServedType* commands set this field appropriately already. Always specify the blacklisted_tables flag for vertical splits, but never for horizontal splits."},
 			command{"SourceShardDelete", commandSourceShardDelete,
 				"<keyspace/shard> <uid>",
-				"Deletes the SourceShard record with the provided index. This is meant as an emergency cleanup function. Does not RefreshState the shard master."},
+				"Deletes the SourceShard record with the provided index. This is meant as an emergency cleanup function. It does not call RefreshState for the shard master."},
 			command{"SourceShardAdd", commandSourceShardAdd,
 				"[--key_range=<keyrange>] [--tables=<table1,table2,...>] <keyspace/shard> <uid> <source keyspace/shard>",
-				"Adds the SourceShard record with the provided index. This is meant as an emergency function. Does not RefreshState the shard master."},
+				"Adds the SourceShard record with the provided index. This is meant as an emergency function. It does not call RefreshState for the shard master."},
 			command{"ShardReplicationAdd", commandShardReplicationAdd,
 				"<keyspace/shard> <tablet alias>",
 				"HIDDEN Adds an entry to the replication graph in the given cell."},
 			command{"ShardReplicationRemove", commandShardReplicationRemove,
 				"<keyspace/shard> <tablet alias>",
-				"HIDDEN Removes an entry to the replication graph in the given cell."},
+				"HIDDEN Removes an entry from the replication graph in the given cell."},
 			command{"ShardReplicationFix", commandShardReplicationFix,
 				"<cell> <keyspace/shard>",
-				"Walks through a ShardReplication object and fixes the first error it encrounters."},
+				"Walks through a ShardReplication object and fixes the first error that it encounters."},
 			command{"RemoveShardCell", commandRemoveShardCell,
 				"[-force] <keyspace/shard> <cell>",
-				"Removes the cell in the shard's Cells list."},
+				"Removes the cell from the shard's Cells list."},
 			command{"DeleteShard", commandDeleteShard,
 				"<keyspace/shard> ...",
-				"Deletes the given shard(s)."},
+				"Deletes the specified shard(s)."},
 		},
 	},
 	commandGroup{
 		"Keyspaces", []command{
 			command{"CreateKeyspace", commandCreateKeyspace,
 				"[-sharding_column_name=name] [-sharding_column_type=type] [-served_from=tablettype1:ks1,tablettype2,ks2,...] [-split_shard_count=N] [-force] <keyspace name>",
-				"Creates the given keyspace."},
+				"Creates the specified keyspace."},
 			command{"GetKeyspace", commandGetKeyspace,
 				"<keyspace>",
-				"Outputs the json version of Keyspace to stdout."},
+				"Outputs a JSON structure that contains information about the Keyspace."},
 			command{"SetKeyspaceShardingInfo", commandSetKeyspaceShardingInfo,
 				"[-force] [-split_shard_count=N] <keyspace name> [<column name>] [<column type>]",
-				"Updates the sharding info for a keyspace."},
+				"Updates the sharding information for a keyspace."},
 			command{"SetKeyspaceServedFrom", commandSetKeyspaceServedFrom,
 				"[-source=<source keyspace name>] [-remove] [-cells=c1,c2,...] <keyspace name> <tablet type>",
-				"Manually change the ServedFromMap. Only use this for an emergency fix. MigrateServedFrom will set this field appropriately already. Does not rebuild the serving graph."},
+				"Changes the ServedFromMap manually. This command is intended for emergency fixes. This field is automatically set when you call the *MigrateServedFrom* command. This command does not rebuild the serving graph."},
 			command{"RebuildKeyspaceGraph", commandRebuildKeyspaceGraph,
 				"[-cells=a,b] [-rebuild_srv_shards] <keyspace> ...",
-				"Rebuild the serving data for the keyspace, and optionnally all the shards in the keyspace too.
This may trigger an update to all connected clients."}, + "Rebuilds the serving data for the keyspace and, optionally, all shards in the specified keyspace. This command may trigger an update to all connected clients."}, command{"ValidateKeyspace", commandValidateKeyspace, "[-ping-tablets] ", - "Validate all nodes reachable from this keyspace are consistent."}, + "Validates that all nodes reachable from the specified keyspace are consistent."}, command{"MigrateServedTypes", commandMigrateServedTypes, - "[-cells=c1,c2,...] [-reverse] [-skip-refresh-state] ", - "Migrates a serving type from the source shard to the shards it replicates to. Will also rebuild the serving graph. keyspace/shard can be any of the involved shards in the migration."}, + "[-cells=c1,c2,...] [-reverse] [-skip-refresh-state] ", + "Migrates a serving type from the source shard to the shards that it replicates to. This command also rebuilds the serving graph. The argument can specify any of the shards involved in the migration."}, command{"MigrateServedFrom", commandMigrateServedFrom, - "[-cells=c1,c2,...] [-reverse] ", - "Makes the destination keyspace/shard serve the given type. Will also rebuild the serving graph."}, + "[-cells=c1,c2,...] [-reverse] ", + "Makes the destination keyspace/shard serve the given type. This command also rebuilds the serving graph."}, command{"FindAllShardsInKeyspace", commandFindAllShardsInKeyspace, "", - "Displays all the shards in a keyspace."}, + "Displays all of the shards in the specified keyspace."}, }, }, commandGroup{ "Generic", []command{ command{"Resolve", commandResolve, "..:", - "Read a list of addresses that can answer this query. The port name is usually mysql or vt."}, - command{"Validate", commandValidate, - "[-ping-tablets]", - "Validate all nodes reachable from global replication graph and all tablets in all discoverable cells are consistent."}, + "Reads a list of addresses that can answer this query. The port name can be mysql, vt, or vts. Vitess uses this name to retrieve the actual port number from the topology server (ZooKeeper or etcd)."}, command{"RebuildReplicationGraph", commandRebuildReplicationGraph, ",... ,,...", "HIDDEN This takes the Thor's hammer approach of recovery and should only be used in emergencies. cell1,cell2,... are the canonical source of data for the system. This function uses that canonical data to recover the replication graph, at which point further auditing with Validate can reveal any remaining issues."}, + command{"Validate", commandValidate, - "[-ping-tablets]", + "Validates that all nodes reachable from the global replication graph and all tablets in all discoverable cells are consistent."}, command{"ListAllTablets", commandListAllTablets, "", - "List all tablets in an awk-friendly way."}, + "Lists all tablets in an awk-friendly way."}, command{"ListTablets", commandListTablets, " ...", - "List specified tablets in an awk-friendly way."}, + "Lists specified tablets in an awk-friendly way."}, command{"Panic", commandPanic, "", "HIDDEN Triggers a panic on the server side, to test the handling."}, @@ -220,70 +301,69 @@ var commands = []commandGroup{ "Schema, Version, Permissions", []command{ command{"GetSchema", commandGetSchema, "[-tables=,,...] [-exclude_tables=,,...] 
[-include-views] ", - "Display the full schema for a tablet, or just the schema for the provided tables."}, + "Displays the full schema for a tablet, or just the schema for the specified tables in that tablet."}, command{"ReloadSchema", commandReloadSchema, "", - "Asks a remote tablet to reload its schema."}, + "Reloads the schema on a remote tablet."}, command{"ValidateSchemaShard", commandValidateSchemaShard, "[-exclude_tables=''] [-include-views] ", - "Validate the master schema matches all the slaves."}, + "Validates that the master schema matches all of the slaves."}, command{"ValidateSchemaKeyspace", commandValidateSchemaKeyspace, "[-exclude_tables=''] [-include-views] ", - "Validate the master schema from shard 0 matches all the other tablets in the keyspace."}, - + "Validates that the master schema from shard 0 matches the schema on all of the other tablets in the keyspace."}, command{"ApplySchema", commandApplySchema, "[-force] {-sql= || -sql-file=} ", - "Apply the schema change to the specified keyspace."}, + "Applies the schema change to the specified keyspace on every master, running in parallel on all shards. The changes are then propagated to slaves via replication. If the force flag is set, then numerous checks will be ignored, so that option should be used very cautiously."}, command{"CopySchemaShard", commandCopySchemaShard, - "[-tables=,,...] [-exclude_tables=,,...] [-include-views] ", - "Copy the schema from a source tablet to the specified shard. The schema is applied directly on the master of the destination shard, and is propogated to the replicas through binlogs."}, + "[-tables=,,...] [-exclude_tables=,,...] [-include-views] ", + "Copies the schema from a source tablet to the specified shard. The schema is applied directly on the master of the destination shard, and it is propagated to the replicas through binlogs."}, command{"ValidateVersionShard", commandValidateVersionShard, "", - "Validate the master version matches all the slaves."}, + "Validates that the master version matches all of the slaves."}, command{"ValidateVersionKeyspace", commandValidateVersionKeyspace, "", - "Validate the master version from shard 0 matches all the other tablets in the keyspace."}, + "Validates that the master version from shard 0 matches all of the other tablets in the keyspace."}, command{"GetPermissions", commandGetPermissions, "", - "Display the permissions for a tablet."}, + "Displays the permissions for a tablet."}, command{"ValidatePermissionsShard", commandValidatePermissionsShard, "", - "Validate the master permissions match all the slaves."}, + "Validates that the master permissions match all the slaves."}, command{"ValidatePermissionsKeyspace", commandValidatePermissionsKeyspace, "", - "Validate the master permissions from shard 0 match all the other tablets in the keyspace."}, + "Validates that the master permissions from shard 0 match those of all of the other tablets in the keyspace."}, command{"GetVSchema", commandGetVSchema, "", - "Display the VTGate routing schema."}, + "Displays the VTGate routing schema."}, command{"ApplyVSchema", commandApplyVSchema, "{-vschema= || -vschema_file=}", - "Apply the VTGate routing schema."}, + "Applies the VTGate routing schema."}, }, }, commandGroup{ "Serving Graph", []command{ command{"GetSrvKeyspace", commandGetSrvKeyspace, " ", - "Outputs the json version of SrvKeyspace to stdout."}, + "Outputs a JSON structure that contains information about the SrvKeyspace."}, command{"GetSrvKeyspaceNames", commandGetSrvKeyspaceNames, "", 
"Outputs a list of keyspace names."}, command{"GetSrvShard", commandGetSrvShard, " ", - "Outputs the json version of SrvShard to stdout."}, + "Outputs a JSON structure that contains information about the SrvShard."}, command{"GetEndPoints", commandGetEndPoints, " ", - "Outputs the json version of EndPoints to stdout."}, + "Outputs a JSON structure that contains information about the EndPoints."}, }, }, commandGroup{ "Replication Graph", []command{ command{"GetShardReplication", commandGetShardReplication, " ", - "Outputs the json version of ShardReplication to stdout."}, + "Outputs a JSON structure that contains information about the ShardReplication."}, }, }, } @@ -476,25 +556,25 @@ func parseTabletType(param string, types []topo.TabletType) (topo.TabletType, er func commandInitTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { var ( - dbNameOverride = subFlags.String("db-name-override", "", "override the name of the db used by vttablet") - force = subFlags.Bool("force", false, "will overwrite the node if it already exists") - parent = subFlags.Bool("parent", false, "will create the parent shard and keyspace if they don't exist yet") - update = subFlags.Bool("update", false, "perform update if a tablet with provided alias exists") - hostname = subFlags.String("hostname", "", "server the tablet is running on") - mysqlPort = subFlags.Int("mysql_port", 0, "mysql port for the mysql daemon") - port = subFlags.Int("port", 0, "main port for the vttablet process") - vtsPort = subFlags.Int("vts_port", 0, "encrypted port for the vttablet process") - keyspace = subFlags.String("keyspace", "", "keyspace this tablet belongs to") - shard = subFlags.String("shard", "", "shard this tablet belongs to") + dbNameOverride = subFlags.String("db-name-override", "", "Overrides the name of the database that the vttablet uses") + force = subFlags.Bool("force", false, "Overwrites the node if the node already exists") + parent = subFlags.Bool("parent", false, "Creates the parent shard and keyspace if they don't yet exist") + update = subFlags.Bool("update", false, "Performs update if a tablet with the provided alias already exists") + hostname = subFlags.String("hostname", "", "The server on which the tablet is running") + mysqlPort = subFlags.Int("mysql_port", 0, "The mysql port for the mysql daemon") + port = subFlags.Int("port", 0, "The main port for the vttablet process") + vtsPort = subFlags.Int("vts_port", 0, "The encrypted port for the vttablet process") + keyspace = subFlags.String("keyspace", "", "The keyspace to which this tablet belongs") + shard = subFlags.String("shard", "", "The shard to which this tablet belongs") tags flagutil.StringMapValue ) - subFlags.Var(&tags, "tags", "comma separated list of key:value pairs used to tag the tablet") + subFlags.Var(&tags, "tags", "A comma-separated list of key:value pairs that are used to tag the tablet") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action InitTablet requires ") + return fmt.Errorf("The and arguments are both required for the InitTablet command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -534,7 +614,7 @@ func commandGetTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action GetTablet requires ") + return fmt.Errorf("The argument is required for the GetTablet command.") } tabletAlias, err := 
topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -549,17 +629,17 @@ func commandGetTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag } func commandUpdateTabletAddrs(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - hostname := subFlags.String("hostname", "", "fully qualified host name") + hostname := subFlags.String("hostname", "", "The fully qualified host name of the server on which the tablet is running.") ipAddr := subFlags.String("ip-addr", "", "IP address") - mysqlPort := subFlags.Int("mysql-port", 0, "mysql port") - vtPort := subFlags.Int("vt-port", 0, "vt port") - vtsPort := subFlags.Int("vts-port", 0, "vts port") + mysqlPort := subFlags.Int("mysql-port", 0, "The mysql port for the mysql daemon") + vtPort := subFlags.Int("vt-port", 0, "The main port for the vttablet process") + vtsPort := subFlags.Int("vts-port", 0, "The encrypted port for the vttablet process") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action UpdateTabletAddrs requires ") + return fmt.Errorf("The argument is required for the UpdateTabletAddrs command.") } if *ipAddr != "" && net.ParseIP(*ipAddr) == nil { return fmt.Errorf("malformed address: %v", *ipAddr) @@ -595,13 +675,13 @@ func commandUpdateTabletAddrs(ctx context.Context, wr *wrangler.Wrangler, subFla } func commandScrapTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "writes the scrap state in to zk, no questions asked, if a tablet is offline") - skipRebuild := subFlags.Bool("skip-rebuild", false, "do not rebuild the shard and keyspace graph after scrapping") + force := subFlags.Bool("force", false, "Changes the tablet type to scrap in ZooKeeper or etcd if a tablet is offline") + skipRebuild := subFlags.Bool("skip-rebuild", false, "Skips rebuilding the shard and keyspace graph after scrapping the tablet") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ScrapTablet requires ") + return fmt.Errorf("The argument is required for the ScrapTablet command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -616,7 +696,7 @@ func commandDeleteTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return err } if subFlags.NArg() == 0 { - return fmt.Errorf("action DeleteTablet requires at least one ") + return fmt.Errorf("The argument must be used to specify at least one tablet when calling the DeleteTablet command.") } tabletAliases, err := tabletParamsToTabletAliases(subFlags.Args()) @@ -636,7 +716,7 @@ func commandSetReadOnly(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action SetReadOnly requires ") + return fmt.Errorf("The argument is required for the SetReadOnly command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -655,7 +735,7 @@ func commandSetReadWrite(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action SetReadWrite requires ") + return fmt.Errorf("The argument is required for the SetReadWrite command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -708,14 +788,14 @@ func commandStopSlave(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag } func commandChangeSlaveType(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - 
force := subFlags.Bool("force", false, "will change the type in zookeeper, and not run hooks") - dryRun := subFlags.Bool("dry-run", false, "just list the proposed change") + force := subFlags.Bool("force", false, "Changes the slave type in ZooKeeper or etcd without running hooks") + dryRun := subFlags.Bool("dry-run", false, "Lists the proposed change without actually executing it") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action ChangeSlaveType requires ") + return fmt.Errorf("The and arguments are required for the ChangeSlaveType command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -747,7 +827,7 @@ func commandPing(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Flag return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action Ping requires ") + return fmt.Errorf("The argument is required for the Ping command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -765,7 +845,7 @@ func commandRefreshState(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action RefreshState requires ") + return fmt.Errorf("The argument is required for the RefreshState command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -783,7 +863,7 @@ func commandRunHealthCheck(ctx context.Context, wr *wrangler.Wrangler, subFlags return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action RunHealthCheck requires ") + return fmt.Errorf("The and arguments are required for the RunHealthCheck command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -805,7 +885,7 @@ func commandHealthStream(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action HealthStream ") + return fmt.Errorf("The argument is required for the HealthStream command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -830,7 +910,7 @@ func commandSleep(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fla return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action Sleep requires ") + return fmt.Errorf("The and arguments are required for the Sleep command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -848,12 +928,12 @@ func commandSleep(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fla } func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - concurrency := subFlags.Int("concurrency", 4, "how many compression/checksum jobs to run simultaneously") + concurrency := subFlags.Int("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action Backup requires ") + return fmt.Errorf("The Backup command requires the argument.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -875,16 +955,16 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fl } func commandExecuteFetchAsDba(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - maxRows := subFlags.Int("max_rows", 10000, "maximum number of rows to allow in reset") - wantFields := subFlags.Bool("want_fields", false, "also get the field names") - disableBinlogs := 
subFlags.Bool("disable_binlogs", false, "disable writing to binlogs during the query") - reloadSchema := subFlags.Bool("reload_schema", false, "if this flag is true, tablet schema will be reloaded after executing given query") + maxRows := subFlags.Int("max_rows", 10000, "Specifies the maximum number of rows to allow in reset") + wantFields := subFlags.Bool("want_fields", false, "Indicates whether the request should also get field names") + disableBinlogs := subFlags.Bool("disable_binlogs", false, "Disables writing to binlogs during the query") + reloadSchema := subFlags.Bool("reload_schema", false, "Indicates whether the tablet schema will be reloaded after executing the SQL command. The default value is false, which indicates that the tablet schema will not be reloaded.") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action ExecuteFetchAsDba requires ") + return fmt.Errorf("The and arguments are required for the ExecuteFetchAsDba command.") } alias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -904,7 +984,7 @@ func commandExecuteHook(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return err } if subFlags.NArg() < 2 { - return fmt.Errorf("action ExecuteHook requires ") + return fmt.Errorf("The and arguments are required for the ExecuteHook command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -920,13 +1000,13 @@ func commandExecuteHook(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl } func commandCreateShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "will keep going even if the keyspace already exists") - parent := subFlags.Bool("parent", false, "creates the parent keyspace if it doesn't exist") + force := subFlags.Bool("force", false, "Proceeds with the command even if the keyspace already exists") + parent := subFlags.Bool("parent", false, "Creates the parent keyspace if it doesn't already exist") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action CreateShard requires ") + return fmt.Errorf("The argument is required for the CreateShard command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -952,7 +1032,7 @@ func commandGetShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag. return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action GetShard requires ") + return fmt.Errorf("The argument is required for the GetShard command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -967,12 +1047,12 @@ func commandGetShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag. 
} func commandRebuildShardGraph(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - cells := subFlags.String("cells", "", "comma separated list of cells to update") + cells := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() == 0 { - return fmt.Errorf("action RebuildShardGraph requires at least one ") + return fmt.Errorf("The argument must be used to identify at least one keyspace and shard when calling the RebuildShardGraph command.") } var cellArray []string @@ -997,7 +1077,7 @@ func commandTabletExternallyReparented(ctx context.Context, wr *wrangler.Wrangle return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action TabletExternallyReparented requires ") + return fmt.Errorf("The argument is required for the TabletExternallyReparented command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) @@ -1012,12 +1092,12 @@ func commandTabletExternallyReparented(ctx context.Context, wr *wrangler.Wrangle } func commandValidateShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - pingTablets := subFlags.Bool("ping-tablets", true, "ping all tablets during validate") + pingTablets := subFlags.Bool("ping-tablets", true, "Indicates whether all tablets should be pinged during the validation process") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidateShard requires ") + return fmt.Errorf("The argument is required for the ValidateShard command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1032,7 +1112,7 @@ func commandShardReplicationPositions(ctx context.Context, wr *wrangler.Wrangler return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ShardReplicationPositions requires ") + return fmt.Errorf("The argument is required for the ShardReplicationPositions command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) if err != nil { @@ -1064,7 +1144,7 @@ func commandListShardTablets(ctx context.Context, wr *wrangler.Wrangler, subFlag return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ListShardTablets requires ") + return fmt.Errorf("The argument is required for the ListShardTablets command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) if err != nil { @@ -1074,13 +1154,13 @@ func commandListShardTablets(ctx context.Context, wr *wrangler.Wrangler, subFlag } func commandSetShardServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - cellsStr := subFlags.String("cells", "", "comma separated list of cells to update") - remove := subFlags.Bool("remove", false, "will remove the served type") + cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") + remove := subFlags.Bool("remove", false, "Removes the served tablet type") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action SetShardServedTypes requires ") + return fmt.Errorf("The and arguments are both required for the SetShardServedTypes command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) if err != nil { @@ -1099,15 +1179,15 @@ func commandSetShardServedTypes(ctx context.Context, wr *wrangler.Wrangler, subF } func commandSetShardTabletControl(ctx context.Context, wr 
*wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - cellsStr := subFlags.String("cells", "", "comma separated list of cells to update") - tablesStr := subFlags.String("tables", "", "comma separated list of tables to replicate (used for vertical split)") - remove := subFlags.Bool("remove", false, "will remove cells for vertical splits (requires tables)") - disableQueryService := subFlags.Bool("disable_query_service", false, "will disable query service on the provided nodes") + cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") + tablesStr := subFlags.String("tables", "", "Specifies a comma-separated list of tables to replicate (used for vertical split)") + remove := subFlags.Bool("remove", false, "Removes cells for vertical splits. This flag requires the *tables* flag to also be set.") + disableQueryService := subFlags.Bool("disable_query_service", false, "Disables query service on the provided nodes") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action SetShardTabletControl requires ") + return fmt.Errorf("The and arguments are both required for the SetShardTabletControl command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) if err != nil { @@ -1135,7 +1215,7 @@ func commandSourceShardDelete(ctx context.Context, wr *wrangler.Wrangler, subFla } if subFlags.NArg() < 2 { - return fmt.Errorf("SourceShardDelete requires ") + return fmt.Errorf("The and arguments are both required for the SourceShardDelete command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) if err != nil { @@ -1149,13 +1229,13 @@ func commandSourceShardDelete(ctx context.Context, wr *wrangler.Wrangler, subFla } func commandSourceShardAdd(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - keyRange := subFlags.String("key_range", "", "key range to use for the SourceShard") - tablesStr := subFlags.String("tables", "", "comma separated list of tables to replicate (used for vertical split)") + keyRange := subFlags.String("key_range", "", "Identifies the key range to use for the SourceShard") + tablesStr := subFlags.String("tables", "", "Specifies a comma-separated list of tables to replicate (used for vertical split)") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 3 { - return fmt.Errorf("SourceShardAdd requires , , and ") + return fmt.Errorf("The , , and arguments are all required for the SourceShardAdd command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) if err != nil { @@ -1187,7 +1267,7 @@ func commandShardReplicationAdd(ctx context.Context, wr *wrangler.Wrangler, subF return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action ShardReplicationAdd requires ") + return fmt.Errorf("The and arguments are required for the ShardReplicationAdd command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1206,7 +1286,7 @@ func commandShardReplicationRemove(ctx context.Context, wr *wrangler.Wrangler, s return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action ShardReplicationRemove requires ") + return fmt.Errorf("The and arguments are required for the ShardReplicationRemove command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1225,7 +1305,7 @@ func commandShardReplicationFix(ctx context.Context, wr *wrangler.Wrangler, subF return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action ShardReplicationRemove requires ") + return fmt.Errorf("The and arguments are required for the ShardReplicationFix command.") } cell := subFlags.Arg(0) @@ -1237,12 +1317,12 @@ func commandShardReplicationFix(ctx context.Context, wr *wrangler.Wrangler, subF } func commandRemoveShardCell(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "will keep going even we can't reach the cell's topology server to check for tablets") + force := subFlags.Bool("force", false, "Proceeds even if the cell's topology server cannot be reached to check for tablets") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action RemoveShardCell requires ") + return fmt.Errorf("The and arguments are required for the RemoveShardCell command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1257,7 +1337,7 @@ func commandDeleteShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return err } if subFlags.NArg() == 0 { - return fmt.Errorf("action DeleteShard requires [...]") + return fmt.Errorf("The argument must be used to identify at least one keyspace and shard when calling the DeleteShard command.") } keyspaceShards, err := shardParamsToKeyspaceShards(ctx, wr, subFlags.Args()) @@ -1279,23 +1359,23 @@ func commandDeleteShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl } func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - shardingColumnName := subFlags.String("sharding_column_name", "", "column to use for sharding operations") - shardingColumnType := subFlags.String("sharding_column_type", "", "type of the column to use for sharding operations") - splitShardCount := subFlags.Int("split_shard_count", 0, "number of shards to use for data splits") - force := subFlags.Bool("force", false, "will keep going even if the keyspace already exists") + shardingColumnName := subFlags.String("sharding_column_name", "", "Specifies the column to use for sharding operations") + shardingColumnType := subFlags.String("sharding_column_type", "", "Specifies the type of the column to use for sharding operations") + splitShardCount := subFlags.Int("split_shard_count", 0, "Specifies the number of shards to use for data splits") + force := subFlags.Bool("force", false, "Proceeds even if the keyspace already exists") var servedFrom flagutil.StringMapValue - subFlags.Var(&servedFrom, "served_from", "comma separated list of dbtype:keyspace pairs used to serve traffic") + subFlags.Var(&servedFrom, "served_from", "Specifies a comma-separated list of dbtype:keyspace pairs used to serve traffic") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action CreateKeyspace requires ") + return fmt.Errorf("The argument is required for the CreateKeyspace command.") } keyspace := subFlags.Arg(0) kit := key.KeyspaceIdType(*shardingColumnType) if !key.IsKeyspaceIdTypeInList(kit, key.AllKeyspaceIdTypes) { - return fmt.Errorf("invalid sharding_column_type") + return fmt.Errorf("The sharding_column_type flag specifies an invalid value.") } ki := &topo.Keyspace{ ShardingColumnName: *shardingColumnName, @@ -1307,7 +1387,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags for name, value := range servedFrom { tt := topo.TabletType(name) if !topo.IsInServingGraph(tt) { - return fmt.Errorf("Cannot use tablet type that is not in serving graph: %v", tt) 
+ return fmt.Errorf("The served_from flag specifies a database (tablet) type that is not in the serving graph. The invalid value is: %v", tt) } ki.ServedFromMap[tt] = &topo.KeyspaceServedFrom{ Keyspace: value, @@ -1327,7 +1407,7 @@ func commandGetKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action GetKeyspace requires ") + return fmt.Errorf("The argument is required for the GetKeyspace command.") } keyspace := subFlags.Arg(0) @@ -1339,13 +1419,13 @@ func commandGetKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl } func commandSetKeyspaceShardingInfo(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "will update the fields even if they're already set, use with care") - splitShardCount := subFlags.Int("split_shard_count", 0, "number of shards to use for data splits") + force := subFlags.Bool("force", false, "Updates fields even if they are already set. Use caution before calling this command.") + splitShardCount := subFlags.Int("split_shard_count", 0, "Specifies the number of shards to use for data splits") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() > 3 || subFlags.NArg() < 1 { - return fmt.Errorf("action SetKeyspaceShardingInfo requires [] []") + return fmt.Errorf("The argument is required for the SetKeyspaceShardingInfo command. The and arguments are both optional.") } keyspace := subFlags.Arg(0) @@ -1357,7 +1437,7 @@ func commandSetKeyspaceShardingInfo(ctx context.Context, wr *wrangler.Wrangler, if subFlags.NArg() >= 3 { kit = key.KeyspaceIdType(subFlags.Arg(2)) if !key.IsKeyspaceIdTypeInList(kit, key.AllKeyspaceIdTypes) { - return fmt.Errorf("invalid sharding_column_type") + return fmt.Errorf("The argument specifies an invalid value for the sharding_column_type.") } } @@ -1365,14 +1445,14 @@ func commandSetKeyspaceShardingInfo(ctx context.Context, wr *wrangler.Wrangler, } func commandSetKeyspaceServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - source := subFlags.String("source", "", "source keyspace name") - remove := subFlags.Bool("remove", false, "remove the served from record instead of adding it") - cellsStr := subFlags.String("cells", "", "comma separated list of cells to affect") + source := subFlags.String("source", "", "Specifies the source keyspace name") + remove := subFlags.Bool("remove", false, "Indicates whether to add (default) or remove the served from record") + cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to affect") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action SetKeyspaceServedFrom requires ") + return fmt.Errorf("The and arguments are required for the SetKeyspaceServedFrom command.") } keyspace := subFlags.Arg(0) servedType, err := parseTabletType(subFlags.Arg(1), []topo.TabletType{topo.TYPE_MASTER, topo.TYPE_REPLICA, topo.TYPE_RDONLY}) @@ -1388,13 +1468,13 @@ func commandSetKeyspaceServedFrom(ctx context.Context, wr *wrangler.Wrangler, su } func commandRebuildKeyspaceGraph(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - cells := subFlags.String("cells", "", "comma separated list of cells to update") - rebuildSrvShards := subFlags.Bool("rebuild_srv_shards", false, "also rebuild all the SrvShard objects") + cells := subFlags.String("cells", "", "Specifies a 
comma-separated list of cells to update") + rebuildSrvShards := subFlags.Bool("rebuild_srv_shards", false, "Indicates whether all SrvShard objects should also be rebuilt. The default value is false.") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() == 0 { - return fmt.Errorf("action RebuildKeyspaceGraph requires at least one ") + return fmt.Errorf("The argument must be used to specify at least one keyspace when calling the RebuildKeyspaceGraph command.") } var cellArray []string @@ -1415,12 +1495,12 @@ func commandRebuildKeyspaceGraph(ctx context.Context, wr *wrangler.Wrangler, sub } func commandValidateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - pingTablets := subFlags.Bool("ping-tablets", false, "ping all tablets during validate") + pingTablets := subFlags.Bool("ping-tablets", false, "Specifies whether all tablets will be pinged during the validation process") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidateKeyspace requires ") + return fmt.Errorf("The argument is required for the ValidateKeyspace command.") } keyspace := subFlags.Arg(0) @@ -1428,15 +1508,15 @@ func commandValidateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlag } func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - cellsStr := subFlags.String("cells", "", "comma separated list of cells to update") - reverse := subFlags.Bool("reverse", false, "move the served type back instead of forward, use in case of trouble") - skipReFreshState := subFlags.Bool("skip-refresh-state", false, "do not refresh the state of the source tablets after the migration (will need to be done manually, replica and rdonly only)") - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "maximum time to wait for filtered replication to catch up on master migrations") + cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. 
Use in case of trouble") + skipReFreshState := subFlags.Bool("skip-refresh-state", false, "Skips refreshing the state of the source tablets after the migration, meaning that the refresh will need to be done manually (replica and rdonly migrations only)") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action MigrateServedTypes requires ") + return fmt.Errorf("The and arguments are both required for the MigrateServedTypes command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1448,7 +1528,7 @@ func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFl return err } if servedType == topo.TYPE_MASTER && *skipReFreshState { - return fmt.Errorf("can only specify skip-refresh-state for non-master migrations") + return fmt.Errorf("The skip-refresh-state flag can only be specified for non-master migrations.") } var cells []string if *cellsStr != "" { @@ -1458,14 +1538,14 @@ func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFl }
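+ // For example, to migrate rdonly traffic forward during a horizontal split
+ // (the shard name here is illustrative):
+ //   vtctl MigrateServedTypes ks/0 rdonly
+ // The -reverse flag moves the served type back to the source if needed.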
func commandMigrateServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - reverse := subFlags.Bool("reverse", false, "move the served from back instead of forward, use in case of trouble") - cellsStr := subFlags.String("cells", "", "comma separated list of cells to update") - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "maximum time to wait for filtered replication to catch up on master migrations") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. Use in case of trouble") + cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action MigrateServedFrom requires ") + return fmt.Errorf("The and arguments are both required for the MigrateServedFrom command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1488,7 +1568,7 @@ func commandFindAllShardsInKeyspace(ctx context.Context, wr *wrangler.Wrangler, return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action FindAllShardsInKeyspace requires ") + return fmt.Errorf("The argument is required for the FindAllShardsInKeyspace command.") } keyspace := subFlags.Arg(0) @@ -1505,17 +1585,17 @@ func commandResolve(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.F return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action Resolve requires ..:") + return fmt.Errorf("The Resolve command requires a single argument, the value of which must be in the format ..:.") } parts := strings.Split(subFlags.Arg(0), ":") if len(parts) != 2 { - return fmt.Errorf("action Resolve requires ..:") + return fmt.Errorf("The Resolve command requires a single argument, the value of which must be in the format ..:.") } namedPort := parts[1] parts = strings.Split(parts[0], ".") if len(parts) != 3 { - return fmt.Errorf("action Resolve requires ..:") + return fmt.Errorf("The Resolve command requires a single argument, the value of which must be in the format ..:.") } tabletType, err := parseTabletType(parts[2], topo.AllTabletTypes) @@ -1533,7 +1613,7 @@ func commandResolve(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.F } func commandValidate(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - pingTablets := subFlags.Bool("ping-tablets", false, "ping all tablets during validate") + pingTablets := subFlags.Bool("ping-tablets", false, "Indicates whether all tablets should be pinged during the validation process") if err := subFlags.Parse(args); err != nil { return err } @@ -1550,7 +1630,7 @@ func commandRebuildReplicationGraph(ctx context.Context, wr *wrangler.Wrangler, return err } if subFlags.NArg() < 2 { - return fmt.Errorf("action RebuildReplicationGraph requires ,,... ,[,...]") + return fmt.Errorf("The and arguments are both required for the RebuildReplicationGraph command. To specify multiple cells, separate the cell names with commas. 
Similarly, to specify multiple keyspaces, separate the keyspace names with commas.") } cells := strings.Split(subFlags.Arg(0), ",") @@ -1567,7 +1647,7 @@ func commandListAllTablets(ctx context.Context, wr *wrangler.Wrangler, subFlags return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ListAllTablets requires ") + return fmt.Errorf("The argument is required for the ListAllTablets command.") } cell := subFlags.Arg(0) @@ -1579,7 +1659,7 @@ func commandListTablets(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return err } if subFlags.NArg() == 0 { - return fmt.Errorf("action ListTablets requires ") + return fmt.Errorf("The argument is required for the ListTablets command.") } paths := subFlags.Args() @@ -1595,15 +1675,15 @@ func commandListTablets(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl } func commandGetSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - tables := subFlags.String("tables", "", "comma separated list of regexps for tables to gather schema information for") - excludeTables := subFlags.String("exclude_tables", "", "comma separated list of regexps for tables to exclude") - includeViews := subFlags.Bool("include-views", false, "include views in the output") - tableNamesOnly := subFlags.Bool("table_names_only", false, "only display the table names that match") + tables := subFlags.String("tables", "", "Specifies a comma-separated list of regular expressions for the tables to gather schema information for") + excludeTables := subFlags.String("exclude_tables", "", "Specifies a comma-separated list of regular expressions for tables to exclude") + includeViews := subFlags.Bool("include-views", false, "Includes views in the output") + tableNamesOnly := subFlags.Bool("table_names_only", false, "Only displays table names that match") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action GetSchema requires ") + return fmt.Errorf("The argument is required for the GetSchema command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -1636,7 +1716,7 @@ func commandReloadSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ReloadSchema requires ") + return fmt.Errorf("The argument is required for the ReloadSchema command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -1646,13 +1726,13 @@ func commandReloadSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *f } func commandValidateSchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - excludeTables := subFlags.String("exclude_tables", "", "comma separated list of regexps for tables to exclude") - includeViews := subFlags.Bool("include-views", false, "include views in the validation") + excludeTables := subFlags.String("exclude_tables", "", "Specifies a comma-separated list of regular expressions for tables to exclude") + includeViews := subFlags.Bool("include-views", false, "Includes views in the validation") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidateSchemaShard requires ") + return fmt.Errorf("The argument is required for the ValidateSchemaShard command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1667,13 +1747,13 @@ func commandValidateSchemaShard(ctx context.Context, wr 
*wrangler.Wrangler, subF } func commandValidateSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - excludeTables := subFlags.String("exclude_tables", "", "comma separated list of regexps for tables to exclude") - includeViews := subFlags.Bool("include-views", false, "include views in the validation") + excludeTables := subFlags.String("exclude_tables", "", "Specifies a comma-separated list of regular expressions for tables to exclude") + includeViews := subFlags.Bool("include-views", false, "Includes views in the validation") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidateSchemaKeyspace requires ") + return fmt.Errorf("The argument is required for the ValidateSchemaKeyspace command.") } keyspace := subFlags.Arg(0) @@ -1685,15 +1765,15 @@ func commandValidateSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, s } func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "will apply the schema even if preflight schema doesn't match") - sql := subFlags.String("sql", "", "a list of sql commands separated by semicolon") - sqlFile := subFlags.String("sql-file", "", "file containing the sql commands") - waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time to wait for slaves to catch up in reparenting") + force := subFlags.Bool("force", false, "Applies the schema even if the preflight schema doesn't match") + sql := subFlags.String("sql", "", "A list of semicolon-delimited SQL commands") + sqlFile := subFlags.String("sql-file", "", "Identifies the file that contains the SQL commands") + waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "The amount of time to wait for slaves to catch up during reparenting. The default value is 30 seconds.") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ApplySchemaKeyspace requires ") + return fmt.Errorf("The argument is required for the ApplySchema command.") } keyspace := subFlags.Arg(0) @@ -1709,15 +1789,15 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl } func commandCopySchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - tables := subFlags.String("tables", "", "comma separated list of regexps for tables to gather schema information for") - excludeTables := subFlags.String("exclude_tables", "", "comma separated list of regexps for tables to exclude") - includeViews := subFlags.Bool("include-views", true, "include views in the output") + tables := subFlags.String("tables", "", "Specifies a comma-separated list of regular expressions for the tables to gather schema information for") + excludeTables := subFlags.String("exclude_tables", "", "Specifies a comma-separated list of regular expressions for the tables to exclude") + includeViews := subFlags.Bool("include-views", true, "Includes views in the output") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action CopySchemaShard requires a source and a destination ") + return fmt.Errorf("The and arguments are both required for the CopySchemaShard command. The argument identifies a source and the argument identifies a destination.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -1745,7 +1825,7 @@ func commandValidateVersionShard(ctx context.Context, wr *wrangler.Wrangler, sub return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidateVersionShard requires ") + return fmt.Errorf("The argument is required for the ValidateVersionShard command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1760,7 +1840,7 @@ func commandValidateVersionKeyspace(ctx context.Context, wr *wrangler.Wrangler, return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidateVersionKeyspace requires ") + return fmt.Errorf("The argument is required for the ValidateVersionKeyspace command.") } keyspace := subFlags.Arg(0) @@ -1772,7 +1852,7 @@ func commandGetPermissions(ctx context.Context, wr *wrangler.Wrangler, subFlags return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action GetPermissions requires ") + return fmt.Errorf("The argument is required for the GetPermissions command.") } tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) if err != nil { @@ -1790,7 +1870,7 @@ func commandValidatePermissionsShard(ctx context.Context, wr *wrangler.Wrangler, return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidatePermissionsShard requires ") + return fmt.Errorf("The argument is required for the ValidatePermissionsShard command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) @@ -1805,7 +1885,7 @@ func commandValidatePermissionsKeyspace(ctx context.Context, wr *wrangl return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action ValidatePermissionsKeyspace requires ") + return fmt.Errorf("The argument is required for the ValidatePermissionsKeyspace command.") } keyspace := subFlags.Arg(0) @@ -1817,12 +1897,12 @@ func commandGetVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla return err } if subFlags.NArg() != 0 { - return fmt.Errorf("action GetVSchema does not require additional arguments") + return fmt.Errorf("The GetVSchema command does not support any arguments.") } ts := wr.TopoServer() schemafier, ok := ts.(topo.Schemafier) if !ok { - return fmt.Errorf("%T does no support the vschema operations", ts) + return fmt.Errorf("%T does not support the vschema operations", ts) } schema, err := schemafier.GetVSchema(ctx) if err != nil { @@ -1833,13 +1913,13 @@ func commandGetVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla } func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - vschema := subFlags.String("vschema", "", "VTGate routing schema") - vschemaFile := subFlags.String("vschema_file", "", "VTGate routing schema file") + vschema := subFlags.String("vschema", "", "Identifies the VTGate routing schema") + vschemaFile := subFlags.String("vschema_file", "", "Identifies the VTGate routing schema file") if err := subFlags.Parse(args); err != nil { return err } if (*vschema == "") == (*vschemaFile == "") { - return fmt.Errorf("action ApplyVSchema requires either vschema or vschema_file") + return fmt.Errorf("Either the vschema or vschema_file flag must be specified when calling the ApplyVSchema command.") } ts := wr.TopoServer() schemafier, ok := ts.(topo.Schemafier) @@ -1862,7 +1942,7 @@ func commandGetSrvKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags return err } if subFlags.NArg() != 2 { - 
return fmt.Errorf("action GetSrvKeyspace requires ") + return fmt.Errorf("The and arguments are required for the GetSrvKeyspace command.") } srvKeyspace, err := wr.TopoServer().GetSrvKeyspace(ctx, subFlags.Arg(0), subFlags.Arg(1)) @@ -1877,7 +1957,7 @@ func commandGetSrvKeyspaceNames(ctx context.Context, wr *wrangler.Wrangler, subF return err } if subFlags.NArg() != 1 { - return fmt.Errorf("action GetSrvKeyspaceNames requires ") + return fmt.Errorf("The argument is required for the GetSrvKeyspaceNames command.") } srvKeyspaceNames, err := wr.TopoServer().GetSrvKeyspaceNames(ctx, subFlags.Arg(0)) @@ -1895,7 +1975,7 @@ func commandGetSrvShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action GetSrvShard requires ") + return fmt.Errorf("The and arguments are required for the GetSrvShard command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(1)) @@ -1914,7 +1994,7 @@ func commandGetEndPoints(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return err } if subFlags.NArg() != 3 { - return fmt.Errorf("action GetEndPoints requires ") + return fmt.Errorf("The , , and arguments are required for the GetEndPoints command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(1)) @@ -1934,7 +2014,7 @@ func commandGetShardReplication(ctx context.Context, wr *wrangler.Wrangler, subF return err } if subFlags.NArg() != 2 { - return fmt.Errorf("action GetShardReplication requires ") + return fmt.Errorf("The and arguments are required for the GetShardReplication command.") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(1)) @@ -1959,7 +2039,7 @@ func commandHelp(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Flag case 1: RunCommand(ctx, wr, []string{subFlags.Arg(0), "--help"}) default: - return fmt.Errorf("action Help takes no parameter, or just the name of the command to get help on") + return fmt.Errorf("When calling the Help command, either specify a single argument that identifies the name of the command to get help with or do not specify any additional arguments.") } return nil @@ -2025,7 +2105,7 @@ func RunCommand(ctx context.Context, wr *wrangler.Wrangler, args []string) error if len(args) == 0 { wr.Logger().Printf("No command specified. Please see the list below:\n\n") PrintAllCommands(wr.Logger()) - return fmt.Errorf("No command specified") + return fmt.Errorf("No command was specified.") } action := args[0] From b2978cbca2a19df7dc4296700700a7001423452f Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 29 May 2015 16:06:38 -0700 Subject: [PATCH 128/128] Adding unit test for migrate_served_types. 
--- go/vt/topo/shard.go | 2 +- .../testlib/migrate_served_types_test.go | 168 ++++++++++++++++++ 2 files changed, 169 insertions(+), 1 deletion(-) create mode 100644 go/vt/wrangler/testlib/migrate_served_types_test.go diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go index 71f51c8f97..e01f8be889 100644 --- a/go/vt/topo/shard.go +++ b/go/vt/topo/shard.go @@ -475,7 +475,7 @@ func (si *ShardInfo) CheckServedTypesMigration(tabletType TabletType, cells []st // we can't remove a type we don't have if _, ok := si.ServedTypesMap[tabletType]; !ok && remove { - return fmt.Errorf("supplied type cannot be migrated") + return fmt.Errorf("supplied type %v cannot be migrated out of %#v", tabletType, si) } return nil diff --git a/go/vt/wrangler/testlib/migrate_served_types_test.go b/go/vt/wrangler/testlib/migrate_served_types_test.go new file mode 100644 index 0000000000..e002bfe900 --- /dev/null +++ b/go/vt/wrangler/testlib/migrate_served_types_test.go @@ -0,0 +1,168 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testlib + +import ( + "testing" + "time" + + mproto "github.com/youtube/vitess/go/mysql/proto" + "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/logutil" + myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" + "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/wrangler" + "github.com/youtube/vitess/go/vt/zktopo" + "golang.org/x/net/context" +) + +func checkShardServedTypes(t *testing.T, ts topo.Server, shard string, expected int) { + ctx := context.Background() + si, err := ts.GetShard(ctx, "ks", shard) + if err != nil { + t.Fatalf("GetShard failed: %v", err) + } + if len(si.ServedTypesMap) != expected { + t.Fatalf("shard %v has wrong served types: %#v", shard, si.ServedTypesMap) + } +} + +func TestMigrateServedTypes(t *testing.T) { + ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) + vp := NewVtctlPipe(t, ts) + defer vp.Close() + + // create the source shard + sourceMaster := NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, + TabletKeyspaceShard(t, "ks", "0")) + sourceReplica := NewFakeTablet(t, wr, "cell1", 11, topo.TYPE_REPLICA, + TabletKeyspaceShard(t, "ks", "0")) + sourceRdonly := NewFakeTablet(t, wr, "cell1", 12, topo.TYPE_RDONLY, + TabletKeyspaceShard(t, "ks", "0")) + + // create the first destination shard + dest1Master := NewFakeTablet(t, wr, "cell1", 20, topo.TYPE_MASTER, + TabletKeyspaceShard(t, "ks", "-80")) + dest1Replica := NewFakeTablet(t, wr, "cell1", 21, topo.TYPE_REPLICA, + TabletKeyspaceShard(t, "ks", "-80")) + dest1Rdonly := NewFakeTablet(t, wr, "cell1", 22, topo.TYPE_RDONLY, + TabletKeyspaceShard(t, "ks", "-80")) + + // create the second destination shard + dest2Master := NewFakeTablet(t, wr, "cell1", 30, topo.TYPE_MASTER, + TabletKeyspaceShard(t, "ks", "80-")) + dest2Replica := NewFakeTablet(t, wr, "cell1", 31, topo.TYPE_REPLICA, + TabletKeyspaceShard(t, "ks", "80-")) + dest2Rdonly := NewFakeTablet(t, wr, "cell1", 32, topo.TYPE_RDONLY, + TabletKeyspaceShard(t, "ks", "80-")) + + // double check the shards have the right served types + checkShardServedTypes(t, ts, "0", 3) + checkShardServedTypes(t, ts, "-80", 0) + checkShardServedTypes(t, ts, "80-", 0) + + // sourceRdonly will see the refresh + 
sourceRdonly.StartActionLoop(t, wr) + defer sourceRdonly.StopActionLoop(t) + + // sourceReplica will see the refresh + sourceReplica.StartActionLoop(t, wr) + defer sourceReplica.StopActionLoop(t) + + // sourceMaster will see the refresh, and has to respond to it + // also will be asked about its replication position. + sourceMaster.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ + GTIDSet: myproto.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + } + sourceMaster.StartActionLoop(t, wr) + defer sourceMaster.StopActionLoop(t) + + // dest1Rdonly will see the refresh + dest1Rdonly.StartActionLoop(t, wr) + defer dest1Rdonly.StopActionLoop(t) + + // dest1Replica will see the refresh + dest1Replica.StartActionLoop(t, wr) + defer dest1Replica.StopActionLoop(t) + + // dest1Master will see the refresh, and has to respond to it. + // It will also need to respond to WaitBlpPosition, saying it's already caught up. + dest1Master.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*mproto.QueryResult{ + "SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=0": &mproto.QueryResult{ + Rows: [][]sqltypes.Value{ + []sqltypes.Value{ + sqltypes.MakeString([]byte(myproto.EncodeReplicationPosition(sourceMaster.FakeMysqlDaemon.CurrentMasterPosition))), + sqltypes.MakeString([]byte("")), + }, + }, + }, + } + dest1Master.StartActionLoop(t, wr) + defer dest1Master.StopActionLoop(t) + + // dest2Rdonly will see the refresh + dest2Rdonly.StartActionLoop(t, wr) + defer dest2Rdonly.StopActionLoop(t) + + // dest2Replica will see the refresh + dest2Replica.StartActionLoop(t, wr) + defer dest2Replica.StopActionLoop(t) + + // dest2Master will see the refresh, and has to respond to it. + // It will also need to respond to WaitBlpPosition, saying it's already caught up. + dest2Master.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*mproto.QueryResult{ + "SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=0": &mproto.QueryResult{ + Rows: [][]sqltypes.Value{ + []sqltypes.Value{ + sqltypes.MakeString([]byte(myproto.EncodeReplicationPosition(sourceMaster.FakeMysqlDaemon.CurrentMasterPosition))), + sqltypes.MakeString([]byte("")), + }, + }, + }, + } + dest2Master.StartActionLoop(t, wr) + defer dest2Master.StopActionLoop(t) + + // simulate the clone, by fixing the dest shard record + if err := vp.Run([]string{"SourceShardAdd", "--key_range=-", "ks/-80", "0", "ks/0"}); err != nil { + t.Fatalf("SourceShardAdd failed: %v", err) + } + if err := vp.Run([]string{"SourceShardAdd", "--key_range=-", "ks/80-", "0", "ks/0"}); err != nil { + t.Fatalf("SourceShardAdd failed: %v", err) + } + + // migrate rdonly over + if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "rdonly"}); err != nil { + t.Fatalf("MigrateServedType(rdonly) failed: %v", err) + } + + checkShardServedTypes(t, ts, "0", 2) + checkShardServedTypes(t, ts, "-80", 1) + checkShardServedTypes(t, ts, "80-", 1) + + // migrate replica over + if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "replica"}); err != nil { + t.Fatalf("MigrateServedType(replica) failed: %v", err) + } + + checkShardServedTypes(t, ts, "0", 1) + checkShardServedTypes(t, ts, "-80", 2) + checkShardServedTypes(t, ts, "80-", 2) + + // migrate master over + if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "master"}); err != nil { + t.Fatalf("MigrateServedType(master) failed: %v", err) + } + + checkShardServedTypes(t, ts, "0", 0) + checkShardServedTypes(t, ts, "-80", 3) + checkShardServedTypes(t, ts, "80-", 3) +}
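For reference, the new test should run on its own with the standard Go tooling; the package path below is inferred from the file path and imports above, and a bootstrapped Vitess development environment is assumed:

    go test github.com/youtube/vitess/go/vt/wrangler/testlib -run TestMigrateServedTypes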

Date: Tue, 19 May 2015 11:35:52 -0700 Subject: [PATCH 044/128] Adding restore code to vttablet start.
--- go/vt/mysqlctl/backup.go | 183 ++++++++++++++++++++++++++++++++- go/vt/tabletmanager/agent.go | 19 +++- go/vt/tabletmanager/restore.go | 85 +++++++++++++++ test/backup.py | 54 ++++++++-- test/tablet.py | 3 +- 5 files changed, 329 insertions(+), 15 deletions(-) create mode 100644 go/vt/tabletmanager/restore.go diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 4e312ec731..2e03a5c716 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -7,6 +7,7 @@ package mysqlctl import ( "bufio" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -16,6 +17,8 @@ import ( "strings" "sync" + log "github.com/golang/glog" + "github.com/youtube/vitess/go/cgzip" "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/vt/concurrency" @@ -36,6 +39,11 @@ const ( backupManifest = "MANIFEST" ) +var ( + // ErrNoBackup is returned when there is no backup + ErrNoBackup = errors.New("no available backup") +) + // FileEntry is one file to backup type FileEntry struct { // Base is one of: @@ -52,7 +60,7 @@ type FileEntry struct { Hash string } -func (fe *FileEntry) open(cnf *Mycnf) (*os.File, error) { +func (fe *FileEntry) open(cnf *Mycnf, readOnly bool) (*os.File, error) { // find the root to use var root string switch fe.Base { @@ -68,7 +76,17 @@ func (fe *FileEntry) open(cnf *Mycnf) (*os.File, error) { // and open the file name := path.Join(root, fe.Name) - fd, err := os.Open(name) + var fd *os.File + var err error + if readOnly { + fd, err = os.Open(name) + } else { + dir := path.Dir(name) + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return nil, fmt.Errorf("cannot create destination directory %v: %v", dir, err) + } + fd, err = os.Create(name) + } if err != nil { + return nil, fmt.Errorf("cannot open source file %v: %v", name, err) } @@ -305,7 +323,7 @@ func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bh backupstorage.Backup } // open the source file for reading - source, err := fe.open(mysqld.config) + source, err := fe.open(mysqld.config, true) if err != nil { rec.RecordError(err) return @@ -379,3 +397,162 @@ func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bh backupstorage.Backup return nil } + +// checkNoDB makes sure there is no vt_ db already there. Used by Restore, +// as we do not want to destroy an existing DB.
+func (mysqld *Mysqld) checkNoDB() error { + qr, err := mysqld.fetchSuperQuery("SHOW DATABASES") + if err != nil { + return fmt.Errorf("checkNoDB failed: %v", err) + } + + for _, row := range qr.Rows { + if strings.HasPrefix(row[0].String(), "vt_") { + dbName := row[0].String() + tableQr, err := mysqld.fetchSuperQuery("SHOW TABLES FROM " + dbName) + if err != nil { + return fmt.Errorf("checkNoDB failed: %v", err) + } else if len(tableQr.Rows) == 0 { + // no tables == empty db, all is well + continue + } + return fmt.Errorf("checkNoDB failed, found active db %v", dbName) + } + } + + return nil +} + +// restoreFiles will copy all the files from the BackupStorage to the +// right place +func (mysqld *Mysqld) restoreFiles(bh backupstorage.BackupHandle, fes []FileEntry, restoreConcurrency int) error { + sema := sync2.NewSemaphore(restoreConcurrency, 0) + rec := concurrency.AllErrorRecorder{} + wg := sync.WaitGroup{} + for i, fe := range fes { + wg.Add(1) + go func(i int, fe FileEntry) { + defer wg.Done() + + // wait until we are ready to go, skip if we already + // encountered an error + sema.Acquire() + defer sema.Release() + if rec.HasErrors() { + return + } + + // open the source file for reading + name := fmt.Sprintf("%v", i) + source, err := bh.ReadFile(name) + if err != nil { + rec.RecordError(err) + return + } + defer source.Close() + + // open the destination file for writing + dstFile, err := fe.open(mysqld.config, false) + if err != nil { + rec.RecordError(err) + return + } + defer dstFile.Close() + + // create a buffering output + dst := bufio.NewWriterSize(dstFile, 2*1024*1024) + + // create a hasher to write the compressed data to + hasher := newHasher() + + // create a Tee: we split the input into the hasher + // and into the gunzipper + tee := io.TeeReader(source, hasher) + + // create the uncompressor + gz, err := cgzip.NewReader(tee) + if err != nil { + rec.RecordError(err) + return + } + defer gz.Close() + + // copy the data. Will also write to the hasher + if _, err = io.Copy(dst, gz); err != nil { + rec.RecordError(err) + return + } + + // check the hash + hash := hasher.HashString() + if hash != fe.Hash { + rec.RecordError(fmt.Errorf("hash mismatch for %v, got %v expected %v", fe.Name, hash, fe.Hash)) + return + } + + // flush the buffer + dst.Flush() + }(i, fe) + } + wg.Wait() + return rec.Error() +} + +// Restore is the main entry point for backup restore. If there is no +// appropriate backup on the BackupStorage, Restore logs an error +// and returns ErrNoBackup. Any other error is returned.
+func (mysqld *Mysqld) Restore(bucket string, restoreConcurrency int, hookExtraEnv map[string]string) (proto.ReplicationPosition, error) { + // find the right backup handle: most recent one, with a MANIFEST + log.Infof("Restore: looking for a suitable backup to restore") + bs := backupstorage.GetBackupStorage() + bhs, err := bs.ListBackups(bucket) + if err != nil { + return proto.ReplicationPosition{}, fmt.Errorf("ListBackups failed: %v", err) + } + toRestore := len(bhs) - 1 + var bh backupstorage.BackupHandle + var bm BackupManifest + for toRestore >= 0 { + bh = bhs[toRestore] + if rc, err := bh.ReadFile(backupManifest); err == nil { + dec := json.NewDecoder(rc) + err := dec.Decode(&bm) + rc.Close() + if err != nil { + log.Warningf("Possibly incomplete backup %v in bucket %v on BackupStorage (cannot JSON decode MANIFEST: %v)", bh.Name(), bucket, err) + } else { + log.Infof("Restore: found backup %v %v to restore with %v files", bh.Bucket(), bh.Name(), len(bm.FileEntries)) + break + } + } else { + log.Warningf("Possibly incomplete backup %v in bucket %v on BackupStorage (cannot read MANIFEST)", bh.Name(), bucket) + } + toRestore-- + } + if toRestore < 0 { + log.Errorf("No backup to restore on BackupStorage for bucket %v", bucket) + return proto.ReplicationPosition{}, ErrNoBackup + } + + log.Infof("Restore: checking no existing data is present") + if err := mysqld.checkNoDB(); err != nil { + return proto.ReplicationPosition{}, err + } + + log.Infof("Restore: shutdown mysqld") + if err := mysqld.Shutdown(true, MysqlWaitTime); err != nil { + return proto.ReplicationPosition{}, err + } + + log.Infof("Restore: copying all files") + if err := mysqld.restoreFiles(bh, bm.FileEntries, restoreConcurrency); err != nil { + return proto.ReplicationPosition{}, err + } + + log.Infof("Restore: restart mysqld") + if err := mysqld.Start(MysqlWaitTime); err != nil { + return proto.ReplicationPosition{}, err + } + + return bm.ReplicationPosition, nil +} diff --git a/go/vt/tabletmanager/agent.go b/go/vt/tabletmanager/agent.go index b6de84f9f2..0410a4d7c6 100644 --- a/go/vt/tabletmanager/agent.go +++ b/go/vt/tabletmanager/agent.go @@ -188,8 +188,23 @@ func NewActionAgent( // register the RPC services from the agent agent.registerQueryService() - // start health check if needed - agent.initHeathCheck() + // two cases: + // - restoreFromBackup is set: we restore, then initHealthCheck, all + // in the background + // - restoreFromBackup is not set: we initHealthCheck right away + if *restoreFromBackup { + go func() { + // restoreFromBackup will just be a regular action + // (same as if it was triggered remotely) + agent.restoreFromBackup() + + // after the restore is done, start health check + agent.initHeathCheck() + }() + } else { + // synchronously start health check if needed + agent.initHeathCheck() + } return agent, nil } diff --git a/go/vt/tabletmanager/restore.go b/go/vt/tabletmanager/restore.go new file mode 100644 index 0000000000..539ec4e5d0 --- /dev/null +++ b/go/vt/tabletmanager/restore.go @@ -0,0 +1,85 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tabletmanager + +import ( + "flag" + "fmt" + "log" + + "github.com/youtube/vitess/go/vt/mysqlctl" + myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" + "github.com/youtube/vitess/go/vt/topo" +) + +// This file handles the initial backup restore upon startup. +// It is only enabled if restore_from_backup is set.
+ +var ( + restoreFromBackup = flag.Bool("restore_from_backup", false, "(init restore parameter) will check BackupStorage for a recent backup at startup and start there") + restoreConcurrency = flag.Int("restore_concurrency", 4, "(init restore parameter) how many concurrent files to restore at once") +) + +// restoreFromBackup is the main entry point for backup restore. +// It will either work, fail gracefully and log the error, or log.Fatal +// in case of a non-recoverable error. +// It takes the action lock so no RPC interferes. +func (agent *ActionAgent) restoreFromBackup() { + agent.actionMutex.Lock() + defer agent.actionMutex.Unlock() + + // change type to RESTORE (using UpdateTabletFields so it's + // always authorized) + tablet := agent.Tablet() + originalType := tablet.Type + if err := agent.TopoServer.UpdateTabletFields(tablet.Alias, func(tablet *topo.Tablet) error { + tablet.Type = topo.TYPE_RESTORE + return nil + }); err != nil { + log.Fatalf("Cannot change type to RESTORE: %v", err) + } + + // do the optional restore, if that fails we are in a bad state, + // just log.Fatalf out. + bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) + pos, err := agent.Mysqld.Restore(bucket, *restoreConcurrency, agent.hookExtraEnv()) + if err != nil && err != mysqlctl.ErrNoBackup { + log.Fatalf("Cannot restore original backup: %v", err) + } + + if err == nil { + // now read the shard to find the current master, and its location + si, err := agent.TopoServer.GetShard(tablet.Keyspace, tablet.Shard) + if err != nil { + log.Fatalf("Cannot read shard: %v", err) + } + ti, err := agent.TopoServer.GetTablet(si.MasterAlias) + if err != nil { + log.Fatalf("Cannot read master tablet %v: %v", si.MasterAlias, err) + } + + // set replication straight + status := &myproto.ReplicationStatus{ + Position: pos, + MasterHost: ti.Hostname, + MasterPort: ti.Portmap["mysql"], + } + cmds, err := agent.MysqlDaemon.StartReplicationCommands(status) + if err != nil { + log.Fatalf("MysqlDaemon.StartReplicationCommands failed: %v", err) + } + if err := agent.MysqlDaemon.ExecuteSuperQueryList(cmds); err != nil { + log.Fatalf("MysqlDaemon.ExecuteSuperQueryList failed: %v", err) + } + } + + // change type back to original type + if err := agent.TopoServer.UpdateTabletFields(tablet.Alias, func(tablet *topo.Tablet) error { + tablet.Type = originalType + return nil + }); err != nil { + log.Fatalf("Cannot change type back to %v: %v", originalType, err) + } +} diff --git a/test/backup.py b/test/backup.py index d68c57b0dc..ecfb2be725 100755 --- a/test/backup.py +++ b/test/backup.py @@ -20,6 +20,9 @@ use_mysqlctld = True tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld) tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld) +tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld) + +setup_procs = [] def setUpModule(): try: @@ -30,10 +33,12 @@ def setUpModule(): setup_procs = [ tablet_master.init_mysql(), tablet_replica1.init_mysql(), + tablet_replica2.init_mysql(), ] if use_mysqlctld: tablet_master.wait_for_mysqlctl_socket() tablet_replica1.wait_for_mysqlctl_socket() + tablet_replica2.wait_for_mysqlctl_socket() else: utils.wait_procs(setup_procs) except: @@ -53,6 +58,7 @@ def tearDownModule(): teardown_procs = [ tablet_master.teardown_mysql(), tablet_replica1.teardown_mysql(), + tablet_replica2.teardown_mysql(), ] utils.wait_procs(teardown_procs, raise_on_error=False) @@ -62,13 +68,14 @@ def tearDownModule(): tablet_master.remove_tree() tablet_replica1.remove_tree() + tablet_replica2.remove_tree() class 
TestBackup(unittest.TestCase): def tearDown(self): tablet.Tablet.check_vttablet_count() environment.topo_server().wipe() - for t in [tablet_master, tablet_replica1]: + for t in [tablet_master, tablet_replica1, tablet_replica2]: t.reset_replication() t.clean_dbs() @@ -78,34 +85,63 @@ class TestBackup(unittest.TestCase): primary key (id) ) Engine=InnoDB''' - _populate_vt_insert_test = [ - "insert into vt_insert_test (msg) values ('test %s')" % x - for x in xrange(4)] + def _insert_master(self, index): + tablet_master.mquery('vt_test_keyspace', + "insert into vt_insert_test (msg) values ('test %s')" % index, write=True) def test_backup(self): + """test_backup will: + - create a shard with master and replica1 only + - run InitShardMaster + - insert some data + - take a backup + - insert more data on the master + - bring up tablet_replica2 after the fact, let it restore the backup + - check all data is right (before+after backup data) + """ for t in tablet_master, tablet_replica1: t.create_db('vt_test_keyspace') - tablet_master.init_tablet('master', 'test_keyspace', '0', start=True) + tablet_master.init_tablet('master', 'test_keyspace', '0', start=True, + supports_backups=True) tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True, supports_backups=True) utils.run_vtctl(['InitShardMaster', 'test_keyspace/0', tablet_master.tablet_alias]) # insert data on master, wait for slave to get it - tablet_master.populate('vt_test_keyspace', self._create_vt_insert_test, - self._populate_vt_insert_test) + tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test) + self._insert_master(1) timeout = 10 while True: result = tablet_replica1.mquery('vt_test_keyspace', 'select count(*) from vt_insert_test') - if result[0][0] == 4: + if result[0][0] == 1: break timeout = utils.wait_step('slave tablet getting data', timeout) # backup the slave utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True) - for t in tablet_master, tablet_replica1: + # insert more data on the master + self._insert_master(2) + + # now bring up the other slave, health check on, init_tablet on, restore on + tablet_replica2.start_vttablet(wait_for_state='SERVING', + target_tablet_type='replica', + init_keyspace='test_keyspace', + init_shard='0', + supports_backups=True, + extra_args=['-restore_from_backup']) + + # check the new slave has the data + timeout = 10 + while True: + result = tablet_replica2.mquery('vt_test_keyspace', 'select count(*) from vt_insert_test') + if result[0][0] == 2: + break + timeout = utils.wait_step('new slave tablet getting data', timeout) + + for t in tablet_master, tablet_replica1, tablet_replica2: t.kill_vttablet() if __name__ == '__main__': diff --git a/test/tablet.py b/test/tablet.py index 39812ba65a..4157726d78 100644 --- a/test/tablet.py +++ b/test/tablet.py @@ -487,7 +487,8 @@ class Tablet(object): self.dbname = 'vt_' + init_keyspace if supports_backups: - args.extend(['-backup_storage_implementation', 'file', + args.extend(['-restore_from_backup', + '-backup_storage_implementation', 'file', '-file_backup_storage_root', os.path.join(environment.tmproot, 'backupstorage')]) From 6e9d87f6e76fd656ec28ce17e6048506f04d03bc Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 19 May 2015 11:42:34 -0700 Subject: [PATCH 045/128] Renaming checkInterrupted to checkDone to be more correct. 
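
The helper is a non-blocking poll of the context: it returns ctx.Err()
once the context is done for any reason (cancellation or deadline
expiry), which is why "done" describes it better than "interrupted".
A minimal sketch of the call pattern between worker phases; the
exampleWorker type and its phases are illustrative stand-ins, not the
real worker code:

    // checkDone returns ctx.Err() if the context is done, nil otherwise.
    func checkDone(ctx context.Context) error {
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
            return nil
        }
    }

    type exampleWorker struct{} // illustrative stand-in for a real worker

    func (w *exampleWorker) init() error                    { return nil }
    func (w *exampleWorker) copy(ctx context.Context) error { return nil }

    func (w *exampleWorker) run(ctx context.Context) error {
        if err := w.init(); err != nil {
            return fmt.Errorf("init() failed: %v", err)
        }
        // A phase just completed: stop here if the job was cancelled.
        if err := checkDone(ctx); err != nil {
            return err
        }
        return w.copy(ctx)
    }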
--- go/vt/worker/split_clone.go | 4 ++-- go/vt/worker/split_diff.go | 6 +++--- go/vt/worker/sqldiffer.go | 8 ++++---- go/vt/worker/vertical_split_clone.go | 6 +++--- go/vt/worker/vertical_split_diff.go | 6 +++--- go/vt/worker/worker.go | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index e097686609..144a2759a9 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -189,7 +189,7 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error { if err := scw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -197,7 +197,7 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error { if err := scw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go index 88318efe7b..cffd9b28ff 100644 --- a/go/vt/worker/split_diff.go +++ b/go/vt/worker/split_diff.go @@ -119,7 +119,7 @@ func (sdw *SplitDiffWorker) run(ctx context.Context) error { if err := sdw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -127,7 +127,7 @@ func (sdw *SplitDiffWorker) run(ctx context.Context) error { if err := sdw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -135,7 +135,7 @@ func (sdw *SplitDiffWorker) run(ctx context.Context) error { if err := sdw.synchronizeReplication(ctx); err != nil { return fmt.Errorf("synchronizeReplication() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } diff --git a/go/vt/worker/sqldiffer.go b/go/vt/worker/sqldiffer.go index b83ec30503..e2fab4cb6c 100644 --- a/go/vt/worker/sqldiffer.go +++ b/go/vt/worker/sqldiffer.go @@ -117,7 +117,7 @@ func (worker *SQLDiffWorker) run(ctx context.Context) error { if err := worker.findTargets(ctx); err != nil { return err } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -125,7 +125,7 @@ func (worker *SQLDiffWorker) run(ctx context.Context) error { if err := worker.synchronizeReplication(ctx); err != nil { return err } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -180,7 +180,7 @@ func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { if err != nil { return fmt.Errorf("Cannot stop slave %v: %v", worker.subset.alias, err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -195,7 +195,7 @@ func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { // sleep for a few seconds time.Sleep(5 * time.Second) - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index 471e371bbb..2eba8436d5 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -176,7 +176,7 @@ func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) 
error { if err := vscw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -184,7 +184,7 @@ func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { if err := vscw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -192,7 +192,7 @@ func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { if err := vscw.copy(ctx); err != nil { return fmt.Errorf("copy() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index 76b842cef5..72999fc7d4 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -119,7 +119,7 @@ func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { if err := vsdw.init(); err != nil { return fmt.Errorf("init() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -127,7 +127,7 @@ func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { if err := vsdw.findTargets(ctx); err != nil { return fmt.Errorf("findTargets() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } @@ -135,7 +135,7 @@ func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { if err := vsdw.synchronizeReplication(ctx); err != nil { return fmt.Errorf("synchronizeReplication() failed: %v", err) } - if err := checkInterrupted(ctx); err != nil { + if err := checkDone(ctx); err != nil { return err } diff --git a/go/vt/worker/worker.go b/go/vt/worker/worker.go index 3bc675cae2..b3909e47f0 100644 --- a/go/vt/worker/worker.go +++ b/go/vt/worker/worker.go @@ -66,8 +66,8 @@ func resetVars() { statsRetryCounters.Reset() } -// checkInterrupted returns ctx.Err() iff ctx.Done() -func checkInterrupted(ctx context.Context) error { +// checkDone returns ctx.Err() iff ctx.Done() +func checkDone(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() From b53f7e508def88a9acf0cde705917542f87ec524 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 19 May 2015 11:50:14 -0700 Subject: [PATCH 046/128] Adding fast fail when one routine reports an error. 
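
The copy phase already records the first error from any goroutine under
a mutex; the new shouldStop closure lets every other goroutine poll that
flag and return before starting expensive work, so one failure stops the
whole copy quickly. A minimal, self-contained sketch of the pattern
using only the standard sync package (copyAll and the chunk functions
are illustrative, not the real worker code):

    func copyAll(chunks []func() error) error {
        var (
            mu         sync.Mutex
            firstError error
        )
        recordError := func(err error) {
            mu.Lock()
            if firstError == nil {
                firstError = err
            }
            mu.Unlock()
        }
        shouldStop := func() bool {
            mu.Lock()
            defer mu.Unlock()
            return firstError != nil
        }
        wg := sync.WaitGroup{}
        for _, chunk := range chunks {
            wg.Add(1)
            go func(chunk func() error) {
                defer wg.Done()
                // Fast fail: skip the work if another routine already failed.
                if shouldStop() {
                    return
                }
                if err := chunk(); err != nil {
                    recordError(err)
                }
            }(chunk)
        }
        wg.Wait()
        return firstError
    }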
--- go/vt/worker/split_clone.go | 9 +++++++++ go/vt/worker/vertical_split_clone.go | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index 144a2759a9..9ca90e4333 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -438,6 +438,11 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { } mu.Unlock() } + shouldStop := func() bool { + mu.Lock() + defer mu.Unlock() + return firstError != nil + } insertChannels := make([]chan string, len(scw.destinationShards)) destinationWaitGroup := sync.WaitGroup{} @@ -488,6 +493,10 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { sema.Acquire() defer sema.Release() + if shouldStop() { + return + } + scw.tableStatus[tableIndex].threadStarted() // build the query, and start the streaming diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index 2eba8436d5..13ac4dda17 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -377,6 +377,11 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { } mu.Unlock() } + shouldStop := func() bool { + mu.Lock() + defer mu.Unlock() + return firstError != nil + } destinationWaitGroup := sync.WaitGroup{} @@ -422,6 +427,10 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { sema.Acquire() defer sema.Release() + if shouldStop() { + return + } + vscw.tableStatus[tableIndex].threadStarted() // build the query, and start the streaming From fec083cfcedef0c38c2c5b42019b7b703e3f1196 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 19 May 2015 13:50:41 -0700 Subject: [PATCH 047/128] Add vtctl ListBackups and RemoveBackup commands, and tests. --- go/cmd/vtctl/plugin_filebackupstorage.go | 13 +++++ go/cmd/vtctl/vtctl.go | 61 ++++---------------- go/vt/vtctl/backup.go | 72 ++++++++++++++++++++++++ test/backup.py | 26 ++++++++- test/tablet.py | 9 +-- 5 files changed, 124 insertions(+), 57 deletions(-) create mode 100644 go/cmd/vtctl/plugin_filebackupstorage.go create mode 100644 go/vt/vtctl/backup.go diff --git a/go/cmd/vtctl/plugin_filebackupstorage.go b/go/cmd/vtctl/plugin_filebackupstorage.go new file mode 100644 index 0000000000..fedb3f853f --- /dev/null +++ b/go/cmd/vtctl/plugin_filebackupstorage.go @@ -0,0 +1,13 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" + +func init() { + initFuncs = append(initFuncs, func() { + backupstorage.RegisterFileBackupStorage() + }) +} diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go index de150fe4cb..ce0e7c79e1 100644 --- a/go/cmd/vtctl/vtctl.go +++ b/go/cmd/vtctl/vtctl.go @@ -10,7 +10,6 @@ import ( "log/syslog" "os" "os/signal" - "sort" "strings" "syscall" "time" @@ -18,7 +17,6 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/exit" "github.com/youtube/vitess/go/vt/logutil" - myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtctl" @@ -54,6 +52,12 @@ func installSignalHandlers(cancel func()) { }() } +// hooks to register plug-ins after flag init + +type initFunc func() + +var initFuncs []initFunc + func main() { defer exit.RecoverAll() defer logutil.Flush() @@ -81,6 +85,10 @@ func main() { wr := wrangler.New(logutil.NewConsoleLogger(), topoServer, tmclient.NewTabletManagerClient(), *lockWaitTimeout) installSignalHandlers(cancel) + for _, f := range initFuncs { + f() + } + err := vtctl.RunCommand(ctx, wr, args) cancel() switch err { @@ -94,52 +102,3 @@ func main() { exit.Return(255) } } - -type rTablet struct { - *topo.TabletInfo - *myproto.ReplicationStatus -} - -type rTablets []*rTablet - -func (rts rTablets) Len() int { return len(rts) } - -func (rts rTablets) Swap(i, j int) { rts[i], rts[j] = rts[j], rts[i] } - -// Sort for tablet replication. -// master first, then i/o position, then sql position -func (rts rTablets) Less(i, j int) bool { - // NOTE: Swap order of unpack to reverse sort - l, r := rts[j], rts[i] - // l or r ReplicationPosition would be nil if we failed to get - // the position (put them at the beginning of the list) - if l.ReplicationStatus == nil { - return r.ReplicationStatus != nil - } - if r.ReplicationStatus == nil { - return false - } - var lTypeMaster, rTypeMaster int - if l.Type == topo.TYPE_MASTER { - lTypeMaster = 1 - } - if r.Type == topo.TYPE_MASTER { - rTypeMaster = 1 - } - if lTypeMaster < rTypeMaster { - return true - } - if lTypeMaster == rTypeMaster { - return !l.Position.AtLeast(r.Position) - } - return false -} - -func sortReplicatingTablets(tablets []*topo.TabletInfo, stats []*myproto.ReplicationStatus) []*rTablet { - rtablets := make([]*rTablet, len(tablets)) - for i, status := range stats { - rtablets[i] = &rTablet{tablets[i], status} - } - sort.Sort(rTablets(rtablets)) - return rtablets -} diff --git a/go/vt/vtctl/backup.go b/go/vt/vtctl/backup.go new file mode 100644 index 0000000000..a3a1bc7771 --- /dev/null +++ b/go/vt/vtctl/backup.go @@ -0,0 +1,72 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package vtctl
+
+import (
+    "flag"
+    "fmt"
+
+    "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage"
+    "github.com/youtube/vitess/go/vt/topo"
+    "github.com/youtube/vitess/go/vt/wrangler"
+    "golang.org/x/net/context"
+)
+
+func init() {
+    addCommand("Shards", command{
+        "ListBackups",
+        commandListBackups,
+        "<keyspace/shard>",
+        "Lists all the backups for a shard."})
+    addCommand("Shards", command{
+        "RemoveBackup",
+        commandRemoveBackup,
+        "<keyspace/shard> <backup name>",
+        "Removes a backup for the BackupStorage."})
+}
+
+func commandListBackups(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
+    if err := subFlags.Parse(args); err != nil {
+        return err
+    }
+    if subFlags.NArg() != 1 {
+        return fmt.Errorf("action ListBackups requires <keyspace/shard>")
+    }
+
+    keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0))
+    if err != nil {
+        return err
+    }
+    bucket := fmt.Sprintf("%v/%v", keyspace, shard)
+
+    bs := backupstorage.GetBackupStorage()
+    bhs, err := bs.ListBackups(bucket)
+    if err != nil {
+        return err
+    }
+    for _, bh := range bhs {
+        wr.Logger().Printf("%v\n", bh.Name())
+    }
+    return nil
+}
+
+func commandRemoveBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
+    if err := subFlags.Parse(args); err != nil {
+        return err
+    }
+    if subFlags.NArg() != 2 {
+        return fmt.Errorf("action RemoveBackup requires <keyspace/shard> <backup name>")
+    }
+
+    keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0))
+    if err != nil {
+        return err
+    }
+    bucket := fmt.Sprintf("%v/%v", keyspace, shard)
+    name := subFlags.Arg(1)
+
+    bs := backupstorage.GetBackupStorage()
+    return bs.RemoveBackup(bucket, name)
+}
diff --git a/test/backup.py b/test/backup.py
index ecfb2be725..056d682281 100755
--- a/test/backup.py
+++ b/test/backup.py
@@ -98,6 +98,7 @@ class TestBackup(unittest.TestCase):
     - insert more data on the master
     - bring up tablet_replica2 after the fact, let it restore the backup
     - check all data is right (before+after backup data)
+    - list the backup, remove it
     """
     for t in tablet_master, tablet_replica1:
       t.create_db('vt_test_keyspace')
@@ -130,8 +131,7 @@ class TestBackup(unittest.TestCase):
                                    target_tablet_type='replica',
                                    init_keyspace='test_keyspace',
                                    init_shard='0',
-                                   supports_backups=True,
-                                   extra_args=['-restore_from_backup'])
+                                   supports_backups=True)
 
     # check the new slave has the data
     timeout = 10
@@ -141,6 +141,28 @@ class TestBackup(unittest.TestCase):
         break
       timeout = utils.wait_step('new slave tablet getting data', timeout)
 
+    # list the backups
+    backups, err = utils.run_vtctl(tablet.get_backup_storage_flags() +
+                                   ['ListBackups', 'test_keyspace/0'],
+                                   mode=utils.VTCTL_VTCTL, trap_output=True)
+    backups = backups.splitlines()
+    logging.debug("list of backups: %s", backups)
+    self.assertEqual(len(backups), 1)
+    self.assertTrue(backups[0].startswith(tablet_replica1.tablet_alias))
+
+    # remove the backup
+    utils.run_vtctl(tablet.get_backup_storage_flags() +
+                    ['RemoveBackup', 'test_keyspace/0', backups[0]],
+                    auto_log=True, mode=utils.VTCTL_VTCTL)
+
+    # make sure the list of backups is empty now
+    backups, err = utils.run_vtctl(tablet.get_backup_storage_flags() +
+                                   ['ListBackups', 'test_keyspace/0'],
+                                   mode=utils.VTCTL_VTCTL, trap_output=True)
+    backups = backups.splitlines()
+    logging.debug("list of backups after remove: %s", backups)
+    self.assertEqual(len(backups), 0)
+
     for t in tablet_master, tablet_replica1, tablet_replica2:
       t.kill_vttablet()
 
 if __name__ == '__main__':
diff --git a/test/tablet.py b/test/tablet.py
index 4157726d78..4cb2621062 100644
--- a/test/tablet.py
+++ 
b/test/tablet.py @@ -26,6 +26,10 @@ tablet_cell_map = { 31981: 'ny', } +def get_backup_storage_flags(): + return ['-backup_storage_implementation', 'file', + '-file_backup_storage_root', + os.path.join(environment.tmproot, 'backupstorage')] def get_all_extra_my_cnf(extra_my_cnf): all_extra_my_cnf = [environment.vttop + "/config/mycnf/default-fast.cnf"] @@ -487,10 +491,7 @@ class Tablet(object): self.dbname = 'vt_' + init_keyspace if supports_backups: - args.extend(['-restore_from_backup', - '-backup_storage_implementation', 'file', - '-file_backup_storage_root', - os.path.join(environment.tmproot, 'backupstorage')]) + args.extend(['-restore_from_backup'] + get_backup_storage_flags()) if extra_args: args.extend(extra_args) From d5c0a692bae8cda8f1228d80ba6408e7ac33ac2c Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 19 May 2015 14:31:54 -0700 Subject: [PATCH 048/128] Making restore code only use MysqlDaemon. (for easier soon-to-be unit testing) --- go/vt/mysqlctl/backup.go | 20 ++++---- go/vt/mysqlctl/clone.go | 4 +- go/vt/mysqlctl/mysql_daemon.go | 67 ++++++++++++++++++++++++- go/vt/mysqlctl/mysql_flavor.go | 2 +- go/vt/mysqlctl/mysql_flavor_mariadb.go | 4 +- go/vt/mysqlctl/mysql_flavor_mysql56.go | 4 +- go/vt/mysqlctl/permissions.go | 7 +-- go/vt/mysqlctl/query.go | 6 +-- go/vt/mysqlctl/reparent.go | 2 +- go/vt/mysqlctl/replication.go | 8 +-- go/vt/mysqlctl/schema.go | 6 +-- go/vt/tabletmanager/init_tablet_test.go | 2 +- go/vt/tabletmanager/restore.go | 2 +- go/vt/wrangler/testlib/fake_tablet.go | 2 +- 14 files changed, 101 insertions(+), 35 deletions(-) diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 2e03a5c716..08e37d0022 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -266,7 +266,7 @@ func (mysqld *Mysqld) backup(logger logutil.Logger, bh backupstorage.BackupHandl } // get the files to backup - fes, err := findFilesTobackup(mysqld.config, logger) + fes, err := findFilesTobackup(mysqld.Cnf(), logger) if err != nil { return fmt.Errorf("cannot find files to backup: %v", err) } @@ -323,7 +323,7 @@ func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bh backupstorage.Backup } // open the source file for reading - source, err := fe.open(mysqld.config, true) + source, err := fe.open(mysqld.Cnf(), true) if err != nil { rec.RecordError(err) return @@ -400,8 +400,8 @@ func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bh backupstorage.Backup // checkNoDB makes sure there is no vt_ db already there. Used by Restore, // we do not wnat to destroy an existing DB. 
-func (mysqld *Mysqld) checkNoDB() error { - qr, err := mysqld.fetchSuperQuery("SHOW DATABASES") +func checkNoDB(mysqld MysqlDaemon) error { + qr, err := mysqld.FetchSuperQuery("SHOW DATABASES") if err != nil { return fmt.Errorf("checkNoDB failed: %v", err) } @@ -409,7 +409,7 @@ func (mysqld *Mysqld) checkNoDB() error { for _, row := range qr.Rows { if strings.HasPrefix(row[0].String(), "vt_") { dbName := row[0].String() - tableQr, err := mysqld.fetchSuperQuery("SHOW TABLES FROM " + dbName) + tableQr, err := mysqld.FetchSuperQuery("SHOW TABLES FROM " + dbName) if err != nil { return fmt.Errorf("checkNoDB failed: %v", err) } else if len(tableQr.Rows) == 0 { @@ -425,7 +425,7 @@ func (mysqld *Mysqld) checkNoDB() error { // restoreFiles will copy all the files from the BackupStorage to the // right place -func (mysqld *Mysqld) restoreFiles(bh backupstorage.BackupHandle, fes []FileEntry, restoreConcurrency int) error { +func restoreFiles(cnf *Mycnf, bh backupstorage.BackupHandle, fes []FileEntry, restoreConcurrency int) error { sema := sync2.NewSemaphore(restoreConcurrency, 0) rec := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} @@ -452,7 +452,7 @@ func (mysqld *Mysqld) restoreFiles(bh backupstorage.BackupHandle, fes []FileEntr defer source.Close() // open the destination file for writing - dstFile, err := fe.open(mysqld.config, false) + dstFile, err := fe.open(cnf, false) if err != nil { rec.RecordError(err) return @@ -501,7 +501,7 @@ func (mysqld *Mysqld) restoreFiles(bh backupstorage.BackupHandle, fes []FileEntr // Restore is the main entry point for backup restore. If there is no // appropriate backup on the BackupStorage, Restore logs an error // and returns ErrNoBackup. Any other error is returned. -func (mysqld *Mysqld) Restore(bucket string, restoreConcurrency int, hookExtraEnv map[string]string) (proto.ReplicationPosition, error) { +func Restore(mysqld MysqlDaemon, bucket string, restoreConcurrency int, hookExtraEnv map[string]string) (proto.ReplicationPosition, error) { // find the right backup handle: most recent one, with a MANIFEST log.Infof("Restore: looking for a suitable backup to restore") bs := backupstorage.GetBackupStorage() @@ -535,7 +535,7 @@ func (mysqld *Mysqld) Restore(bucket string, restoreConcurrency int, hookExtraEn } log.Infof("Restore: checking no existing data is present") - if err := mysqld.checkNoDB(); err != nil { + if err := checkNoDB(mysqld); err != nil { return proto.ReplicationPosition{}, err } @@ -545,7 +545,7 @@ func (mysqld *Mysqld) Restore(bucket string, restoreConcurrency int, hookExtraEn } log.Infof("Restore: copying all files") - if err := mysqld.restoreFiles(bh, bm.FileEntries, restoreConcurrency); err != nil { + if err := restoreFiles(mysqld.Cnf(), bh, bm.FileEntries, restoreConcurrency); err != nil { return proto.ReplicationPosition{}, err } diff --git a/go/vt/mysqlctl/clone.go b/go/vt/mysqlctl/clone.go index 4aff118eba..cff85a399b 100644 --- a/go/vt/mysqlctl/clone.go +++ b/go/vt/mysqlctl/clone.go @@ -92,7 +92,7 @@ func (mysqld *Mysqld) ValidateCloneTarget(hookExtraEnv map[string]string) error return err } - qr, err := mysqld.fetchSuperQuery("SHOW DATABASES") + qr, err := mysqld.FetchSuperQuery("SHOW DATABASES") if err != nil { return fmt.Errorf("mysqlctl: ValidateCloneTarget failed, %v", err) } @@ -100,7 +100,7 @@ func (mysqld *Mysqld) ValidateCloneTarget(hookExtraEnv map[string]string) error for _, row := range qr.Rows { if strings.HasPrefix(row[0].String(), "vt_") { dbName := row[0].String() - tableQr, err := 
mysqld.fetchSuperQuery("SHOW TABLES FROM " + dbName) + tableQr, err := mysqld.FetchSuperQuery("SHOW TABLES FROM " + dbName) if err != nil { return fmt.Errorf("mysqlctl: ValidateCloneTarget failed, %v", err) } else if len(tableQr.Rows) == 0 { diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index e365b69e0c..b07613f0fb 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -10,6 +10,7 @@ import ( "strings" "time" + mproto "github.com/youtube/vitess/go/mysql/proto" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/dbconnpool" @@ -19,6 +20,13 @@ import ( // MysqlDaemon is the interface we use for abstracting Mysqld. type MysqlDaemon interface { + // Cnf returns the underlying mycnf + Cnf() *Mycnf + + // methods related to mysql running or not + Start(mysqlWaitTime time.Duration) error + Shutdown(waitForMysqld bool, mysqlWaitTime time.Duration) error + // GetMasterAddr returns the mysql master address, as shown by // 'show slave status'. GetMasterAddr() (string, error) @@ -55,13 +63,23 @@ type MysqlDaemon interface { GetAppConnection() (dbconnpool.PoolConnection, error) // GetDbaConnection returns a dba connection. GetDbaConnection() (*dbconnpool.DBConnection, error) - // query execution methods + + // ExecuteSuperQueryList executes a list of queries, no result ExecuteSuperQueryList(queryList []string) error + + // FetchSuperQuery executes one query, returns the result + FetchSuperQuery(query string) (*mproto.QueryResult, error) } // FakeMysqlDaemon implements MysqlDaemon and allows the user to fake // everything. type FakeMysqlDaemon struct { + // Mycnf will be returned by Cnf() + Mycnf *Mycnf + + // Running is used by Start / Shutdown + Running bool + // MasterAddr will be returned by GetMasterAddr(). Set to "" to return // ErrNotSlave, or to "ERROR" to return an error. 
MasterAddr string @@ -143,6 +161,40 @@ type FakeMysqlDaemon struct { // ExpectedExecuteSuperQueryCurrent is the current index of the queries // we expect ExpectedExecuteSuperQueryCurrent int + + // FetchSuperQueryResults is used by FetchSuperQuery + FetchSuperQueryMap map[string]*mproto.QueryResult +} + +// NewFakeMysqlDaemon returns a FakeMysqlDaemon where mysqld appears +// to be running +func NewFakeMysqlDaemon() *FakeMysqlDaemon { + return &FakeMysqlDaemon{ + Running: true, + } +} + +// Cnf is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) Cnf() *Mycnf { + return fmd.Mycnf +} + +// Start is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) Start(mysqlWaitTime time.Duration) error { + if fmd.Running { + return fmt.Errorf("fake mysql daemon already running") + } + fmd.Running = true + return nil +} + +// Shutdown is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) Shutdown(waitForMysqld bool, mysqlWaitTime time.Duration) error { + if !fmd.Running { + return fmt.Errorf("fake mysql daemon not running") + } + fmd.Running = false + return nil } // GetMasterAddr is part of the MysqlDaemon interface @@ -266,6 +318,19 @@ func (fmd *FakeMysqlDaemon) ExecuteSuperQueryList(queryList []string) error { return nil } +// FetchSuperQuery returns the results from the map, if any +func (fmd *FakeMysqlDaemon) FetchSuperQuery(query string) (*mproto.QueryResult, error) { + if fmd.FetchSuperQueryMap == nil { + return nil, fmt.Errorf("unexpected query: %v", query) + } + + qr, ok := fmd.FetchSuperQueryMap[query] + if !ok { + return nil, fmt.Errorf("unexpected query: %v", query) + } + return qr, nil +} + // CheckSuperQueryList returns an error if all the queries we expected // haven't been seen. func (fmd *FakeMysqlDaemon) CheckSuperQueryList() error { diff --git a/go/vt/mysqlctl/mysql_flavor.go b/go/vt/mysqlctl/mysql_flavor.go index e7f7248822..5c47d15adf 100644 --- a/go/vt/mysqlctl/mysql_flavor.go +++ b/go/vt/mysqlctl/mysql_flavor.go @@ -120,7 +120,7 @@ func (mysqld *Mysqld) detectFlavor() (MysqlFlavor, error) { // If no environment variable set, fall back to auto-detect. log.Infof("MYSQL_FLAVOR empty or unset, attempting to auto-detect...") - qr, err := mysqld.fetchSuperQuery("SELECT VERSION()") + qr, err := mysqld.FetchSuperQuery("SELECT VERSION()") if err != nil { return nil, fmt.Errorf("couldn't SELECT VERSION(): %v", err) } diff --git a/go/vt/mysqlctl/mysql_flavor_mariadb.go b/go/vt/mysqlctl/mysql_flavor_mariadb.go index f54b6f4225..013b5f1a41 100644 --- a/go/vt/mysqlctl/mysql_flavor_mariadb.go +++ b/go/vt/mysqlctl/mysql_flavor_mariadb.go @@ -29,7 +29,7 @@ func (*mariaDB10) VersionMatch(version string) bool { // MasterPosition implements MysqlFlavor.MasterPosition(). 
func (flavor *mariaDB10) MasterPosition(mysqld *Mysqld) (rp proto.ReplicationPosition, err error) { - qr, err := mysqld.fetchSuperQuery("SELECT @@GLOBAL.gtid_binlog_pos") + qr, err := mysqld.FetchSuperQuery("SELECT @@GLOBAL.gtid_binlog_pos") if err != nil { return rp, err } @@ -69,7 +69,7 @@ func (*mariaDB10) WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPosit } log.Infof("Waiting for minimum replication position with query: %v", query) - qr, err := mysqld.fetchSuperQuery(query) + qr, err := mysqld.FetchSuperQuery(query) if err != nil { return fmt.Errorf("MASTER_GTID_WAIT() failed: %v", err) } diff --git a/go/vt/mysqlctl/mysql_flavor_mysql56.go b/go/vt/mysqlctl/mysql_flavor_mysql56.go index b2e2df579f..780cd2d7bc 100644 --- a/go/vt/mysqlctl/mysql_flavor_mysql56.go +++ b/go/vt/mysqlctl/mysql_flavor_mysql56.go @@ -30,7 +30,7 @@ func (*mysql56) VersionMatch(version string) bool { // MasterPosition implements MysqlFlavor.MasterPosition(). func (flavor *mysql56) MasterPosition(mysqld *Mysqld) (rp proto.ReplicationPosition, err error) { - qr, err := mysqld.fetchSuperQuery("SELECT @@GLOBAL.gtid_executed") + qr, err := mysqld.FetchSuperQuery("SELECT @@GLOBAL.gtid_executed") if err != nil { return rp, err } @@ -62,7 +62,7 @@ func (*mysql56) WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPositio query = fmt.Sprintf("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s', %v)", targetPos, int(waitTimeout.Seconds())) log.Infof("Waiting for minimum replication position with query: %v", query) - qr, err := mysqld.fetchSuperQuery(query) + qr, err := mysqld.FetchSuperQuery(query) if err != nil { return fmt.Errorf("WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS() failed: %v", err) } diff --git a/go/vt/mysqlctl/permissions.go b/go/vt/mysqlctl/permissions.go index 2f507663ff..1467b361d2 100644 --- a/go/vt/mysqlctl/permissions.go +++ b/go/vt/mysqlctl/permissions.go @@ -8,11 +8,12 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl/proto" ) +// GetPermissions lists the permissions on the mysqld func (mysqld *Mysqld) GetPermissions() (*proto.Permissions, error) { permissions := &proto.Permissions{} // get Users - qr, err := mysqld.fetchSuperQuery("SELECT * FROM mysql.user") + qr, err := mysqld.FetchSuperQuery("SELECT * FROM mysql.user") if err != nil { return nil, err } @@ -21,7 +22,7 @@ func (mysqld *Mysqld) GetPermissions() (*proto.Permissions, error) { } // get Dbs - qr, err = mysqld.fetchSuperQuery("SELECT * FROM mysql.db") + qr, err = mysqld.FetchSuperQuery("SELECT * FROM mysql.db") if err != nil { return nil, err } @@ -30,7 +31,7 @@ func (mysqld *Mysqld) GetPermissions() (*proto.Permissions, error) { } // get Hosts - qr, err = mysqld.fetchSuperQuery("SELECT * FROM mysql.host") + qr, err = mysqld.FetchSuperQuery("SELECT * FROM mysql.host") if err != nil { return nil, err } diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go index ef12065f7b..21e21f6fd9 100644 --- a/go/vt/mysqlctl/query.go +++ b/go/vt/mysqlctl/query.go @@ -33,8 +33,8 @@ func (mysqld *Mysqld) ExecuteSuperQueryList(queryList []string) error { return nil } -// fetchSuperQuery returns the results of executing a query as a super user. -func (mysqld *Mysqld) fetchSuperQuery(query string) (*mproto.QueryResult, error) { +// FetchSuperQuery returns the results of executing a query as a super user. 
+func (mysqld *Mysqld) FetchSuperQuery(query string) (*mproto.QueryResult, error) { conn, connErr := mysqld.dbaPool.Get(0) if connErr != nil { return nil, connErr @@ -51,7 +51,7 @@ func (mysqld *Mysqld) fetchSuperQuery(query string) (*mproto.QueryResult, error) // fetchSuperQueryMap returns a map from column names to cell data for a query // that should return exactly 1 row. func (mysqld *Mysqld) fetchSuperQueryMap(query string) (map[string]string, error) { - qr, err := mysqld.fetchSuperQuery(query) + qr, err := mysqld.FetchSuperQuery(query) if err != nil { return nil, err } diff --git a/go/vt/mysqlctl/reparent.go b/go/vt/mysqlctl/reparent.go index 4735f6563d..268d6b3c22 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -52,7 +52,7 @@ func queryReparentJournal(timeCreatedNS int64) string { // the row in the reparent_journal table. func (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error { for { - qr, err := mysqld.fetchSuperQuery(queryReparentJournal(timeCreatedNS)) + qr, err := mysqld.FetchSuperQuery(queryReparentJournal(timeCreatedNS)) if err == nil && len(qr.Rows) == 1 { // we have the row, we're done return nil diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 9a25a4e0bd..b553b54251 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -150,7 +150,7 @@ func (mysqld *Mysqld) GetMasterAddr() (string, error) { // GetMysqlPort returns mysql port func (mysqld *Mysqld) GetMysqlPort() (int, error) { - qr, err := mysqld.fetchSuperQuery("SHOW VARIABLES LIKE 'port'") + qr, err := mysqld.FetchSuperQuery("SHOW VARIABLES LIKE 'port'") if err != nil { return 0, err } @@ -166,7 +166,7 @@ func (mysqld *Mysqld) GetMysqlPort() (int, error) { // IsReadOnly return true if the instance is read only func (mysqld *Mysqld) IsReadOnly() (bool, error) { - qr, err := mysqld.fetchSuperQuery("SHOW VARIABLES LIKE 'read_only'") + qr, err := mysqld.FetchSuperQuery("SHOW VARIABLES LIKE 'read_only'") if err != nil { return true, err } @@ -325,7 +325,7 @@ const ( // FindSlaves gets IP addresses for all currently connected slaves. func (mysqld *Mysqld) FindSlaves() ([]string, error) { - qr, err := mysqld.fetchSuperQuery("SHOW PROCESSLIST") + qr, err := mysqld.FetchSuperQuery("SHOW PROCESSLIST") if err != nil { return nil, err } @@ -370,7 +370,7 @@ func (mysqld *Mysqld) WaitBlpPosition(bp *blproto.BlpPosition, waitTimeout time. 
} cmd := binlogplayer.QueryBlpCheckpoint(bp.Uid) - qr, err := mysqld.fetchSuperQuery(cmd) + qr, err := mysqld.FetchSuperQuery(cmd) if err != nil { return err } diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index b2f2bea988..9b36749315 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -21,7 +21,7 @@ func (mysqld *Mysqld) GetSchema(dbName string, tables, excludeTables []string, i sd := &proto.SchemaDefinition{} // get the database creation command - qr, fetchErr := mysqld.fetchSuperQuery("SHOW CREATE DATABASE " + dbName) + qr, fetchErr := mysqld.FetchSuperQuery("SHOW CREATE DATABASE " + dbName) if fetchErr != nil { return nil, fetchErr } @@ -35,7 +35,7 @@ func (mysqld *Mysqld) GetSchema(dbName string, tables, excludeTables []string, i if !includeViews { sql += " AND table_type = '" + proto.TableBaseTable + "'" } - qr, err := mysqld.fetchSuperQuery(sql) + qr, err := mysqld.FetchSuperQuery(sql) if err != nil { return nil, err } @@ -67,7 +67,7 @@ func (mysqld *Mysqld) GetSchema(dbName string, tables, excludeTables []string, i } } - qr, fetchErr := mysqld.fetchSuperQuery("SHOW CREATE TABLE " + dbName + "." + tableName) + qr, fetchErr := mysqld.FetchSuperQuery("SHOW CREATE TABLE " + dbName + "." + tableName) if fetchErr != nil { return nil, fetchErr } diff --git a/go/vt/tabletmanager/init_tablet_test.go b/go/vt/tabletmanager/init_tablet_test.go index e08525ffe9..402b712360 100644 --- a/go/vt/tabletmanager/init_tablet_test.go +++ b/go/vt/tabletmanager/init_tablet_test.go @@ -31,7 +31,7 @@ func TestInitTablet(t *testing.T) { // start with idle, and a tablet record that doesn't exist port := 1234 securePort := 2345 - mysqlDaemon := &mysqlctl.FakeMysqlDaemon{} + mysqlDaemon := mysqlctl.NewFakeMysqlDaemon() agent := &ActionAgent{ TopoServer: ts, TabletAlias: tabletAlias, diff --git a/go/vt/tabletmanager/restore.go b/go/vt/tabletmanager/restore.go index 539ec4e5d0..1038a6b193 100644 --- a/go/vt/tabletmanager/restore.go +++ b/go/vt/tabletmanager/restore.go @@ -44,7 +44,7 @@ func (agent *ActionAgent) restoreFromBackup() { // do the optional restore, if that fails we are in a bad state, // just log.Fatalf out. bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) - pos, err := agent.Mysqld.Restore(bucket, *restoreConcurrency, agent.hookExtraEnv()) + pos, err := mysqlctl.Restore(agent.Mysqld, bucket, *restoreConcurrency, agent.hookExtraEnv()) if err != nil && err != mysqlctl.ErrNoBackup { log.Fatalf("Cannot restore original backup: %v", err) } diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index fb997f4f0c..63ed1a1834 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -110,7 +110,7 @@ func NewFakeTablet(t *testing.T, wr *wrangler.Wrangler, cell string, uid uint32, } // create a FakeMysqlDaemon with the right information by default - fakeMysqlDaemon := &mysqlctl.FakeMysqlDaemon{} + fakeMysqlDaemon := mysqlctl.NewFakeMysqlDaemon() if ok { fakeMysqlDaemon.MasterAddr = fmt.Sprintf("%v.0.0.1:%v", 100+puid, 3300+puid) } From b047c8c87fe333a063ed15f66ca8a4eb34e7b069 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 19 May 2015 14:45:12 -0700 Subject: [PATCH 049/128] Make mysqlctl.Backup work on MysqlDaemon. (unit tests to follow). 
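
Same pattern as the Restore change: Backup and its helpers become
package functions taking the MysqlDaemon interface, so the upcoming
unit tests can drive them with FakeMysqlDaemon instead of a real
mysqld. For instance, inside package mysqlctl a test (t *testing.T)
can now exercise checkNoDB without a server. A sketch, assuming the
fake's query map is pre-loaded; not the real test code:

    fmd := NewFakeMysqlDaemon()
    fmd.FetchSuperQueryMap = map[string]*mproto.QueryResult{
        // An empty result for SHOW DATABASES means no vt_ db exists.
        "SHOW DATABASES": {},
    }
    if err := checkNoDB(fmd); err != nil {
        t.Errorf("checkNoDB: %v", err)
    }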
--- go/vt/mysqlctl/backup.go | 12 ++++++------ go/vt/mysqlctl/clone.go | 4 ++-- go/vt/mysqlctl/mysql_daemon.go | 6 ++++++ go/vt/mysqlctl/replication.go | 5 +++-- go/vt/tabletmanager/agent_rpc_actions.go | 2 +- 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 08e37d0022..2404f5bba9 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -188,7 +188,7 @@ func findFilesTobackup(cnf *Mycnf, logger logutil.Logger) ([]FileEntry, error) { // - uses the BackupStorage service to store a new backup // - shuts down Mysqld during the backup // - remember if we were replicating, restore the exact same state -func (mysqld *Mysqld) Backup(logger logutil.Logger, bucket, name string, backupConcurrency int, hookExtraEnv map[string]string) error { +func Backup(mysqld MysqlDaemon, logger logutil.Logger, bucket, name string, backupConcurrency int, hookExtraEnv map[string]string) error { // start the backup with the BackupStorage bs := backupstorage.GetBackupStorage() @@ -197,7 +197,7 @@ func (mysqld *Mysqld) Backup(logger logutil.Logger, bucket, name string, backupC return fmt.Errorf("StartBackup failed: %v", err) } - if err = mysqld.backup(logger, bh, backupConcurrency, hookExtraEnv); err != nil { + if err = backup(mysqld, logger, bh, backupConcurrency, hookExtraEnv); err != nil { if err := bh.AbortBackup(); err != nil { logger.Errorf("failed to abort backup: %v", err) } @@ -208,7 +208,7 @@ func (mysqld *Mysqld) Backup(logger logutil.Logger, bucket, name string, backupC return err } -func (mysqld *Mysqld) backup(logger logutil.Logger, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) error { +func backup(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) error { // save initial state so we can restore slaveStartRequired := false @@ -273,7 +273,7 @@ func (mysqld *Mysqld) backup(logger logutil.Logger, bh backupstorage.BackupHandl logger.Infof("found %v files to backup", len(fes)) // backup everything - if err := mysqld.backupFiles(logger, bh, fes, replicationPosition, backupConcurrency); err != nil { + if err := backupFiles(mysqld, logger, bh, fes, replicationPosition, backupConcurrency); err != nil { return fmt.Errorf("cannot backup files: %v", err) } @@ -290,7 +290,7 @@ func (mysqld *Mysqld) backup(logger logutil.Logger, bh backupstorage.BackupHandl } // this should be quick, but we might as well just wait - if err := mysqld.WaitForSlaveStart(slaveStartDeadline); err != nil { + if err := WaitForSlaveStart(mysqld, slaveStartDeadline); err != nil { return fmt.Errorf("slave is not restarting: %v", err) } } @@ -304,7 +304,7 @@ func (mysqld *Mysqld) backup(logger logutil.Logger, bh backupstorage.BackupHandl return nil } -func (mysqld *Mysqld) backupFiles(logger logutil.Logger, bh backupstorage.BackupHandle, fes []FileEntry, replicationPosition proto.ReplicationPosition, backupConcurrency int) error { +func backupFiles(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, fes []FileEntry, replicationPosition proto.ReplicationPosition, backupConcurrency int) error { sema := sync2.NewSemaphore(backupConcurrency, 0) rec := concurrency.AllErrorRecorder{} diff --git a/go/vt/mysqlctl/clone.go b/go/vt/mysqlctl/clone.go index cff85a399b..8231a9b21c 100644 --- a/go/vt/mysqlctl/clone.go +++ b/go/vt/mysqlctl/clone.go @@ -359,7 +359,7 @@ func (mysqld *Mysqld) SnapshotSourceEnd(slaveStartRequired, readOnly, 
deleteSnap } // this should be quick, but we might as well just wait - if err := mysqld.WaitForSlaveStart(slaveStartDeadline); err != nil { + if err := WaitForSlaveStart(mysqld, slaveStartDeadline); err != nil { return err } } @@ -438,7 +438,7 @@ func (mysqld *Mysqld) RestoreFromSnapshot(logger logutil.Logger, snapshotManifes } if !dontWaitForSlaveStart { - if err := mysqld.WaitForSlaveStart(slaveStartDeadline); err != nil { + if err := WaitForSlaveStart(mysqld, slaveStartDeadline); err != nil { return err } } diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index b07613f0fb..cf7776a3fb 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -40,6 +40,7 @@ type MysqlDaemon interface { // reparenting related methods ResetReplicationCommands() ([]string, error) MasterPosition() (proto.ReplicationPosition, error) + IsReadOnly() (bool, error) SetReadOnly(on bool) error StartReplicationCommands(status *proto.ReplicationStatus) ([]string, error) SetMasterCommands(masterHost string, masterPort int) ([]string, error) @@ -237,6 +238,11 @@ func (fmd *FakeMysqlDaemon) MasterPosition() (proto.ReplicationPosition, error) return fmd.CurrentMasterPosition, nil } +// IsReadOnly is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) IsReadOnly() (bool, error) { + return fmd.ReadOnly, nil +} + // SetReadOnly is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) SetReadOnly(on bool) error { fmd.ReadOnly = on diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index b553b54251..123790270d 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -89,8 +89,9 @@ func parseSlaveStatus(fields map[string]string) proto.ReplicationStatus { return status } -// WaitForSlaveStart waits a slave until given deadline passed -func (mysqld *Mysqld) WaitForSlaveStart(slaveStartDeadline int) error { +// WaitForSlaveStart waits for MySQL replication to start until given +// deadline (in seconds) passed. +func WaitForSlaveStart(mysqld MysqlDaemon, slaveStartDeadline int) error { var rowMap map[string]string for slaveWait := 0; slaveWait < slaveStartDeadline; slaveWait++ { status, err := mysqld.SlaveStatus() diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index 7d28f60118..ca16f5ad4c 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -743,7 +743,7 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo // now we can run the backup bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) name := fmt.Sprintf("%v-%v", tablet.Alias, time.Now().Unix()) - returnErr := agent.Mysqld.Backup(l, bucket, name, concurrency, agent.hookExtraEnv()) + returnErr := mysqlctl.Backup(agent.Mysqld, l, bucket, name, concurrency, agent.hookExtraEnv()) // and change our type back to the appropriate value: // - if healthcheck is enabled, go to spare From b3428563059c5cc1a483e79b0bae3eeb4148939c Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 19 May 2015 15:04:04 -0700 Subject: [PATCH 050/128] Moving a few things to MysqlDaemon. 
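
Concretely: ResolveTables turns into a package function on the
interface, Close() joins the interface, and the agent call sites switch
from the concrete agent.Mysqld to agent.MysqlDaemon. The shape of the
change, in miniature (dbName and patterns are illustrative argument
names, not the real call sites):

    // before:
    //   tables, err := agent.Mysqld.ResolveTables(dbName, patterns)
    // after (any MysqlDaemon implementation, including fakes, works):
    tables, err := mysqlctl.ResolveTables(agent.MysqlDaemon, dbName, patterns)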
--- go/vt/mysqlctl/mysql_daemon.go | 8 ++++++++ go/vt/mysqlctl/schema.go | 2 +- go/vt/tabletmanager/after_action.go | 3 ++- go/vt/tabletmanager/agent.go | 4 ++-- go/vt/tabletmanager/agent_rpc_actions.go | 2 +- go/vt/tabletmanager/binlog.go | 2 +- go/vt/tabletmanager/restore.go | 2 +- 7 files changed, 16 insertions(+), 7 deletions(-) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index cf7776a3fb..b06045abe0 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -70,6 +70,10 @@ type MysqlDaemon interface { // FetchSuperQuery executes one query, returns the result FetchSuperQuery(query string) (*mproto.QueryResult, error) + + // Close will close this instance of Mysqld. It will wait for all dba + // queries to be finished. + Close() } // FakeMysqlDaemon implements MysqlDaemon and allows the user to fake @@ -337,6 +341,10 @@ func (fmd *FakeMysqlDaemon) FetchSuperQuery(query string) (*mproto.QueryResult, return qr, nil } +// Close is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) Close() { +} + // CheckSuperQueryList returns an error if all the queries we expected // haven't been seen. func (fmd *FakeMysqlDaemon) CheckSuperQueryList() error { diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index 9b36749315..453953dfdd 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -114,7 +114,7 @@ func (mysqld *Mysqld) GetSchema(dbName string, tables, excludeTables []string, i // ResolveTables returns a list of actual tables+views matching a list // of regexps -func (mysqld *Mysqld) ResolveTables(dbName string, tables []string) ([]string, error) { +func ResolveTables(mysqld MysqlDaemon, dbName string, tables []string) ([]string, error) { sd, err := mysqld.GetSchema(dbName, tables, nil, true) if err != nil { return nil, err diff --git a/go/vt/tabletmanager/after_action.go b/go/vt/tabletmanager/after_action.go index a6bd8aebc8..78d0662c1c 100644 --- a/go/vt/tabletmanager/after_action.go +++ b/go/vt/tabletmanager/after_action.go @@ -17,6 +17,7 @@ import ( "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/trace" "github.com/youtube/vitess/go/vt/binlog" + "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" "github.com/youtube/vitess/go/vt/topo" @@ -106,7 +107,7 @@ func (agent *ActionAgent) loadKeyspaceAndBlacklistRules(tablet *topo.Tablet, bla blacklistRules := tabletserver.NewQueryRules() if len(blacklistedTables) > 0 { // tables, first resolve wildcards - tables, err := agent.Mysqld.ResolveTables(tablet.DbName(), blacklistedTables) + tables, err := mysqlctl.ResolveTables(agent.MysqlDaemon, tablet.DbName(), blacklistedTables) if err != nil { return err } diff --git a/go/vt/tabletmanager/agent.go b/go/vt/tabletmanager/agent.go index 0410a4d7c6..bc951fcfd3 100644 --- a/go/vt/tabletmanager/agent.go +++ b/go/vt/tabletmanager/agent.go @@ -457,8 +457,8 @@ func (agent *ActionAgent) Stop() { if agent.BinlogPlayerMap != nil { agent.BinlogPlayerMap.StopAllPlayersAndReset() } - if agent.Mysqld != nil { - agent.Mysqld.Close() + if agent.MysqlDaemon != nil { + agent.MysqlDaemon.Close() } } diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index ca16f5ad4c..0e5260bcac 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -743,7 +743,7 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency 
int, logger lo // now we can run the backup bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) name := fmt.Sprintf("%v-%v", tablet.Alias, time.Now().Unix()) - returnErr := mysqlctl.Backup(agent.Mysqld, l, bucket, name, concurrency, agent.hookExtraEnv()) + returnErr := mysqlctl.Backup(agent.MysqlDaemon, l, bucket, name, concurrency, agent.hookExtraEnv()) // and change our type back to the appropriate value: // - if healthcheck is enabled, go to spare diff --git a/go/vt/tabletmanager/binlog.go b/go/vt/tabletmanager/binlog.go index e45c53244c..f40b98265e 100644 --- a/go/vt/tabletmanager/binlog.go +++ b/go/vt/tabletmanager/binlog.go @@ -259,7 +259,7 @@ func (bpc *BinlogPlayerController) Iteration() (err error) { // check which kind of replication we're doing, tables or keyrange if len(bpc.sourceShard.Tables) > 0 { // tables, first resolve wildcards - tables, err := bpc.mysqld.ResolveTables(bpc.dbName, bpc.sourceShard.Tables) + tables, err := mysqlctl.ResolveTables(bpc.mysqld, bpc.dbName, bpc.sourceShard.Tables) if err != nil { return fmt.Errorf("failed to resolve table names: %v", err) } diff --git a/go/vt/tabletmanager/restore.go b/go/vt/tabletmanager/restore.go index 1038a6b193..3428df2984 100644 --- a/go/vt/tabletmanager/restore.go +++ b/go/vt/tabletmanager/restore.go @@ -44,7 +44,7 @@ func (agent *ActionAgent) restoreFromBackup() { // do the optional restore, if that fails we are in a bad state, // just log.Fatalf out. bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) - pos, err := mysqlctl.Restore(agent.Mysqld, bucket, *restoreConcurrency, agent.hookExtraEnv()) + pos, err := mysqlctl.Restore(agent.MysqlDaemon, bucket, *restoreConcurrency, agent.hookExtraEnv()) if err != nil && err != mysqlctl.ErrNoBackup { log.Fatalf("Cannot restore original backup: %v", err) } From 199aa60881f9ff5d639b9ba692b345d8a9535373 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 19 May 2015 16:03:31 -0700 Subject: [PATCH 051/128] add SplitColumn field to SplitQuery 1. Add SplitColumn field to SplitQuery so that caller could hint SplitQuery endpoint which column is best for splitting query. 2. The split column must be indexed and this will be verified on the server side. 3. coding style fixes suggested by golint. --- go/vt/tabletserver/proto/structs.go | 23 +++++++-- go/vt/tabletserver/query_splitter.go | 47 ++++++++++++------- go/vt/tabletserver/query_splitter_test.go | 38 +++++++++++++-- go/vt/tabletserver/sqlquery.go | 12 ++--- .../vitess/vtgate/SplitQueryRequest.java | 14 ++++-- .../com/youtube/vitess/vtgate/VtGate.java | 4 +- .../vitess/vtgate/hadoop/VitessConf.java | 9 ++++ .../vtgate/hadoop/VitessInputFormat.java | 2 +- .../vtgate/rpcclient/gorpc/Bsonify.java | 1 + .../vitess/vtgate/integration/VtGateIT.java | 6 +-- 10 files changed, 117 insertions(+), 39 deletions(-) diff --git a/go/vt/tabletserver/proto/structs.go b/go/vt/tabletserver/proto/structs.go index c8c911cfdb..1fc4f64132 100644 --- a/go/vt/tabletserver/proto/structs.go +++ b/go/vt/tabletserver/proto/structs.go @@ -11,15 +11,20 @@ import ( mproto "github.com/youtube/vitess/go/mysql/proto" ) +// SessionParams is passed to GetSessionId. The server will +// double-check the keyspace and shard are what the tablet is serving. type SessionParams struct { Keyspace string Shard string } +// SessionInfo is returned by GetSessionId. Use the provided +// session_id in the Session object for any subsequent call. type SessionInfo struct { SessionId int64 } +// Query is the payload to Execute. 
type Query struct { Sql string BindVariables map[string]interface{} @@ -56,6 +61,7 @@ func slimit(s string) string { return s[:l] } +// BoundQuery is one query in a QueryList. type BoundQuery struct { Sql string BindVariables map[string]interface{} @@ -63,6 +69,7 @@ type BoundQuery struct { //go:generate bsongen -file $GOFILE -type BoundQuery -o bound_query_bson.go +// QueryList is the payload to ExecuteBatch. type QueryList struct { Queries []BoundQuery SessionId int64 @@ -71,12 +78,14 @@ type QueryList struct { //go:generate bsongen -file $GOFILE -type QueryList -o query_list_bson.go +// QueryResultList is the return type for ExecuteBatch. type QueryResultList struct { List []mproto.QueryResult } //go:generate bsongen -file $GOFILE -type QueryResultList -o query_result_list_bson.go +// Session is passed to all calls. type Session struct { SessionId int64 TransactionId int64 @@ -84,17 +93,23 @@ type Session struct { //go:generate bsongen -file $GOFILE -type Session -o session_bson.go +// TransactionInfo is returned by Begin. Use the provided +// transaction_id in the Session object for any subsequent call to be inside +// the transaction. type TransactionInfo struct { TransactionId int64 } // SplitQueryRequest represents a request to split a Query into queries that // each return a subset of the original query. -// TODO(anandhenry): Add SessionId to this struct. +// SplitColumn: preferred column to split. Server will pick a random PK column +// if this field is empty or returns an error if this field is not +// empty but not found in schema info or not be indexed. type SplitQueryRequest struct { - Query BoundQuery - SplitCount int - SessionID int64 + Query BoundQuery + SplitColumn string + SplitCount int + SessionID int64 } // QuerySplit represents a split of SplitQueryRequest.Query. RowCount is only diff --git a/go/vt/tabletserver/query_splitter.go b/go/vt/tabletserver/query_splitter.go index d375b3cdb0..b199a689e0 100644 --- a/go/vt/tabletserver/query_splitter.go +++ b/go/vt/tabletserver/query_splitter.go @@ -17,26 +17,31 @@ import ( // one primary key and the leading primary key must be numeric, see // QuerySplitter.splitBoundaries() type QuerySplitter struct { - query *proto.BoundQuery - splitCount int - schemaInfo *SchemaInfo - sel *sqlparser.Select - tableName string - pkCol string - rowCount int64 + query *proto.BoundQuery + splitCount int + schemaInfo *SchemaInfo + sel *sqlparser.Select + tableName string + splitColumn string + rowCount int64 } // NewQuerySplitter creates a new QuerySplitter. query is the original query // to split and splitCount is the desired number of splits. splitCount must // be a positive int, if not it will be set to 1. 
-func NewQuerySplitter(query *proto.BoundQuery, splitCount int, schemaInfo *SchemaInfo) *QuerySplitter { +func NewQuerySplitter( + query *proto.BoundQuery, + splitColumn string, + splitCount int, + schemaInfo *SchemaInfo) *QuerySplitter { if splitCount < 1 { splitCount = 1 } return &QuerySplitter{ - query: query, - splitCount: splitCount, - schemaInfo: schemaInfo, + query: query, + splitCount: splitCount, + schemaInfo: schemaInfo, + splitColumn: splitColumn, } } @@ -74,7 +79,17 @@ func (qs *QuerySplitter) validateQuery() error { if len(tableInfo.PKColumns) == 0 { return fmt.Errorf("no primary keys") } - qs.pkCol = tableInfo.GetPKColumn(0).Name + if qs.splitColumn != "" { + for _, index := range tableInfo.Indexes { + for _, column := range index.Columns { + if qs.splitColumn == column { + return nil + } + } + } + return fmt.Errorf("split column is not indexed or does not exist in table schema, SplitColumn: %s, TableInfo.Table: %v", qs.splitColumn, tableInfo.Table) + } + qs.splitColumn = tableInfo.GetPKColumn(0).Name return nil } @@ -130,9 +145,9 @@ func (qs *QuerySplitter) getWhereClause(start, end sqltypes.Value) *sqlparser.Wh return qs.sel.Where } pk := &sqlparser.ColName{ - Name: []byte(qs.pkCol), + Name: []byte(qs.splitColumn), } - // pkCol >= start + // splitColumn >= start if !start.IsNull() { startClause = &sqlparser.ComparisonExpr{ Operator: sqlparser.AST_GE, @@ -140,7 +155,7 @@ func (qs *QuerySplitter) getWhereClause(start, end sqltypes.Value) *sqlparser.Wh Right: sqlparser.NumVal((start).Raw()), } } - // pkCol < end + // splitColumn < end if !end.IsNull() { endClause = &sqlparser.ComparisonExpr{ Operator: sqlparser.AST_LT, @@ -154,7 +169,7 @@ func (qs *QuerySplitter) getWhereClause(start, end sqltypes.Value) *sqlparser.Wh if endClause == nil { clauses = startClause } else { - // pkCol >= start AND pkCol < end + // splitColumn >= start AND splitColumn < end clauses = &sqlparser.AndExpr{ Left: startClause, Right: endClause, diff --git a/go/vt/tabletserver/query_splitter_test.go b/go/vt/tabletserver/query_splitter_test.go index 0535308902..73ba784c0f 100644 --- a/go/vt/tabletserver/query_splitter_test.go +++ b/go/vt/tabletserver/query_splitter_test.go @@ -3,6 +3,7 @@ package tabletserver import ( "fmt" "reflect" + "strings" "testing" mproto "github.com/youtube/vitess/go/mysql/proto" @@ -18,8 +19,14 @@ func getSchemaInfo() *SchemaInfo { } zero, _ := sqltypes.BuildValue(0) table.AddColumn("id", "int", zero, "") + table.AddColumn("id2", "int", zero, "") table.AddColumn("count", "int", zero, "") table.PKColumns = []int{0} + primaryIndex := table.AddIndex("PRIMARY") + primaryIndex.AddColumn("id", 12345) + + id2Index := table.AddIndex("idx_id2") + id2Index.AddColumn("id2", 1234) tables := make(map[string]*TableInfo, 1) tables["test_table"] = &TableInfo{Table: table} @@ -37,7 +44,7 @@ func getSchemaInfo() *SchemaInfo { func TestValidateQuery(t *testing.T) { schemaInfo := getSchemaInfo() query := &proto.BoundQuery{} - splitter := NewQuerySplitter(query, 3, schemaInfo) + splitter := NewQuerySplitter(query, "", 3, schemaInfo) query.Sql = "delete from test_table" got := splitter.validateQuery() @@ -94,6 +101,31 @@ func TestValidateQuery(t *testing.T) { if !reflect.DeepEqual(got, want) { t.Errorf("valid query validation failed, got:%v, want:%v", got, want) } + + // column id2 is indexed + splitter = NewQuerySplitter(query, "id2", 3, schemaInfo) + query.Sql = "select * from test_table where count > :count" + got = splitter.validateQuery() + want = nil + if !reflect.DeepEqual(got, want) { + 
t.Errorf("valid query validation failed, got:%v, want:%v", got, want) + } + + // column does not exist + splitter = NewQuerySplitter(query, "unknown_column", 3, schemaInfo) + got = splitter.validateQuery() + wantStr := "split column is not indexed or does not exist in table schema" + if !strings.Contains(got.Error(), wantStr) { + t.Errorf("unknown table validation failed, got:%v, want:%v", got, wantStr) + } + + // column is not indexed + splitter = NewQuerySplitter(query, "count", 3, schemaInfo) + got = splitter.validateQuery() + wantStr = "split column is not indexed or does not exist in table schema" + if !strings.Contains(got.Error(), wantStr) { + t.Errorf("unknown table validation failed, got:%v, want:%v", got, wantStr) + } } func TestGetWhereClause(t *testing.T) { @@ -101,7 +133,7 @@ func TestGetWhereClause(t *testing.T) { sql := "select * from test_table where count > :count" statement, _ := sqlparser.Parse(sql) splitter.sel, _ = statement.(*sqlparser.Select) - splitter.pkCol = "id" + splitter.splitColumn = "id" // no boundary case, start = end = nil, should not change the where clause nilValue := sqltypes.Value{} @@ -238,7 +270,7 @@ func TestSplitQuery(t *testing.T) { query := &proto.BoundQuery{ Sql: "select * from test_table where count > :count", } - splitter := NewQuerySplitter(query, 3, schemaInfo) + splitter := NewQuerySplitter(query, "", 3, schemaInfo) splitter.validateQuery() min, _ := sqltypes.BuildValue(0) max, _ := sqltypes.BuildValue(300) diff --git a/go/vt/tabletserver/sqlquery.go b/go/vt/tabletserver/sqlquery.go index 0f41b32030..1980afde42 100644 --- a/go/vt/tabletserver/sqlquery.go +++ b/go/vt/tabletserver/sqlquery.go @@ -507,7 +507,7 @@ func (sq *SqlQuery) SplitQuery(ctx context.Context, req *proto.SplitQueryRequest sq.endRequest() }() - splitter := NewQuerySplitter(&(req.Query), req.SplitCount, sq.qe.schemaInfo) + splitter := NewQuerySplitter(&(req.Query), req.SplitColumn, req.SplitCount, sq.qe.schemaInfo) err = splitter.validateQuery() if err != nil { return NewTabletError(ErrFail, "splitQuery: query validation error: %s, request: %#v", err, req) @@ -520,12 +520,12 @@ func (sq *SqlQuery) SplitQuery(ctx context.Context, req *proto.SplitQueryRequest } conn := qre.getConn(sq.qe.connPool) defer conn.Recycle() - // TODO: For fetching pkMinMax, include where clauses on the + // TODO: For fetching MinMax, include where clauses on the // primary key, if any, in the original query which might give a narrower - // range of PKs to work with. - minMaxSql := fmt.Sprintf("SELECT MIN(%v), MAX(%v) FROM %v", splitter.pkCol, splitter.pkCol, splitter.tableName) - pkMinMax := qre.execSQL(conn, minMaxSql, true) - reply.Queries, err = splitter.split(pkMinMax) + // range of split column to work with. 
+	minMaxSql := fmt.Sprintf("SELECT MIN(%v), MAX(%v) FROM %v", splitter.splitColumn, splitter.splitColumn, splitter.tableName)
+	splitColumnMinMax := qre.execSQL(conn, minMaxSql, true)
+	reply.Queries, err = splitter.split(splitColumnMinMax)
 	if err != nil {
 		return NewTabletError(ErrFail, "splitQuery: query split error: %s, request: %#v", err, req)
 	}
diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/SplitQueryRequest.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/SplitQueryRequest.java
index 35b7e080f2..5147d7bb96 100644
--- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/SplitQueryRequest.java
+++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/SplitQueryRequest.java
@@ -3,23 +3,29 @@ package com.youtube.vitess.vtgate;
 public class SplitQueryRequest {
   private String sql;
   private String keyspace;
+  private String splitColumn;
   private int splitCount;
 
-  public SplitQueryRequest(String sql, String keyspace, int splitCount) {
+  public SplitQueryRequest(String sql, String keyspace, int splitCount, String splitColumn) {
     this.sql = sql;
     this.keyspace = keyspace;
     this.splitCount = splitCount;
+    this.splitColumn = splitColumn;
   }
 
   public String getSql() {
-    return sql;
+    return this.sql;
   }
 
   public String getKeyspace() {
-    return keyspace;
+    return this.keyspace;
   }
 
   public int getSplitCount() {
-    return splitCount;
+    return this.splitCount;
+  }
+
+  public String getSplitColumn() {
+    return this.splitColumn;
   }
 }
diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/VtGate.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/VtGate.java
index aa16bac544..5210d540d4 100644
--- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/VtGate.java
+++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/VtGate.java
@@ -114,9 +114,9 @@ public class VtGate {
    * instances. Batch jobs or MapReduce jobs that need to scan all rows can use these queries to
    * parallelize full table scans.
    */
-  public Map<Query, Long> splitQuery(String keyspace, String sql, int splitCount)
+  public Map<Query, Long> splitQuery(String keyspace, String sql, int splitCount, String pkColumn)
       throws ConnectionException, DatabaseException {
-    SplitQueryRequest req = new SplitQueryRequest(sql, keyspace, splitCount);
+    SplitQueryRequest req = new SplitQueryRequest(sql, keyspace, splitCount, pkColumn);
     SplitQueryResponse response = client.splitQuery(req);
     if (response.getError() != null) {
       throw new DatabaseException(response.getError());
diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/hadoop/VitessConf.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/hadoop/VitessConf.java
index 7bc27e51a1..4fde55c145 100644
--- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/hadoop/VitessConf.java
+++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/hadoop/VitessConf.java
@@ -11,6 +11,7 @@ public class VitessConf {
   public static final String INPUT_KEYSPACE = "vitess.vtgate.hadoop.keyspace";
   public static final String INPUT_QUERY = "vitess.vtgate.hadoop.input_query";
   public static final String SPLITS = "vitess.vtgate.hadoop.splits";
+  public static final String SPLIT_COLUMN = "vitess.vtgate.hadoop.splitcolumn";
   public static final String HOSTS_DELIM = ",";
 
   private Configuration conf;
@@ -58,4 +59,12 @@ public class VitessConf {
   public void setSplits(int splits) {
     conf.setInt(SPLITS, splits);
   }
+
+  public String getSplitColumn() {
+    return conf.get(SPLIT_COLUMN);
+  }
+
+  public void setSplitColumn(String splitColumn) {
+    conf.set(SPLIT_COLUMN, splitColumn);
+  }
 }
diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/hadoop/VitessInputFormat.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/hadoop/VitessInputFormat.java
index a703485461..a0311b4378 100644
--- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/hadoop/VitessInputFormat.java
+++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/hadoop/VitessInputFormat.java
@@ -33,7 +33,7 @@ public class VitessInputFormat extends InputFormat {
     VitessConf conf = new VitessConf(context.getConfiguration());
     VtGate vtgate = VtGate.connect(conf.getHosts(), conf.getTimeoutMs());
     Map<Query, Long> queries =
-        vtgate.splitQuery(conf.getKeyspace(), conf.getInputQuery(), conf.getSplits());
+        vtgate.splitQuery(conf.getKeyspace(), conf.getInputQuery(), conf.getSplits(), conf.getSplitColumn());
     List<InputSplit> splits = new LinkedList<>();
     for (Query query : queries.keySet()) {
       Long size = queries.get(query);
diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java
index c4e60dfe9c..dc5c06b1b2 100644
--- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java
+++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java
@@ -185,6 +185,7 @@ public class Bsonify {
     query.put("Sql", request.getSql());
     BSONObject b = new BasicBSONObject();
     b.put("Keyspace", request.getKeyspace());
+    b.put("SplitColumn", request.getSplitColumn());
     b.put("Query", query);
     b.put("SplitCount", request.getSplitCount());
     return b;
diff --git a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java
index 5b97d75b1c..597ca5161e 100644
--- a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java
+++ b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java
@@ -306,7 +306,7 @@ public class VtGateIT {
     Util.waitForTablet("rdonly", 40, 3, testEnv);
     VtGate vtgate = VtGate.connect("localhost:" + testEnv.port, 0);
     Map<Query, Long> queries =
-        vtgate.splitQuery("test_keyspace", "select id,keyspace_id from vtgate_test", 1);
+        vtgate.splitQuery("test_keyspace", "select id,keyspace_id from vtgate_test", 1, "");
     vtgate.close();
 
     // Verify 2 splits, one per shard
@@ -342,7 +342,7 @@ public class VtGateIT {
     VtGate vtgate = VtGate.connect("localhost:" + testEnv.port, 0);
     int splitCount = 6;
     Map<Query, Long> queries =
-        vtgate.splitQuery("test_keyspace", "select id,keyspace_id from vtgate_test", splitCount);
+        vtgate.splitQuery("test_keyspace", "select id,keyspace_id from vtgate_test", splitCount, "");
     vtgate.close();
 
     // Verify 6 splits, 3 per shard
@@ -370,7 +370,7 @@ public class VtGateIT {
   public void testSplitQueryInvalidTable() throws Exception {
     VtGate vtgate = VtGate.connect("localhost:" + testEnv.port, 0);
     try {
-      vtgate.splitQuery("test_keyspace", "select id from invalid_table", 1);
+      vtgate.splitQuery("test_keyspace", "select id from invalid_table", 1, "");
       Assert.fail("failed to raise connection exception");
     } catch (ConnectionException e) {
       Assert.assertTrue(

From a8785556be93cff03e9d5c4e46887d9c0fe1ee29 Mon Sep 17 00:00:00 2001
From: Michael Berlin
Date: Tue, 19 May 2015 16:17:16 -0700
Subject: [PATCH 052/128] docker/test/run.sh: Make repo world readable for access from docker.

Previously, the permissions of all items *within* the repository were
already updated, but not those of the repository itself. This resulted
in the error 'cp: cannot stat `/tmp/src/*': Permission denied' when
running "docker/test/run.sh mariadb".
---
 docker/test/run.sh | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docker/test/run.sh b/docker/test/run.sh
index 15c6bedc2f..6156dff2d2 100755
--- a/docker/test/run.sh
+++ b/docker/test/run.sh
@@ -18,8 +18,9 @@ if [[ ! -f bootstrap.sh ]]; then
   exit 1
 fi
 
-# To avoid AUFS permission issues, files must allow access by "other"
-chmod -R o=g *
+# To avoid AUFS permission issues, files must allow access by "other" (permissions rX required).
+# Mirror permissions to "other" from the owning group (which we assume has at least rX permissions).
+chmod -R o=g .
 
 args="$args --rm -e USER=vitess -v /dev/log:/dev/log"
 args="$args -v $PWD:/tmp/src"

From 1b8cbb90f101bde86e6bbcb3bc581c3f831604da Mon Sep 17 00:00:00 2001
From: Michael Berlin
Date: Tue, 5 May 2015 23:20:40 -0700
Subject: [PATCH 053/128] Initial version of automation framework.

The automation framework makes it possible to automate cluster
operations which require a series of manual steps, e.g. resharding.

A Cluster Operation has a list of task containers which are processed
sequentially. Each task container can contain one or more tasks which
will be executed in parallel.

Here's an example of a cluster operation with two task containers. The
second task container has two tasks:

- step 1
- step 2a | step 2b

If a task container contains exactly one task, that task can emit new
task containers, which will be inserted directly after the current task
container. This mechanism is used to fully expand a Cluster Operation
through special tasks which emit new task containers, e.g.
"ReshardingTask".

This patchset implements the minimal steps to automate "resharding";
the task implementations for "vtctl" and "vtworker" are still missing.
These will be added in later, separate commits.
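For illustration only (not part of this patch): a minimal sketch of how
the example above maps onto the helpers introduced below
(NewTaskContainerWithSingleTask, NewTaskContainer, AddTask); "pb" is the
generated protobuf package and the task names are placeholders:

  // exampleSerialTasks builds the two task containers from the example:
  // "step 1" runs first, then "step 2a" and "step 2b" run in parallel.
  func exampleSerialTasks() []*pb.TaskContainer {
  	step1 := NewTaskContainerWithSingleTask("step 1", nil)
  	step2 := NewTaskContainer()
  	AddTask(step2, "step 2a", nil)
  	AddTask(step2, "step 2b", nil)
  	return []*pb.TaskContainer{step1, step2}
  }

If "step 1" emitted new task containers at runtime, the scheduler would
insert them directly after the first container, i.e. before "step 2a"
and "step 2b" execute.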
--- Makefile | 1 + .../automation/cluster_operation_instance.go | 52 +++ .../automation/horizontal_resharding_task.go | 106 ++++++ .../horizontal_resharding_task_test.go | 35 ++ go/vt/automation/id_generator.go | 18 + go/vt/automation/scheduler.go | 317 ++++++++++++++++++ go/vt/automation/scheduler_test.go | 242 +++++++++++++ go/vt/automation/task.go | 20 ++ go/vt/automation/task_containers.go | 32 ++ go/vt/automation/tasks.go | 33 ++ go/vt/automation/testutils_test.go | 66 ++++ go/vt/proto/automation/automation.pb.go | 311 +++++++++++++++++ proto/automation.proto | 89 +++++ 13 files changed, 1322 insertions(+) create mode 100644 go/vt/automation/cluster_operation_instance.go create mode 100644 go/vt/automation/horizontal_resharding_task.go create mode 100644 go/vt/automation/horizontal_resharding_task_test.go create mode 100644 go/vt/automation/id_generator.go create mode 100644 go/vt/automation/scheduler.go create mode 100644 go/vt/automation/scheduler_test.go create mode 100644 go/vt/automation/task.go create mode 100644 go/vt/automation/task_containers.go create mode 100644 go/vt/automation/tasks.go create mode 100644 go/vt/automation/testutils_test.go create mode 100644 go/vt/proto/automation/automation.pb.go create mode 100644 proto/automation.proto diff --git a/Makefile b/Makefile index bf1815a0db..1754166381 100644 --- a/Makefile +++ b/Makefile @@ -185,6 +185,7 @@ proto: cd go/vt/proto/queryservice && $$VTROOT/dist/protobuf/bin/protoc -I../../../../proto ../../../../proto/queryservice.proto --go_out=plugins=grpc:. cd go/vt/proto/vtctl && $$VTROOT/dist/protobuf/bin/protoc -I../../../../proto ../../../../proto/vtctl.proto --go_out=plugins=grpc:. cd go/vt/proto/tabletmanager && $$VTROOT/dist/protobuf/bin/protoc -I../../../../proto ../../../../proto/tabletmanager.proto --go_out=plugins=grpc:. + cd go/vt/proto/automation && $$VTROOT/dist/protobuf/bin/protoc -I../../../../proto ../../../../proto/automation.proto --go_out=plugins=grpc:. find go/vt/proto -name "*.pb.go" | xargs sed --in-place -r -e 's,"([a-z0-9_]+).pb","github.com/youtube/vitess/go/vt/proto/\1",g' cd py/vtctl && $$VTROOT/dist/protobuf/bin/protoc -I../../proto ../../proto/vtctl.proto --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=$$VTROOT/dist/grpc/bin/grpc_python_plugin diff --git a/go/vt/automation/cluster_operation_instance.go b/go/vt/automation/cluster_operation_instance.go new file mode 100644 index 0000000000..d1f97a4f5e --- /dev/null +++ b/go/vt/automation/cluster_operation_instance.go @@ -0,0 +1,52 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package automation + +import ( + pb "github.com/youtube/vitess/go/vt/proto/automation" +) + +// ClusterOperationInstance is a runtime type which enhances the protobuf message "ClusterOperation" with runtime specific data. +// Unlike the protobuf message, the additional runtime data will not be part of a checkpoint. +type ClusterOperationInstance struct { + pb.ClusterOperation + taskIDGenerator *IDGenerator +} + +// NewClusterOperationInstance creates a new cluster operation instance with one initial task. 
+func NewClusterOperationInstance(clusterOpID string, initialTask *pb.TaskContainer, taskIDGenerator *IDGenerator) *ClusterOperationInstance {
+	c := &ClusterOperationInstance{
+		pb.ClusterOperation{
+			Id:          clusterOpID,
+			SerialTasks: []*pb.TaskContainer{},
+			State:       pb.ClusterOperationState_CLUSTER_OPERATION_NOT_STARTED,
+		},
+		taskIDGenerator,
+	}
+	c.InsertTaskContainers([]*pb.TaskContainer{initialTask}, 0)
+	return c
+}
+
+// addMissingTaskID assigns a task id to each task in "tc".
+func (c *ClusterOperationInstance) addMissingTaskID(tc []*pb.TaskContainer) {
+	for _, taskContainer := range tc {
+		for _, task := range taskContainer.ParallelTasks {
+			if task.Id == "" {
+				task.Id = c.taskIDGenerator.GetNextID()
+			}
+		}
+	}
+}
+
+// InsertTaskContainers inserts "newTaskContainers" at pos in the current list of task containers. Existing task containers will be moved after the new task containers.
+func (c *ClusterOperationInstance) InsertTaskContainers(newTaskContainers []*pb.TaskContainer, pos int) {
+	c.addMissingTaskID(newTaskContainers)
+
+	newSerialTasks := make([]*pb.TaskContainer, len(c.SerialTasks)+len(newTaskContainers))
+	copy(newSerialTasks, c.SerialTasks[:pos])
+	copy(newSerialTasks[pos:], newTaskContainers)
+	copy(newSerialTasks[pos+len(newTaskContainers):], c.SerialTasks[pos:])
+	c.SerialTasks = newSerialTasks
+}
diff --git a/go/vt/automation/horizontal_resharding_task.go b/go/vt/automation/horizontal_resharding_task.go
new file mode 100644
index 0000000000..5ad1951fc0
--- /dev/null
+++ b/go/vt/automation/horizontal_resharding_task.go
@@ -0,0 +1,106 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package automation
+
+import (
+	"fmt"
+	"strings"
+
+	pb "github.com/youtube/vitess/go/vt/proto/automation"
+)
+
+// HorizontalReshardingTask is a cluster operation which allows increasing the number of shards.
+type HorizontalReshardingTask struct {
+}
+
+// TODO(mberlin): Uncomment/remove when "ForceReparent" and "CopySchemaShard" are implemented.
+//func selectAnyTabletFromShardByType(shard string, tabletType string) string {
+//	return ""
+//}
+
+func (t *HorizontalReshardingTask) run(parameters map[string]string) ([]*pb.TaskContainer, string, error) {
+	// Example: test_keyspace
+	keyspace := parameters["keyspace"]
+	// Example: 10-20
+	sourceShards := strings.Split(parameters["source_shard_list"], ",")
+	// Example: 10-18,18-20
+	destShards := strings.Split(parameters["dest_shard_list"], ",")
+	// Example: cell1-0000062352
+	sourceRdonlyTablets := strings.Split(parameters["source_shard_rdonly_list"], ",")
+
+	var newTasks []*pb.TaskContainer
+	// TODO(mberlin): Implement "ForceReparent" task and uncomment this.
+	// reparentTasks := NewTaskContainer()
+	// for _, destShard := range destShards {
+	// 	newMaster := selectAnyTabletFromShardByType(destShard, "master")
+	// 	AddTask(reparentTasks, "ForceReparent", map[string]string{
+	// 		"shard":  destShard,
+	// 		"master": newMaster,
+	// 	})
+	// }
+	// newTasks = append(newTasks, reparentTasks)
+
+	// TODO(mberlin): Implement "CopySchemaShard" task and uncomment this.
+	// copySchemaTasks := NewTaskContainer()
+	// sourceRdonlyTablet := selectAnyTabletFromShardByType(sourceShards[0], "rdonly")
+	// for _, destShard := range destShards {
+	// 	AddTask(copySchemaTasks, "CopySchemaShard", map[string]string{
+	// 		"shard":                destShard,
+	// 		"source_rdonly_tablet": sourceRdonlyTablet,
+	// 	})
+	// }
+	// newTasks = append(newTasks, copySchemaTasks)
+
+	splitCloneTasks := NewTaskContainer()
+	for _, sourceShard := range sourceShards {
+		// TODO(mberlin): Add a semaphore as argument to limit the parallelism.
+		AddTask(splitCloneTasks, "vtworker", map[string]string{
+			"command":           "SplitClone",
+			"keyspace":          keyspace,
+			"shard":             sourceShard,
+			"vtworker_endpoint": parameters["vtworker_endpoint"],
+		})
+	}
+	newTasks = append(newTasks, splitCloneTasks)
+
+	// TODO(mberlin): Remove this once SplitClone does this on its own.
+	restoreTypeTasks := NewTaskContainer()
+	for _, sourceRdonlyTablet := range sourceRdonlyTablets {
+		AddTask(restoreTypeTasks, "vtctl", map[string]string{
+			"command": fmt.Sprintf("ChangeSlaveType %v rdonly", sourceRdonlyTablet),
+		})
+	}
+	newTasks = append(newTasks, restoreTypeTasks)
+
+	splitDiffTasks := NewTaskContainer()
+	for _, destShard := range destShards {
+		AddTask(splitDiffTasks, "vtworker", map[string]string{
+			"command":           "SplitDiff",
+			"keyspace":          keyspace,
+			"shard":             destShard,
+			"vtworker_endpoint": parameters["vtworker_endpoint"],
+		})
+	}
+	newTasks = append(newTasks, splitDiffTasks)
+
+	// TODO(mberlin): Implement "MigrateServedTypes" task and uncomment this.
+	// for _, servedType := range []string{"rdonly", "replica", "master"} {
+	// 	migrateServedTypesTasks := NewTaskContainer()
+	// 	for _, sourceShard := range sourceShards {
+	// 		AddTask(migrateServedTypesTasks, "MigrateServedTypes", map[string]string{
+	// 			"keyspace":    keyspace,
+	// 			"shard":       sourceShard,
+	// 			"served_type": servedType,
+	// 		})
+	// 	}
+	// 	newTasks = append(newTasks, migrateServedTypesTasks)
+	// }
+
+	return newTasks, "", nil
+}
+
+func (t *HorizontalReshardingTask) requiredParameters() []string {
+	return []string{"keyspace", "source_shard_list", "source_shard_rdonly_list", "dest_shard_list"}
+}
diff --git a/go/vt/automation/horizontal_resharding_task_test.go b/go/vt/automation/horizontal_resharding_task_test.go
new file mode 100644
index 0000000000..6c0235f6eb
--- /dev/null
+++ b/go/vt/automation/horizontal_resharding_task_test.go
@@ -0,0 +1,35 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package automation
+
+import (
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+)
+
+func TestHorizontalReshardingTaskEmittedTasks(t *testing.T) {
+	reshardingTask := &HorizontalReshardingTask{}
+
+	parameters := map[string]string{
+		"source_shard_rdonly_list": "cell1-0000062352",
+		"keyspace":                 "test_keyspace",
+		"source_shard_list":        "10-20",
+		"dest_shard_list":          "10-18,18-20",
+		"vtworker_endpoint":        "localhost:12345",
+	}
+
+	err := checkRequiredParameters(reshardingTask, parameters)
+	if err != nil {
+		t.Fatalf("Not all required parameters were specified: %v", err)
+	}
+
+	newTaskContainers, _, _ := reshardingTask.run(parameters)
+
+	// TODO(mberlin): Check emitted tasks against expected output.
+	for _, tc := range newTaskContainers {
+		t.Logf("new tasks: %v", proto.MarshalTextString(tc))
+	}
+}
diff --git a/go/vt/automation/id_generator.go b/go/vt/automation/id_generator.go
new file mode 100644
index 0000000000..9cd0b40298
--- /dev/null
+++ b/go/vt/automation/id_generator.go
@@ -0,0 +1,18 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package automation
+
+import "strconv"
+import "sync/atomic"
+
+// IDGenerator generates unique task and cluster operation IDs.
+type IDGenerator struct {
+	counter int64
+}
+
+// GetNextID returns an ID which wasn't returned before.
+func (ig *IDGenerator) GetNextID() string {
+	return strconv.FormatInt(atomic.AddInt64(&ig.counter, 1), 10)
+}
diff --git a/go/vt/automation/scheduler.go b/go/vt/automation/scheduler.go
new file mode 100644
index 0000000000..a6ec04a798
--- /dev/null
+++ b/go/vt/automation/scheduler.go
@@ -0,0 +1,317 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package automation contains code to execute high-level cluster operations
+(e.g. resharding) as a series of low-level operations
+(e.g. vtctl, shell commands, ...).
+*/
+package automation
+
+import (
+	"fmt"
+	"sync"
+
+	log "github.com/golang/glog"
+	pb "github.com/youtube/vitess/go/vt/proto/automation"
+	"golang.org/x/net/context"
+)
+
+type schedulerState int32
+
+const (
+	stateNotRunning schedulerState = iota
+	stateRunning
+	stateShuttingDown
+	stateShutdown
+)
+
+type taskCreator func(string) Task
+
+// Scheduler executes automation tasks and maintains the execution state.
+type Scheduler struct {
+	idGenerator IDGenerator
+
+	mu sync.Mutex
+	// Guarded by "mu".
+	registeredClusterOperations map[string]bool
+	// Guarded by "mu".
+	toBeScheduledClusterOperations chan *ClusterOperationInstance
+	// Guarded by "mu".
+	state schedulerState
+
+	// Guarded by "taskCreatorMu". May be overridden by testing code.
+	taskCreator   taskCreator
+	taskCreatorMu sync.Mutex
+
+	pendingOpsWg *sync.WaitGroup
+
+	muOpList sync.Mutex
+	// Guarded by "muOpList".
+	activeClusterOperations map[string]*ClusterOperationInstance
+	// Guarded by "muOpList".
+	finishedClusterOperations map[string]*ClusterOperationInstance
+}
+
+// NewScheduler creates a new instance.
+func NewScheduler() (*Scheduler, error) {
+	defaultClusterOperations := map[string]bool{
+		"HorizontalReshardingTask": true,
+	}
+
+	s := &Scheduler{
+		registeredClusterOperations:    defaultClusterOperations,
+		idGenerator:                    IDGenerator{},
+		toBeScheduledClusterOperations: make(chan *ClusterOperationInstance, 10),
+		state:                          stateNotRunning,
+		taskCreator:                    defaultTaskCreator,
+		pendingOpsWg:                   &sync.WaitGroup{},
+		activeClusterOperations:        make(map[string]*ClusterOperationInstance),
+		finishedClusterOperations:      make(map[string]*ClusterOperationInstance),
+	}
+
+	return s, nil
+}
+
+func (s *Scheduler) registerClusterOperation(clusterOperationName string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.registeredClusterOperations[clusterOperationName] = true
+}
+
+// Run processes queued cluster operations.
+func (s *Scheduler) Run() {
+	s.mu.Lock()
+	s.state = stateRunning
+	s.mu.Unlock()
+
+	s.startProcessRequestsLoop()
+}
+
+func (s *Scheduler) startProcessRequestsLoop() {
+	// Use a WaitGroup instead of just a done channel, because we want
+	// to be able to shut down the scheduler even if Run() was never executed.
+	s.pendingOpsWg.Add(1)
+	go s.processRequestsLoop()
+}
+
+func (s *Scheduler) processRequestsLoop() {
+	defer s.pendingOpsWg.Done()
+
+	for op := range s.toBeScheduledClusterOperations {
+		s.processClusterOperation(op)
+	}
+	log.Infof("Stopped processing loop for ClusterOperations.")
+}
+
+func (s *Scheduler) processClusterOperation(clusterOp *ClusterOperationInstance) {
+	if clusterOp.State == pb.ClusterOperationState_CLUSTER_OPERATION_DONE {
+		log.Infof("ClusterOperation: %v skipping because it is already done. Details: %v", clusterOp.Id, clusterOp)
+		return
+	}
+
+	log.Infof("ClusterOperation: %v running. Details: %v", clusterOp.Id, clusterOp)
+
+	var lastTaskError string
+	for i := 0; i < len(clusterOp.SerialTasks); i++ {
+		taskContainer := clusterOp.SerialTasks[i]
+		for _, taskProto := range taskContainer.ParallelTasks {
+			if taskProto.State == pb.TaskState_DONE {
+				if taskProto.Error != "" {
+					log.Errorf("Task: %v (%v/%v) failed before. Aborting the ClusterOperation. Error: %v Details: %v", taskProto.Name, clusterOp.Id, taskProto.Id, taskProto.Error, taskProto)
+					lastTaskError = taskProto.Error
+					break
+				} else {
+					log.Infof("Task: %v (%v/%v) skipped because it is already done. Full Details: %v", taskProto.Name, clusterOp.Id, taskProto.Id, taskProto)
+				}
+			}
+
+			task, err := s.createTaskInstance(taskProto.Name)
+			if err != nil {
+				log.Errorf("Task: %v (%v/%v) could not be instantiated. Error: %v Details: %v", taskProto.Name, clusterOp.Id, taskProto.Id, err, taskProto)
+				MarkTaskFailed(taskProto, "", err)
+				lastTaskError = err.Error()
+				break
+			}
+
+			taskProto.State = pb.TaskState_RUNNING
+			log.Infof("Task: %v (%v/%v) running. Details: %v", taskProto.Name, clusterOp.Id, taskProto.Id, taskProto)
+			newTaskContainers, output, errRun := task.run(taskProto.Parameters)
+			log.Infof("Task: %v (%v/%v) finished. newTaskContainers: %v, output: %v, error: %v", taskProto.Name, clusterOp.Id, taskProto.Id, newTaskContainers, output, errRun)
+
+			if errRun != nil {
+				MarkTaskFailed(taskProto, output, errRun)
+				lastTaskError = errRun.Error()
+				break
+			}
+			MarkTaskSucceeded(taskProto, output)
+
+			if newTaskContainers != nil {
+				// Make sure all new tasks do not miss any required parameters.
+				for _, newTaskContainer := range newTaskContainers {
+					for _, newTaskProto := range newTaskContainer.ParallelTasks {
+						err := s.validateTaskSpecification(newTaskProto.Name, newTaskProto.Parameters)
+						if err != nil {
+							log.Errorf("Task: %v (%v/%v) emitted a new task which is not valid. Error: %v Details: %v", taskProto.Name, clusterOp.Id, taskProto.Id, err, newTaskProto)
+							MarkTaskFailed(taskProto, output, err)
+							lastTaskError = err.Error()
+							break
+						}
+					}
+				}
+
+				if lastTaskError == "" {
+					clusterOp.InsertTaskContainers(newTaskContainers, i+1)
+					log.Infof("ClusterOperation: %v %d new task containers added by %v (%v/%v). Updated ClusterOperation: %v",
+						clusterOp.Id, len(newTaskContainers), taskProto.Name, clusterOp.Id, taskProto.Id, clusterOp)
+				}
+			}
+		}
+	}
+
+	clusterOp.State = pb.ClusterOperationState_CLUSTER_OPERATION_DONE
+	if lastTaskError != "" {
+		clusterOp.Error = lastTaskError
+	}
+	log.Infof("ClusterOperation: %v finished. Details: %v", clusterOp.Id, clusterOp)
+
+	// Move operation from active to finished.
+	s.muOpList.Lock()
+	if s.activeClusterOperations[clusterOp.Id] != clusterOp {
+		panic("Pending ClusterOperation was not recorded as active, but should have been.")
+	}
+	delete(s.activeClusterOperations, clusterOp.Id)
+	s.finishedClusterOperations[clusterOp.Id] = clusterOp
+	s.muOpList.Unlock()
+}
+
+func defaultTaskCreator(taskName string) Task {
+	switch taskName {
+	case "HorizontalReshardingTask":
+		return &HorizontalReshardingTask{}
+	default:
+		return nil
+	}
+}
+
+func (s *Scheduler) setTaskCreator(creator taskCreator) {
+	s.taskCreatorMu.Lock()
+	defer s.taskCreatorMu.Unlock()
+
+	s.taskCreator = creator
+}
+
+func (s *Scheduler) validateTaskSpecification(taskName string, parameters map[string]string) error {
+	taskInstanceForParametersCheck, err := s.createTaskInstance(taskName)
+	if err != nil {
+		return err
+	}
+	errParameters := checkRequiredParameters(taskInstanceForParametersCheck, parameters)
+	if errParameters != nil {
+		return errParameters
+	}
+	return nil
+}
+
+func (s *Scheduler) createTaskInstance(taskName string) (Task, error) {
+	s.taskCreatorMu.Lock()
+	taskCreator := s.taskCreator
+	s.taskCreatorMu.Unlock()
+
+	task := taskCreator(taskName)
+	if task == nil {
+		return nil, fmt.Errorf("No implementation found for: %v", taskName)
+	}
+	return task, nil
+}
+
+// checkRequiredParameters returns an error if not all required parameters are provided in "parameters".
+func checkRequiredParameters(task Task, parameters map[string]string) error {
+	for _, requiredParameter := range task.requiredParameters() {
+		if _, ok := parameters[requiredParameter]; !ok {
+			return fmt.Errorf("Parameter %v is required, but not provided", requiredParameter)
+		}
+	}
+	return nil
+}
+
+// EnqueueClusterOperation can be used to start a new cluster operation.
+func (s *Scheduler) EnqueueClusterOperation(ctx context.Context, req *pb.EnqueueClusterOperationRequest) (*pb.EnqueueClusterOperationResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if s.state != stateRunning {
+		return nil, fmt.Errorf("Scheduler is not running. State: %v", s.state)
+	}
+
+	if s.registeredClusterOperations[req.Name] != true {
+		return nil, fmt.Errorf("No ClusterOperation with name: %v is registered", req.Name)
+	}
+
+	err := s.validateTaskSpecification(req.Name, req.Parameters)
+	if err != nil {
+		return nil, err
+	}
+
+	clusterOpID := s.idGenerator.GetNextID()
+	taskIDGenerator := IDGenerator{}
+	initialTask := NewTaskContainerWithSingleTask(req.Name, req.Parameters)
+	clusterOp := NewClusterOperationInstance(clusterOpID, initialTask, &taskIDGenerator)
+
+	s.muOpList.Lock()
+	s.toBeScheduledClusterOperations <- clusterOp
+	s.activeClusterOperations[clusterOpID] = clusterOp
+	s.muOpList.Unlock()
+
+	return &pb.EnqueueClusterOperationResponse{
+		Id: clusterOp.Id,
+	}, nil
+}
+
+// findClusterOp checks for a given ClusterOperation ID if it's in the list of active or finished operations.
+func (s *Scheduler) findClusterOp(id string) (*ClusterOperationInstance, error) {
+	var ok bool
+	var clusterOp *ClusterOperationInstance
+
+	s.muOpList.Lock()
+	defer s.muOpList.Unlock()
+	clusterOp, ok = s.activeClusterOperations[id]
+	if !ok {
+		clusterOp, ok = s.finishedClusterOperations[id]
+	}
+	if !ok {
+		return nil, fmt.Errorf("ClusterOperation with id: %v not found", id)
+	}
+	return clusterOp, nil
+}
+
+// GetClusterOperationDetails can be used to query the full details of active or finished operations.
+func (s *Scheduler) GetClusterOperationDetails(ctx context.Context, req *pb.GetClusterOperationDetailsRequest) (*pb.GetClusterOperationDetailsResponse, error) {
+	clusterOp, err := s.findClusterOp(req.Id)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.GetClusterOperationDetailsResponse{
+		ClusterOp: &clusterOp.ClusterOperation,
+	}, nil
+}
+
+// ShutdownAndWait shuts down the scheduler and waits indefinitely until all pending cluster operations have finished.
+func (s *Scheduler) ShutdownAndWait() {
+	s.mu.Lock()
+	if s.state != stateShuttingDown {
+		s.state = stateShuttingDown
+		close(s.toBeScheduledClusterOperations)
+	}
+	s.mu.Unlock()
+
+	log.Infof("Scheduler was shut down. Waiting for pending ClusterOperations to finish.")
+	s.pendingOpsWg.Wait()
+
+	s.mu.Lock()
+	s.state = stateShutdown
+	s.mu.Unlock()
+	log.Infof("All pending ClusterOperations finished.")
+}
diff --git a/go/vt/automation/scheduler_test.go b/go/vt/automation/scheduler_test.go
new file mode 100644
index 0000000000..ecff89c976
--- /dev/null
+++ b/go/vt/automation/scheduler_test.go
@@ -0,0 +1,242 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package automation
+
+import (
+	"testing"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	context "golang.org/x/net/context"
+
+	pb "github.com/youtube/vitess/go/vt/proto/automation"
+)
+
+// newTestScheduler constructs a scheduler with test tasks.
+// If tasks should be available as cluster operations, they still have to be registered manually with scheduler.registerClusterOperation.
+func newTestScheduler(t *testing.T) *Scheduler {
+	scheduler, err := NewScheduler()
+	if err != nil {
+		t.Fatalf("Failed to create scheduler: %v", err)
+	}
+	scheduler.setTaskCreator(testingTaskCreator)
+	return scheduler
+}
+
+// waitForClusterOperation is a helper function which blocks until the Cluster Operation has finished.
+func waitForClusterOperation(t *testing.T, scheduler *Scheduler, id string, expectedOutputLastTask string, expectedErrorLastTask string) *pb.ClusterOperation {
+	if expectedOutputLastTask == "" && expectedErrorLastTask == "" {
+		t.Fatal("Error in test: Cannot wait for an operation where both output and error are expected to be empty.")
+	}
+
+	getDetailsRequest := &pb.GetClusterOperationDetailsRequest{
+		Id: id,
+	}
+
+	for {
+		getDetailsResponse, err := scheduler.GetClusterOperationDetails(context.TODO(), getDetailsRequest)
+		if err != nil {
+			t.Fatalf("Failed to get details for cluster operation. Request: %v Error: %v", getDetailsRequest, err)
+		}
+		if getDetailsResponse.ClusterOp.State == pb.ClusterOperationState_CLUSTER_OPERATION_DONE {
+			tc := getDetailsResponse.ClusterOp.SerialTasks
+			lastTc := tc[len(tc)-1]
+			if expectedOutputLastTask != "" {
+				if lastTc.ParallelTasks[len(lastTc.ParallelTasks)-1].Output != expectedOutputLastTask {
+					t.Fatalf("ClusterOperation finished but did not return expected output. want: %v Full ClusterOperation details: %v", expectedOutputLastTask, proto.MarshalTextString(getDetailsResponse.ClusterOp))
+				}
+			}
+			if expectedErrorLastTask != "" {
+				if lastTc.ParallelTasks[len(lastTc.ParallelTasks)-1].Error != expectedErrorLastTask {
+					t.Fatalf("ClusterOperation finished but did not return expected error. Full ClusterOperation details: %v", getDetailsResponse.ClusterOp)
+				}
+			}
+			return getDetailsResponse.ClusterOp
+		}
+
+		t.Logf("Waiting for clusterOp: %v", getDetailsResponse.ClusterOp)
+		time.Sleep(5 * time.Millisecond)
+	}
+}
+
+func TestSchedulerImmediateShutdown(t *testing.T) {
+	// Make sure that the scheduler shuts down cleanly when it was instantiated, but not started with Run().
+	scheduler, err := NewScheduler()
+	if err != nil {
+		t.Fatalf("Failed to create scheduler: %v", err)
+	}
+	scheduler.ShutdownAndWait()
+}
+
+func enqueueClusterOperationAndCheckOutput(t *testing.T, taskName string, expectedOutput string) {
+	scheduler := newTestScheduler(t)
+	defer scheduler.ShutdownAndWait()
+	scheduler.registerClusterOperation("TestingEchoTask")
+	scheduler.registerClusterOperation("TestingEmitEchoTask")
+
+	scheduler.Run()
+
+	enqueueRequest := &pb.EnqueueClusterOperationRequest{
+		Name: taskName,
+		Parameters: map[string]string{
+			"echo_text": expectedOutput,
+		},
+	}
+	enqueueResponse, err := scheduler.EnqueueClusterOperation(context.TODO(), enqueueRequest)
+	if err != nil {
+		t.Fatalf("Failed to start cluster operation. Request: %v Error: %v", enqueueRequest, err)
+	}
+
+	waitForClusterOperation(t, scheduler, enqueueResponse.Id, expectedOutput, "")
+}
+
+func TestEnqueueSingleTask(t *testing.T) {
+	enqueueClusterOperationAndCheckOutput(t, "TestingEchoTask", "echoed text")
+}
+
+func TestEnqueueEmittingTask(t *testing.T) {
+	enqueueClusterOperationAndCheckOutput(t, "TestingEmitEchoTask", "echoed text from emitted task")
+}
+
+func TestEnqueueFailsDueToMissingParameter(t *testing.T) {
+	scheduler := newTestScheduler(t)
+	defer scheduler.ShutdownAndWait()
+	scheduler.registerClusterOperation("TestingEchoTask")
+
+	scheduler.Run()
+
+	enqueueRequest := &pb.EnqueueClusterOperationRequest{
+		Name: "TestingEchoTask",
+		Parameters: map[string]string{
+			"unrelevant-parameter": "value",
+		},
+	}
+	enqueueResponse, err := scheduler.EnqueueClusterOperation(context.TODO(), enqueueRequest)
+
+	if err == nil {
+		t.Fatalf("Scheduler should have failed to start cluster operation because not all required parameters were provided. Request: %v Error: %v Response: %v", enqueueRequest, err, enqueueResponse)
+	}
+	want := "Parameter echo_text is required, but not provided"
+	if err.Error() != want {
+		t.Fatalf("Wrong error message. got: '%v' want: '%v'", err, want)
+	}
+}
+
+func TestFailedTaskFailsClusterOperation(t *testing.T) {
+	scheduler := newTestScheduler(t)
+	defer scheduler.ShutdownAndWait()
+	scheduler.registerClusterOperation("TestingFailTask")
+
+	scheduler.Run()
+
+	enqueueRequest := &pb.EnqueueClusterOperationRequest{
+		Name: "TestingFailTask",
+	}
+	enqueueResponse, err := scheduler.EnqueueClusterOperation(context.TODO(), enqueueRequest)
+	if err != nil {
+		t.Fatalf("Failed to start cluster operation. Request: %v Error: %v", enqueueRequest, err)
+	}
+
+	waitForClusterOperation(t, scheduler, enqueueResponse.Id, "something went wrong", "full error message")
+}
+
+func TestEnqueueFailsDueToUnregisteredClusterOperation(t *testing.T) {
+	scheduler := newTestScheduler(t)
+	defer scheduler.ShutdownAndWait()
+
+	scheduler.Run()
+
+	enqueueRequest := &pb.EnqueueClusterOperationRequest{
+		Name: "TestingEchoTask",
+		Parameters: map[string]string{
+			"unrelevant-parameter": "value",
+		},
+	}
+	enqueueResponse, err := scheduler.EnqueueClusterOperation(context.TODO(), enqueueRequest)
+
+	if err == nil {
+		t.Fatalf("Scheduler should have failed to start cluster operation because it should not have been registered. Request: %v Error: %v Response: %v", enqueueRequest, err, enqueueResponse)
+	}
+	want := "No ClusterOperation with name: TestingEchoTask is registered"
+	if err.Error() != want {
+		t.Fatalf("Wrong error message. got: '%v' want: '%v'", err, want)
+	}
+}
+
+func TestGetDetailsFailsUnknownId(t *testing.T) {
+	scheduler := newTestScheduler(t)
+	defer scheduler.ShutdownAndWait()
+
+	scheduler.Run()
+
+	getDetailsRequest := &pb.GetClusterOperationDetailsRequest{
+		Id: "-1", // There will never be a ClusterOperation with this id.
+	}
+
+	getDetailsResponse, err := scheduler.GetClusterOperationDetails(context.TODO(), getDetailsRequest)
+	if err == nil {
+		t.Fatalf("Did not fail to get details for invalid ClusterOperation id. Request: %v Response: %v Error: %v", getDetailsRequest, getDetailsResponse, err)
+	}
+	want := "ClusterOperation with id: -1 not found"
+	if err.Error() != want {
+		t.Fatalf("Wrong error message. got: '%v' want: '%v'", err, want)
+	}
+}
+
+func TestEnqueueFailsBecauseTaskInstanceCannotBeCreated(t *testing.T) {
+	scheduler := newTestScheduler(t)
+	defer scheduler.ShutdownAndWait()
+	scheduler.setTaskCreator(defaultTaskCreator)
+	// TestingEchoTask is registered as a cluster operation, but its task cannot be instantiated because "testingTaskCreator" was not set.
+	scheduler.registerClusterOperation("TestingEchoTask")
+
+	scheduler.Run()
+
+	enqueueRequest := &pb.EnqueueClusterOperationRequest{
+		Name: "TestingEchoTask",
+		Parameters: map[string]string{
+			"unrelevant-parameter": "value",
+		},
+	}
+	enqueueResponse, err := scheduler.EnqueueClusterOperation(context.TODO(), enqueueRequest)
+
+	if err == nil {
+		t.Fatalf("Scheduler should have failed to start cluster operation because the task could not be instantiated. Request: %v Error: %v Response: %v", enqueueRequest, err, enqueueResponse)
+	}
+	want := "No implementation found for: TestingEchoTask"
+	if err.Error() != want {
+		t.Fatalf("Wrong error message. got: '%v' want: '%v'", err, want)
+	}
+}
+
+func TestTaskEmitsTaskWhichCannotBeInstantiated(t *testing.T) {
+	scheduler := newTestScheduler(t)
+	defer scheduler.ShutdownAndWait()
+	scheduler.setTaskCreator(func(taskName string) Task {
+		// TaskCreator which doesn't know TestingEchoTask (but emitted by TestingEmitEchoTask).
+		switch taskName {
+		case "TestingEmitEchoTask":
+			return &TestingEmitEchoTask{}
+		default:
+			return nil
+		}
+	})
+	scheduler.registerClusterOperation("TestingEmitEchoTask")
+
+	scheduler.Run()
+
+	enqueueRequest := &pb.EnqueueClusterOperationRequest{
+		Name: "TestingEmitEchoTask",
+	}
+	enqueueResponse, err := scheduler.EnqueueClusterOperation(context.TODO(), enqueueRequest)
+	if err != nil {
+		t.Fatalf("Failed to start cluster operation. Request: %v Error: %v", enqueueRequest, err)
+	}
+
+	details := waitForClusterOperation(t, scheduler, enqueueResponse.Id, "emitted TestingEchoTask", "No implementation found for: TestingEchoTask")
+	if len(details.SerialTasks) != 1 {
+		t.Errorf("A task has been emitted, but it shouldn't. Details:\n%v", proto.MarshalTextString(details))
+	}
+}
diff --git a/go/vt/automation/task.go b/go/vt/automation/task.go
new file mode 100644
index 0000000000..912ed5711b
--- /dev/null
+++ b/go/vt/automation/task.go
@@ -0,0 +1,20 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package automation
+
+import (
+	pb "github.com/youtube/vitess/go/vt/proto/automation"
+)
+
+// Task implementations can be executed by the scheduler.
+type Task interface {
+	// run executes the task using the key/values from parameters.
+	// "newTaskContainers" contains new tasks which the task can emit. They'll be inserted in the cluster operation directly after this task. It may be "nil".
+	// "output" may be empty. It contains any text which may be useful e.g. to debug the task or to show it in the UI.
+	run(parameters map[string]string) (newTaskContainers []*pb.TaskContainer, output string, err error)
+
+	// requiredParameters() returns a list of parameter keys which must be provided as input for run().
+	requiredParameters() []string
+}
diff --git a/go/vt/automation/task_containers.go b/go/vt/automation/task_containers.go
new file mode 100644
index 0000000000..1899fad172
--- /dev/null
+++ b/go/vt/automation/task_containers.go
@@ -0,0 +1,32 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package automation
+
+import (
+	pb "github.com/youtube/vitess/go/vt/proto/automation"
+)
+
+// Helper functions for "TaskContainer" protobuf message.
+
+// NewTaskContainerWithSingleTask creates a new task container with exactly one task.
+func NewTaskContainerWithSingleTask(taskName string, parameters map[string]string) *pb.TaskContainer {
+	return &pb.TaskContainer{
+		ParallelTasks: []*pb.Task{
+			NewTask(taskName, parameters),
+		},
+	}
+}
+
+// NewTaskContainer creates an empty task container. Use AddTask() to add tasks to it.
+func NewTaskContainer() *pb.TaskContainer {
+	return &pb.TaskContainer{
+		ParallelTasks: []*pb.Task{},
+	}
+}
+
+// AddTask adds a new task to an existing task container.
+func AddTask(t *pb.TaskContainer, taskName string, parameters map[string]string) {
+	t.ParallelTasks = append(t.ParallelTasks, NewTask(taskName, parameters))
+}
diff --git a/go/vt/automation/tasks.go b/go/vt/automation/tasks.go
new file mode 100644
index 0000000000..690f9d6dfe
--- /dev/null
+++ b/go/vt/automation/tasks.go
@@ -0,0 +1,33 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package automation
+
+import (
+	pb "github.com/youtube/vitess/go/vt/proto/automation"
+)
+
+// Helper functions for "Task" protobuf message.
+
+// MarkTaskSucceeded marks the task as done.
+func MarkTaskSucceeded(t *pb.Task, output string) {
+	t.State = pb.TaskState_DONE
+	t.Output = output
+}
+
+// MarkTaskFailed marks the task as failed.
+func MarkTaskFailed(t *pb.Task, output string, err error) { + t.State = pb.TaskState_DONE + t.Output = output + t.Error = err.Error() +} + +// NewTask creates a new task protobuf message for "taskName" with "parameters". +func NewTask(taskName string, parameters map[string]string) *pb.Task { + return &pb.Task{ + State: pb.TaskState_NOT_STARTED, + Name: taskName, + Parameters: parameters, + } +} diff --git a/go/vt/automation/testutils_test.go b/go/vt/automation/testutils_test.go new file mode 100644 index 0000000000..9db7bbbca0 --- /dev/null +++ b/go/vt/automation/testutils_test.go @@ -0,0 +1,66 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package automation + +import ( + "errors" + + pb "github.com/youtube/vitess/go/vt/proto/automation" +) + +func testingTaskCreator(taskName string) Task { + switch taskName { + // Tasks for testing only. + case "TestingEchoTask": + return &TestingEchoTask{} + case "TestingEmitEchoTask": + return &TestingEmitEchoTask{} + case "TestingFailTask": + return &TestingFailTask{} + default: + return nil + } +} + +// TestingEchoTask is used only for testing. It returns the join of all parameter values. +type TestingEchoTask struct { +} + +func (t *TestingEchoTask) run(parameters map[string]string) (newTasks []*pb.TaskContainer, output string, err error) { + for _, v := range parameters { + output += v + } + return +} + +func (t *TestingEchoTask) requiredParameters() []string { + return []string{"echo_text"} +} + +// TestingEmitEchoTask is used only for testing. It emits a TestingEchoTask. +type TestingEmitEchoTask struct { +} + +func (t *TestingEmitEchoTask) run(parameters map[string]string) (newTasks []*pb.TaskContainer, output string, err error) { + return []*pb.TaskContainer{ + NewTaskContainerWithSingleTask("TestingEchoTask", parameters), + }, "emitted TestingEchoTask", nil +} + +func (t *TestingEmitEchoTask) requiredParameters() []string { + return []string{} +} + +// TestingFailTask is used only for testing. It always fails. +type TestingFailTask struct { +} + +func (t *TestingFailTask) run(parameters map[string]string) (newTasks []*pb.TaskContainer, output string, err error) { + return nil, "something went wrong", errors.New("full error message") +} + +func (t *TestingFailTask) requiredParameters() []string { + return []string{} +} diff --git a/go/vt/proto/automation/automation.pb.go b/go/vt/proto/automation/automation.pb.go new file mode 100644 index 0000000000..3633bc57b7 --- /dev/null +++ b/go/vt/proto/automation/automation.pb.go @@ -0,0 +1,311 @@ +// Code generated by protoc-gen-go. +// source: automation.proto +// DO NOT EDIT! + +/* +Package automation is a generated protocol buffer package. + +It is generated from these files: + automation.proto + +It has these top-level messages: + ClusterOperation + TaskContainer + Task + EnqueueClusterOperationRequest + EnqueueClusterOperationResponse + GetClusterOperationStateRequest + GetClusterOperationStateResponse + GetClusterOperationDetailsRequest + GetClusterOperationDetailsResponse +*/ +package automation + +import proto "github.com/golang/protobuf/proto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal
+
+type ClusterOperationState int32
+
+const (
+	ClusterOperationState_UNKNOWN_CLUSTER_OPERATION_STATE ClusterOperationState = 0
+	ClusterOperationState_CLUSTER_OPERATION_NOT_STARTED   ClusterOperationState = 1
+	ClusterOperationState_CLUSTER_OPERATION_RUNNING       ClusterOperationState = 2
+	ClusterOperationState_CLUSTER_OPERATION_DONE          ClusterOperationState = 3
+)
+
+var ClusterOperationState_name = map[int32]string{
+	0: "UNKNOWN_CLUSTER_OPERATION_STATE",
+	1: "CLUSTER_OPERATION_NOT_STARTED",
+	2: "CLUSTER_OPERATION_RUNNING",
+	3: "CLUSTER_OPERATION_DONE",
+}
+var ClusterOperationState_value = map[string]int32{
+	"UNKNOWN_CLUSTER_OPERATION_STATE": 0,
+	"CLUSTER_OPERATION_NOT_STARTED":   1,
+	"CLUSTER_OPERATION_RUNNING":       2,
+	"CLUSTER_OPERATION_DONE":          3,
+}
+
+func (x ClusterOperationState) String() string {
+	return proto.EnumName(ClusterOperationState_name, int32(x))
+}
+
+type TaskState int32
+
+const (
+	TaskState_UNKNOWN_TASK_STATE TaskState = 0
+	TaskState_NOT_STARTED        TaskState = 1
+	TaskState_RUNNING            TaskState = 2
+	TaskState_DONE               TaskState = 3
+)
+
+var TaskState_name = map[int32]string{
+	0: "UNKNOWN_TASK_STATE",
+	1: "NOT_STARTED",
+	2: "RUNNING",
+	3: "DONE",
+}
+var TaskState_value = map[string]int32{
+	"UNKNOWN_TASK_STATE": 0,
+	"NOT_STARTED":        1,
+	"RUNNING":            2,
+	"DONE":               3,
+}
+
+func (x TaskState) String() string {
+	return proto.EnumName(TaskState_name, int32(x))
+}
+
+type ClusterOperation struct {
+	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	// TaskContainers are processed sequentially, one at a time.
+	SerialTasks []*TaskContainer `protobuf:"bytes,2,rep,name=serial_tasks" json:"serial_tasks,omitempty"`
+	// Cached value. This has to be re-evaluated e.g. after a checkpoint load because running tasks may have already finished.
+	State ClusterOperationState `protobuf:"varint,3,opt,name=state,enum=automation.ClusterOperationState" json:"state,omitempty"`
+	// Error of the first task which failed. Set after state advanced to CLUSTER_OPERATION_DONE. If empty, all tasks succeeded. Cached value, see state above.
+	Error string `protobuf:"bytes,4,opt,name=error" json:"error,omitempty"`
+}
+
+func (m *ClusterOperation) Reset()         { *m = ClusterOperation{} }
+func (m *ClusterOperation) String() string { return proto.CompactTextString(m) }
+func (*ClusterOperation) ProtoMessage()    {}
+
+func (m *ClusterOperation) GetSerialTasks() []*TaskContainer {
+	if m != nil {
+		return m.SerialTasks
+	}
+	return nil
+}
+
+// TaskContainer holds one or more tasks which may be executed in parallel.
+// "concurrency", if > 0, limits the number of concurrently executed tasks.
+type TaskContainer struct {
+	ParallelTasks []*Task `protobuf:"bytes,1,rep,name=parallel_tasks" json:"parallel_tasks,omitempty"`
+	Concurrency   int32   `protobuf:"varint,2,opt,name=concurrency" json:"concurrency,omitempty"`
+}
+
+func (m *TaskContainer) Reset()         { *m = TaskContainer{} }
+func (m *TaskContainer) String() string { return proto.CompactTextString(m) }
+func (*TaskContainer) ProtoMessage()    {}
+
+func (m *TaskContainer) GetParallelTasks() []*Task {
+	if m != nil {
+		return m.ParallelTasks
+	}
+	return nil
+}
+
+// Task represents a specific task which should be automatically executed.
+type Task struct {
+	// Task specification.
+	Name       string            `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Runtime data.
+ Id string `protobuf:"bytes,3,opt,name=id" json:"id,omitempty"` + State TaskState `protobuf:"varint,4,opt,name=state,enum=automation.TaskState" json:"state,omitempty"` + // Set after state advanced to DONE. + Output string `protobuf:"bytes,5,opt,name=output" json:"output,omitempty"` + // Set after state advanced to DONE. If empty, the task did succeed. + Error string `protobuf:"bytes,6,opt,name=error" json:"error,omitempty"` +} + +func (m *Task) Reset() { *m = Task{} } +func (m *Task) String() string { return proto.CompactTextString(m) } +func (*Task) ProtoMessage() {} + +func (m *Task) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type EnqueueClusterOperationRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *EnqueueClusterOperationRequest) Reset() { *m = EnqueueClusterOperationRequest{} } +func (m *EnqueueClusterOperationRequest) String() string { return proto.CompactTextString(m) } +func (*EnqueueClusterOperationRequest) ProtoMessage() {} + +func (m *EnqueueClusterOperationRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type EnqueueClusterOperationResponse struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *EnqueueClusterOperationResponse) Reset() { *m = EnqueueClusterOperationResponse{} } +func (m *EnqueueClusterOperationResponse) String() string { return proto.CompactTextString(m) } +func (*EnqueueClusterOperationResponse) ProtoMessage() {} + +type GetClusterOperationStateRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *GetClusterOperationStateRequest) Reset() { *m = GetClusterOperationStateRequest{} } +func (m *GetClusterOperationStateRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterOperationStateRequest) ProtoMessage() {} + +type GetClusterOperationStateResponse struct { + State ClusterOperationState `protobuf:"varint,1,opt,name=state,enum=automation.ClusterOperationState" json:"state,omitempty"` +} + +func (m *GetClusterOperationStateResponse) Reset() { *m = GetClusterOperationStateResponse{} } +func (m *GetClusterOperationStateResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterOperationStateResponse) ProtoMessage() {} + +type GetClusterOperationDetailsRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *GetClusterOperationDetailsRequest) Reset() { *m = GetClusterOperationDetailsRequest{} } +func (m *GetClusterOperationDetailsRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterOperationDetailsRequest) ProtoMessage() {} + +type GetClusterOperationDetailsResponse struct { + // Full snapshot of the execution e.g. including output of each task. 
+ ClusterOp *ClusterOperation `protobuf:"bytes,2,opt,name=cluster_op" json:"cluster_op,omitempty"` +} + +func (m *GetClusterOperationDetailsResponse) Reset() { *m = GetClusterOperationDetailsResponse{} } +func (m *GetClusterOperationDetailsResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterOperationDetailsResponse) ProtoMessage() {} + +func (m *GetClusterOperationDetailsResponse) GetClusterOp() *ClusterOperation { + if m != nil { + return m.ClusterOp + } + return nil +} + +func init() { + proto.RegisterEnum("automation.ClusterOperationState", ClusterOperationState_name, ClusterOperationState_value) + proto.RegisterEnum("automation.TaskState", TaskState_name, TaskState_value) +} + +// Client API for Automation service + +type AutomationClient interface { + // Start a cluster operation. + EnqueueClusterOperation(ctx context.Context, in *EnqueueClusterOperationRequest, opts ...grpc.CallOption) (*EnqueueClusterOperationResponse, error) + // TODO(mberlin): Polling this is bad. Implement a subscribe mechanism to wait for changes? + // Get all details of an active cluster operation. + GetClusterOperationDetails(ctx context.Context, in *GetClusterOperationDetailsRequest, opts ...grpc.CallOption) (*GetClusterOperationDetailsResponse, error) +} + +type automationClient struct { + cc *grpc.ClientConn +} + +func NewAutomationClient(cc *grpc.ClientConn) AutomationClient { + return &automationClient{cc} +} + +func (c *automationClient) EnqueueClusterOperation(ctx context.Context, in *EnqueueClusterOperationRequest, opts ...grpc.CallOption) (*EnqueueClusterOperationResponse, error) { + out := new(EnqueueClusterOperationResponse) + err := grpc.Invoke(ctx, "/automation.Automation/EnqueueClusterOperation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *automationClient) GetClusterOperationDetails(ctx context.Context, in *GetClusterOperationDetailsRequest, opts ...grpc.CallOption) (*GetClusterOperationDetailsResponse, error) { + out := new(GetClusterOperationDetailsResponse) + err := grpc.Invoke(ctx, "/automation.Automation/GetClusterOperationDetails", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Automation service + +type AutomationServer interface { + // Start a cluster operation. + EnqueueClusterOperation(context.Context, *EnqueueClusterOperationRequest) (*EnqueueClusterOperationResponse, error) + // TODO(mberlin): Polling this is bad. Implement a subscribe mechanism to wait for changes? + // Get all details of an active cluster operation. 
+	GetClusterOperationDetails(context.Context, *GetClusterOperationDetailsRequest) (*GetClusterOperationDetailsResponse, error)
+}
+
+func RegisterAutomationServer(s *grpc.Server, srv AutomationServer) {
+	s.RegisterService(&_Automation_serviceDesc, srv)
+}
+
+func _Automation_EnqueueClusterOperation_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {
+	in := new(EnqueueClusterOperationRequest)
+	if err := codec.Unmarshal(buf, in); err != nil {
+		return nil, err
+	}
+	out, err := srv.(AutomationServer).EnqueueClusterOperation(ctx, in)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func _Automation_GetClusterOperationDetails_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {
+	in := new(GetClusterOperationDetailsRequest)
+	if err := codec.Unmarshal(buf, in); err != nil {
+		return nil, err
+	}
+	out, err := srv.(AutomationServer).GetClusterOperationDetails(ctx, in)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+var _Automation_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "automation.Automation",
+	HandlerType: (*AutomationServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "EnqueueClusterOperation",
+			Handler:    _Automation_EnqueueClusterOperation_Handler,
+		},
+		{
+			MethodName: "GetClusterOperationDetails",
+			Handler:    _Automation_GetClusterOperationDetails_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{},
+}
diff --git a/proto/automation.proto b/proto/automation.proto
new file mode 100644
index 0000000000..15a90f80fc
--- /dev/null
+++ b/proto/automation.proto
@@ -0,0 +1,89 @@
+// Protobuf messages for the automation framework.
+
+// Messages (e.g. Task) are used both for checkpoint data and API access
+// (e.g. retrieving the current status of a pending cluster operation).
+
+syntax = "proto3";
+
+package automation;
+
+enum ClusterOperationState {
+  UNKNOWN_CLUSTER_OPERATION_STATE = 0;
+  CLUSTER_OPERATION_NOT_STARTED = 1;
+  CLUSTER_OPERATION_RUNNING = 2;
+  CLUSTER_OPERATION_DONE = 3;
+}
+
+message ClusterOperation {
+  string id = 1;
+  // TaskContainers are processed sequentially, one at a time.
+  repeated TaskContainer serial_tasks = 2;
+  // Cached value. This has to be re-evaluated, e.g. after a checkpoint load, because running tasks may have already finished.
+  ClusterOperationState state = 3;
+  // Error of the first task which failed. Set after state advanced to CLUSTER_OPERATION_DONE. If empty, all tasks succeeded. Cached value, see state above.
+  string error = 4;
+}
+
+// TaskContainer holds one or more tasks which may be executed in parallel.
+// "concurrency", if > 0, limits the number of concurrently executed tasks.
+message TaskContainer {
+  repeated Task parallel_tasks = 1;
+  int32 concurrency = 2;
+}
+
+enum TaskState {
+  UNKNOWN_TASK_STATE = 0;
+  NOT_STARTED = 1;
+  RUNNING = 2;
+  DONE = 3;
+}
+
+// Task represents a specific task which should be automatically executed.
+message Task {
+  // Task specification.
+  string name = 1;
+  map<string, string> parameters = 2;
+
+  // Runtime data.
+  string id = 3;
+  TaskState state = 4;
+  // Set after state advanced to DONE.
+  string output = 5;
+  // Set after state advanced to DONE. If empty, the task succeeded.
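+  // A non-empty error therefore implies the task failed.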
+  string error = 6;
+}
+
+message EnqueueClusterOperationRequest {
+  string name = 1;
+  map<string, string> parameters = 2;
+}
+
+message EnqueueClusterOperationResponse {
+  string id = 1;
+}
+
+message GetClusterOperationStateRequest {
+  string id = 1;
+}
+
+message GetClusterOperationStateResponse {
+  ClusterOperationState state = 1;
+}
+
+message GetClusterOperationDetailsRequest {
+  string id = 1;
+}
+
+message GetClusterOperationDetailsResponse {
+  // Full snapshot of the execution, e.g. including the output of each task.
+  ClusterOperation cluster_op = 2;
+}
+
+service Automation {
+  // Start a cluster operation.
+  rpc EnqueueClusterOperation(EnqueueClusterOperationRequest) returns (EnqueueClusterOperationResponse) {};
+
+  // TODO(mberlin): Polling this is bad. Implement a subscribe mechanism to wait for changes?
+  // Get all details of an active cluster operation.
+  rpc GetClusterOperationDetails(GetClusterOperationDetailsRequest) returns (GetClusterOperationDetailsResponse) {};
+}

From bbf0916778c24f39559ae4171a43193849614a17 Mon Sep 17 00:00:00 2001
From: Alain Jobart
Date: Wed, 20 May 2015 08:03:01 -0700
Subject: [PATCH 054/128] Few cosmetic changes, and addressing cancellation better.

---
 go/vt/worker/clone_utils.go          |  6 +++---
 go/vt/worker/diff_utils.go           |  3 ++-
 go/vt/worker/split_clone.go          | 21 +++++++--------------
 go/vt/worker/split_diff.go           | 21 ++++++++++-----------
 go/vt/worker/sqldiffer.go            |  4 ++--
 go/vt/worker/status_worker.go        |  7 ++++++-
 go/vt/worker/topo_utils.go           |  2 +-
 go/vt/worker/vertical_split_clone.go | 21 +++++++--------------
 go/vt/worker/vertical_split_diff.go  | 21 ++++++++++-----------
 go/vt/worker/worker.go               |  1 +
 10 files changed, 49 insertions(+), 58 deletions(-)

diff --git a/go/vt/worker/clone_utils.go b/go/vt/worker/clone_utils.go
index a0bb820223..410d44ddd0 100644
--- a/go/vt/worker/clone_utils.go
+++ b/go/vt/worker/clone_utils.go
@@ -30,7 +30,7 @@ import (
 // Does a topo lookup for a single shard, and returns the tablet record of the master tablet.
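 // Both topo calls below are now bounded by the remoteActionsTimeout flag instead of a hard-coded 60s.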
func resolveDestinationShardMaster(ctx context.Context, keyspace, shard string, wr *wrangler.Wrangler) (*topo.TabletInfo, error) { var ti *topo.TabletInfo - newCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + newCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) si, err := topo.GetShard(newCtx, wr.TopoServer(), keyspace, shard) cancel() if err != nil { @@ -43,7 +43,7 @@ func resolveDestinationShardMaster(ctx context.Context, keyspace, shard string, wr.Logger().Infof("Found target master alias %v in shard %v/%v", si.MasterAlias, keyspace, shard) - newCtx, cancel = context.WithTimeout(ctx, 60*time.Second) + newCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) ti, err = topo.GetTablet(newCtx, wr.TopoServer(), si.MasterAlias) cancel() if err != nil { @@ -280,7 +280,7 @@ func findChunks(ctx context.Context, wr *wrangler.Wrangler, ti *topo.TabletInfo, // get the min and max of the leading column of the primary key query := fmt.Sprintf("SELECT MIN(%v), MAX(%v) FROM %v.%v", td.PrimaryKeyColumns[0], td.PrimaryKeyColumns[0], ti.DbName(), td.Name) - ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) qr, err := wr.TabletManagerClient().ExecuteFetchAsApp(ctx, ti, query, 1, true) cancel() if err != nil { diff --git a/go/vt/worker/diff_utils.go b/go/vt/worker/diff_utils.go index dc1b12028b..9194dc2fa5 100644 --- a/go/vt/worker/diff_utils.go +++ b/go/vt/worker/diff_utils.go @@ -42,7 +42,7 @@ func NewQueryResultReaderForTablet(ctx context.Context, ts topo.Server, tabletAl return nil, err } - conn, err := tabletconn.GetDialer()(ctx, *endPoint, tablet.Keyspace, tablet.Shard, 30*time.Second) + conn, err := tabletconn.GetDialer()(ctx, *endPoint, tablet.Keyspace, tablet.Shard, *remoteActionsTimeout) if err != nil { return nil, err } @@ -153,6 +153,7 @@ func (qrr *QueryResultReader) Error() error { return qrr.clientErrFn() } +// Close closes the connection to the tablet. func (qrr *QueryResultReader) Close() { qrr.conn.Close() } diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index 9ca90e4333..88e5dbe355 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -288,7 +288,7 @@ func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { return fmt.Errorf("cannot read tablet %v: %v", alias, err) } - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) err := scw.wr.TabletManagerClient().StopSlave(shortCtx, scw.sourceTablets[i]) cancel() if err != nil { @@ -377,7 +377,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { // on all source shards. Furthermore, we estimate the number of rows // in each source shard for each table to be about the same // (rowCount is used to estimate an ETA) - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) sourceSchemaDefinition, err := scw.wr.GetSchema(shortCtx, scw.sourceAliases[0], nil, scw.excludeTables, true) cancel() if err != nil { @@ -430,19 +430,16 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { mu := sync.Mutex{} var firstError error + ctx, cancel = context.WithCancel(ctx) processError := func(format string, args ...interface{}) { scw.wr.Logger().Errorf(format, args...) mu.Lock() if firstError == nil { firstError = fmt.Errorf(format, args...) 
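+			// Cancel the shared context: this aborts all in-flight goroutines
+			// after the first error, replacing the removed shouldStop() polling below.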
+ cancel() } mu.Unlock() } - shouldStop := func() bool { - mu.Lock() - defer mu.Unlock() - return firstError != nil - } insertChannels := make([]chan string, len(scw.destinationShards)) destinationWaitGroup := sync.WaitGroup{} @@ -493,10 +490,6 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { sema.Acquire() defer sema.Release() - if shouldStop() { - return - } - scw.tableStatus[tableIndex].threadStarted() // build the query, and start the streaming @@ -538,7 +531,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { // get the current position from the sources for shardIndex := range scw.sourceShards { - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) status, err := scw.wr.TabletManagerClient().SlaveStatus(shortCtx, scw.sourceTablets[shardIndex]) cancel() if err != nil { @@ -573,7 +566,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { } else { for _, si := range scw.destinationShards { scw.wr.Logger().Infof("Setting SourceShard on shard %v/%v", si.Keyspace(), si.ShardName()) - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) err := scw.wr.SetSourceShards(shortCtx, si.Keyspace(), si.ShardName(), scw.sourceAliases, nil) cancel() if err != nil { @@ -595,7 +588,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { go func(ti *topo.TabletInfo) { defer destinationWaitGroup.Done() scw.wr.Logger().Infof("Reloading schema on tablet %v", ti.Alias) - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) err := scw.wr.TabletManagerClient().ReloadSchema(shortCtx, ti) cancel() if err != nil { diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go index cffd9b28ff..0c008946c4 100644 --- a/go/vt/worker/split_diff.go +++ b/go/vt/worker/split_diff.go @@ -8,7 +8,6 @@ import ( "fmt" "html/template" "sync" - "time" "golang.org/x/net/context" @@ -226,7 +225,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // 1 - stop the master binlog replication, get its current position sdw.wr.Logger().Infof("Stopping master binlog replication on %v", sdw.shardInfo.MasterAlias) - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) blpPositionList, err := sdw.wr.TabletManagerClient().StopBlp(shortCtx, masterInfo) cancel() if err != nil { @@ -254,8 +253,8 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // stop replication sdw.wr.Logger().Infof("Stopping slave[%v] %v at a minimum of %v", i, sdw.sourceAliases[i], blpPos.Position) - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) - stoppedAt, err := sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet, blpPos.Position, 30*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + stoppedAt, err := sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet, blpPos.Position, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", sdw.sourceAliases[i], blpPos.Position, err) @@ -276,8 +275,8 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // 3 - ask the master of the destination shard to resume filtered // replication up to the new list of positions 
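 // (Note: the wait time passed to RunBlpUntil below is now remoteActionsTimeout as well.)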
sdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", sdw.shardInfo.MasterAlias, stopPositionList) - shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second) - masterPos, err := sdw.wr.TabletManagerClient().RunBlpUntil(shortCtx, masterInfo, &stopPositionList, 30*time.Second) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + masterPos, err := sdw.wr.TabletManagerClient().RunBlpUntil(shortCtx, masterInfo, &stopPositionList, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("RunBlpUntil for %v until %v failed: %v", sdw.shardInfo.MasterAlias, stopPositionList, err) @@ -290,8 +289,8 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { if err != nil { return err } - shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second) - _, err = sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, destinationTablet, masterPos, 30*time.Second) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + _, err = sdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, destinationTablet, masterPos, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("StopSlaveMinimum for %v at %v failed: %v", sdw.destinationAlias, masterPos, err) @@ -305,7 +304,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // 5 - restart filtered replication on destination master sdw.wr.Logger().Infof("Restarting filtered replication on master %v", sdw.shardInfo.MasterAlias) - shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) err = sdw.wr.TabletManagerClient().StartBlp(shortCtx, masterInfo) if err := sdw.cleaner.RemoveActionByName(wrangler.StartBlpActionName, sdw.shardInfo.MasterAlias.String()); err != nil { sdw.wr.Logger().Warningf("Cannot find cleaning action %v/%v: %v", wrangler.StartBlpActionName, sdw.shardInfo.MasterAlias.String(), err) @@ -333,7 +332,7 @@ func (sdw *SplitDiffWorker) diff(ctx context.Context) error { wg.Add(1) go func() { var err error - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) sdw.destinationSchemaDefinition, err = sdw.wr.GetSchema( shortCtx, sdw.destinationAlias, nil /* tables */, sdw.excludeTables, false /* includeViews */) cancel() @@ -345,7 +344,7 @@ func (sdw *SplitDiffWorker) diff(ctx context.Context) error { wg.Add(1) go func(i int, sourceAlias topo.TabletAlias) { var err error - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) sdw.sourceSchemaDefinitions[i], err = sdw.wr.GetSchema( shortCtx, sourceAlias, nil /* tables */, sdw.excludeTables, false /* includeViews */) cancel() diff --git a/go/vt/worker/sqldiffer.go b/go/vt/worker/sqldiffer.go index e2fab4cb6c..ef3e50c529 100644 --- a/go/vt/worker/sqldiffer.go +++ b/go/vt/worker/sqldiffer.go @@ -174,7 +174,7 @@ func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { if err != nil { return err } - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) err = worker.wr.TabletManagerClient().StopSlave(shortCtx, subsetTablet) cancel() if err != nil { @@ -205,7 +205,7 @@ func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { if err != nil { return err } - shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel = 
context.WithTimeout(ctx, *remoteActionsTimeout)
 	err = worker.wr.TabletManagerClient().StopSlave(shortCtx, supersetTablet)
 	cancel()
 	if err != nil {
diff --git a/go/vt/worker/status_worker.go b/go/vt/worker/status_worker.go
index af36886cbd..1c98e9fb15 100644
--- a/go/vt/worker/status_worker.go
+++ b/go/vt/worker/status_worker.go
@@ -38,7 +38,12 @@ func (state StatusWorkerState) String() string {
 // and StatusAsText to make it easier on workers if they don't need to
 // export more.
 type StatusWorker struct {
-	Mu *sync.Mutex
+	// Mu protects the State variable, and can also be used
+	// by implementations to protect their own variables.
+	Mu *sync.Mutex
+
+	// State contains the worker's current state, and should only
+	// be accessed under Mu.
 	State StatusWorkerState
 }
diff --git a/go/vt/worker/topo_utils.go b/go/vt/worker/topo_utils.go
index b9d1cdf89d..ee9be34d39 100644
--- a/go/vt/worker/topo_utils.go
+++ b/go/vt/worker/topo_utils.go
@@ -75,7 +75,7 @@ func findChecker(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.C
 	defer wrangler.RecordTabletTagAction(cleaner, tabletAlias, "worker", "")
 	wr.Logger().Infof("Changing tablet %v to 'checker'", tabletAlias)
-	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
+	ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
 	err = wr.ChangeType(ctx, tabletAlias, topo.TYPE_CHECKER, false /*force*/)
 	cancel()
 	if err != nil {
diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go
index 13ac4dda17..113778fe78 100644
--- a/go/vt/worker/vertical_split_clone.go
+++ b/go/vt/worker/vertical_split_clone.go
@@ -256,7 +256,7 @@ func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error {
 	}
 	// stop replication on it
-	shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+	shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
 	err = vscw.wr.TabletManagerClient().StopSlave(shortCtx, vscw.sourceTablet)
 	cancel()
 	if err != nil {
@@ -330,7 +330,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error {
 	vscw.setState(WorkerStateCopy)
 	// get source schema
-	shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+	shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
 	sourceSchemaDefinition, err := vscw.wr.GetSchema(shortCtx, vscw.sourceAlias, vscw.tables, nil, true)
 	cancel()
 	if err != nil {
@@ -369,19 +369,16 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error {
 	mu := sync.Mutex{}
 	var firstError error
+	ctx, cancel = context.WithCancel(ctx)
 	processError := func(format string, args ...interface{}) {
 		vscw.wr.Logger().Errorf(format, args...)
 		mu.Lock()
 		if firstError == nil {
 			firstError = fmt.Errorf(format, args...)
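+			// As in split_clone.go, canceling the shared context stops the
+			// remaining copy goroutines once the first error is recorded.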
+ cancel() } mu.Unlock() } - shouldStop := func() bool { - mu.Lock() - defer mu.Unlock() - return firstError != nil - } destinationWaitGroup := sync.WaitGroup{} @@ -427,10 +424,6 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { sema.Acquire() defer sema.Release() - if shouldStop() { - return - } - vscw.tableStatus[tableIndex].threadStarted() // build the query, and start the streaming @@ -461,7 +454,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { // then create and populate the blp_checkpoint table if vscw.strategy.PopulateBlpCheckpoint { // get the current position from the source - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) status, err := vscw.wr.TabletManagerClient().SlaveStatus(shortCtx, vscw.sourceTablet) cancel() if err != nil { @@ -494,7 +487,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { vscw.wr.Logger().Infof("Skipping setting SourceShard on destination shard.") } else { vscw.wr.Logger().Infof("Setting SourceShard on shard %v/%v", vscw.destinationKeyspace, vscw.destinationShard) - shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) err := vscw.wr.SetSourceShards(shortCtx, vscw.destinationKeyspace, vscw.destinationShard, []topo.TabletAlias{vscw.sourceAlias}, vscw.tables) cancel() if err != nil { @@ -514,7 +507,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { go func(ti *topo.TabletInfo) { defer destinationWaitGroup.Done() vscw.wr.Logger().Infof("Reloading schema on tablet %v", ti.Alias) - shortCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) err := vscw.wr.TabletManagerClient().ReloadSchema(shortCtx, ti) cancel() if err != nil { diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index 72999fc7d4..11f2371860 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -9,7 +9,6 @@ import ( "html/template" "regexp" "sync" - "time" "golang.org/x/net/context" @@ -232,7 +231,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // 1 - stop the master binlog replication, get its current position vsdw.wr.Logger().Infof("Stopping master binlog replication on %v", vsdw.shardInfo.MasterAlias) - ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) blpPositionList, err := vsdw.wr.TabletManagerClient().StopBlp(ctx, masterInfo) cancel() if err != nil { @@ -258,8 +257,8 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) if err != nil { return err } - ctx, cancel = context.WithTimeout(ctx, 60*time.Second) - stoppedAt, err := vsdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, sourceTablet, pos.Position, 30*time.Second) + ctx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + stoppedAt, err := vsdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, sourceTablet, pos.Position, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", vsdw.sourceAlias, pos.Position, err) @@ -279,8 +278,8 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // 3 - ask the master of the destination shard to resume filtered // replication up to the new list of positions 
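 // (As in split_diff.go, RunBlpUntil below now waits up to remoteActionsTimeout.)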
vsdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", vsdw.shardInfo.MasterAlias, stopPositionList) - ctx, cancel = context.WithTimeout(ctx, 60*time.Second) - masterPos, err := vsdw.wr.TabletManagerClient().RunBlpUntil(ctx, masterInfo, &stopPositionList, 30*time.Second) + ctx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + masterPos, err := vsdw.wr.TabletManagerClient().RunBlpUntil(ctx, masterInfo, &stopPositionList, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("RunBlpUntil on %v until %v failed: %v", vsdw.shardInfo.MasterAlias, stopPositionList, err) @@ -293,8 +292,8 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) if err != nil { return err } - ctx, cancel = context.WithTimeout(ctx, 60*time.Second) - _, err = vsdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, destinationTablet, masterPos, 30*time.Second) + ctx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + _, err = vsdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, destinationTablet, masterPos, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("StopSlaveMinimum on %v at %v failed: %v", vsdw.destinationAlias, masterPos, err) @@ -308,7 +307,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // 5 - restart filtered replication on destination master vsdw.wr.Logger().Infof("Restarting filtered replication on master %v", vsdw.shardInfo.MasterAlias) - ctx, cancel = context.WithTimeout(ctx, 60*time.Second) + ctx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) err = vsdw.wr.TabletManagerClient().StartBlp(ctx, masterInfo) if err := vsdw.cleaner.RemoveActionByName(wrangler.StartBlpActionName, vsdw.shardInfo.MasterAlias.String()); err != nil { vsdw.wr.Logger().Warningf("Cannot find cleaning action %v/%v: %v", wrangler.StartBlpActionName, vsdw.shardInfo.MasterAlias.String(), err) @@ -335,7 +334,7 @@ func (vsdw *VerticalSplitDiffWorker) diff(ctx context.Context) error { wg.Add(1) go func() { var err error - ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) vsdw.destinationSchemaDefinition, err = vsdw.wr.GetSchema( ctx, vsdw.destinationAlias, nil /* tables */, vsdw.excludeTables, false /* includeViews */) cancel() @@ -346,7 +345,7 @@ func (vsdw *VerticalSplitDiffWorker) diff(ctx context.Context) error { wg.Add(1) go func() { var err error - ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) vsdw.sourceSchemaDefinition, err = vsdw.wr.GetSchema( ctx, vsdw.sourceAlias, nil /* tables */, vsdw.excludeTables, false /* includeViews */) cancel() diff --git a/go/vt/worker/worker.go b/go/vt/worker/worker.go index b3909e47f0..10627feed7 100644 --- a/go/vt/worker/worker.go +++ b/go/vt/worker/worker.go @@ -47,6 +47,7 @@ type Resolver interface { var ( resolveTTL = flag.Duration("resolve_ttl", 15*time.Second, "Amount of time that a topo resolution can be cached for") executeFetchRetryTime = flag.Duration("executefetch_retry_time", 30*time.Second, "Amount of time we should wait before retrying ExecuteFetch calls") + remoteActionsTimeout = flag.Duration("remote_actions_timeout", time.Minute, "Amount of time to wait for remote actions (like replication stop, ...)") statsState = stats.NewString("WorkerState") // the number of times that the worker attempst to reresolve the masters From e15f38cd393d6d4f891b073bb0f71339c08ad853 Mon Sep 17 00:00:00 2001 From: Alain Jobart 
Date: Wed, 20 May 2015 09:00:23 -0700 Subject: [PATCH 055/128] Removing clone.py & primecache.py, adding backup.py. --- Makefile | 2 +- go/vt/mysqlctl/mysqld.go | 2 - test/clone.py | 296 --------------------------------------- test/config.json | 2 +- test/primecache.py | 177 ----------------------- 5 files changed, 2 insertions(+), 477 deletions(-) delete mode 100755 test/clone.py delete mode 100755 test/primecache.py diff --git a/Makefile b/Makefile index 1754166381..9126f40cbe 100644 --- a/Makefile +++ b/Makefile @@ -92,7 +92,7 @@ small_integration_test_files = \ sharded.py \ secure.py \ binlog.py \ - clone.py \ + backup.py \ update_stream.py medium_integration_test_files = \ diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index 6c8adc1495..3d590311c4 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -42,8 +42,6 @@ const ( ) var ( - // TODO(aaijazi): for reasons I don't understand, the dba pool size needs to be fairly large (15+) - // for test/clone.py to pass. dbaPoolSize = flag.Int("dba_pool_size", 20, "Size of the connection pool for dba connections") dbaIdleTimeout = flag.Duration("dba_idle_timeout", time.Minute, "Idle timeout for dba connections") appPoolSize = flag.Int("app_pool_size", 40, "Size of the connection pool for app connections") diff --git a/test/clone.py b/test/clone.py deleted file mode 100755 index cba945b1da..0000000000 --- a/test/clone.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/env python - -import warnings -# Dropping a table inexplicably produces a warning despite -# the "IF EXISTS" clause. Squelch these warnings. -warnings.simplefilter("ignore") - -import gzip -import logging -import os -import shutil -from subprocess import call -import unittest - -import environment -import utils -import tablet - -use_mysqlctld = True - -tablet_62344 = tablet.Tablet(62344, use_mysqlctld=use_mysqlctld) -tablet_31981 = tablet.Tablet(31981, use_mysqlctld=use_mysqlctld) - -def setUpModule(): - try: - environment.topo_server().setup() - - # start mysql instance external to the test - global setup_procs - setup_procs = [ - tablet_62344.init_mysql(), - tablet_31981.init_mysql(), - ] - if use_mysqlctld: - tablet_62344.wait_for_mysqlctl_socket() - tablet_31981.wait_for_mysqlctl_socket() - else: - utils.wait_procs(setup_procs) - except: - tearDownModule() - raise - -def tearDownModule(): - if utils.options.skip_teardown: - return - - if use_mysqlctld: - # Try to terminate mysqlctld gracefully, so it kills its mysqld. 
- for proc in setup_procs: - utils.kill_sub_process(proc, soft=True) - teardown_procs = setup_procs - else: - teardown_procs = [ - tablet_62344.teardown_mysql(), - tablet_31981.teardown_mysql(), - ] - utils.wait_procs(teardown_procs, raise_on_error=False) - - environment.topo_server().teardown() - utils.kill_sub_processes() - utils.remove_tmp_files() - - tablet_62344.remove_tree() - tablet_31981.remove_tree() - - path = os.path.join(environment.vtdataroot, 'snapshot') - try: - shutil.rmtree(path) - except OSError as e: - logging.debug("removing snapshot %s: %s", path, str(e)) - -class TestClone(unittest.TestCase): - def tearDown(self): - tablet.Tablet.check_vttablet_count() - environment.topo_server().wipe() - for t in [tablet_62344, tablet_31981]: - t.reset_replication() - t.clean_dbs() - - _create_vt_insert_test = '''create table vt_insert_test ( - id bigint auto_increment, - msg varchar(64), - primary key (id) - ) Engine=InnoDB''' - - _populate_vt_insert_test = [ - "insert into vt_insert_test (msg) values ('test %s')" % x - for x in xrange(4)] - - - def _test_mysqlctl_clone(server_mode): - if server_mode: - snapshot_cmd = ['snapshotsourcestart', '-concurrency=8'] - restore_flags = ['-dont_wait_for_slave_start'] - else: - snapshot_cmd = ['snapshot', '-concurrency=5'] - restore_flags = [] - - # Start up a master mysql and vttablet - utils.run_vtctl(['CreateKeyspace', 'snapshot_test']) - - tablet_62344.init_tablet('master', 'snapshot_test', '0') - utils.run_vtctl(['RebuildShardGraph', 'snapshot_test/0']) - utils.validate_topology() - - tablet_62344.populate('vt_snapshot_test', self._create_vt_insert_test, - self._populate_vt_insert_test) - - tablet_62344.start_vttablet() - - err = tablet_62344.mysqlctl(snapshot_cmd + ['vt_snapshot_test'], - with_ports=True).wait() - if err != 0: - self.fail('mysqlctl %s failed' % str(snapshot_cmd)) - - utils.pause("%s finished" % str(snapshot_cmd)) - - call(["touch", "/tmp/vtSimulateFetchFailures"]) - err = tablet_31981.mysqlctl(['restore', - '-fetch_concurrency=2', - '-fetch_retry_count=4'] + - restore_flags + - [environment.vtdataroot + '/snapshot/vt_0000062344/snapshot_manifest.json'], - with_ports=True).wait() - if err != 0: - self.fail('mysqlctl restore failed') - - tablet_31981.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4) - - if server_mode: - err = tablet_62344.mysqlctl(['snapshotsourceend', - '-read_write', - 'vt_snapshot_test'], with_ports=True).wait() - if err != 0: - self.fail('mysqlctl snapshotsourceend failed') - - # see if server restarted properly - tablet_62344.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4) - - tablet_62344.kill_vttablet() - - # Subsumed by vtctl_clone* tests. - def _test_mysqlctl_clone(self): - self._test_mysqlctl_clone(False) - - # Subsumed by vtctl_clone* tests. 
- def _test_mysqlctl_clone_server(self): - self._test_mysqlctl_clone(True) - - def _test_vtctl_snapshot_restore(self, server_mode): - if server_mode: - snapshot_flags = ['-server-mode', '-concurrency=8'] - restore_flags = ['-dont-wait-for-slave-start'] - else: - snapshot_flags = ['-concurrency=4'] - restore_flags = [] - - # Start up a master mysql and vttablet - utils.run_vtctl(['CreateKeyspace', 'snapshot_test']) - - tablet_62344.init_tablet('master', 'snapshot_test', '0') - utils.run_vtctl(['RebuildShardGraph', 'snapshot_test/0']) - utils.validate_topology() - - tablet_62344.populate('vt_snapshot_test', self._create_vt_insert_test, - self._populate_vt_insert_test) - - tablet_31981.create_db('vt_snapshot_test') - - tablet_62344.start_vttablet() - - # Need to force snapshot since this is a master db. - out, err = utils.run_vtctl(['Snapshot', '-force'] + snapshot_flags + - [tablet_62344.tablet_alias], trap_output=True) - results = {} - for name in ['Manifest', 'ParentAlias', 'SlaveStartRequired', 'ReadOnly', 'OriginalType']: - sepPos = err.find(name + ": ") - if sepPos != -1: - results[name] = err[sepPos+len(name)+2:].splitlines()[0] - if "Manifest" not in results: - self.fail("Snapshot didn't echo Manifest file: %s" % str(err)) - if "ParentAlias" not in results: - self.fail("Snapshot didn't echo ParentAlias: %s" % str(err)) - utils.pause("snapshot finished: " + results['Manifest'] + " " + results['ParentAlias']) - if server_mode: - if "SlaveStartRequired" not in results: - self.fail("Snapshot didn't echo SlaveStartRequired: %s" % err) - if "ReadOnly" not in results: - self.fail("Snapshot didn't echo ReadOnly %s" % err) - if "OriginalType" not in results: - self.fail("Snapshot didn't echo OriginalType: %s" % err) - if (results['SlaveStartRequired'] != 'false' or - results['ReadOnly'] != 'true' or - results['OriginalType'] != 'master'): - self.fail("Bad values returned by Snapshot: %s" % err) - - # try to init + start in one go - tablet_31981.start_vttablet(wait_for_state='NOT_SERVING', - init_tablet_type='idle') - - # do not specify a MANIFEST, see if 'default' works - call(["touch", "/tmp/vtSimulateFetchFailures"]) - utils.run_vtctl(['Restore', - '-fetch-concurrency=2', - '-fetch-retry-count=4'] + - restore_flags + - [tablet_62344.tablet_alias, 'default', - tablet_31981.tablet_alias, results['ParentAlias']], - auto_log=True) - self._check_shard() - utils.pause("restore finished") - - tablet_31981.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4) - - utils.validate_topology() - - # in server_mode, get the server out of it and check it - if server_mode: - utils.run_vtctl(['SnapshotSourceEnd', tablet_62344.tablet_alias, - results['OriginalType']], auto_log=True) - tablet_62344.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4) - utils.validate_topology() - - tablet.kill_tablets([tablet_62344, tablet_31981]) - - # Subsumed by vtctl_clone* tests. - def _test_vtctl_snapshot_restore(self): - self._test_vtctl_snapshot_restore(server_mode=False) - - # Subsumed by vtctl_clone* tests. 
- def _test_vtctl_snapshot_restore_server(self): - self._test_vtctl_snapshot_restore(server_mode=True) - - def _test_vtctl_clone(self, server_mode): - if server_mode: - clone_flags = ['-server-mode'] - else: - clone_flags = [] - - # Start up a master mysql and vttablet - utils.run_vtctl(['CreateKeyspace', 'snapshot_test']) - - tablet_62344.init_tablet('master', 'snapshot_test', '0') - utils.run_vtctl(['RebuildShardGraph', 'snapshot_test/0']) - utils.validate_topology() - - tablet_62344.populate('vt_snapshot_test', self._create_vt_insert_test, - self._populate_vt_insert_test) - tablet_62344.start_vttablet() - - tablet_31981.create_db('vt_snapshot_test') - tablet_31981.init_tablet('idle', start=True) - - # small test to make sure the directory validation works - snapshot_dir = os.path.join(environment.vtdataroot, 'snapshot') - utils.run("rm -rf %s" % snapshot_dir) - utils.run("mkdir -p %s" % snapshot_dir) - utils.run("chmod -w %s" % snapshot_dir) - out, err = utils.run_vtctl(['Clone', '-force'] + clone_flags + - [tablet_62344.tablet_alias, - tablet_31981.tablet_alias], - log_level='INFO', expect_fail=True) - if "Cannot validate snapshot directory" not in err: - self.fail("expected validation error: %s" % err) - if "Un-reserved test_ny-0000031981" not in err: - self.fail("expected Un-reserved: %s" % err) - logging.debug("Failed Clone output: " + err) - utils.run("chmod +w %s" % snapshot_dir) - - call(["touch", "/tmp/vtSimulateFetchFailures"]) - utils.run_vtctl(['Clone', '-force'] + clone_flags + - [tablet_62344.tablet_alias, tablet_31981.tablet_alias], - auto_log=True) - self._check_shard() - - utils.pause("look at logs!") - tablet_31981.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4) - tablet_62344.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4) - - utils.validate_topology() - - tablet.kill_tablets([tablet_62344, tablet_31981]) - - # _check_shard makes sure the Cells list in the shard is up to date - def _check_shard(self): - shard = utils.run_vtctl_json(['GetShard', 'snapshot_test/0']) - self.assertEqual(shard['Cells'], ['test_nj', 'test_ny'], "Cells list is incomplete in tablet: %s" % str(shard)) - - def test_vtctl_clone(self): - self._test_vtctl_clone(server_mode=False) - - def test_vtctl_clone_server(self): - self._test_vtctl_clone(server_mode=True) - -if __name__ == '__main__': - utils.main() diff --git a/test/config.json b/test/config.json index 2aab23eda5..d832313621 100644 --- a/test/config.json +++ b/test/config.json @@ -38,7 +38,7 @@ "File": "binlog.py" }, { - "File": "clone.py" + "File": "backup.py" }, { "File": "update_stream.py" diff --git a/test/primecache.py b/test/primecache.py deleted file mode 100755 index 1b4bff3472..0000000000 --- a/test/primecache.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2014, Google Inc. All rights reserved. -# Use of this source code is governed by a BSD-style license that can -# be found in the LICENSE file. - -# This test is not automated, it depends on flushing the Linux OS buffer cache, -# which can only be done by root. Also, it is fairly big and runs for a long time. 
-# On a server with a SSD drive, it takes: -# - 96s to insert the data on the master -# - 30s to clone the master to a replica -# - then we change random data for 30s on the master (with replication stopped) -# - then we run primecache in the background (optional) -# - and we see how long it takes to catch up: -# - 29s without primecache -# - 19s with primecache at 4 db connections -# - <17s with primecache at 8 db connections, not much less with 16 connections. - -import logging -import random -import time -import unittest - -import environment -import utils -import tablet - -# tablets -master = tablet.Tablet() -replica = tablet.Tablet() - -def setUpModule(): - try: - environment.topo_server().setup() - - setup_procs = [ - master.init_mysql(), - replica.init_mysql(), - ] - utils.wait_procs(setup_procs) - except: - tearDownModule() - raise - -def tearDownModule(): - if utils.options.skip_teardown: - return - - teardown_procs = [ - master.teardown_mysql(), - replica.teardown_mysql(), ] - utils.wait_procs(teardown_procs, raise_on_error=False) - - environment.topo_server().teardown() - utils.kill_sub_processes() - utils.remove_tmp_files() - - master.remove_tree() - replica.remove_tree() - -class TestPrimeCache(unittest.TestCase): - - ROW_COUNT = 100000 - CHANGE_DURATION = 30 - - def _create_data(self): - create_table_template = '''create table lots_of_data( -id bigint auto_increment, -ts datetime, -msg varchar(4096), -primary key (id) -) Engine=InnoDB''' - utils.run_vtctl(['ApplySchema', - '-sql=' + create_table_template, - 'test_keyspace'], - auto_log=True) - - start = time.time() - for i in xrange(100): - for j in xrange(self.ROW_COUNT / 100): - master.mquery('vt_test_keyspace', 'insert into lots_of_data(msg, ts) values(repeat("a", 4096), now())', write=True) - logging.info("Inserted %u%% of the data", i) - logging.info("It took %g seconds to insert data" % (time.time() - start)) - - # _change_random_data will change random data in the data set - # on the master, for up to CHANGE_DURATION seconds - def _change_random_data(self): - logging.info("Starting to change data for %us on the master", - self.CHANGE_DURATION) - start = time.time() - random.seed() - - count = 0 - while True: - queries = [] - count += 100 - for i in xrange(100): - index = random.randrange(self.ROW_COUNT) - queries.append('update lots_of_data set ts=now() where id=%u' % index) - master.mquery('vt_test_keyspace', queries, write=True) - - if time.time() - start > self.CHANGE_DURATION: - break - logging.info("Changed %u rows", count) - - def catch_up(self): - start = time.time() - time.sleep(5) # no need to start too early - while True: - s = replica.mquery('', 'show slave status') - sbm = s[0][32] - if sbm is not None and sbm == 0: - logging.info("It took %g seconds to catch up" % (time.time() - start)) - return - time.sleep(0.1) - - def test_primecache(self): - utils.run_vtctl(['CreateKeyspace', 'test_keyspace']) - - master.init_tablet( 'master', 'test_keyspace', '0') - replica.init_tablet('idle') - - utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) - - master.create_db('vt_test_keyspace') - master.start_vttablet(wait_for_state=None) - replica.start_vttablet(wait_for_state=None) - - master.wait_for_vttablet_state('SERVING') - replica.wait_for_vttablet_state('NOT_SERVING') # DB doesn't exist - - self._create_data() - - # we use clone to not prime the mysql cache on the slave db - utils.run_vtctl(['Clone', '-force', '-server-mode', - master.tablet_alias, replica.tablet_alias], - auto_log=True) - - # 
sync the buffer cache, and clear it. This will prompt for user's password - utils.run(['sync']) - utils.run(['sudo', 'bash', '-c', 'echo 1 > /proc/sys/vm/drop_caches']) - - # we can now change data on the master for 30s, while slave is stopped. - # master's binlog will be in OS buffer cache now. - replica.mquery('', 'slave stop') - self._change_random_data() - - use_primecache = True # easy to test without - if use_primecache: - # starting vtprimecache, sleeping for a couple seconds - args = environment.binary_args('vtprimecache') + [ - '-db-config-dba-uname', 'vt_dba', - '-db-config-dba-charset', 'utf8', - '-db-config-app-uname', 'vt_app', - '-db-config-app-charset', 'utf8', - '-db-config-app-dbname', 'vt_test_keyspace', - '-relay_logs_path', replica.tablet_dir+'/relay-logs', - '-mysql_socket_file', replica.tablet_dir+'/mysql.sock', - '-log_dir', environment.vtlogroot, - '-worker_count', '4', - '-alsologtostderr', - ] - vtprimecache = utils.run_bg(args) - time.sleep(2) - - # start slave, see how longs it takes to catch up on replication - replica.mquery('', 'slave start') - self.catch_up() - - if use_primecache: - # TODO(alainjobart): read and check stats - utils.kill_sub_process(vtprimecache) - - tablet.kill_tablets([master, replica]) - -if __name__ == '__main__': - utils.main() From 9b9f96abd8da8c448d7ec3f25e2d6ccd0965d8d5 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 20 May 2015 09:26:52 -0700 Subject: [PATCH 056/128] Deleting old Snapshot/Restore/Clone code. --- go/cmd/mysqlctl/mysqlctl.go | 77 --- go/cmd/vttablet/vttablet.go | 1 - go/vt/mysqlctl/backup.go | 5 + go/vt/mysqlctl/clone.go | 474 ------------------ go/vt/mysqlctl/fileutil.go | 461 ----------------- go/vt/tabletmanager/actionnode/actionnode.go | 12 - go/vt/tabletmanager/actionnode/structs.go | 40 -- go/vt/tabletmanager/agent_rpc_actions.go | 282 ----------- .../agentrpctest/test_agent_rpc.go | 156 ------ .../tabletmanager/faketmclient/fake_client.go | 26 - go/vt/tabletmanager/gorpcproto/structs.go | 11 - .../gorpctmclient/gorpc_client.go | 111 ---- .../gorpctmserver/gorpc_server.go | 78 --- go/vt/tabletmanager/http.go | 157 ------ .../tabletmanager/tmclient/rpc_client_api.go | 15 - go/vt/vtctl/vtctl.go | 119 ----- go/vt/wrangler/clone.go | 235 --------- go/vt/wrangler/wrangler.go | 5 +- 18 files changed, 6 insertions(+), 2259 deletions(-) delete mode 100644 go/vt/mysqlctl/clone.go delete mode 100644 go/vt/tabletmanager/http.go delete mode 100644 go/vt/wrangler/clone.go diff --git a/go/cmd/mysqlctl/mysqlctl.go b/go/cmd/mysqlctl/mysqlctl.go index a51640d93d..241ca6bd33 100644 --- a/go/cmd/mysqlctl/mysqlctl.go +++ b/go/cmd/mysqlctl/mysqlctl.go @@ -42,26 +42,6 @@ func initCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) err return nil } -func restoreCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) error { - dontWaitForSlaveStart := subFlags.Bool("dont_wait_for_slave_start", false, "won't wait for replication to start (useful when restoring from master server)") - fetchConcurrency := subFlags.Int("fetch_concurrency", 3, "how many files to fetch simultaneously") - fetchRetryCount := subFlags.Int("fetch_retry_count", 3, "how many times to retry a failed transfer") - subFlags.Parse(args) - if subFlags.NArg() != 1 { - return fmt.Errorf("Command restore requires ") - } - - rs, err := mysqlctl.ReadSnapshotManifest(subFlags.Arg(0)) - if err != nil { - return fmt.Errorf("restore failed: ReadSnapshotManifest: %v", err) - } - err = mysqld.RestoreFromSnapshot(logutil.NewConsoleLogger(), rs, 
*fetchConcurrency, *fetchRetryCount, *dontWaitForSlaveStart, nil) - if err != nil { - return fmt.Errorf("restore failed: RestoreFromSnapshot: %v", err) - } - return nil -} - func shutdownCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) error { waitTime := subFlags.Duration("wait_time", mysqlctl.MysqlWaitTime, "how long to wait for shutdown") subFlags.Parse(args) @@ -72,50 +52,6 @@ func shutdownCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) return nil } -func snapshotCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) error { - concurrency := subFlags.Int("concurrency", 4, "how many compression jobs to run simultaneously") - subFlags.Parse(args) - if subFlags.NArg() != 1 { - return fmt.Errorf("Command snapshot requires ") - } - - filename, _, _, err := mysqld.CreateSnapshot(logutil.NewConsoleLogger(), subFlags.Arg(0), tabletAddr, false, *concurrency, false, nil) - if err != nil { - return fmt.Errorf("snapshot failed: %v", err) - } - log.Infof("manifest location: %v", filename) - return nil -} - -func snapshotSourceStartCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) error { - concurrency := subFlags.Int("concurrency", 4, "how many checksum jobs to run simultaneously") - subFlags.Parse(args) - if subFlags.NArg() != 1 { - return fmt.Errorf("Command snapshotsourcestart requires ") - } - - filename, slaveStartRequired, readOnly, err := mysqld.CreateSnapshot(logutil.NewConsoleLogger(), subFlags.Arg(0), tabletAddr, false, *concurrency, true, nil) - if err != nil { - return fmt.Errorf("snapshot failed: %v", err) - } - log.Infof("manifest location: %v", filename) - log.Infof("slave start required: %v", slaveStartRequired) - log.Infof("read only: %v", readOnly) - return nil -} - -func snapshotSourceEndCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) error { - slaveStartRequired := subFlags.Bool("slave_start", false, "will restart replication") - readWrite := subFlags.Bool("read_write", false, "will make the server read-write") - subFlags.Parse(args) - - err := mysqld.SnapshotSourceEnd(*slaveStartRequired, !(*readWrite), true, map[string]string{}) - if err != nil { - return fmt.Errorf("snapshotsourceend failed: %v", err) - } - return nil -} - func startCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) error { waitTime := subFlags.Duration("wait_time", mysqlctl.MysqlWaitTime, "how long to wait for startup") subFlags.Parse(args) @@ -188,19 +124,6 @@ var commands = []command{ command{"shutdown", shutdownCmd, "[-wait_time=20s]", "Shuts down mysqld, does not remove any file"}, - command{"snapshot", snapshotCmd, - "[-concurrency=4] ", - "Takes a full snapshot, copying the innodb data files"}, - command{"snapshotsourcestart", snapshotSourceStartCmd, - "[-concurrency=4] ", - "Enters snapshot server mode (mysqld stopped, serving innodb data files)"}, - command{"snapshotsourceend", snapshotSourceEndCmd, - "[-slave_start] [-read_write]", - "Gets out of snapshot server mode"}, - command{"restore", restoreCmd, - "[-fetch_concurrency=3] [-fetch_retry_count=3] [-dont_wait_for_slave_start] ", - "Restores a full snapshot"}, - command{"position", positionCmd, " ", "Compute operations on replication positions"}, diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index 75623e1201..a7e31ef0d7 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -108,7 +108,6 @@ func main() { exit.Return(1) } - tabletmanager.HttpHandleSnapshots(mycnf, tabletAlias.Uid) 
servenv.OnRun(func() { addStatusParts(qsc) }) diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 2404f5bba9..8227e7dba9 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -39,6 +39,11 @@ const ( backupManifest = "MANIFEST" ) +const ( + // slaveStartDeadline is the deadline for starting a slave + slaveStartDeadline = 30 +) + var ( // ErrNoBackup is returned when there is no backup ErrNoBackup = errors.New("no available backup") diff --git a/go/vt/mysqlctl/clone.go b/go/vt/mysqlctl/clone.go deleted file mode 100644 index 8231a9b21c..0000000000 --- a/go/vt/mysqlctl/clone.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mysqlctl - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - - log "github.com/golang/glog" - "github.com/youtube/vitess/go/ioutil2" - "github.com/youtube/vitess/go/vt/hook" - "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/mysqlctl/proto" -) - -// These methods deal with cloning a running instance of mysql. - -const ( - maxLagSeconds = 5 -) - -const ( - // slaveStartDeadline is the deadline for starting a slave - slaveStartDeadline = 30 -) - -const ( - // SnapshotManifestFile is the file name for the snapshot manifest. - SnapshotManifestFile = "snapshot_manifest.json" - - // SnapshotURLPath is the URL where to find the snapshot manifest. - SnapshotURLPath = "/snapshot" -) - -// Validate that this instance is a reasonable source of data. -func (mysqld *Mysqld) validateCloneSource(serverMode bool, hookExtraEnv map[string]string) error { - // NOTE(msolomon) Removing this check for now - I don't see the value of validating this. - // // needs to be master, or slave that's not too far behind - // slaveStatus, err := mysqld.slaveStatus() - // if err != nil { - // if err != ErrNotSlave { - // return fmt.Errorf("mysqlctl: validateCloneSource failed, %v", err) - // } - // } else { - // lagSeconds, _ := strconv.Atoi(slaveStatus["seconds_behind_master"]) - // if lagSeconds > maxLagSeconds { - // return fmt.Errorf("mysqlctl: validateCloneSource failed, lag_seconds exceed maximum tolerance (%v)", lagSeconds) - // } - // } - - // make sure we can write locally - if err := mysqld.ValidateSnapshotPath(); err != nil { - return err - } - - // run a hook to check local things - // FIXME(alainjobart) What other parameters do we have to - // provide? dbname, host, socket? - params := make([]string, 0, 1) - if serverMode { - params = append(params, "--server-mode") - } - h := hook.NewHook("preflight_snapshot", params) - h.ExtraEnv = hookExtraEnv - if err := h.ExecuteOptional(); err != nil { - return err - } - - // FIXME(msolomon) check free space based on an estimate of the current - // size of the db files. - // Also, check that we aren't already cloning/compressing or acting as a - // source. Mysqld being down isn't enough, presumably that will be - // restarted as soon as the snapshot is taken. - return nil -} - -// ValidateCloneTarget makes sure this mysql daemon is a valid target -// for a clone. 
-func (mysqld *Mysqld) ValidateCloneTarget(hookExtraEnv map[string]string) error { - // run a hook to check local things - h := hook.NewSimpleHook("preflight_restore") - h.ExtraEnv = hookExtraEnv - if err := h.ExecuteOptional(); err != nil { - return err - } - - qr, err := mysqld.FetchSuperQuery("SHOW DATABASES") - if err != nil { - return fmt.Errorf("mysqlctl: ValidateCloneTarget failed, %v", err) - } - - for _, row := range qr.Rows { - if strings.HasPrefix(row[0].String(), "vt_") { - dbName := row[0].String() - tableQr, err := mysqld.FetchSuperQuery("SHOW TABLES FROM " + dbName) - if err != nil { - return fmt.Errorf("mysqlctl: ValidateCloneTarget failed, %v", err) - } else if len(tableQr.Rows) == 0 { - // no tables == empty db, all is well - continue - } - return fmt.Errorf("mysqlctl: ValidateCloneTarget failed, found active db %v", dbName) - } - } - - return nil -} - -func findFilesToServe(srcDir, dstDir string, compress bool) ([]string, []string, error) { - fiList, err := ioutil.ReadDir(srcDir) - if err != nil { - return nil, nil, err - } - sources := make([]string, 0, len(fiList)) - destinations := make([]string, 0, len(fiList)) - for _, fi := range fiList { - if !fi.IsDir() { - srcPath := path.Join(srcDir, fi.Name()) - var dstPath string - if compress { - dstPath = path.Join(dstDir, fi.Name()+".gz") - } else { - dstPath = path.Join(dstDir, fi.Name()) - } - sources = append(sources, srcPath) - destinations = append(destinations, dstPath) - } - } - return sources, destinations, nil -} - -func (mysqld *Mysqld) createSnapshot(logger logutil.Logger, concurrency int, serverMode bool) ([]SnapshotFile, error) { - sources := make([]string, 0, 128) - destinations := make([]string, 0, 128) - - // clean out and start fresh - logger.Infof("removing previous snapshots: %v", mysqld.SnapshotDir) - if err := os.RemoveAll(mysqld.SnapshotDir); err != nil { - return nil, err - } - - // FIXME(msolomon) innodb paths must match patterns in mycnf - - // probably belongs as a derived path. - type snapPair struct{ srcDir, dstDir string } - dps := []snapPair{ - {mysqld.config.InnodbDataHomeDir, path.Join(mysqld.SnapshotDir, innodbDataSubdir)}, - {mysqld.config.InnodbLogGroupHomeDir, path.Join(mysqld.SnapshotDir, innodbLogSubdir)}, - } - - dataDirEntries, err := ioutil.ReadDir(mysqld.config.DataDir) - if err != nil { - return nil, err - } - - for _, de := range dataDirEntries { - dbDirPath := path.Join(mysqld.config.DataDir, de.Name()) - // If this is not a directory, try to eval it as a syslink. - if !de.IsDir() { - dbDirPath, err = filepath.EvalSymlinks(dbDirPath) - if err != nil { - return nil, err - } - de, err = os.Stat(dbDirPath) - if err != nil { - return nil, err - } - } - if de.IsDir() { - // Copy anything that defines a db.opt file - that includes empty databases. 
- _, err := os.Stat(path.Join(dbDirPath, "db.opt")) - if err == nil { - dps = append(dps, snapPair{dbDirPath, path.Join(mysqld.SnapshotDir, dataDir, de.Name())}) - } else { - // Look for at least one .frm file - dbDirEntries, err := ioutil.ReadDir(dbDirPath) - if err == nil { - for _, dbEntry := range dbDirEntries { - if strings.HasSuffix(dbEntry.Name(), ".frm") { - dps = append(dps, snapPair{dbDirPath, path.Join(mysqld.SnapshotDir, dataDir, de.Name())}) - break - } - } - } else { - return nil, err - } - } - } - } - - for _, dp := range dps { - if err := os.MkdirAll(dp.dstDir, 0775); err != nil { - return nil, err - } - if s, d, err := findFilesToServe(dp.srcDir, dp.dstDir, !serverMode); err != nil { - return nil, err - } else { - sources = append(sources, s...) - destinations = append(destinations, d...) - } - } - - return newSnapshotFiles(sources, destinations, mysqld.SnapshotDir, concurrency, !serverMode) -} - -// CreateSnapshot runs on the machine acting as the source for the clone. -// -// Check master/slave status and determine restore needs. -// If this instance is a slave, stop replication, otherwise place in read-only mode. -// Record replication position. -// Shutdown mysql -// Check paths for storing data -// -// Depending on the serverMode flag, we do the following: -// serverMode = false: -// Compress /vt/vt_[0-9a-f]+/data/vt_.+ -// Compute hash (of compressed files, as we serve .gz files here) -// Place in /vt/clone_src where they will be served by http server (not rpc) -// Restart mysql -// serverMode = true: -// Make symlinks for /vt/vt_[0-9a-f]+/data/vt_.+ to innodb files -// Compute hash (of uncompressed files, as we serve uncompressed files) -// Place symlinks in /vt/clone_src where they will be served by http server -// Leave mysql stopped, return slaveStartRequired, readOnly -func (mysqld *Mysqld) CreateSnapshot(logger logutil.Logger, dbName, sourceAddr string, allowHierarchicalReplication bool, concurrency int, serverMode bool, hookExtraEnv map[string]string) (snapshotManifestURLPath string, slaveStartRequired, readOnly bool, err error) { - if dbName == "" { - return "", false, false, errors.New("CreateSnapshot failed: no database name provided") - } - - if err = mysqld.validateCloneSource(serverMode, hookExtraEnv); err != nil { - return - } - - // save initial state so we can restore on Start() - slaveStartRequired = false - sourceIsMaster := false - readOnly = true - - slaveStatus, err := mysqld.SlaveStatus() - if err == nil { - slaveStartRequired = slaveStatus.SlaveRunning() - } else if err == ErrNotSlave { - sourceIsMaster = true - } else { - // If we can't get any data, just fail. - return - } - - readOnly, err = mysqld.IsReadOnly() - if err != nil { - return - } - - // Stop sources of writes so we can get a consistent replication position. - // If the source is a slave use the master replication position - // unless we are allowing hierarchical replicas. - masterAddr := "" - var replicationPosition proto.ReplicationPosition - if sourceIsMaster { - if err = mysqld.SetReadOnly(true); err != nil { - return - } - replicationPosition, err = mysqld.MasterPosition() - if err != nil { - return - } - masterAddr = mysqld.IPAddr() - } else { - if err = StopSlave(mysqld, hookExtraEnv); err != nil { - return - } - var slaveStatus proto.ReplicationStatus - slaveStatus, err = mysqld.SlaveStatus() - if err != nil { - return - } - replicationPosition = slaveStatus.Position - - // We are a slave, check our replication strategy before - // choosing the master address. 
- if allowHierarchicalReplication { - masterAddr = mysqld.IPAddr() - } else { - masterAddr, err = mysqld.GetMasterAddr() - if err != nil { - return - } - } - } - - if err = mysqld.Shutdown(true, MysqlWaitTime); err != nil { - return - } - - var smFile string - dataFiles, snapshotErr := mysqld.createSnapshot(logger, concurrency, serverMode) - if snapshotErr != nil { - logger.Errorf("CreateSnapshot failed: %v", snapshotErr) - } else { - var sm *SnapshotManifest - sm, snapshotErr = newSnapshotManifest(sourceAddr, mysqld.IPAddr(), - masterAddr, dbName, dataFiles, replicationPosition, proto.ReplicationPosition{}) - if snapshotErr != nil { - logger.Errorf("CreateSnapshot failed: %v", snapshotErr) - } else { - smFile = path.Join(mysqld.SnapshotDir, SnapshotManifestFile) - if snapshotErr = writeJSON(smFile, sm); snapshotErr != nil { - logger.Errorf("CreateSnapshot failed: %v", snapshotErr) - } - } - } - - // restore our state if required - if serverMode && snapshotErr == nil { - logger.Infof("server mode snapshot worked, not restarting mysql") - } else { - if err = mysqld.SnapshotSourceEnd(slaveStartRequired, readOnly, false /*deleteSnapshot*/, hookExtraEnv); err != nil { - return - } - } - - if snapshotErr != nil { - return "", slaveStartRequired, readOnly, snapshotErr - } - relative, err := filepath.Rel(mysqld.SnapshotDir, smFile) - if err != nil { - return "", slaveStartRequired, readOnly, nil - } - return path.Join(SnapshotURLPath, relative), slaveStartRequired, readOnly, nil -} - -// SnapshotSourceEnd removes the current snapshot, and restarts mysqld. -func (mysqld *Mysqld) SnapshotSourceEnd(slaveStartRequired, readOnly, deleteSnapshot bool, hookExtraEnv map[string]string) error { - if deleteSnapshot { - // clean out our files - log.Infof("removing snapshot links: %v", mysqld.SnapshotDir) - if err := os.RemoveAll(mysqld.SnapshotDir); err != nil { - log.Warningf("failed to remove old snapshot: %v", err) - return err - } - } - - // Try to restart mysqld - if err := mysqld.Start(MysqlWaitTime); err != nil { - return err - } - - // Restore original mysqld state that we saved above. - if slaveStartRequired { - if err := StartSlave(mysqld, hookExtraEnv); err != nil { - return err - } - - // this should be quick, but we might as well just wait - if err := WaitForSlaveStart(mysqld, slaveStartDeadline); err != nil { - return err - } - } - - // And set read-only mode - if err := mysqld.SetReadOnly(readOnly); err != nil { - return err - } - - return nil -} - -func writeJSON(filename string, x interface{}) error { - data, err := json.MarshalIndent(x, " ", " ") - if err != nil { - return err - } - return ioutil2.WriteFileAtomic(filename, data, 0660) -} - -// ReadSnapshotManifest reads and unpacks a SnapshotManifest -func ReadSnapshotManifest(filename string) (*SnapshotManifest, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - sm := new(SnapshotManifest) - if err = json.Unmarshal(data, sm); err != nil { - return nil, fmt.Errorf("ReadSnapshotManifest failed: %v %v", filename, err) - } - return sm, nil -} - -// RestoreFromSnapshot runs on the presumably empty machine acting as -// the target in the create replica action. 
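As the step list below shows, the deleted RestoreFromSnapshot is a straight-line pipeline of named, error-checked stages. A distilled sketch of that shape (step names and the runSteps helper are illustrative):

```go
package main

import (
	"fmt"
	"log"
)

type step struct {
	name string
	fn   func() error
}

// runSteps executes each stage in order, logging its name and stopping
// at the first failure — the same control flow as the deleted method.
func runSteps(steps []step) error {
	for _, s := range steps {
		log.Printf("step: %v", s.name)
		if err := s.fn(); err != nil {
			return fmt.Errorf("%v failed: %v", s.name, err)
		}
	}
	return nil
}

func main() {
	err := runSteps([]step{
		{"ValidateCloneTarget", func() error { return nil }},
		{"Shutdown mysqld", func() error { return nil }},
		{"Fetch snapshot", func() error { return nil }},
		{"Restart mysqld", func() error { return nil }},
		{"Start replication", func() error { return nil }},
	})
	fmt.Println(err)
}
```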
-// -// validate target (self) -// shutdown_mysql() -// create temp data directory /vt/target/vt_ -// copy compressed data files via HTTP -// verify hash of compressed files -// uncompress into /vt/vt_/data/vt_ -// start_mysql() -// clean up compressed files -func (mysqld *Mysqld) RestoreFromSnapshot(logger logutil.Logger, snapshotManifest *SnapshotManifest, fetchConcurrency, fetchRetryCount int, dontWaitForSlaveStart bool, hookExtraEnv map[string]string) error { - if snapshotManifest == nil { - return errors.New("RestoreFromSnapshot: nil snapshotManifest") - } - - logger.Infof("ValidateCloneTarget") - if err := mysqld.ValidateCloneTarget(hookExtraEnv); err != nil { - return err - } - - logger.Infof("Shutdown mysqld") - if err := mysqld.Shutdown(true, MysqlWaitTime); err != nil { - return err - } - - logger.Infof("Fetch snapshot") - if err := mysqld.fetchSnapshot(snapshotManifest, fetchConcurrency, fetchRetryCount); err != nil { - return err - } - - logger.Infof("Restart mysqld") - if err := mysqld.Start(MysqlWaitTime); err != nil { - return err - } - - cmdList, err := mysqld.StartReplicationCommands(snapshotManifest.ReplicationStatus) - if err != nil { - return err - } - if err := mysqld.ExecuteSuperQueryList(cmdList); err != nil { - return err - } - - if !dontWaitForSlaveStart { - if err := WaitForSlaveStart(mysqld, slaveStartDeadline); err != nil { - return err - } - } - - h := hook.NewSimpleHook("postflight_restore") - h.ExtraEnv = hookExtraEnv - if err := h.ExecuteOptional(); err != nil { - return err - } - - return nil -} - -func (mysqld *Mysqld) fetchSnapshot(snapshotManifest *SnapshotManifest, fetchConcurrency, fetchRetryCount int) error { - replicaDbPath := path.Join(mysqld.config.DataDir, snapshotManifest.DbName) - - cleanDirs := []string{mysqld.SnapshotDir, replicaDbPath, - mysqld.config.InnodbDataHomeDir, mysqld.config.InnodbLogGroupHomeDir} - - // clean out and start fresh - // FIXME(msolomon) this might be changed to allow partial recovery, but at that point - // we are starting to reimplement rsync. - for _, dir := range cleanDirs { - if err := os.RemoveAll(dir); err != nil { - return err - } - if err := os.MkdirAll(dir, 0775); err != nil { - return err - } - } - - return fetchFiles(snapshotManifest, mysqld.TabletDir, fetchConcurrency, fetchRetryCount) -} diff --git a/go/vt/mysqlctl/fileutil.go b/go/vt/mysqlctl/fileutil.go index 3276171d89..b4d3ae0ccf 100644 --- a/go/vt/mysqlctl/fileutil.go +++ b/go/vt/mysqlctl/fileutil.go @@ -5,25 +5,13 @@ package mysqlctl import ( - "bufio" // "crypto/md5" "encoding/hex" - "fmt" "hash" // "hash/crc64" - "io" - "io/ioutil" - "net/http" "os" - "path" - "path/filepath" - "sort" - "strings" - "sync" - log "github.com/golang/glog" "github.com/youtube/vitess/go/cgzip" - "github.com/youtube/vitess/go/vt/mysqlctl/proto" ) // Use this to simulate failures in tests @@ -75,452 +63,3 @@ func newHasher() *hasher { func (h *hasher) HashString() string { return hex.EncodeToString(h.Sum(nil)) } - -// SnapshotFile describes a file to serve. -// 'Path' is the path component of the URL. SnapshotManifest.Addr is -// the host+port component of the URL. -// If path ends in '.gz', it is compressed. 
-// Size and Hash are computed on the Path itself -// if TableName is set, this file belongs to that table -type SnapshotFile struct { - Path string - Size int64 - Hash string - TableName string -} - -type SnapshotFiles []SnapshotFile - -// sort.Interface -// we sort by descending file size -func (s SnapshotFiles) Len() int { return len(s) } -func (s SnapshotFiles) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s SnapshotFiles) Less(i, j int) bool { return s[i].Size > s[j].Size } - -// This function returns the local file used to store the SnapshotFile, -// relative to the basePath. -// for instance, if the source path is something like: -// /vt/snapshot/vt_0000062344/data/vt_snapshot_test-MA,Mw/vt_insert_test.csv.gz -// we will get everything starting with 'data/...', append it to basepath, -// and remove the .gz extension. So with basePath=myPath, it will return: -// myPath/data/vt_snapshot_test-MA,Mw/vt_insert_test.csv -func (dataFile *SnapshotFile) getLocalFilename(basePath string) string { - filename := path.Join(basePath, dataFile.Path) - // trim compression extension if present - if strings.HasSuffix(filename, ".gz") { - filename = filename[:len(filename)-3] - } - return filename -} - -// newSnapshotFile behavior depends on the compress flag: -// - if compress is true , it compresses a single file with gzip, and -// computes the hash on the compressed version. -// - if compress is false, just symlinks and computes the hash on the file -// The source file is always left intact. -// The path of the returned SnapshotFile will be relative -// to root. -func newSnapshotFile(srcPath, dstPath, root string, compress bool) (*SnapshotFile, error) { - // open the source file - srcFile, err := os.OpenFile(srcPath, os.O_RDONLY, 0) - if err != nil { - return nil, err - } - defer srcFile.Close() - src := bufio.NewReaderSize(srcFile, 2*1024*1024) - - var hash string - var size int64 - if compress { - log.Infof("newSnapshotFile: starting to compress %v into %v", srcPath, dstPath) - - // open the temporary destination file - dir, filePrefix := path.Split(dstPath) - dstFile, err := ioutil.TempFile(dir, filePrefix) - if err != nil { - return nil, err - } - defer func() { - // try to close and delete the file. 
in the - // success case, the file will already be - // closed and renamed, so all of this would - // fail anyway, no biggie - dstFile.Close() - os.Remove(dstFile.Name()) - }() - dst := bufio.NewWriterSize(dstFile, 2*1024*1024) - - // create the hasher and the tee on top - hasher := newHasher() - tee := io.MultiWriter(dst, hasher) - - // create the gzip compression filter - gzip, err := cgzip.NewWriterLevel(tee, cgzip.Z_BEST_SPEED) - if err != nil { - return nil, err - } - - // copy from the file to gzip to tee to output file and hasher - _, err = io.Copy(gzip, src) - if err != nil { - return nil, err - } - - // close gzip to flush it - if err = gzip.Close(); err != nil { - return nil, err - } - - // close dst manually to flush all buffers to disk - dst.Flush() - dstFile.Close() - hash = hasher.HashString() - - // atomically move completed compressed file - err = os.Rename(dstFile.Name(), dstPath) - if err != nil { - return nil, err - } - - // and get its size - fi, err := os.Stat(dstPath) - if err != nil { - return nil, err - } - size = fi.Size() - } else { - log.Infof("newSnapshotFile: starting to hash and symlinking %v to %v", srcPath, dstPath) - - // get the hash - hasher := newHasher() - _, err = io.Copy(hasher, src) - if err != nil { - return nil, err - } - hash = hasher.HashString() - - // do the symlink - err = os.Symlink(srcPath, dstPath) - if err != nil { - return nil, err - } - - // and get the size - fi, err := os.Stat(srcPath) - if err != nil { - return nil, err - } - size = fi.Size() - } - - log.Infof("clone data ready %v:%v", dstPath, hash) - relativeDst, err := filepath.Rel(root, dstPath) - if err != nil { - return nil, err - } - return &SnapshotFile{relativeDst, size, hash, ""}, nil -} - -// newSnapshotFiles processes multiple files in parallel. The Paths of -// the returned SnapshotFiles will be relative to root. -// - if compress is true, we compress the files and compute the hash on -// the compressed version. -// - if compress is false, we symlink the files, and compute the hash on -// the original version. -func newSnapshotFiles(sources, destinations []string, root string, concurrency int, compress bool) ([]SnapshotFile, error) { - if len(sources) != len(destinations) || len(sources) == 0 { - return nil, fmt.Errorf("programming error: bad array lengths: %v %v", len(sources), len(destinations)) - } - - workQueue := make(chan int, len(sources)) - for i := 0; i < len(sources); i++ { - workQueue <- i - } - close(workQueue) - - snapshotFiles := make([]SnapshotFile, len(sources)) - resultQueue := make(chan error, len(sources)) - for i := 0; i < concurrency; i++ { - go func() { - for i := range workQueue { - sf, err := newSnapshotFile(sources[i], destinations[i], root, compress) - if err == nil { - snapshotFiles[i] = *sf - } - resultQueue <- err - } - }() - } - - var err error - for i := 0; i < len(sources); i++ { - if compressErr := <-resultQueue; compressErr != nil { - err = compressErr - } - } - - // clean up files if we had an error - // FIXME(alainjobart) it seems extreme to delete all files if - // the last one failed. Since we only move the file into - // its destination when it worked, we could assume if the file - // already exists it's good, and re-compute its hash. - if err != nil { - log.Infof("Error happened, deleting all the files we already compressed") - for _, dest := range destinations { - os.Remove(dest) - } - return nil, err - } - - return snapshotFiles, nil -} - -// a SnapshotManifest describes multiple SnapshotFiles and where -// to get them from. 
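newSnapshotFiles (above) fans work out with a buffered channel of indices and a fixed number of worker goroutines, collecting one error per job. A self-contained sketch of that exact pattern (the deleted code keeps the last error seen; this sketch keeps the first, which is a minor simplification):

```go
package main

import "fmt"

// processAll runs work(i) for i in [0, n) across `concurrency` goroutines.
func processAll(n, concurrency int, work func(i int) error) error {
	workQueue := make(chan int, n)
	for i := 0; i < n; i++ {
		workQueue <- i
	}
	close(workQueue)

	resultQueue := make(chan error, n)
	for w := 0; w < concurrency; w++ {
		go func() {
			for i := range workQueue {
				resultQueue <- work(i)
			}
		}()
	}

	// One result per job is guaranteed, so drain exactly n.
	var firstErr error
	for i := 0; i < n; i++ {
		if err := <-resultQueue; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func main() {
	err := processAll(8, 3, func(i int) error {
		fmt.Println("compressing file", i)
		return nil
	})
	fmt.Println(err)
}
```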
-type SnapshotManifest struct { - Addr string // this is the address of the tabletserver, not mysql - - DbName string - Files SnapshotFiles - - ReplicationStatus *proto.ReplicationStatus - MasterPosition proto.ReplicationPosition -} - -func newSnapshotManifest(addr, mysqlAddr, masterAddr, dbName string, files []SnapshotFile, pos, masterPos proto.ReplicationPosition) (*SnapshotManifest, error) { - nrs, err := proto.NewReplicationStatus(masterAddr) - if err != nil { - return nil, err - } - rs := &SnapshotManifest{ - Addr: addr, - DbName: dbName, - Files: files, - ReplicationStatus: nrs, - MasterPosition: masterPos, - } - sort.Sort(rs.Files) - rs.ReplicationStatus.Position = pos - return rs, nil -} - -// fetchFile fetches data from the web server. It then sends it to a -// tee, which on one side has an hash checksum reader, and on the other -// a gunzip reader writing to a file. It will compare the hash -// checksum after the copy is done. -func fetchFile(srcUrl, srcHash, dstFilename string) error { - log.Infof("fetchFile: starting to fetch %v from %v", dstFilename, srcUrl) - - // open the URL - req, err := http.NewRequest("GET", srcUrl, nil) - if err != nil { - return fmt.Errorf("NewRequest failed for %v: %v", srcUrl, err) - } - // we set the 'gzip' encoding ourselves so the library doesn't - // do it for us and ends up using go gzip (we want to use our own - // cgzip which is much faster) - req.Header.Set("Accept-Encoding", "gzip") - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - if resp.StatusCode != 200 { - return fmt.Errorf("failed fetching %v: %v", srcUrl, resp.Status) - } - defer resp.Body.Close() - - // see if we need some uncompression - var reader io.Reader = resp.Body - ce := resp.Header.Get("Content-Encoding") - if ce != "" { - if ce == "gzip" { - gz, err := cgzip.NewReader(reader) - if err != nil { - return err - } - defer gz.Close() - reader = gz - } else { - return fmt.Errorf("unsupported Content-Encoding: %v", ce) - } - } - - return uncompressAndCheck(reader, srcHash, dstFilename, strings.HasSuffix(srcUrl, ".gz")) -} - -// uncompressAndCheck uses the provided reader to read data, and then -// sends it to a tee, which on one side has an hash checksum reader, -// and on the other a gunzip reader writing to a file. It will -// compare the hash checksum after the copy is done. -func uncompressAndCheck(reader io.Reader, srcHash, dstFilename string, needsUncompress bool) error { - // create destination directory - dir, filePrefix := path.Split(dstFilename) - if dirErr := os.MkdirAll(dir, 0775); dirErr != nil { - return dirErr - } - - // create a temporary file to uncompress to - dstFile, err := ioutil.TempFile(dir, filePrefix) - if err != nil { - return err - } - defer func() { - // try to close and delete the file. 
- // in the success case, the file will already be closed - // and renamed, so all of this would fail anyway, no biggie - dstFile.Close() - os.Remove(dstFile.Name()) - }() - - // create a buffering output - dst := bufio.NewWriterSize(dstFile, 2*1024*1024) - - // create hash to write the compressed data to - hasher := newHasher() - - // create a Tee: we split the HTTP input into the hasher - // and into the gunziper - tee := io.TeeReader(reader, hasher) - - // create the uncompresser - var decompressor io.Reader - if needsUncompress { - gz, err := cgzip.NewReader(tee) - if err != nil { - return err - } - defer gz.Close() - decompressor = gz - } else { - decompressor = tee - } - - // see if we need to introduce failures - if simulateFailures { - failureCounter++ - if failureCounter%10 == 0 { - return fmt.Errorf("Simulated error") - } - } - - // copy the data. Will also write to the hasher - if _, err = io.Copy(dst, decompressor); err != nil { - return err - } - - // check the hash - hash := hasher.HashString() - if srcHash != hash { - return fmt.Errorf("hash mismatch for %v, %v != %v", dstFilename, srcHash, hash) - } - - // we're good - log.Infof("processed snapshot file: %v", dstFilename) - dst.Flush() - dstFile.Close() - - // atomically move uncompressed file - if err := os.Chmod(dstFile.Name(), 0664); err != nil { - return err - } - return os.Rename(dstFile.Name(), dstFilename) -} - -// fetchFileWithRetry fetches data from the web server, retrying a few -// times. -func fetchFileWithRetry(srcUrl, srcHash, dstFilename string, fetchRetryCount int) (err error) { - for i := 0; i < fetchRetryCount; i++ { - err = fetchFile(srcUrl, srcHash, dstFilename) - if err == nil { - return nil - } - log.Warningf("fetching snapshot file %v failed (try=%v): %v", dstFilename, i, err) - } - - log.Errorf("fetching snapshot file %v failed too many times", dstFilename) - return err -} - -// FIXME(msolomon) Should we add deadlines? What really matters more -// than a deadline is probably a sense of progress, more like a -// "progress timeout" - how long will we wait if there is no change in -// received bytes. 
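uncompressAndCheck (above) hashes bytes as they stream past with io.TeeReader, so the checksum covers exactly what was received, not what was written after decompression. A compact sketch of that hash-while-copying technique, using md5 in place of the document's pluggable hasher:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
)

// copyAndHash copies src to dst while feeding the same bytes through a
// hash, returning the hex digest of everything that passed through.
func copyAndHash(dst io.Writer, src io.Reader) (string, error) {
	h := md5.New()
	if _, err := io.Copy(dst, io.TeeReader(src, h)); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	var out bytes.Buffer
	hash, err := copyAndHash(&out, bytes.NewReader([]byte("snapshot data")))
	fmt.Println(hash, err)
}
```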
-// FIXME(alainjobart) support fetching files in chunks: create a new -// struct fileChunk { -// snapshotFile *SnapshotFile -// relatedChunks []*fileChunk -// start,end uint64 -// observedCrc32 uint32 -// } -// Create a slice of fileChunk objects, populate it: -// For files smaller than , create one fileChunk -// For files bigger than , create N fileChunks -// (the first one has the list of all the others) -// Fetch them all: -// - change the workqueue to have indexes on the fileChunk slice -// - compute the crc32 while fetching, but don't compare right away -// Collect results the same way, write observedCrc32 in the fileChunk -// For each fileChunk, compare checksum: -// - if single file, compare snapshotFile.hash with observedCrc32 -// - if multiple chunks and first chunk, merge observedCrc32, and compare -func fetchFiles(snapshotManifest *SnapshotManifest, destinationPath string, fetchConcurrency, fetchRetryCount int) (err error) { - // create a workQueue, a resultQueue, and the go routines - // to process entries out of workQueue into resultQueue - // the mutex protects the error response - workQueue := make(chan SnapshotFile, len(snapshotManifest.Files)) - resultQueue := make(chan error, len(snapshotManifest.Files)) - mutex := sync.Mutex{} - for i := 0; i < fetchConcurrency; i++ { - go func() { - for sf := range workQueue { - // if someone else errored out, we skip our job - mutex.Lock() - previousError := err - mutex.Unlock() - if previousError != nil { - resultQueue <- previousError - continue - } - - // do our fetch, save the error - filename := sf.getLocalFilename(destinationPath) - furl := "http://" + snapshotManifest.Addr + path.Join(SnapshotURLPath, sf.Path) - fetchErr := fetchFileWithRetry(furl, sf.Hash, filename, fetchRetryCount) - if fetchErr != nil { - mutex.Lock() - err = fetchErr - mutex.Unlock() - } - resultQueue <- fetchErr - } - }() - } - - // add the jobs (writing on the channel will block if the queue - // is full, no big deal) - jobCount := 0 - for _, fi := range snapshotManifest.Files { - workQueue <- fi - jobCount++ - } - close(workQueue) - - // read the responses (we guarantee one response per job) - for i := 0; i < jobCount; i++ { - <-resultQueue - } - - // clean up files if we had an error - // FIXME(alainjobart) it seems extreme to delete all files if - // the last one failed. Maybe we shouldn't, and if a file already - // exists, we hash it before retransmitting. - if err != nil { - log.Infof("Error happened, deleting all the files we already got") - for _, fi := range snapshotManifest.Files { - filename := fi.getLocalFilename(destinationPath) - os.Remove(filename) - } - } - - return err -} diff --git a/go/vt/tabletmanager/actionnode/actionnode.go b/go/vt/tabletmanager/actionnode/actionnode.go index 341481a3d6..e67bc2c3e6 100644 --- a/go/vt/tabletmanager/actionnode/actionnode.go +++ b/go/vt/tabletmanager/actionnode/actionnode.go @@ -168,18 +168,6 @@ const ( // TabletActionBackup takes a db backup and stores it into BackupStorage TabletActionBackup = "Backup" - // TabletActionSnapshot takes a db snapshot - TabletActionSnapshot = "Snapshot" - - // TabletActionSnapshotSourceEnd restarts the mysql server - TabletActionSnapshotSourceEnd = "SnapshotSourceEnd" - - // TabletActionReserveForRestore will prepare a server for restore - TabletActionReserveForRestore = "ReserveForRestore" - - // TabletActionRestore will restore a backup - TabletActionRestore = "Restore" - // // Shard actions - involve all tablets in a shard. 
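Stepping back to fetchFiles in fileutil.go above: it serializes a shared first-error with a mutex so later workers can bail out early. With golang.org/x/sync/errgroup the same bounded fan-out with early cancellation can be written more compactly — a sketch, not a drop-in replacement for the deleted code:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// fetchAll fetches every URL with at most `concurrency` in flight;
// the group context is canceled on the first error.
func fetchAll(ctx context.Context, urls []string, concurrency int, fetch func(context.Context, string) error) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(concurrency)
	for _, u := range urls {
		u := u // per-iteration copy (pre-Go 1.22 loop semantics)
		g.Go(func() error {
			return fetch(ctx, u)
		})
	}
	return g.Wait()
}

func main() {
	err := fetchAll(context.Background(), []string{"a.csv.gz", "b.csv.gz"}, 2,
		func(ctx context.Context, u string) error {
			fmt.Println("fetching", u)
			return nil
		})
	fmt.Println(err)
}
```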
// These are just descriptive and used for locking / logging. diff --git a/go/vt/tabletmanager/actionnode/structs.go b/go/vt/tabletmanager/actionnode/structs.go index e31002bcc4..4d2ab77e77 100644 --- a/go/vt/tabletmanager/actionnode/structs.go +++ b/go/vt/tabletmanager/actionnode/structs.go @@ -49,46 +49,6 @@ type SlaveWasRestartedArgs struct { Parent topo.TabletAlias } -// SnapshotArgs is the paylod for Snapshot -type SnapshotArgs struct { - Concurrency int - ServerMode bool - ForceMasterSnapshot bool -} - -// SnapshotReply is the response for Snapshot -type SnapshotReply struct { - ParentAlias topo.TabletAlias - ManifestPath string - - // these two are only used for ServerMode=true full snapshot - SlaveStartRequired bool - ReadOnly bool -} - -// SnapshotSourceEndArgs is the payload for SnapshotSourceEnd -type SnapshotSourceEndArgs struct { - SlaveStartRequired bool - ReadOnly bool - OriginalType topo.TabletType -} - -// ReserveForRestoreArgs is the payload for ReserveForRestore -type ReserveForRestoreArgs struct { - SrcTabletAlias topo.TabletAlias -} - -// RestoreArgs is the payload for Restore -type RestoreArgs struct { - SrcTabletAlias topo.TabletAlias - SrcFilePath string - ParentAlias topo.TabletAlias - FetchConcurrency int - FetchRetryCount int - WasReserved bool - DontWaitForSlaveStart bool -} - // shard action node structures // ApplySchemaShardArgs is the payload for ApplySchemaShard diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index 0e5260bcac..579115c498 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -5,19 +5,12 @@ package tabletmanager import ( - "encoding/json" "fmt" - "io/ioutil" - "net/http" - "path" - "strings" "time" - log "github.com/golang/glog" "github.com/youtube/vitess/go/mysql/proto" blproto "github.com/youtube/vitess/go/vt/binlog/proto" "github.com/youtube/vitess/go/vt/hook" - "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" @@ -124,14 +117,6 @@ type RPCAgent interface { Backup(ctx context.Context, concurrency int, logger logutil.Logger) error - Snapshot(ctx context.Context, args *actionnode.SnapshotArgs, logger logutil.Logger) (*actionnode.SnapshotReply, error) - - SnapshotSourceEnd(ctx context.Context, args *actionnode.SnapshotSourceEndArgs) error - - ReserveForRestore(ctx context.Context, args *actionnode.ReserveForRestoreArgs) error - - Restore(ctx context.Context, args *actionnode.RestoreArgs, logger logutil.Logger) error - // RPC helpers RPCWrap(ctx context.Context, name string, args, reply interface{}, f func() error) error RPCWrapLock(ctx context.Context, name string, args, reply interface{}, verbose bool, f func() error) error @@ -763,270 +748,3 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo return returnErr } - -// Snapshot takes a db snapshot -// Should be called under RPCWrapLockAction. -func (agent *ActionAgent) Snapshot(ctx context.Context, args *actionnode.SnapshotArgs, logger logutil.Logger) (*actionnode.SnapshotReply, error) { - // update our type to TYPE_BACKUP - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) - if err != nil { - return nil, err - } - originalType := tablet.Type - - // ForceMasterSnapshot: Normally a master is not a viable tablet - // to snapshot. 
However, there are degenerate cases where you need - // to override this, for instance the initial clone of a new master. - if tablet.Type == topo.TYPE_MASTER && args.ForceMasterSnapshot { - // In this case, we don't bother recomputing the serving graph. - // All queries will have to fail anyway. - log.Infof("force change type master -> backup") - // There is a legitimate reason to force in the case of a single - // master. - tablet.Tablet.Type = topo.TYPE_BACKUP - err = topo.UpdateTablet(ctx, agent.TopoServer, tablet) - } else { - err = topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, topo.TYPE_BACKUP, make(map[string]string)) - } - if err != nil { - return nil, err - } - - // let's update our internal state (stop query service and other things) - if err := agent.refreshTablet(ctx, "snapshotStart"); err != nil { - return nil, fmt.Errorf("failed to update state before snaphost: %v", err) - } - - // create the loggers: tee to console and source - l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) - - // now we can run the backup - filename, slaveStartRequired, readOnly, returnErr := agent.Mysqld.CreateSnapshot(l, tablet.DbName(), tablet.Addr(), false, args.Concurrency, args.ServerMode, agent.hookExtraEnv()) - - // and change our type to the appropriate value - newType := originalType - if returnErr != nil { - log.Errorf("snapshot failed, restoring tablet type back to %v: %v", newType, returnErr) - } else { - if args.ServerMode { - log.Infof("server mode specified, switching tablet to snapshot_source mode") - newType = topo.TYPE_SNAPSHOT_SOURCE - } else { - log.Infof("change type back after snapshot: %v", newType) - } - } - if originalType == topo.TYPE_MASTER && args.ForceMasterSnapshot && newType != topo.TYPE_SNAPSHOT_SOURCE { - log.Infof("force change type backup -> master: %v", tablet.Alias) - tablet.Tablet.Type = topo.TYPE_MASTER - err = topo.UpdateTablet(ctx, agent.TopoServer, tablet) - } else { - err = topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, newType, nil) - } - if err != nil { - // failure in changing the topology type is probably worse, - // so returning that (we logged the snapshot error anyway) - returnErr = err - } - - // if anything failed, don't return anything - if returnErr != nil { - return nil, returnErr - } - - // it all worked, return the required information - sr := &actionnode.SnapshotReply{ - ManifestPath: filename, - SlaveStartRequired: slaveStartRequired, - ReadOnly: readOnly, - } - if tablet.Type == topo.TYPE_MASTER { - // If this is a master, this will be the new parent. - sr.ParentAlias = tablet.Alias - } else { - // Otherwise get the master from the shard record - si, err := agent.TopoServer.GetShard(tablet.Keyspace, tablet.Shard) - if err != nil { - return nil, err - } - sr.ParentAlias = si.MasterAlias - } - return sr, nil -} - -// SnapshotSourceEnd restores the state of the server after a -// Snapshot(server_mode =true) -// Should be called under RPCWrapLockAction. 
-func (agent *ActionAgent) SnapshotSourceEnd(ctx context.Context, args *actionnode.SnapshotSourceEndArgs) error { - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) - if err != nil { - return err - } - if tablet.Type != topo.TYPE_SNAPSHOT_SOURCE { - return fmt.Errorf("expected snapshot_source type, not %v", tablet.Type) - } - - if err := agent.Mysqld.SnapshotSourceEnd(args.SlaveStartRequired, args.ReadOnly, true, agent.hookExtraEnv()); err != nil { - log.Errorf("SnapshotSourceEnd failed, leaving tablet type alone: %v", err) - return err - } - - // change the type back - if args.OriginalType == topo.TYPE_MASTER { - // force the master update - tablet.Tablet.Type = topo.TYPE_MASTER - err = topo.UpdateTablet(ctx, agent.TopoServer, tablet) - } else { - err = topotools.ChangeType(ctx, agent.TopoServer, tablet.Alias, args.OriginalType, make(map[string]string)) - } - - return err -} - -// change a tablet type to RESTORE and set all the other arguments. -// from now on, we can go to: -// - back to IDLE if we don't use the tablet at all (after for instance -// a successful ReserveForRestore but a failed Snapshot) -// - to SCRAP if something in the process on the target host fails -// - to SPARE if the clone works -func (agent *ActionAgent) changeTypeToRestore(ctx context.Context, tablet, sourceTablet *topo.TabletInfo, keyRange key.KeyRange) error { - // run the optional preflight_assigned hook - hk := hook.NewSimpleHook("preflight_assigned") - topotools.ConfigureTabletHook(hk, agent.TabletAlias) - if err := hk.ExecuteOptional(); err != nil { - return err - } - - // change the type - tablet.Keyspace = sourceTablet.Keyspace - tablet.Shard = sourceTablet.Shard - tablet.Type = topo.TYPE_RESTORE - tablet.KeyRange = keyRange - tablet.DbNameOverride = sourceTablet.DbNameOverride - if err := topo.UpdateTablet(ctx, agent.TopoServer, tablet); err != nil { - return err - } - - // and create the replication graph items - return topo.UpdateTabletReplicationData(ctx, agent.TopoServer, tablet.Tablet) -} - -// ReserveForRestore reserves the current tablet for an upcoming -// restore operation. -// Should be called under RPCWrapLockAction. -func (agent *ActionAgent) ReserveForRestore(ctx context.Context, args *actionnode.ReserveForRestoreArgs) error { - // first check mysql, no need to go further if we can't restore - if err := agent.Mysqld.ValidateCloneTarget(agent.hookExtraEnv()); err != nil { - return err - } - - // read our current tablet, verify its state - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) - if err != nil { - return err - } - if tablet.Type != topo.TYPE_IDLE { - return fmt.Errorf("expected idle type, not %v", tablet.Type) - } - - // read the source tablet - sourceTablet, err := agent.TopoServer.GetTablet(args.SrcTabletAlias) - if err != nil { - return err - } - - return agent.changeTypeToRestore(ctx, tablet, sourceTablet, sourceTablet.KeyRange) -} - -func fetchAndParseJSONFile(addr, filename string, result interface{}) error { - // read the manifest - murl := "http://" + addr + filename - resp, err := http.Get(murl) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Error fetching url %v: %v", murl, resp.Status) - } - data, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return err - } - - // unpack it - return json.Unmarshal(data, result) -} - -// Restore stops the tablet's mysqld, replaces its data folder with a snapshot, -// and then restarts it. 
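A usage sketch of the fetch-then-unmarshal pattern shown in fetchAndParseJSONFile above, decoding straight from the response body with json.NewDecoder (the URL and the trimmed-down manifest fields are illustrative; the real SnapshotManifest lives in go/vt/mysqlctl):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// manifest mirrors just the fields this sketch needs.
type manifest struct {
	Addr   string
	DbName string
}

func fetchManifest(url string) (*manifest, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("fetching %v: %v", url, resp.Status)
	}
	m := new(manifest)
	if err := json.NewDecoder(resp.Body).Decode(m); err != nil {
		return nil, err
	}
	return m, nil
}

func main() {
	m, err := fetchManifest("http://tablet-addr/snapshot/snapshot_manifest.json")
	fmt.Println(m, err)
}
```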
-// -// Check that the SnapshotManifest is valid and the master has not changed. -// Shutdown mysqld. -// Load the snapshot from source tablet. -// Restart mysqld and replication. -// Put tablet into the replication graph as a spare. -// Should be called under RPCWrapLockAction. -func (agent *ActionAgent) Restore(ctx context.Context, args *actionnode.RestoreArgs, logger logutil.Logger) error { - // read our current tablet, verify its state - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) - if err != nil { - return err - } - if args.WasReserved { - if tablet.Type != topo.TYPE_RESTORE { - return fmt.Errorf("expected restore type, not %v", tablet.Type) - } - } else { - if tablet.Type != topo.TYPE_IDLE { - return fmt.Errorf("expected idle type, not %v", tablet.Type) - } - } - // read the source tablet, compute args.SrcFilePath if default - sourceTablet, err := agent.TopoServer.GetTablet(args.SrcTabletAlias) - if err != nil { - return err - } - if strings.ToLower(args.SrcFilePath) == "default" { - args.SrcFilePath = path.Join(mysqlctl.SnapshotURLPath, mysqlctl.SnapshotManifestFile) - } - - // read the parent tablet, verify its state - parentTablet, err := agent.TopoServer.GetTablet(args.ParentAlias) - if err != nil { - return err - } - if parentTablet.Type != topo.TYPE_MASTER && parentTablet.Type != topo.TYPE_SNAPSHOT_SOURCE { - return fmt.Errorf("restore expected master or snapshot_source parent: %v %v", parentTablet.Type, args.ParentAlias) - } - - // read & unpack the manifest - sm := new(mysqlctl.SnapshotManifest) - if err := fetchAndParseJSONFile(sourceTablet.Addr(), args.SrcFilePath, sm); err != nil { - return err - } - - if !args.WasReserved { - if err := agent.changeTypeToRestore(ctx, tablet, sourceTablet, sourceTablet.KeyRange); err != nil { - return err - } - } - - // create the loggers: tee to console and source - l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) - - // do the work - if err := agent.Mysqld.RestoreFromSnapshot(l, sm, args.FetchConcurrency, args.FetchRetryCount, args.DontWaitForSlaveStart, agent.hookExtraEnv()); err != nil { - log.Errorf("RestoreFromSnapshot failed (%v), scrapping", err) - if err := topotools.Scrap(ctx, agent.TopoServer, agent.TabletAlias, false); err != nil { - log.Errorf("Failed to Scrap after failed RestoreFromSnapshot: %v", err) - } - - return err - } - - // reload the schema - agent.ReloadSchema(ctx) - - // change to TYPE_SPARE, we're done! - return topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topo.TYPE_SPARE, nil) -} diff --git a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go index 33aedac007..8421af7173 100644 --- a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go +++ b/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go @@ -1162,154 +1162,6 @@ func agentRPCTestBackupPanic(ctx context.Context, t *testing.T, client tmclient. 
expectRPCWrapLockActionPanic(t, err) } -var testSnapshotArgs = &actionnode.SnapshotArgs{ - Concurrency: 42, - ServerMode: true, - ForceMasterSnapshot: true, -} -var testSnapshotReply = &actionnode.SnapshotReply{ - ParentAlias: topo.TabletAlias{ - Cell: "test", - Uid: 456, - }, - ManifestPath: "path", - SlaveStartRequired: true, - ReadOnly: true, -} - -func (fra *fakeRPCAgent) Snapshot(ctx context.Context, args *actionnode.SnapshotArgs, logger logutil.Logger) (*actionnode.SnapshotReply, error) { - if fra.panics { - panic(fmt.Errorf("test-triggered panic")) - } - compare(fra.t, "Snapshot args", args, testSnapshotArgs) - logStuff(logger, 0) - return testSnapshotReply, nil -} - -func agentRPCTestSnapshot(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - logChannel, errFunc, err := client.Snapshot(ctx, ti, testSnapshotArgs) - if err != nil { - t.Fatalf("Snapshot failed: %v", err) - } - compareLoggedStuff(t, "Snapshot", logChannel, 0) - sr, err := errFunc() - compareError(t, "Snapshot", err, sr, testSnapshotReply) -} - -func agentRPCTestSnapshotPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - logChannel, errFunc, err := client.Snapshot(ctx, ti, testSnapshotArgs) - if err != nil { - t.Fatalf("Snapshot failed: %v", err) - } - if e, ok := <-logChannel; ok { - t.Fatalf("Unexpected Snapshot logs: %v", e) - } - _, err = errFunc() - expectRPCWrapLockActionPanic(t, err) -} - -var testSnapshotSourceEndArgs = &actionnode.SnapshotSourceEndArgs{ - SlaveStartRequired: true, - ReadOnly: true, - OriginalType: topo.TYPE_RDONLY, -} -var testSnapshotSourceEndCalled = false - -func (fra *fakeRPCAgent) SnapshotSourceEnd(ctx context.Context, args *actionnode.SnapshotSourceEndArgs) error { - if fra.panics { - panic(fmt.Errorf("test-triggered panic")) - } - compare(fra.t, "SnapshotSourceEnd args", args, testSnapshotSourceEndArgs) - testSnapshotSourceEndCalled = true - return nil -} - -func agentRPCTestSnapshotSourceEnd(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SnapshotSourceEnd(ctx, ti, testSnapshotSourceEndArgs) - compareError(t, "SnapshotSourceEnd", err, true, testSnapshotSourceEndCalled) -} - -func agentRPCTestSnapshotSourceEndPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.SnapshotSourceEnd(ctx, ti, testSnapshotSourceEndArgs) - expectRPCWrapLockActionPanic(t, err) -} - -var testReserveForRestoreArgs = &actionnode.ReserveForRestoreArgs{ - SrcTabletAlias: topo.TabletAlias{ - Cell: "test", - Uid: 456, - }, -} -var testReserveForRestoreCalled = false - -func (fra *fakeRPCAgent) ReserveForRestore(ctx context.Context, args *actionnode.ReserveForRestoreArgs) error { - if fra.panics { - panic(fmt.Errorf("test-triggered panic")) - } - compare(fra.t, "ReserveForRestore args", args, testReserveForRestoreArgs) - testReserveForRestoreCalled = true - return nil -} - -func agentRPCTestReserveForRestore(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.ReserveForRestore(ctx, ti, testReserveForRestoreArgs) - compareError(t, "ReserveForRestore", err, true, testReserveForRestoreCalled) -} - -func agentRPCTestReserveForRestorePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - err := client.ReserveForRestore(ctx, ti, testReserveForRestoreArgs) - expectRPCWrapLockActionPanic(t, err) -} - 
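The deleted RPC tests above all follow one shape: a fake agent compares the args it receives against a package-level expected value and flips a "called" flag, and the test asserts both. A minimal sketch of that pattern (all names illustrative):

```go
package example

import (
	"reflect"
	"testing"
)

type pingArgs struct{ Payload string }

var wantArgs = &pingArgs{Payload: "hello"}
var pingCalled = false

type fakeAgent struct{ t *testing.T }

// Ping records that it was called and verifies the args round-tripped.
func (fa *fakeAgent) Ping(args *pingArgs) error {
	if !reflect.DeepEqual(args, wantArgs) {
		fa.t.Errorf("Ping args: got %v, want %v", args, wantArgs)
	}
	pingCalled = true
	return nil
}

func TestPing(t *testing.T) {
	fa := &fakeAgent{t: t}
	if err := fa.Ping(wantArgs); err != nil {
		t.Fatalf("Ping failed: %v", err)
	}
	if !pingCalled {
		t.Errorf("Ping was not called")
	}
}
```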
-var testRestoreArgs = &actionnode.RestoreArgs{ - SrcTabletAlias: topo.TabletAlias{ - Cell: "jail1", - Uid: 890, - }, - SrcFilePath: "source", - ParentAlias: topo.TabletAlias{ - Cell: "jail2", - Uid: 901, - }, - FetchConcurrency: 12, - FetchRetryCount: 678, - WasReserved: true, - DontWaitForSlaveStart: true, -} -var testRestoreCalled = false - -func (fra *fakeRPCAgent) Restore(ctx context.Context, args *actionnode.RestoreArgs, logger logutil.Logger) error { - if fra.panics { - panic(fmt.Errorf("test-triggered panic")) - } - compare(fra.t, "Restore args", args, testRestoreArgs) - logStuff(logger, 10) - testRestoreCalled = true - return nil -} - -func agentRPCTestRestore(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - logChannel, errFunc, err := client.Restore(ctx, ti, testRestoreArgs) - if err != nil { - t.Fatalf("Restore failed: %v", err) - } - compareLoggedStuff(t, "Restore", logChannel, 10) - err = errFunc() - compareError(t, "Restore", err, true, testRestoreCalled) -} - -func agentRPCTestRestorePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo) { - logChannel, errFunc, err := client.Restore(ctx, ti, testRestoreArgs) - if err != nil { - t.Fatalf("Snapshot failed: %v", err) - } - if e, ok := <-logChannel; ok { - t.Fatalf("Unexpected Snapshot logs: %v", e) - } - err = errFunc() - expectRPCWrapLockActionPanic(t, err) -} - // // RPC helpers // @@ -1400,10 +1252,6 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, // Backup / restore related methods agentRPCTestBackup(ctx, t, client, ti) - agentRPCTestSnapshot(ctx, t, client, ti) - agentRPCTestSnapshotSourceEnd(ctx, t, client, ti) - agentRPCTestReserveForRestore(ctx, t, client, ti) - agentRPCTestRestore(ctx, t, client, ti) // // Tests panic handling everywhere now @@ -1457,8 +1305,4 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, ti *topo.TabletInfo, // Backup / restore related methods agentRPCTestBackupPanic(ctx, t, client, ti) - agentRPCTestSnapshotPanic(ctx, t, client, ti) - agentRPCTestSnapshotSourceEndPanic(ctx, t, client, ti) - agentRPCTestReserveForRestorePanic(ctx, t, client, ti) - agentRPCTestRestorePanic(ctx, t, client, ti) } diff --git a/go/vt/tabletmanager/faketmclient/fake_client.go b/go/vt/tabletmanager/faketmclient/fake_client.go index 472e8e2153..3cf9c5f51d 100644 --- a/go/vt/tabletmanager/faketmclient/fake_client.go +++ b/go/vt/tabletmanager/faketmclient/fake_client.go @@ -302,32 +302,6 @@ func (client *FakeTabletManagerClient) Backup(ctx context.Context, tablet *topo. 
}, nil } -// Snapshot is part of the tmclient.TabletManagerClient interface -func (client *FakeTabletManagerClient) Snapshot(ctx context.Context, tablet *topo.TabletInfo, sa *actionnode.SnapshotArgs) (<-chan *logutil.LoggerEvent, tmclient.SnapshotReplyFunc, error) { - logstream := make(chan *logutil.LoggerEvent, 10) - return logstream, func() (*actionnode.SnapshotReply, error) { - return &actionnode.SnapshotReply{}, nil - }, nil -} - -// SnapshotSourceEnd is part of the tmclient.TabletManagerClient interface -func (client *FakeTabletManagerClient) SnapshotSourceEnd(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.SnapshotSourceEndArgs) error { - return nil -} - -// ReserveForRestore is part of the tmclient.TabletManagerClient interface -func (client *FakeTabletManagerClient) ReserveForRestore(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.ReserveForRestoreArgs) error { - return nil -} - -// Restore is part of the tmclient.TabletManagerClient interface -func (client *FakeTabletManagerClient) Restore(ctx context.Context, tablet *topo.TabletInfo, sa *actionnode.RestoreArgs) (<-chan *logutil.LoggerEvent, tmclient.ErrFunc, error) { - logstream := make(chan *logutil.LoggerEvent, 10) - return logstream, func() error { - return nil - }, nil -} - // // RPC related methods // diff --git a/go/vt/tabletmanager/gorpcproto/structs.go b/go/vt/tabletmanager/gorpcproto/structs.go index a763f6b29a..78e35fcf93 100644 --- a/go/vt/tabletmanager/gorpcproto/structs.go +++ b/go/vt/tabletmanager/gorpcproto/structs.go @@ -8,9 +8,7 @@ import ( "time" blproto "github.com/youtube/vitess/go/vt/binlog/proto" - "github.com/youtube/vitess/go/vt/logutil" myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" - "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" "github.com/youtube/vitess/go/vt/topo" ) @@ -99,15 +97,6 @@ type BackupArgs struct { Concurrency int } -// gorpc doesn't support returning a streaming type during streaming -// and a final return value, so using structures with either one set. 
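A sketch of consuming the either/or envelope this comment describes: each streamed message carries a log event or the final result, never both, and the receiver demultiplexes on which pointer is non-nil. The types here are illustrative stand-ins for logutil.LoggerEvent and actionnode.SnapshotReply:

```go
package main

import "fmt"

type logEvent struct{ Msg string }
type finalResult struct{ ManifestPath string }

// streamingReply has exactly one of Log or Result set per message.
type streamingReply struct {
	Log    *logEvent
	Result *finalResult
}

func main() {
	stream := []streamingReply{
		{Log: &logEvent{Msg: "starting"}},
		{Log: &logEvent{Msg: "done"}},
		{Result: &finalResult{ManifestPath: "path"}},
	}
	var result *finalResult
	for _, r := range stream {
		if r.Log != nil {
			fmt.Println("log:", r.Log.Msg)
		}
		if r.Result != nil {
			result = r.Result
		}
	}
	fmt.Println("manifest:", result.ManifestPath)
}
```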
- -// SnapshotStreamingReply has the two possible replies for Snapshot -type SnapshotStreamingReply struct { - Log *logutil.LoggerEvent - Result *actionnode.SnapshotReply -} - // TabletExternallyReparentedArgs has arguments for TabletExternallyReparented type TabletExternallyReparentedArgs struct { ExternalID string diff --git a/go/vt/tabletmanager/gorpctmclient/gorpc_client.go b/go/vt/tabletmanager/gorpctmclient/gorpc_client.go index 23e9d7bce4..1d6816c3b9 100644 --- a/go/vt/tabletmanager/gorpctmclient/gorpc_client.go +++ b/go/vt/tabletmanager/gorpctmclient/gorpc_client.go @@ -506,117 +506,6 @@ func (client *GoRPCTabletManagerClient) Backup(ctx context.Context, tablet *topo }, nil } -// Snapshot is part of the tmclient.TabletManagerClient interface -func (client *GoRPCTabletManagerClient) Snapshot(ctx context.Context, tablet *topo.TabletInfo, sa *actionnode.SnapshotArgs) (<-chan *logutil.LoggerEvent, tmclient.SnapshotReplyFunc, error) { - var connectTimeout time.Duration - deadline, ok := ctx.Deadline() - if ok { - connectTimeout = deadline.Sub(time.Now()) - if connectTimeout < 0 { - return nil, nil, timeoutError{fmt.Errorf("timeout connecting to TabletManager.Snapshot on %v", tablet.Alias)} - } - } - rpcClient, err := bsonrpc.DialHTTP("tcp", tablet.Addr(), connectTimeout, nil) - if err != nil { - return nil, nil, err - } - - logstream := make(chan *logutil.LoggerEvent, 10) - rpcstream := make(chan *gorpcproto.SnapshotStreamingReply, 10) - result := &actionnode.SnapshotReply{} - - c := rpcClient.StreamGo("TabletManager.Snapshot", sa, rpcstream) - interrupted := false - go func() { - for { - select { - case <-ctx.Done(): - // context is done - interrupted = true - close(logstream) - rpcClient.Close() - return - case ssr, ok := <-rpcstream: - if !ok { - close(logstream) - rpcClient.Close() - return - } - if ssr.Log != nil { - logstream <- ssr.Log - } - if ssr.Result != nil { - *result = *ssr.Result - } - } - } - }() - return logstream, func() (*actionnode.SnapshotReply, error) { - // this is only called after streaming is done - if interrupted { - return nil, fmt.Errorf("TabletManager.Snapshot interrupted by context") - } - return result, c.Error - }, nil -} - -// SnapshotSourceEnd is part of the tmclient.TabletManagerClient interface -func (client *GoRPCTabletManagerClient) SnapshotSourceEnd(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.SnapshotSourceEndArgs) error { - return client.rpcCallTablet(ctx, tablet, actionnode.TabletActionSnapshotSourceEnd, args, &rpc.Unused{}) -} - -// ReserveForRestore is part of the tmclient.TabletManagerClient interface -func (client *GoRPCTabletManagerClient) ReserveForRestore(ctx context.Context, tablet *topo.TabletInfo, args *actionnode.ReserveForRestoreArgs) error { - return client.rpcCallTablet(ctx, tablet, actionnode.TabletActionReserveForRestore, args, &rpc.Unused{}) -} - -// Restore is part of the tmclient.TabletManagerClient interface -func (client *GoRPCTabletManagerClient) Restore(ctx context.Context, tablet *topo.TabletInfo, sa *actionnode.RestoreArgs) (<-chan *logutil.LoggerEvent, tmclient.ErrFunc, error) { - var connectTimeout time.Duration - deadline, ok := ctx.Deadline() - if ok { - connectTimeout = deadline.Sub(time.Now()) - if connectTimeout < 0 { - return nil, nil, timeoutError{fmt.Errorf("timeout connecting to TabletManager.Restore on %v", tablet.Alias)} - } - } - rpcClient, err := bsonrpc.DialHTTP("tcp", tablet.Addr(), connectTimeout, nil) - if err != nil { - return nil, nil, err - } - - logstream := make(chan 
*logutil.LoggerEvent, 10) - rpcstream := make(chan *logutil.LoggerEvent, 10) - c := rpcClient.StreamGo("TabletManager.Restore", sa, rpcstream) - interrupted := false - go func() { - for { - select { - case <-ctx.Done(): - // context is done - interrupted = true - close(logstream) - rpcClient.Close() - return - case ssr, ok := <-rpcstream: - if !ok { - close(logstream) - rpcClient.Close() - return - } - logstream <- ssr - } - } - }() - return logstream, func() error { - // this is only called after streaming is done - if interrupted { - return fmt.Errorf("TabletManager.Restore interrupted by context") - } - return c.Error - }, nil -} - // // RPC related methods // diff --git a/go/vt/tabletmanager/gorpctmserver/gorpc_server.go b/go/vt/tabletmanager/gorpctmserver/gorpc_server.go index 153f9a4e69..050fad2ab7 100644 --- a/go/vt/tabletmanager/gorpctmserver/gorpc_server.go +++ b/go/vt/tabletmanager/gorpctmserver/gorpc_server.go @@ -494,84 +494,6 @@ func (tm *TabletManager) Backup(ctx context.Context, args *gorpcproto.BackupArgs }) } -// Snapshot wraps RPCAgent.Snapshot -func (tm *TabletManager) Snapshot(ctx context.Context, args *actionnode.SnapshotArgs, sendReply func(interface{}) error) error { - ctx = callinfo.RPCWrapCallInfo(ctx) - return tm.agent.RPCWrapLockAction(ctx, actionnode.TabletActionSnapshot, args, nil, true, func() error { - // create a logger, send the result back to the caller - logger := logutil.NewChannelLogger(10) - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - for e := range logger { - ssr := &gorpcproto.SnapshotStreamingReply{ - Log: &e, - } - // Note we don't interrupt the loop here, as - // we still need to flush and finish the - // command, even if the channel to the client - // has been broken. We'll just keep trying to send. - sendReply(ssr) - } - wg.Done() - }() - - sr, err := tm.agent.Snapshot(ctx, args, logger) - close(logger) - wg.Wait() - if err != nil { - return err - } - ssr := &gorpcproto.SnapshotStreamingReply{ - Result: sr, - } - sendReply(ssr) - return nil - }) -} - -// SnapshotSourceEnd wraps RPCAgent. -func (tm *TabletManager) SnapshotSourceEnd(ctx context.Context, args *actionnode.SnapshotSourceEndArgs, reply *rpc.Unused) error { - ctx = callinfo.RPCWrapCallInfo(ctx) - return tm.agent.RPCWrapLockAction(ctx, actionnode.TabletActionSnapshotSourceEnd, args, reply, true, func() error { - return tm.agent.SnapshotSourceEnd(ctx, args) - }) -} - -// ReserveForRestore wraps RPCAgent.ReserveForRestore -func (tm *TabletManager) ReserveForRestore(ctx context.Context, args *actionnode.ReserveForRestoreArgs, reply *rpc.Unused) error { - ctx = callinfo.RPCWrapCallInfo(ctx) - return tm.agent.RPCWrapLockAction(ctx, actionnode.TabletActionReserveForRestore, args, reply, true, func() error { - return tm.agent.ReserveForRestore(ctx, args) - }) -} - -// Restore wraps RPCAgent.Restore -func (tm *TabletManager) Restore(ctx context.Context, args *actionnode.RestoreArgs, sendReply func(interface{}) error) error { - ctx = callinfo.RPCWrapCallInfo(ctx) - return tm.agent.RPCWrapLockAction(ctx, actionnode.TabletActionRestore, args, nil, true, func() error { - // create a logger, send the result back to the caller - logger := logutil.NewChannelLogger(10) - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - for e := range logger { - // Note we don't interrupt the loop here, as - // we still need to flush and finish the - // command, even if the channel to the client - // has been broken. We'll just keep trying to send. 
- sendReply(&e) - } - wg.Done() - }() - - err := tm.agent.Restore(ctx, args, logger) - close(logger) - wg.Wait() - return err - }) -} - // registration glue func init() { diff --git a/go/vt/tabletmanager/http.go b/go/vt/tabletmanager/http.go deleted file mode 100644 index b05e29c3cd..0000000000 --- a/go/vt/tabletmanager/http.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2013, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tabletmanager - -// This file handles the http server for snapshots, clones, ... - -import ( - "fmt" - "io" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" - - log "github.com/golang/glog" - "github.com/youtube/vitess/go/cgzip" - vtenv "github.com/youtube/vitess/go/vt/env" - "github.com/youtube/vitess/go/vt/mysqlctl" -) - -// HttpHandleSnapshots handles the serving of files from the local tablet -func HttpHandleSnapshots(mycnf *mysqlctl.Mycnf, uid uint32) { - // make a list of paths we can serve HTTP traffic from. - // we don't resolve them here to real paths, as they might not exits yet - snapshotDir := mysqlctl.SnapshotDir(uid) - allowedPaths := []string{ - path.Join(vtenv.VtDataRoot(), "data"), - mysqlctl.TabletDir(uid), - mysqlctl.SnapshotDir(uid), - mycnf.DataDir, - mycnf.InnodbDataHomeDir, - mycnf.InnodbLogGroupHomeDir, - } - - // NOTE: trailing slash in pattern means we handle all paths with this prefix - http.Handle(mysqlctl.SnapshotURLPath+"/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - handleSnapshot(w, r, snapshotDir, allowedPaths) - })) - -} - -// serve an individual query -func handleSnapshot(rw http.ResponseWriter, req *http.Request, snapshotDir string, allowedPaths []string) { - // if we get any error, we'll try to write a server error - // (it will fail if the header has already been written, but at least - // we won't crash vttablet) - defer func() { - if x := recover(); x != nil { - log.Errorf("vttablet http server panic: %v", x) - http.Error(rw, fmt.Sprintf("500 internal server error: %v", x), http.StatusInternalServerError) - } - }() - - // /snapshot must be rewritten to the actual location of the snapshot. - relative, err := filepath.Rel(mysqlctl.SnapshotURLPath, req.URL.Path) - if err != nil { - log.Errorf("bad snapshot relative path %v %v", req.URL.Path, err) - http.Error(rw, "400 bad request", http.StatusBadRequest) - return - } - - // Make sure that realPath is absolute and resolve any escaping from - // snapshotDir through a symlink. 
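Before the deleted handleSnapshot code continues below, a standalone sketch of the validation it performs: resolve the requested path to an absolute, symlink-free form, then require it to live under an allowed root (whose own symlinks are resolved first). Like the original, this uses a simple string prefix check; a stricter version would compare path components.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// isAllowed returns the resolved path and whether it falls under one of
// the allowed roots.
func isAllowed(requested string, allowedRoots []string) (string, bool) {
	real, err := filepath.Abs(requested)
	if err != nil {
		return "", false
	}
	// Resolve symlinks so a link cannot escape the allowed roots.
	real, err = filepath.EvalSymlinks(real)
	if err != nil {
		return "", false
	}
	for _, root := range allowedRoots {
		root, err := filepath.EvalSymlinks(root)
		if err != nil {
			continue
		}
		if strings.HasPrefix(real, root) {
			return real, true
		}
	}
	return "", false
}

func main() {
	p, ok := isAllowed("/vt/snapshot/vt_0000062344/data/db.opt", []string{"/vt"})
	fmt.Println(p, ok)
}
```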
- realPath, err := filepath.Abs(path.Join(snapshotDir, relative)) - if err != nil { - log.Errorf("bad snapshot absolute path %v %v", req.URL.Path, err) - http.Error(rw, "400 bad request", http.StatusBadRequest) - return - } - - realPath, err = filepath.EvalSymlinks(realPath) - if err != nil { - log.Errorf("bad snapshot symlink eval %v %v", req.URL.Path, err) - http.Error(rw, "400 bad request", http.StatusBadRequest) - return - } - - // Resolve all the possible roots and make sure we're serving - // from one of them - for _, allowedPath := range allowedPaths { - // eval the symlinks of the allowed path - allowedPath, err := filepath.EvalSymlinks(allowedPath) - if err != nil { - continue - } - if strings.HasPrefix(realPath, allowedPath) { - sendFile(rw, req, realPath) - return - } - } - - log.Errorf("bad snapshot real path %v %v", req.URL.Path, realPath) - http.Error(rw, "400 bad request", http.StatusBadRequest) -} - -// custom function to serve files -func sendFile(rw http.ResponseWriter, req *http.Request, path string) { - log.Infof("serve %v %v", req.URL.Path, path) - file, err := os.Open(path) - if err != nil { - http.NotFound(rw, req) - return - } - defer file.Close() - - fileinfo, err := file.Stat() - if err != nil { - http.NotFound(rw, req) - return - } - - // for directories, or for files smaller than 1k, use library - if fileinfo.Mode().IsDir() || fileinfo.Size() < 1024 { - http.ServeFile(rw, req, path) - return - } - - // supports If-Modified-Since header - if t, err := time.Parse(http.TimeFormat, req.Header.Get("If-Modified-Since")); err == nil && fileinfo.ModTime().Before(t.Add(1*time.Second)) { - rw.WriteHeader(http.StatusNotModified) - return - } - - // support Accept-Encoding header - var writer io.Writer = rw - var reader io.Reader = file - if !strings.HasSuffix(path, ".gz") { - ae := req.Header.Get("Accept-Encoding") - - if strings.Contains(ae, "gzip") { - gz, err := cgzip.NewWriterLevel(rw, cgzip.Z_BEST_SPEED) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - rw.Header().Set("Content-Encoding", "gzip") - defer gz.Close() - writer = gz - } - } - - // add content-length if we know it - if writer == rw && reader == file { - rw.Header().Set("Content-Length", fmt.Sprintf("%v", fileinfo.Size())) - } - - // and just copy content out - rw.Header().Set("Last-Modified", fileinfo.ModTime().UTC().Format(http.TimeFormat)) - rw.WriteHeader(http.StatusOK) - if _, err := io.Copy(writer, reader); err != nil { - log.Warningf("transfer failed %v: %v", path, err) - } -} diff --git a/go/vt/tabletmanager/tmclient/rpc_client_api.go b/go/vt/tabletmanager/tmclient/rpc_client_api.go index b358db3546..8875e0f32a 100644 --- a/go/vt/tabletmanager/tmclient/rpc_client_api.go +++ b/go/vt/tabletmanager/tmclient/rpc_client_api.go @@ -24,9 +24,6 @@ var tabletManagerProtocol = flag.String("tablet_manager_protocol", "bson", "the // ErrFunc is used by streaming RPCs that don't return a specific result type ErrFunc func() error -// SnapshotReplyFunc is used by Snapshot to return result and error -type SnapshotReplyFunc func() (*actionnode.SnapshotReply, error) - // TabletManagerClient defines the interface used to talk to a remote tablet type TabletManagerClient interface { // @@ -190,18 +187,6 @@ type TabletManagerClient interface { // Backup creates a database backup Backup(ctx context.Context, tablet *topo.TabletInfo, concurrency int) (<-chan *logutil.LoggerEvent, ErrFunc, error) - // Snapshot takes a database snapshot - Snapshot(ctx context.Context, tablet 
*topo.TabletInfo, sa *actionnode.SnapshotArgs) (<-chan *logutil.LoggerEvent, SnapshotReplyFunc, error) - - // SnapshotSourceEnd restarts the mysql server - SnapshotSourceEnd(ctx context.Context, tablet *topo.TabletInfo, ssea *actionnode.SnapshotSourceEndArgs) error - - // ReserveForRestore will prepare a server for restore - ReserveForRestore(ctx context.Context, tablet *topo.TabletInfo, rfra *actionnode.ReserveForRestoreArgs) error - - // Restore restores a database snapshot - Restore(ctx context.Context, tablet *topo.TabletInfo, sa *actionnode.RestoreArgs) (<-chan *logutil.LoggerEvent, ErrFunc, error) - // // RPC related methods // diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index e4d8f27f7c..c81ee87f4f 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -107,21 +107,6 @@ var commands = []commandGroup{ command{"Backup", commandBackup, "[-concurrency=4] ", "Stop mysqld and copy data to BackupStorage."}, - command{"Snapshot", commandSnapshot, - "[-force] [-server-mode] [-concurrency=4] ", - "Stop mysqld and copy compressed data aside."}, - command{"SnapshotSourceEnd", commandSnapshotSourceEnd, - "[-slave-start] [-read-write] ", - "Restart Mysql and restore original server type." + - "Valid :\n" + - " " + strings.Join(topo.MakeStringTypeList(topo.AllTabletTypes), " ")}, - command{"Restore", commandRestore, - "[-fetch-concurrency=3] [-fetch-retry-count=3] [-dont-wait-for-slave-start] []", - "Copy the given snaphot from the source tablet and restart replication to the new master path (or uses the if not specified). If is 'default', uses the default value.\n" + - "NOTE: This does not wait for replication to catch up. The destination tablet must be 'idle' to begin with. It will transition to 'spare' once the restore is complete."}, - command{"Clone", commandClone, - "[-force] [-concurrency=4] [-fetch-concurrency=3] [-fetch-retry-count=3] [-server-mode] ...", - "This performs Snapshot and then Restore on all the targets in parallel. 
The advantage of having separate actions is that one snapshot can be used for many restores, and it's then easier to spread them over time."}, command{"ExecuteHook", commandExecuteHook, " [ ...]", "This runs the specified hook on the given tablet."}, @@ -931,110 +916,6 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fl return errFunc() } -func commandSnapshotSourceEnd(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - slaveStartRequired := subFlags.Bool("slave-start", false, "will restart replication") - readWrite := subFlags.Bool("read-write", false, "will make the server read-write") - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 2 { - return fmt.Errorf("action SnapshotSourceEnd requires ") - } - - tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) - if err != nil { - return err - } - tabletType, err := parseTabletType(subFlags.Arg(1), topo.AllTabletTypes) - if err != nil { - return err - } - return wr.SnapshotSourceEnd(ctx, tabletAlias, *slaveStartRequired, !(*readWrite), tabletType) -} - -func commandSnapshot(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := subFlags.Bool("force", false, "will force the snapshot for a master, and turn it into a backup") - serverMode := subFlags.Bool("server-mode", false, "will symlink the data files and leave mysqld stopped") - concurrency := subFlags.Int("concurrency", 4, "how many compression/checksum jobs to run simultaneously") - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 1 { - return fmt.Errorf("action Snapshot requires ") - } - - tabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) - if err != nil { - return err - } - sr, originalType, err := wr.Snapshot(ctx, tabletAlias, *force, *concurrency, *serverMode) - if err == nil { - log.Infof("Manifest: %v", sr.ManifestPath) - log.Infof("ParentAlias: %v", sr.ParentAlias) - if *serverMode { - log.Infof("SlaveStartRequired: %v", sr.SlaveStartRequired) - log.Infof("ReadOnly: %v", sr.ReadOnly) - log.Infof("OriginalType: %v", originalType) - } - } - return err -} - -func commandRestore(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - dontWaitForSlaveStart := subFlags.Bool("dont-wait-for-slave-start", false, "won't wait for replication to start (useful when restoring from snapshot source that is the replication master)") - fetchConcurrency := subFlags.Int("fetch-concurrency", 3, "how many files to fetch simultaneously") - fetchRetryCount := subFlags.Int("fetch-retry-count", 3, "how many times to retry a failed transfer") - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 3 && subFlags.NArg() != 4 { - return fmt.Errorf("action Restore requires []") - } - srcTabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) - if err != nil { - return err - } - dstTabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(2)) - if err != nil { - return err - } - parentAlias := srcTabletAlias - if subFlags.NArg() == 4 { - parentAlias, err = topo.ParseTabletAliasString(subFlags.Arg(3)) - if err != nil { - return err - } - } - return wr.Restore(ctx, srcTabletAlias, subFlags.Arg(1), dstTabletAlias, parentAlias, *fetchConcurrency, *fetchRetryCount, false, *dontWaitForSlaveStart) -} - -func commandClone(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - force := 
subFlags.Bool("force", false, "will force the snapshot for a master, and turn it into a backup") - concurrency := subFlags.Int("concurrency", 4, "how many compression/checksum jobs to run simultaneously") - fetchConcurrency := subFlags.Int("fetch-concurrency", 3, "how many files to fetch simultaneously") - fetchRetryCount := subFlags.Int("fetch-retry-count", 3, "how many times to retry a failed transfer") - serverMode := subFlags.Bool("server-mode", false, "will keep the snapshot server offline to serve DB files directly") - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() < 2 { - return fmt.Errorf("action Clone requires [...]") - } - - srcTabletAlias, err := topo.ParseTabletAliasString(subFlags.Arg(0)) - if err != nil { - return err - } - dstTabletAliases := make([]topo.TabletAlias, subFlags.NArg()-1) - for i := 1; i < subFlags.NArg(); i++ { - dstTabletAliases[i-1], err = topo.ParseTabletAliasString(subFlags.Arg(i)) - if err != nil { - return err - } - } - return wr.Clone(ctx, srcTabletAlias, dstTabletAliases, *force, *concurrency, *fetchConcurrency, *fetchRetryCount, *serverMode) -} - func commandExecuteFetchAsDba(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { maxRows := subFlags.Int("max_rows", 10000, "maximum number of rows to allow in reset") wantFields := subFlags.Bool("want_fields", false, "also get the field names") diff --git a/go/vt/wrangler/clone.go b/go/vt/wrangler/clone.go deleted file mode 100644 index 0a8dfdd1d3..0000000000 --- a/go/vt/wrangler/clone.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package wrangler - -import ( - "fmt" - "sync" - - "github.com/youtube/vitess/go/vt/concurrency" - "github.com/youtube/vitess/go/vt/tabletmanager/actionnode" - "github.com/youtube/vitess/go/vt/topo" - "golang.org/x/net/context" -) - -// Snapshot takes a tablet snapshot. -// -// forceMasterSnapshot: Normally a master is not a viable tablet to snapshot. -// However, there are degenerate cases where you need to override this, for -// instance the initial clone of a new master. -// -// serverMode: if specified, the server will stop its mysqld, and be -// ready to serve the data files directly. Slaves can just download -// these and use them directly. Call SnapshotSourceEnd to return into -// serving mode. If not specified, the server will create an archive -// of the files, store them locally, and restart. -// -// If error is nil, returns the SnapshotReply from the remote host, -// and the original type the server was before the snapshot. 
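// The (reply, originalType) pair returned here is what Clone, further down
// in this file, threads back into SnapshotSourceEnd once all restores finish.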
-func (wr *Wrangler) Snapshot(ctx context.Context, tabletAlias topo.TabletAlias, forceMasterSnapshot bool, snapshotConcurrency int, serverMode bool) (*actionnode.SnapshotReply, topo.TabletType, error) { - // read the tablet to be able to RPC to it, and also to get its - // original type - ti, err := wr.ts.GetTablet(tabletAlias) - if err != nil { - return nil, "", err - } - originalType := ti.Tablet.Type - - // execute the remote action, log the results, save the error - args := &actionnode.SnapshotArgs{ - Concurrency: snapshotConcurrency, - ServerMode: serverMode, - ForceMasterSnapshot: forceMasterSnapshot, - } - logStream, errFunc, err := wr.tmc.Snapshot(ctx, ti, args) - if err != nil { - return nil, "", err - } - for e := range logStream { - wr.Logger().Infof("Snapshot(%v): %v", tabletAlias, e) - } - reply, err := errFunc() - return reply, originalType, err -} - -// SnapshotSourceEnd will change the tablet back to its original type -// once it's done serving backups. -func (wr *Wrangler) SnapshotSourceEnd(ctx context.Context, tabletAlias topo.TabletAlias, slaveStartRequired, readWrite bool, originalType topo.TabletType) (err error) { - var ti *topo.TabletInfo - ti, err = wr.ts.GetTablet(tabletAlias) - if err != nil { - return - } - - args := &actionnode.SnapshotSourceEndArgs{ - SlaveStartRequired: slaveStartRequired, - ReadOnly: !readWrite, - OriginalType: originalType, - } - return wr.tmc.SnapshotSourceEnd(ctx, ti, args) -} - -// ReserveForRestore will make sure a tablet is ready to be used as a restore -// target. -func (wr *Wrangler) ReserveForRestore(ctx context.Context, srcTabletAlias, dstTabletAlias topo.TabletAlias) (err error) { - // read our current tablet, verify its state before sending it - // to the tablet itself - tablet, err := wr.ts.GetTablet(dstTabletAlias) - if err != nil { - return err - } - if tablet.Type != topo.TYPE_IDLE { - return fmt.Errorf("expected idle type, not %v: %v", tablet.Type, dstTabletAlias) - } - - args := &actionnode.ReserveForRestoreArgs{ - SrcTabletAlias: srcTabletAlias, - } - return wr.tmc.ReserveForRestore(ctx, tablet, args) -} - -// UnreserveForRestore switches the tablet back to its original state, -// the restore won't happen. -func (wr *Wrangler) UnreserveForRestore(ctx context.Context, dstTabletAlias topo.TabletAlias) (err error) { - tablet, err := wr.ts.GetTablet(dstTabletAlias) - if err != nil { - return err - } - err = topo.DeleteTabletReplicationData(wr.ts, tablet.Tablet) - if err != nil { - return err - } - - return wr.ChangeType(ctx, tablet.Alias, topo.TYPE_IDLE, false) -} - -// Restore actually performs the restore action on a tablet. 
-func (wr *Wrangler) Restore(ctx context.Context, srcTabletAlias topo.TabletAlias, srcFilePath string, dstTabletAlias, parentAlias topo.TabletAlias, fetchConcurrency, fetchRetryCount int, wasReserved, dontWaitForSlaveStart bool) error { - // read our current tablet, verify its state before sending it - // to the tablet itself - tablet, err := wr.ts.GetTablet(dstTabletAlias) - if err != nil { - return err - } - if wasReserved { - if tablet.Type != topo.TYPE_RESTORE { - return fmt.Errorf("expected restore type, not %v: %v", tablet.Type, dstTabletAlias) - } - } else { - if tablet.Type != topo.TYPE_IDLE { - return fmt.Errorf("expected idle type, not %v: %v", tablet.Type, dstTabletAlias) - } - } - - // update the shard record if we need to, to update Cells - srcTablet, err := wr.ts.GetTablet(srcTabletAlias) - if err != nil { - return err - } - si, err := wr.ts.GetShard(srcTablet.Keyspace, srcTablet.Shard) - if err != nil { - return fmt.Errorf("Cannot read shard: %v", err) - } - if err := wr.updateShardCellsAndMaster(ctx, si, tablet.Alias, topo.TYPE_SPARE, false); err != nil { - return err - } - - // do the work - args := &actionnode.RestoreArgs{ - SrcTabletAlias: srcTabletAlias, - SrcFilePath: srcFilePath, - ParentAlias: parentAlias, - FetchConcurrency: fetchConcurrency, - FetchRetryCount: fetchRetryCount, - WasReserved: wasReserved, - DontWaitForSlaveStart: dontWaitForSlaveStart, - } - logStream, errFunc, err := wr.tmc.Restore(ctx, tablet, args) - if err != nil { - return err - } - for e := range logStream { - wr.Logger().Infof("Restore(%v): %v", dstTabletAlias, e) - } - if err := errFunc(); err != nil { - return err - } - - // Restore moves us into the replication graph as a - // spare. There are no consequences to the replication or - // serving graphs, so no rebuild required. - return nil -} - -// UnreserveForRestoreMulti calls UnreserveForRestore on all targets. -func (wr *Wrangler) UnreserveForRestoreMulti(ctx context.Context, dstTabletAliases []topo.TabletAlias) { - for _, dstTabletAlias := range dstTabletAliases { - ufrErr := wr.UnreserveForRestore(ctx, dstTabletAlias) - if ufrErr != nil { - wr.Logger().Errorf("Failed to UnreserveForRestore destination tablet after failed source snapshot: %v", ufrErr) - } else { - wr.Logger().Infof("Un-reserved %v", dstTabletAlias) - } - } -} - -// Clone will do all the necessary actions to copy all the data from a -// source to a set of destinations. -func (wr *Wrangler) Clone(ctx context.Context, srcTabletAlias topo.TabletAlias, dstTabletAliases []topo.TabletAlias, forceMasterSnapshot bool, snapshotConcurrency, fetchConcurrency, fetchRetryCount int, serverMode bool) error { - // make sure the destination can be restored into (otherwise - // there is no point in taking the snapshot in the first place), - // and reserve it. 
- reserved := make([]topo.TabletAlias, 0, len(dstTabletAliases)) - for _, dstTabletAlias := range dstTabletAliases { - err := wr.ReserveForRestore(ctx, srcTabletAlias, dstTabletAlias) - if err != nil { - wr.UnreserveForRestoreMulti(ctx, reserved) - return err - } - reserved = append(reserved, dstTabletAlias) - wr.Logger().Infof("Successfully reserved %v for restore", dstTabletAlias) - } - - // take the snapshot, or put the server in SnapshotSource mode - // srcFilePath, parentAlias, slaveStartRequired, readWrite - sr, originalType, err := wr.Snapshot(ctx, srcTabletAlias, forceMasterSnapshot, snapshotConcurrency, serverMode) - if err != nil { - // The snapshot failed so un-reserve the destinations and return - wr.UnreserveForRestoreMulti(ctx, reserved) - return err - } - - // try to restore the snapshot - // In serverMode, and in the case where we're replicating from - // the master, we can't wait for replication, as the master is down. - wg := sync.WaitGroup{} - rec := concurrency.FirstErrorRecorder{} - for _, dstTabletAlias := range dstTabletAliases { - wg.Add(1) - go func(dstTabletAlias topo.TabletAlias) { - e := wr.Restore(ctx, srcTabletAlias, sr.ManifestPath, dstTabletAlias, sr.ParentAlias, fetchConcurrency, fetchRetryCount, true, serverMode && originalType == topo.TYPE_MASTER) - rec.RecordError(e) - wg.Done() - }(dstTabletAlias) - } - wg.Wait() - err = rec.Error() - - // in any case, fix the server - if serverMode { - resetErr := wr.SnapshotSourceEnd(ctx, srcTabletAlias, sr.SlaveStartRequired, sr.ReadOnly, originalType) - if resetErr != nil { - if err == nil { - // If there is no other error, this matters. - err = resetErr - } else { - // In the context of a larger failure, just log a note to cleanup. - wr.Logger().Errorf("Failed to reset snapshot source: %v - vtctl SnapshotSourceEnd is required", resetErr) - } - } - } - - return err -} diff --git a/go/vt/wrangler/wrangler.go b/go/vt/wrangler/wrangler.go index 6bcca64550..eb8253ec2c 100644 --- a/go/vt/wrangler/wrangler.go +++ b/go/vt/wrangler/wrangler.go @@ -24,10 +24,7 @@ var ( ) // Wrangler manages complex actions on the topology, like reparents, -// snapshots, restores, ... -// -// FIXME(alainjobart) take the context out of this structure. -// We want the context to come from the outside on every call. +// backups, resharding, ... // // Multiple go routines can use the same Wrangler at the same time, // provided they want to share the same logger / topo server / lock timeout. From 979f0fdc4e7f8600ce005dea5c5f032dacc297cd Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 20 May 2015 13:13:57 -0700 Subject: [PATCH 057/128] Adding a unit test for backup/restore. Had to export a couple flags so it's unit-testable. --- go/vt/mysqlctl/backupstorage/file.go | 8 +- go/vt/mysqlctl/backupstorage/interface.go | 6 +- go/vt/tabletmanager/agent.go | 5 +- go/vt/tabletmanager/restore.go | 24 +-- go/vt/wrangler/testlib/backup_test.go | 171 ++++++++++++++++++++++ 5 files changed, 196 insertions(+), 18 deletions(-) create mode 100644 go/vt/wrangler/testlib/backup_test.go diff --git a/go/vt/mysqlctl/backupstorage/file.go b/go/vt/mysqlctl/backupstorage/file.go index 7245f0e8df..479afc754c 100644 --- a/go/vt/mysqlctl/backupstorage/file.go +++ b/go/vt/mysqlctl/backupstorage/file.go @@ -17,7 +17,9 @@ import ( // BackupStorage interface. var ( - fileBackupStorageRoot = flag.String("file_backup_storage_root", "", "root directory for the file backup storage") + // FileBackupStorageRoot is where the backups will go. + // Exported for test purposes. 
+ FileBackupStorageRoot = flag.String("file_backup_storage_root", "", "root directory for the file backup storage") ) // FileBackupHandle implements BackupHandle for local file system. @@ -138,9 +140,9 @@ func (fbs *FileBackupStorage) RemoveBackup(bucket, name string) error { // RegisterFileBackupStorage should be called after Flags has been // initialized, to register the FileBackupStorage implementation func RegisterFileBackupStorage() { - if *fileBackupStorageRoot != "" { + if *FileBackupStorageRoot != "" { BackupStorageMap["file"] = &FileBackupStorage{ - root: *fileBackupStorageRoot, + root: *FileBackupStorageRoot, } } } diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go index e28dc8d842..542be8fec2 100644 --- a/go/vt/mysqlctl/backupstorage/interface.go +++ b/go/vt/mysqlctl/backupstorage/interface.go @@ -14,7 +14,9 @@ import ( ) var ( - backupStorageImplementation = flag.String("backup_storage_implementation", "", "which implementation to use for the backup storage feature") + // BackupStorageImplementation is the implementation to use + // for BackupStorage. Exported for test purposes. + BackupStorageImplementation = flag.String("backup_storage_implementation", "", "which implementation to use for the backup storage feature") ) // BackupHandle describes an individual backup. @@ -74,7 +76,7 @@ var BackupStorageMap = make(map[string]BackupStorage) // GetBackupStorage returns the current BackupStorage implementation. // Should be called after flags have been initialized. func GetBackupStorage() BackupStorage { - bs, ok := BackupStorageMap[*backupStorageImplementation] + bs, ok := BackupStorageMap[*BackupStorageImplementation] if !ok { log.Fatalf("no registered implementation of BackupStorage") } diff --git a/go/vt/tabletmanager/agent.go b/go/vt/tabletmanager/agent.go index bc951fcfd3..9b1fc0d57f 100644 --- a/go/vt/tabletmanager/agent.go +++ b/go/vt/tabletmanager/agent.go @@ -196,7 +196,10 @@ func NewActionAgent( go func() { // restoreFromBackup will just be a regular action // (same as if it was triggered remotely) - agent.restoreFromBackup() + if err := agent.RestoreFromBackup(); err != nil { + println(fmt.Sprintf("RestoreFromBackup failed: %v", err)) + log.Fatalf("RestoreFromBackup failed: %v", err) + } // after the restore is done, start health check agent.initHeathCheck() diff --git a/go/vt/tabletmanager/restore.go b/go/vt/tabletmanager/restore.go index 3428df2984..a867ddf5b7 100644 --- a/go/vt/tabletmanager/restore.go +++ b/go/vt/tabletmanager/restore.go @@ -7,7 +7,6 @@ package tabletmanager import ( "flag" "fmt" - "log" "github.com/youtube/vitess/go/vt/mysqlctl" myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" @@ -22,11 +21,11 @@ var ( restoreConcurrency = flag.Int("restore_concurrency", 4, "(init restore parameter) how many concurrent files to restore at once") ) -// restoreFromBackup is the main entry point for backup restore. -// It will either work, fail gracefully and log the error, or log.Fatal -// in case of a non-recoverable error. +// RestoreFromBackup is the main entry point for backup restore. +// It will either work, fail gracefully, or return +// an error in case of a non-recoverable error. // It takes the action lock so no RPC interferes.
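// The caller now decides how fatal a failure is: the agent startup path in
// agent.go above still exits via log.Fatalf, while tests can assert on the
// returned error directly.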
-func (agent *ActionAgent) restoreFromBackup() { +func (agent *ActionAgent) RestoreFromBackup() error { agent.actionMutex.Lock() defer agent.actionMutex.Unlock() @@ -38,7 +37,7 @@ func (agent *ActionAgent) restoreFromBackup() { tablet.Type = topo.TYPE_RESTORE return nil }); err != nil { - log.Fatalf("Cannot change type to RESTORE: %v", err) + return fmt.Errorf("Cannot change type to RESTORE: %v", err) } // do the optional restore, if that fails we are in a bad state, @@ -46,18 +45,18 @@ func (agent *ActionAgent) restoreFromBackup() { bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) pos, err := mysqlctl.Restore(agent.MysqlDaemon, bucket, *restoreConcurrency, agent.hookExtraEnv()) if err != nil && err != mysqlctl.ErrNoBackup { - log.Fatalf("Cannot restore original backup: %v", err) + return fmt.Errorf("Cannot restore original backup: %v", err) } if err == nil { // now read the shard to find the current master, and its location si, err := agent.TopoServer.GetShard(tablet.Keyspace, tablet.Shard) if err != nil { - log.Fatalf("Cannot read shard: %v", err) + return fmt.Errorf("Cannot read shard: %v", err) } ti, err := agent.TopoServer.GetTablet(si.MasterAlias) if err != nil { - log.Fatalf("Cannot read master tablet %v: %v", si.MasterAlias, err) + return fmt.Errorf("Cannot read master tablet %v: %v", si.MasterAlias, err) } // set replication straight @@ -68,10 +67,10 @@ func (agent *ActionAgent) restoreFromBackup() { } cmds, err := agent.MysqlDaemon.StartReplicationCommands(status) if err != nil { - log.Fatalf("MysqlDaemon.StartReplicationCommands failed: %v", err) + return fmt.Errorf("MysqlDaemon.StartReplicationCommands failed: %v", err) } if err := agent.MysqlDaemon.ExecuteSuperQueryList(cmds); err != nil { - log.Fatalf("MysqlDaemon.ExecuteSuperQueryList failed: %v", err) + return fmt.Errorf("MysqlDaemon.ExecuteSuperQueryList failed: %v", err) } } @@ -80,6 +79,7 @@ func (agent *ActionAgent) restoreFromBackup() { tablet.Type = originalType return nil }); err != nil { - log.Fatalf("Cannot change type back to %v: %v", originalType, err) + return fmt.Errorf("Cannot change type back to %v: %v", originalType, err) } + return nil } diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go new file mode 100644 index 0000000000..ccc0dadc15 --- /dev/null +++ b/go/vt/wrangler/testlib/backup_test.go @@ -0,0 +1,171 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testlib + +import ( + "io/ioutil" + "os" + "path" + "testing" + "time" + + mproto "github.com/youtube/vitess/go/mysql/proto" + "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/mysqlctl" + "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" + myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" + "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/wrangler" + "github.com/youtube/vitess/go/vt/zktopo" + "golang.org/x/net/context" +) + +func TestBackupRestore(t *testing.T) { + // Initialize our environment + ctx := context.Background() + ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) + + // Initialize our temp dirs + root, err := ioutil.TempDir("", "backuptest") + if err != nil { + t.Fatalf("os.TempDir failed: %v", err) + } + defer os.RemoveAll(root) + + // Initialize BackupStorage + fbsRoot := path.Join(root, "fbs") + *backupstorage.FileBackupStorageRoot = fbsRoot + *backupstorage.BackupStorageImplementation = "file" + backupstorage.RegisterFileBackupStorage() + + // Initialize the fake mysql root directories + sourceInnodbDataDir := path.Join(root, "source_innodb_data") + sourceInnodbLogDir := path.Join(root, "source_innodb_log") + sourceDataDir := path.Join(root, "source_data") + sourceDataDbDir := path.Join(sourceDataDir, "vt_db") + sourceExtraDir := path.Join(sourceDataDir, "extra_dir") + for _, s := range []string{sourceInnodbDataDir, sourceInnodbLogDir, sourceDataDbDir, sourceExtraDir} { + if err := os.MkdirAll(s, os.ModePerm); err != nil { + t.Fatalf("failed to create directory %v: %v", s, err) + } + } + if err := ioutil.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm); err != nil { + t.Fatalf("failed to write file innodb_data_1: %v", err) + } + if err := ioutil.WriteFile(path.Join(sourceInnodbLogDir, "innodb_log_1"), []byte("innodb log 1 contents"), os.ModePerm); err != nil { + t.Fatalf("failed to write file innodb_log_1: %v", err) + } + if err := ioutil.WriteFile(path.Join(sourceDataDbDir, "db.opt"), []byte("db opt file"), os.ModePerm); err != nil { + t.Fatalf("failed to write file db.opt: %v", err) + } + if err := ioutil.WriteFile(path.Join(sourceExtraDir, "extra.stuff"), []byte("extra file"), os.ModePerm); err != nil { + t.Fatalf("failed to write file extra.stuff: %v", err) + } + + // create a master tablet, not started, just for shard health + master := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) + + // create a single tablet, set it up so we can do backups + sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA) + sourceTablet.FakeMysqlDaemon.ReadOnly = true + sourceTablet.FakeMysqlDaemon.Replicating = true + sourceTablet.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ + GTIDSet: myproto.MariadbGTID{ + Domain: 2, + Server: 123, + Sequence: 457, + }, + } + sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "STOP SLAVE", + "START SLAVE", + } + sourceTablet.FakeMysqlDaemon.Mycnf = &mysqlctl.Mycnf{ + DataDir: sourceDataDir, + InnodbDataHomeDir: sourceInnodbDataDir, + InnodbLogGroupHomeDir: sourceInnodbLogDir, + } + sourceTablet.StartActionLoop(t, wr) + defer sourceTablet.StopActionLoop(t) + + ti, err := ts.GetTablet(sourceTablet.Tablet.Alias) + if err != nil { + t.Fatalf("GetTablet failed: %v", err) + } + + // run the backup + 
logStream, errFunc, err := wr.TabletManagerClient().Backup(ctx, ti, 4) + if err != nil { + t.Fatalf("Backup failed: %v", err) + } + for e := range logStream { + t.Logf("%v", e) + } + if err := errFunc(); err != nil { + t.Fatalf("Backup errFunc failed: %v", err) + } + + // verify the full status + if err := sourceTablet.FakeMysqlDaemon.CheckSuperQueryList(); err != nil { + t.Errorf("sourceTablet.FakeMysqlDaemon.CheckSuperQueryList failed: %v", err) + } + if !sourceTablet.FakeMysqlDaemon.Replicating { + t.Errorf("sourceTablet.FakeMysqlDaemon.Replicating not set") + } + if !sourceTablet.FakeMysqlDaemon.Running { + t.Errorf("sourceTablet.FakeMysqlDaemon.Running not set") + } + + // create a destination tablet, set it up so we can do restores + destTablet := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA) + destTablet.FakeMysqlDaemon.ReadOnly = true + destTablet.FakeMysqlDaemon.Replicating = true + destTablet.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{ + GTIDSet: myproto.MariadbGTID{ + Domain: 2, + Server: 123, + Sequence: 457, + }, + } + destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "cmd1", + } + destTablet.FakeMysqlDaemon.Mycnf = &mysqlctl.Mycnf{ + DataDir: sourceDataDir, + InnodbDataHomeDir: sourceInnodbDataDir, + InnodbLogGroupHomeDir: sourceInnodbLogDir, + } + destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*mproto.QueryResult{ + "SHOW DATABASES": &mproto.QueryResult{}, + } + destTablet.FakeMysqlDaemon.StartReplicationCommandsStatus = &myproto.ReplicationStatus{ + Position: sourceTablet.FakeMysqlDaemon.CurrentMasterPosition, + MasterHost: master.Tablet.Hostname, + MasterPort: master.Tablet.Portmap["mysql"], + MasterConnectRetry: 10, + } + destTablet.FakeMysqlDaemon.StartReplicationCommandsResult = []string{"cmd1"} + + destTablet.StartActionLoop(t, wr) + defer destTablet.StopActionLoop(t) + + if err := destTablet.Agent.RestoreFromBackup(); err != nil { + t.Fatalf("RestoreFromBackup failed: %v", err) + } + + // verify the full status + if err := destTablet.FakeMysqlDaemon.CheckSuperQueryList(); err != nil { + t.Errorf("destTablet.FakeMysqlDaemon.CheckSuperQueryList failed: %v", err) + } + if !destTablet.FakeMysqlDaemon.Replicating { + t.Errorf("destTablet.FakeMysqlDaemon.Replicating not set") + } + if !destTablet.FakeMysqlDaemon.Running { + t.Errorf("destTablet.FakeMysqlDaemon.Running not set") + } + +} From a4bb4d36adf2ae2009275bdec000fe0a961c7548 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 20 May 2015 13:43:47 -0700 Subject: [PATCH 058/128] Removing now unused mysqld.GetMasterAddr and related uses. 
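The deleted method was a thin wrapper over SlaveStatus (see the replication.go hunk below), so any caller this sweep missed can inline it. A minimal sketch, not part of the patch itself:

func masterAddrOf(mysqld *Mysqld) (string, error) {
	// Mirrors the removed Mysqld.GetMasterAddr: the master address is
	// whatever 'SHOW SLAVE STATUS' currently reports.
	slaveStatus, err := mysqld.SlaveStatus()
	if err != nil {
		return "", err
	}
	return slaveStatus.MasterAddr(), nil
}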
--- go/vt/mysqlctl/mysql_daemon.go | 19 -------- go/vt/mysqlctl/replication.go | 9 ---- go/vt/topo/test/faketopo/fixture.go | 21 +-------- go/vt/topotools/rebuild_test.go | 4 +- go/vt/worker/split_clone_test.go | 12 ++---- go/vt/worker/split_diff_test.go | 12 ++---- go/vt/worker/sqldiffer_test.go | 12 ++---- go/vt/worker/vertical_split_clone_test.go | 9 ++-- go/vt/worker/vertical_split_diff_test.go | 12 ++---- .../testlib/copy_schema_shard_test.go | 3 +- go/vt/wrangler/testlib/fake_tablet.go | 12 ------ .../testlib/reparent_external_test.go | 43 +++++-------------- 12 files changed, 33 insertions(+), 135 deletions(-) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index b06045abe0..728dbfe67e 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -27,10 +27,6 @@ type MysqlDaemon interface { Start(mysqlWaitTime time.Duration) error Shutdown(waitForMysqld bool, mysqlWaitTime time.Duration) error - // GetMasterAddr returns the mysql master address, as shown by - // 'show slave status'. - GetMasterAddr() (string, error) - // GetMysqlPort returns the current port mysql is listening on. GetMysqlPort() (int, error) @@ -85,10 +81,6 @@ type FakeMysqlDaemon struct { // Running is used by Start / Shutdown Running bool - // MasterAddr will be returned by GetMasterAddr(). Set to "" to return - // ErrNotSlave, or to "ERROR" to return an error. - MasterAddr string - // MysqlPort will be returned by GetMysqlPort(). Set to -1 to // return an error. MysqlPort int @@ -202,17 +194,6 @@ func (fmd *FakeMysqlDaemon) Shutdown(waitForMysqld bool, mysqlWaitTime time.Dura return nil } -// GetMasterAddr is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) GetMasterAddr() (string, error) { - if fmd.MasterAddr == "" { - return "", ErrNotSlave - } - if fmd.MasterAddr == "ERROR" { - return "", fmt.Errorf("FakeMysqlDaemon.GetMasterAddr returns an error") - } - return fmd.MasterAddr, nil -} - // GetMysqlPort is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) GetMysqlPort() (int, error) { if fmd.MysqlPort == -1 { diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 123790270d..9027756e64 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -140,15 +140,6 @@ func StopSlave(md MysqlDaemon, hookExtraEnv map[string]string) error { return md.ExecuteSuperQueryList([]string{SqlStopSlave}) } -// GetMasterAddr returns master address -func (mysqld *Mysqld) GetMasterAddr() (string, error) { - slaveStatus, err := mysqld.SlaveStatus() - if err != nil { - return "", err - } - return slaveStatus.MasterAddr(), nil -} - // GetMysqlPort returns mysql port func (mysqld *Mysqld) GetMysqlPort() (int, error) { qr, err := mysqld.FetchSuperQuery("SHOW VARIABLES LIKE 'port'") diff --git a/go/vt/topo/test/faketopo/fixture.go b/go/vt/topo/test/faketopo/fixture.go index 7f24db09e1..633b9a8af0 100644 --- a/go/vt/topo/test/faketopo/fixture.go +++ b/go/vt/topo/test/faketopo/fixture.go @@ -66,25 +66,9 @@ func (fix *Fixture) TearDown() { close(fix.done) } -// MakeMySQLMaster makes the (fake) MySQL used by tablet identified by -// uid the master. 
-func (fix *Fixture) MakeMySQLMaster(uid int) { - newMaster, ok := fix.tablets[uid] - if !ok { - fix.Fatalf("bad tablet uid: %v", uid) - } - for id, tablet := range fix.tablets { - if id == uid { - tablet.mysql.MasterAddr = "" - } else { - tablet.mysql.MasterAddr = newMaster.MysqlIPAddr() - } - } -} - // AddTablet adds a new tablet to the topology and starts its event // loop. -func (fix *Fixture) AddTablet(uid int, cell string, tabletType topo.TabletType, master *topo.Tablet) *topo.Tablet { +func (fix *Fixture) AddTablet(uid int, cell string, tabletType topo.TabletType) *topo.Tablet { tablet := &topo.Tablet{ Alias: topo.TabletAlias{Cell: cell, Uid: uint32(uid)}, Hostname: fmt.Sprintf("%vbsr%v", cell, uid), @@ -103,9 +87,6 @@ func (fix *Fixture) AddTablet(uid int, cell string, tabletType topo.TabletType, fix.Fatalf("CreateTablet: %v", err) } mysqlDaemon := &mysqlctl.FakeMysqlDaemon{} - if master != nil { - mysqlDaemon.MasterAddr = master.MysqlIPAddr() - } mysqlDaemon.MysqlPort = 3334 + 10*uid pack := &tabletPack{Tablet: tablet, mysql: mysqlDaemon} diff --git a/go/vt/topotools/rebuild_test.go b/go/vt/topotools/rebuild_test.go index e475806696..e45656daf3 100644 --- a/go/vt/topotools/rebuild_test.go +++ b/go/vt/topotools/rebuild_test.go @@ -32,8 +32,8 @@ func TestRebuildShardRace(t *testing.T) { keyspace := faketopo.TestKeyspace shard := faketopo.TestShard - master := f.AddTablet(1, "test_cell", topo.TYPE_MASTER, nil) - f.AddTablet(2, "test_cell", topo.TYPE_REPLICA, master) + f.AddTablet(1, "test_cell", topo.TYPE_MASTER) + f.AddTablet(2, "test_cell", topo.TYPE_REPLICA) // Do an initial rebuild. if _, err := RebuildShard(ctx, logger, f.Topo, keyspace, shard, cells, time.Minute); err != nil { diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 45b46dbc08..14772082f9 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -250,23 +250,19 @@ func testSplitClone(t *testing.T, strategy string) { sourceMaster := testlib.NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "ks", "-80")) sourceRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 1, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-80"), - testlib.TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-80")) sourceRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 2, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-80"), - testlib.TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-80")) leftMaster := testlib.NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "ks", "-40")) leftRdonly := testlib.NewFakeTablet(t, wr, "cell1", 11, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-40"), - testlib.TabletParent(leftMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-40")) rightMaster := testlib.NewFakeTablet(t, wr, "cell1", 20, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "ks", "40-80")) rightRdonly := testlib.NewFakeTablet(t, wr, "cell1", 21, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "40-80"), - testlib.TabletParent(rightMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "40-80")) for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, leftMaster, leftRdonly, rightMaster, rightRdonly} { ft.StartActionLoop(t, wr) diff --git a/go/vt/worker/split_diff_test.go b/go/vt/worker/split_diff_test.go index 
21436ac0c1..5dd41c7693 100644 --- a/go/vt/worker/split_diff_test.go +++ b/go/vt/worker/split_diff_test.go @@ -156,20 +156,16 @@ func TestSplitDiff(t *testing.T) { sourceMaster := testlib.NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "ks", "-80")) sourceRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 1, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-80"), - testlib.TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-80")) sourceRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 2, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-80"), - testlib.TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-80")) leftMaster := testlib.NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "ks", "-40")) leftRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 11, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-40"), - testlib.TabletParent(leftMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-40")) leftRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 12, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-40"), - testlib.TabletParent(leftMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "ks", "-40")) for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, leftMaster, leftRdonly1, leftRdonly2} { ft.StartActionLoop(t, wr) diff --git a/go/vt/worker/sqldiffer_test.go b/go/vt/worker/sqldiffer_test.go index a0af742cd7..c517c70af0 100644 --- a/go/vt/worker/sqldiffer_test.go +++ b/go/vt/worker/sqldiffer_test.go @@ -79,20 +79,16 @@ func TestSqlDiffer(t *testing.T) { supersetMaster := testlib.NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "source_ks", "0")) supersetRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 1, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0"), - testlib.TabletParent(supersetMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0")) supersetRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 2, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0"), - testlib.TabletParent(supersetMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0")) subsetMaster := testlib.NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) subsetRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 11, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0"), - testlib.TabletParent(subsetMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) subsetRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 12, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0"), - testlib.TabletParent(subsetMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) for _, ft := range []*testlib.FakeTablet{supersetMaster, supersetRdonly1, supersetRdonly2, subsetMaster, subsetRdonly1, subsetRdonly2} { ft.StartActionLoop(t, wr) diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index c4e136723f..67e3e32bea 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -234,11 +234,9 @@ func testVerticalSplitClone(t *testing.T, strategy string) { sourceMaster := testlib.NewFakeTablet(t, wr, 
"cell1", 0, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "source_ks", "0")) sourceRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 1, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0"), - testlib.TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0")) sourceRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 2, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0"), - testlib.TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0")) // Create the destination keyspace with the appropriate ServedFromMap ki := &topo.Keyspace{} @@ -252,8 +250,7 @@ func testVerticalSplitClone(t *testing.T, strategy string) { destMaster := testlib.NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) destRdonly := testlib.NewFakeTablet(t, wr, "cell1", 11, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0"), - testlib.TabletParent(destMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, destMaster, destRdonly} { ft.StartActionLoop(t, wr) diff --git a/go/vt/worker/vertical_split_diff_test.go b/go/vt/worker/vertical_split_diff_test.go index 8d8c9f933a..0a6efbae94 100644 --- a/go/vt/worker/vertical_split_diff_test.go +++ b/go/vt/worker/vertical_split_diff_test.go @@ -90,11 +90,9 @@ func TestVerticalSplitDiff(t *testing.T) { sourceMaster := testlib.NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "source_ks", "0")) sourceRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 1, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0"), - testlib.TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0")) sourceRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 2, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0"), - testlib.TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "source_ks", "0")) // Create the destination keyspace with the appropriate ServedFromMap ki := &topo.Keyspace{} @@ -108,11 +106,9 @@ func TestVerticalSplitDiff(t *testing.T) { destMaster := testlib.NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) destRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 11, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0"), - testlib.TabletParent(destMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) destRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 12, - topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0"), - testlib.TabletParent(destMaster.Tablet.Alias)) + topo.TYPE_RDONLY, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, destMaster, destRdonly1, destRdonly2} { ft.StartActionLoop(t, wr) diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go index 4a2c6779d8..4c153bf09d 100644 --- a/go/vt/wrangler/testlib/copy_schema_shard_test.go +++ b/go/vt/wrangler/testlib/copy_schema_shard_test.go @@ -132,8 +132,7 @@ func TestCopySchemaShard(t *testing.T) { sourceMaster := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER, TabletKeyspaceShard(t, "ks", 
"-80")) sourceRdonly := NewFakeTablet(t, wr, "cell1", 1, - topo.TYPE_RDONLY, TabletKeyspaceShard(t, "ks", "-80"), - TabletParent(sourceMaster.Tablet.Alias)) + topo.TYPE_RDONLY, TabletKeyspaceShard(t, "ks", "-80")) destinationMaster := NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, TabletKeyspaceShard(t, "ks", "-40")) diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index 63ed1a1834..92009fa587 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -49,14 +49,6 @@ type FakeTablet struct { // making it too cumbersome. type TabletOption func(tablet *topo.Tablet) -// TabletParent is the tablet option to set the parent alias -func TabletParent(parent topo.TabletAlias) TabletOption { - return func(tablet *topo.Tablet) { - // save the parent alias uid into the portmap as a hack - tablet.Portmap["parent_uid"] = int(parent.Uid) - } -} - // TabletKeyspaceShard is the option to set the tablet keyspace and shard func TabletKeyspaceShard(t *testing.T, keyspace, shard string) TabletOption { return func(tablet *topo.Tablet) { @@ -101,7 +93,6 @@ func NewFakeTablet(t *testing.T, wr *wrangler.Wrangler, cell string, uid uint32, for _, option := range options { option(tablet) } - puid, ok := tablet.Portmap["parent_uid"] delete(tablet.Portmap, "parent_uid") _, force := tablet.Portmap["force_init"] delete(tablet.Portmap, "force_init") @@ -111,9 +102,6 @@ func NewFakeTablet(t *testing.T, wr *wrangler.Wrangler, cell string, uid uint32, // create a FakeMysqlDaemon with the right information by default fakeMysqlDaemon := mysqlctl.NewFakeMysqlDaemon() - if ok { - fakeMysqlDaemon.MasterAddr = fmt.Sprintf("%v.0.0.1:%v", 100+puid, 3300+puid) - } fakeMysqlDaemon.MysqlPort = 3300 + int(uid) return &FakeTablet{ diff --git a/go/vt/wrangler/testlib/reparent_external_test.go b/go/vt/wrangler/testlib/reparent_external_test.go index 4c93e3aed8..8015e3bc6b 100644 --- a/go/vt/wrangler/testlib/reparent_external_test.go +++ b/go/vt/wrangler/testlib/reparent_external_test.go @@ -40,14 +40,10 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { // Create an old master, a new master, two good slaves, one bad slave oldMaster := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) - newMaster := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) - goodSlave1 := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) - goodSlave2 := NewFakeTablet(t, wr, "cell2", 3, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) - badSlave := NewFakeTablet(t, wr, "cell1", 4, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) + newMaster := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA) + goodSlave1 := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA) + goodSlave2 := NewFakeTablet(t, wr, "cell2", 3, topo.TYPE_REPLICA) + badSlave := NewFakeTablet(t, wr, "cell1", 4, topo.TYPE_REPLICA) // Add a new Cell to the Shard, that doesn't map to any read topo cell, // to simulate a data center being unreachable. @@ -97,29 +93,24 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { // On the elected master, we will respond to // TabletActionSlaveWasPromoted - newMaster.FakeMysqlDaemon.MasterAddr = "" newMaster.StartActionLoop(t, wr) defer newMaster.StopActionLoop(t) // On the old master, we will only respond to // TabletActionSlaveWasRestarted. 
- oldMaster.FakeMysqlDaemon.MasterAddr = newMaster.Tablet.MysqlIPAddr() oldMaster.StartActionLoop(t, wr) defer oldMaster.StopActionLoop(t) // On the good slaves, we will respond to // TabletActionSlaveWasRestarted. - goodSlave1.FakeMysqlDaemon.MasterAddr = newMaster.Tablet.MysqlIPAddr() goodSlave1.StartActionLoop(t, wr) defer goodSlave1.StopActionLoop(t) - goodSlave2.FakeMysqlDaemon.MasterAddr = newMaster.Tablet.MysqlIPAddr() goodSlave2.StartActionLoop(t, wr) defer goodSlave2.StopActionLoop(t) // On the bad slave, we will respond to // TabletActionSlaveWasRestarted with bad data. - badSlave.FakeMysqlDaemon.MasterAddr = "234.0.0.1:3301" badSlave.StartActionLoop(t, wr) defer badSlave.StopActionLoop(t) @@ -191,10 +182,8 @@ func testTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T, fast boo // Create an old master, a new master, two good slaves, one bad slave oldMaster := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) - newMaster := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) - goodSlave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) + newMaster := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA) + goodSlave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA) // Now we're restarting mysql on a different port, 3301->3303 // but without updating the Tablet record in topology. @@ -202,20 +191,17 @@ func testTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T, fast boo // On the elected master, we will respond to // TabletActionSlaveWasPromoted, so we need a MysqlDaemon // that returns no master, and the new port (as returned by mysql) - newMaster.FakeMysqlDaemon.MasterAddr = "" newMaster.FakeMysqlDaemon.MysqlPort = 3303 newMaster.StartActionLoop(t, wr) defer newMaster.StopActionLoop(t) // On the old master, we will only respond to // TabletActionSlaveWasRestarted and point to the new mysql port - oldMaster.FakeMysqlDaemon.MasterAddr = "101.0.0.1:3303" oldMaster.StartActionLoop(t, wr) defer oldMaster.StopActionLoop(t) // On the good slaves, we will respond to // TabletActionSlaveWasRestarted and point to the new mysql port - goodSlave.FakeMysqlDaemon.MasterAddr = "101.0.0.1:3303" goodSlave.StartActionLoop(t, wr) defer goodSlave.StopActionLoop(t) @@ -249,27 +235,22 @@ func testTabletExternallyReparentedContinueOnUnexpectedMaster(t *testing.T, fast // Create an old master, a new master, two good slaves, one bad slave oldMaster := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) - newMaster := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) - goodSlave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) + newMaster := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA) + goodSlave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA) // On the elected master, we will respond to // TabletActionSlaveWasPromoted, so we need a MysqlDaemon // that returns no master, and the new port (as returned by mysql) - newMaster.FakeMysqlDaemon.MasterAddr = "" newMaster.StartActionLoop(t, wr) defer newMaster.StopActionLoop(t) // On the old master, we will only respond to // TabletActionSlaveWasRestarted and point to a bad host - oldMaster.FakeMysqlDaemon.MasterAddr = "1.2.3.4:6666" oldMaster.StartActionLoop(t, wr) defer oldMaster.StopActionLoop(t) // On the good slave, we will respond to // TabletActionSlaveWasRestarted and point to a bad host - goodSlave.FakeMysqlDaemon.MasterAddr = "1.2.3.4:6666" 
goodSlave.StartActionLoop(t, wr) defer goodSlave.StopActionLoop(t) @@ -301,16 +282,13 @@ func testTabletExternallyReparentedFailedOldMaster(t *testing.T, fast bool) { // Create an old master, a new master, and a good slave. oldMaster := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) - newMaster := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) - goodSlave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA, - TabletParent(oldMaster.Tablet.Alias)) + newMaster := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_REPLICA) + goodSlave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA) // Reparent to a replica, and pretend the old master is not responding. // On the elected master, we will respond to // TabletActionSlaveWasPromoted - newMaster.FakeMysqlDaemon.MasterAddr = "" newMaster.StartActionLoop(t, wr) defer newMaster.StopActionLoop(t) @@ -320,7 +298,6 @@ func testTabletExternallyReparentedFailedOldMaster(t *testing.T, fast bool) { // On the good slave, we will respond to // TabletActionSlaveWasRestarted. - goodSlave.FakeMysqlDaemon.MasterAddr = newMaster.Tablet.MysqlIPAddr() goodSlave.StartActionLoop(t, wr) defer goodSlave.StopActionLoop(t) From 9776e112ce12e20589eb0fe8f3ce79f784259e25 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 20 May 2015 13:54:55 -0700 Subject: [PATCH 059/128] Removing couple unused methods. --- go/vt/mysqlctl/replication.go | 59 ++--------------------------------- 1 file changed, 2 insertions(+), 57 deletions(-) diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 9027756e64..24b0c1005d 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -12,8 +12,6 @@ import ( "bytes" "errors" "fmt" - "os" - "path" "strconv" "strings" "text/template" @@ -89,8 +87,8 @@ func parseSlaveStatus(fields map[string]string) proto.ReplicationStatus { return status } -// WaitForSlaveStart waits for MySQL replication to start until given -// deadline (in seconds) passed. +// WaitForSlaveStart waits until the deadline for replication to start. +// This validates the current master is correct and can be connected to. 
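// The deadline is still expressed in seconds, as in the wording removed above.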
func WaitForSlaveStart(mysqld MysqlDaemon, slaveStartDeadline int) error { var rowMap map[string]string for slaveWait := 0; slaveWait < slaveStartDeadline; slaveWait++ { @@ -247,43 +245,6 @@ func (mysqld *Mysqld) SetMasterCommands(masterHost string, masterPort int) ([]st return flavor.SetMasterCommands(¶ms, masterHost, masterPort, int(masterConnectRetry.Seconds())) } -// WaitForSlave waits for a slave if its lag is larger than given maxLag -func (mysqld *Mysqld) WaitForSlave(maxLag int) (err error) { - // FIXME(msolomon) verify that slave started based on show slave status; - var rowMap map[string]string - for { - rowMap, err = mysqld.fetchSuperQueryMap("SHOW SLAVE STATUS") - if err != nil { - return - } - - if rowMap["Seconds_Behind_Master"] == "NULL" { - break - } else { - lag, err := strconv.Atoi(rowMap["Seconds_Behind_Master"]) - if err != nil { - break - } - if lag < maxLag { - return nil - } - } - time.Sleep(time.Second) - } - - errorKeys := []string{"Last_Error", "Last_IO_Error", "Last_SQL_Error"} - errs := make([]string, 0, len(errorKeys)) - for _, key := range errorKeys { - if rowMap[key] != "" { - errs = append(errs, key+": "+rowMap[key]) - } - } - if len(errs) != 0 { - return errors.New(strings.Join(errs, ", ")) - } - return errors.New("replication stopped, it will never catch up") -} - // ResetReplicationCommands returns the commands to run to reset all // replication for this host. func (mysqld *Mysqld) ResetReplicationCommands() ([]string, error) { @@ -336,22 +297,6 @@ func (mysqld *Mysqld) FindSlaves() ([]string, error) { return addrs, nil } -// ValidateSnapshotPath is a helper function to make sure we can write to the local snapshot area, before we actually do any action -// (can be used for both partial and full snapshots) -func (mysqld *Mysqld) ValidateSnapshotPath() error { - _path := path.Join(mysqld.SnapshotDir, "validate_test") - if err := os.RemoveAll(_path); err != nil { - return fmt.Errorf("ValidateSnapshotPath: Cannot validate snapshot directory: %v", err) - } - if err := os.MkdirAll(_path, 0775); err != nil { - return fmt.Errorf("ValidateSnapshotPath: Cannot validate snapshot directory: %v", err) - } - if err := os.RemoveAll(_path); err != nil { - return fmt.Errorf("ValidateSnapshotPath: Cannot validate snapshot directory: %v", err) - } - return nil -} - // WaitBlpPosition will wait for the filtered replication to reach at least // the provided position. func (mysqld *Mysqld) WaitBlpPosition(bp *blproto.BlpPosition, waitTimeout time.Duration) error { From cf568fdc28ec1de86bab9f7f0222cc3442530bc7 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 20 May 2015 16:18:28 -0700 Subject: [PATCH 060/128] Adding unit test for findFiles for backup. Making other unit test simpler. Removing special case for symlinks, unused. 
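In sketch form, the contract the new test pins down (names as in the test below): given a Mycnf describing the fake layout, findFilesTobackup returns one FileEntry per data file, and a database directory that is itself a symlink (vt_symlink -> outside_db) is still picked up even without the removed EvalSymlinks branch, because the directory walk follows the link when listing its contents:

// Sketch of the behavior TestFindFilesToBackup asserts below.
cnf := &Mycnf{
	InnodbDataHomeDir:     innodbDataDir, // holds innodb_data_1
	InnodbLogGroupHomeDir: innodbLogDir,  // holds innodb_log_1
	DataDir:               dataDir,       // holds vt_db/ and vt_symlink -> outside_db/
}
fes, err := findFilesTobackup(cnf)
// fes, once sorted: {Data, vt_db/db.opt}, {Data, vt_symlink/table1.frm},
//                   {InnoDBData, innodb_data_1}, {InnoDBLog, innodb_log_1}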
--- go/vt/mysqlctl/backup.go | 27 ++------ go/vt/mysqlctl/backup_test.go | 93 +++++++++++++++++++++++++++ go/vt/mysqlctl/mysqld.go | 8 +-- go/vt/wrangler/testlib/backup_test.go | 6 +- 4 files changed, 104 insertions(+), 30 deletions(-) create mode 100644 go/vt/mysqlctl/backup_test.go diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 8227e7dba9..1921545d9a 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -13,7 +13,6 @@ import ( "io/ioutil" "os" "path" - "path/filepath" "strings" "sync" @@ -145,7 +144,7 @@ func addDirectory(fes []FileEntry, base string, baseDir string, subDir string) ( return fes, nil } -func findFilesTobackup(cnf *Mycnf, logger logutil.Logger) ([]FileEntry, error) { +func findFilesTobackup(cnf *Mycnf) ([]FileEntry, error) { var err error var result []FileEntry @@ -167,18 +166,6 @@ func findFilesTobackup(cnf *Mycnf, logger logutil.Logger) ([]FileEntry, error) { for _, fi := range fis { p := path.Join(cnf.DataDir, fi.Name()) - - // If this is not a directory, try to eval it as a syslink. - if !fi.IsDir() { - p, err = filepath.EvalSymlinks(p) - if err != nil { - return nil, err - } - fi, err = os.Stat(p) - if err != nil { - return nil, err - } - } if isDbDir(p) { result, err = addDirectory(result, backupData, cnf.DataDir, fi.Name()) if err != nil { @@ -203,14 +190,12 @@ func Backup(mysqld MysqlDaemon, logger logutil.Logger, bucket, name string, back } if err = backup(mysqld, logger, bh, backupConcurrency, hookExtraEnv); err != nil { - if err := bh.AbortBackup(); err != nil { - logger.Errorf("failed to abort backup: %v", err) + if abortErr := bh.AbortBackup(); abortErr != nil { + logger.Errorf("failed to abort backup: %v", abortErr) } - } else { - err = bh.EndBackup() + return err } - - return err + return bh.EndBackup() } func backup(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, backupConcurrency int, hookExtraEnv map[string]string) error { @@ -271,7 +256,7 @@ func backup(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHa } // get the files to backup - fes, err := findFilesTobackup(mysqld.Cnf(), logger) + fes, err := findFilesTobackup(mysqld.Cnf()) if err != nil { return fmt.Errorf("cannot find files to backup: %v", err) } diff --git a/go/vt/mysqlctl/backup_test.go b/go/vt/mysqlctl/backup_test.go new file mode 100644 index 0000000000..25faf40fe9 --- /dev/null +++ b/go/vt/mysqlctl/backup_test.go @@ -0,0 +1,93 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mysqlctl + +import ( + "io/ioutil" + "os" + "path" + "reflect" + "sort" + "testing" +) + +func TestFindFilesToBackup(t *testing.T) { + root, err := ioutil.TempDir("", "backuptest") + if err != nil { + t.Fatalf("os.TempDir failed: %v", err) + } + defer os.RemoveAll(root) + + // Initialize the fake mysql root directories + innodbDataDir := path.Join(root, "innodb_data") + innodbLogDir := path.Join(root, "innodb_log") + dataDir := path.Join(root, "data") + dataDbDir := path.Join(dataDir, "vt_db") + extraDir := path.Join(dataDir, "extra_dir") + outsideDbDir := path.Join(root, "outside_db") + for _, s := range []string{innodbDataDir, innodbLogDir, dataDbDir, extraDir, outsideDbDir} { + if err := os.MkdirAll(s, os.ModePerm); err != nil { + t.Fatalf("failed to create directory %v: %v", s, err) + } + } + if err := ioutil.WriteFile(path.Join(innodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm); err != nil { + t.Fatalf("failed to write file innodb_data_1: %v", err) + } + if err := ioutil.WriteFile(path.Join(innodbLogDir, "innodb_log_1"), []byte("innodb log 1 contents"), os.ModePerm); err != nil { + t.Fatalf("failed to write file innodb_log_1: %v", err) + } + if err := ioutil.WriteFile(path.Join(dataDbDir, "db.opt"), []byte("db opt file"), os.ModePerm); err != nil { + t.Fatalf("failed to write file db.opt: %v", err) + } + if err := ioutil.WriteFile(path.Join(extraDir, "extra.stuff"), []byte("extra file"), os.ModePerm); err != nil { + t.Fatalf("failed to write file extra.stuff: %v", err) + } + if err := ioutil.WriteFile(path.Join(outsideDbDir, "table1.frm"), []byte("frm file"), os.ModePerm); err != nil { + t.Fatalf("failed to write file table1.frm: %v", err) + } + if err := os.Symlink(outsideDbDir, path.Join(dataDir, "vt_symlink")); err != nil { + t.Fatalf("failed to symlink vt_symlink: %v", err) + } + + cnf := &Mycnf{ + InnodbDataHomeDir: innodbDataDir, + InnodbLogGroupHomeDir: innodbLogDir, + DataDir: dataDir, + } + + result, err := findFilesTobackup(cnf) + if err != nil { + t.Fatalf("findFilesTobackup failed: %v", err) + } + sort.Sort(forTest(result)) + t.Logf("findFilesTobackup returned: %v", result) + expected := []FileEntry{ + FileEntry{ + Base: "Data", + Name: "vt_db/db.opt", + }, + FileEntry{ + Base: "Data", + Name: "vt_symlink/table1.frm", + }, + FileEntry{ + Base: "InnoDBData", + Name: "innodb_data_1", + }, + FileEntry{ + Base: "InnoDBLog", + Name: "innodb_log_1", + }, + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("got wrong list of FileEntry %v, expected %v", result, expected) + } +} + +type forTest []FileEntry + +func (f forTest) Len() int { return len(f) } +func (f forTest) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f forTest) Less(i, j int) bool { return f[i].Base+f[i].Name < f[j].Base+f[j].Name } diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index 3d590311c4..0cca57a182 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -400,7 +400,7 @@ func (mysqld *Mysqld) initConfig(root string) error { func (mysqld *Mysqld) createDirs() error { log.Infof("creating directory %s", mysqld.TabletDir) - if err := os.MkdirAll(mysqld.TabletDir, 0775); err != nil { + if err := os.MkdirAll(mysqld.TabletDir, os.ModePerm); err != nil { return err } for _, dir := range TopLevelDirs() { @@ -410,7 +410,7 @@ func (mysqld *Mysqld) createDirs() error { for _, dir := range mysqld.config.directoryList() { log.Infof("creating directory %s", dir) - if err :=
os.MkdirAll(dir, 0775); err != nil { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { return err } // FIXME(msolomon) validate permissions? @@ -433,14 +433,14 @@ func (mysqld *Mysqld) createTopDir(dir string) error { if os.IsNotExist(err) { topdir := path.Join(mysqld.TabletDir, dir) log.Infof("creating directory %s", topdir) - return os.MkdirAll(topdir, 0775) + return os.MkdirAll(topdir, os.ModePerm) } return err } linkto := path.Join(target, vtname) source := path.Join(mysqld.TabletDir, dir) log.Infof("creating directory %s", linkto) - err = os.MkdirAll(linkto, 0775) + err = os.MkdirAll(linkto, os.ModePerm) if err != nil { return err } diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index ccc0dadc15..95b7c04072 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -47,8 +47,7 @@ func TestBackupRestore(t *testing.T) { sourceInnodbLogDir := path.Join(root, "source_innodb_log") sourceDataDir := path.Join(root, "source_data") sourceDataDbDir := path.Join(sourceDataDir, "vt_db") - sourceExtraDir := path.Join(sourceDataDir, "extra_dir") - for _, s := range []string{sourceInnodbDataDir, sourceInnodbLogDir, sourceDataDbDir, sourceExtraDir} { + for _, s := range []string{sourceInnodbDataDir, sourceInnodbLogDir, sourceDataDbDir} { if err := os.MkdirAll(s, os.ModePerm); err != nil { t.Fatalf("failed to create directory %v: %v", s, err) } @@ -62,9 +61,6 @@ func TestBackupRestore(t *testing.T) { if err := ioutil.WriteFile(path.Join(sourceDataDbDir, "db.opt"), []byte("db opt file"), os.ModePerm); err != nil { t.Fatalf("failed to write file db.opt: %v", err) } - if err := ioutil.WriteFile(path.Join(sourceExtraDir, "extra.stuff"), []byte("extra file"), os.ModePerm); err != nil { - t.Fatalf("failed to write file extra.stuff: %v", err) - } // create a master tablet, not started, just for shard health master := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) From 1172c8969747a534b18a7bf637b1ca6cb66ea7cd Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 20 May 2015 21:07:18 -0700 Subject: [PATCH 061/128] merge DataSourcer and EventHandler interfaces The DataSourcer and EventHandler interfaces are good on their own, but often a concrete EventHandler implementation needs information that exists only in the DataSourcer. e.g. a DataSourcer reads schema changes from a file system and an EventHandler wants to move that file around in response to different schema change events. The information exchange would be hard to do with two separate interfaces, as this level of abstraction introduces more boilerplate code. This change combines the two interfaces into a single one, schemamanager.Controller, which removes one level of abstraction and makes the code cleaner.
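For reference, the merged interface as inferred from the PlainController methods added below; a sketch only, since the authoritative definition lives in go/vt/schemamanager/schemamanager.go, whose hunk is not shown here:

// Inferred sketch of the merged schemamanager.Controller.
type Controller interface {
	// Lifecycle and data access, formerly DataSourcer.
	Open() error
	Read() ([]string, error)
	Close()
	// Notification hooks, formerly EventHandler.
	OnReadSuccess() error
	OnReadFail(err error) error
	OnValidationSuccess() error
	OnValidationFail(err error) error
	OnExecutorComplete(result *ExecuteResult) error
}

Because NewPlainController splits its input on ';' and trims whitespace, a caller can hand it a whole script in one string: NewPlainController("CREATE TABLE a (pk int); CREATE TABLE b (pk int)") yields two statements from Read.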
--- go/vt/schemamanager/console_event_handler.go | 52 ----- .../console_event_handler_test.go | 41 ---- go/vt/schemamanager/plain_controller.go | 78 ++++++++ go/vt/schemamanager/plain_controller_test.go | 56 ++++++ go/vt/schemamanager/schemamanager.go | 52 +++-- go/vt/schemamanager/schemamanager_test.go | 182 +++++++++--------- go/vt/schemamanager/simple_data_sourcer.go | 41 ---- go/vt/schemamanager/tablet_executor.go | 3 +- 8 files changed, 245 insertions(+), 260 deletions(-) delete mode 100644 go/vt/schemamanager/console_event_handler.go delete mode 100644 go/vt/schemamanager/console_event_handler_test.go create mode 100644 go/vt/schemamanager/plain_controller.go create mode 100644 go/vt/schemamanager/plain_controller_test.go delete mode 100644 go/vt/schemamanager/simple_data_sourcer.go diff --git a/go/vt/schemamanager/console_event_handler.go b/go/vt/schemamanager/console_event_handler.go deleted file mode 100644 index 54d08f065f..0000000000 --- a/go/vt/schemamanager/console_event_handler.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2015, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package schemamanager - -import ( - "encoding/json" - "fmt" -) - -// ConsoleEventHandler prints various schema manager events to the stdout -type ConsoleEventHandler struct{} - -// NewConsoleEventHandler creates a new ConsoleEventHandler instance. -func NewConsoleEventHandler() *ConsoleEventHandler { - return &ConsoleEventHandler{} -} - -// OnDataSourcerReadSuccess is called when schemamanager successfully reads all sql statements. -func (handler *ConsoleEventHandler) OnDataSourcerReadSuccess(sql []string) error { - fmt.Println("Successfully read all schema changes.") - return nil -} - -// OnDataSourcerReadFail is called when schemamanager fails to read all sql statements. -func (handler *ConsoleEventHandler) OnDataSourcerReadFail(err error) error { - fmt.Printf("Failed to read schema changes, error: %v\n", err) - return err -} - -// OnValidationSuccess is called when schemamanager successfully validates all sql statements. -func (handler *ConsoleEventHandler) OnValidationSuccess([]string) error { - fmt.Println("Successfully validate all sqls.") - return nil -} - -// OnValidationFail is called when schemamanager fails to validate sql statements. -func (handler *ConsoleEventHandler) OnValidationFail(err error) error { - fmt.Printf("Failed to validate sqls, error: %v\n", err) - return err -} - -// OnExecutorComplete is called when schemamanager finishes applying schema changes. -func (handler *ConsoleEventHandler) OnExecutorComplete(result *ExecuteResult) error { - out, _ := json.MarshalIndent(result, "", " ") - fmt.Printf("Executor finished, result: %s\n", string(out)) - return nil -} - -// ConsoleEventHandler have to implement EventHandler interface -var _ EventHandler = (*ConsoleEventHandler)(nil) diff --git a/go/vt/schemamanager/console_event_handler_test.go b/go/vt/schemamanager/console_event_handler_test.go deleted file mode 100644 index f1b4b55fe1..0000000000 --- a/go/vt/schemamanager/console_event_handler_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package schemamanager - -import ( - "fmt" - "testing" -) - -func TestConsoleEventHandler(t *testing.T) { - sqls := []string{"CREATE TABLE test_table (pk int)"} - handler := NewConsoleEventHandler() - err := handler.OnDataSourcerReadSuccess(sqls) - if err != nil { - t.Fatalf("OnDataSourcerReadSuccess should succeed") - } - - errReadFail := fmt.Errorf("read fail") - err = handler.OnDataSourcerReadFail(errReadFail) - if err != errReadFail { - t.Fatalf("should get error:%v, but get: %v", errReadFail, err) - } - - err = handler.OnValidationSuccess(sqls) - if err != nil { - t.Fatalf("OnValidationSuccess should succeed") - } - - errValidationFail := fmt.Errorf("validation fail") - err = handler.OnValidationFail(errValidationFail) - if err != errValidationFail { - t.Fatalf("should get error:%v, but get: %v", errValidationFail, err) - } - - err = handler.OnExecutorComplete(&ExecuteResult{}) - if err != nil { - t.Fatalf("OnExecutorComplete should succeed") - } -} diff --git a/go/vt/schemamanager/plain_controller.go b/go/vt/schemamanager/plain_controller.go new file mode 100644 index 0000000000..6aba581a73 --- /dev/null +++ b/go/vt/schemamanager/plain_controller.go @@ -0,0 +1,78 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "encoding/json" + "fmt" + "strings" +) + +// PlainController implements Controller interface. +type PlainController struct { + sqls []string +} + +// NewPlainController creates a new PlainController instance. +func NewPlainController(sqlStr string) *PlainController { + result := &PlainController{ + sqls: make([]string, 0, 32), + } + for _, sql := range strings.Split(sqlStr, ";") { + s := strings.TrimSpace(sql) + if s != "" { + result.sqls = append(result.sqls, s) + } + } + return result +} + +// Open is a no-op. +func (controller *PlainController) Open() error { + return nil +} + +// Read reads schema changes +func (controller *PlainController) Read() ([]string, error) { + return controller.sqls, nil +} + +// Close is a no-op. +func (controller *PlainController) Close() { +} + +// OnReadSuccess is called when schemamanager successfully +// reads all sql statements. +func (controller *PlainController) OnReadSuccess() error { + fmt.Println("Successfully read all schema changes.") + return nil +} + +// OnReadFail is called when schemamanager fails to read all sql statements. +func (controller *PlainController) OnReadFail(err error) error { + fmt.Printf("Failed to read schema changes, error: %v\n", err) + return err +} + +// OnValidationSuccess is called when schemamanager successfully validates all sql statements. +func (controller *PlainController) OnValidationSuccess() error { + fmt.Println("Successfully validate all sqls.") + return nil +} + +// OnValidationFail is called when schemamanager fails to validate sql statements. +func (controller *PlainController) OnValidationFail(err error) error { + fmt.Printf("Failed to validate sqls, error: %v\n", err) + return err +} + +// OnExecutorComplete is called when schemamanager finishes applying schema changes. 
+func (controller *PlainController) OnExecutorComplete(result *ExecuteResult) error { + out, _ := json.MarshalIndent(result, "", " ") + fmt.Printf("Executor finished, result: %s\n", string(out)) + return nil +} + +var _ Controller = (*PlainController)(nil) diff --git a/go/vt/schemamanager/plain_controller_test.go b/go/vt/schemamanager/plain_controller_test.go new file mode 100644 index 0000000000..ecf5e25b51 --- /dev/null +++ b/go/vt/schemamanager/plain_controller_test.go @@ -0,0 +1,56 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "fmt" + "testing" +) + +func TestPlainController(t *testing.T) { + sql := "CREATE TABLE test_table (pk int)" + controller := NewPlainController(sql) + err := controller.Open() + if err != nil { + t.Fatalf("controller.Open should succeed, but got error: %v", err) + } + sqls, err := controller.Read() + if err != nil { + t.Fatalf("controller.Read should succeed, but got error: %v", err) + } + if len(sqls) != 1 { + t.Fatalf("controller should only get one sql, but got: %v", sqls) + } + if sqls[0] != sql { + t.Fatalf("expect to get sql: '%s', but got: '%s'", sql, sqls[0]) + } + defer controller.Close() + err = controller.OnReadSuccess() + if err != nil { + t.Fatalf("OnDataSourcerReadSuccess should succeed") + } + + errReadFail := fmt.Errorf("read fail") + err = controller.OnReadFail(errReadFail) + if err != errReadFail { + t.Fatalf("should get error:%v, but get: %v", errReadFail, err) + } + + err = controller.OnValidationSuccess() + if err != nil { + t.Fatalf("OnValidationSuccess should succeed") + } + + errValidationFail := fmt.Errorf("validation fail") + err = controller.OnValidationFail(errValidationFail) + if err != errValidationFail { + t.Fatalf("should get error:%v, but get: %v", errValidationFail, err) + } + + err = controller.OnExecutorComplete(&ExecuteResult{}) + if err != nil { + t.Fatalf("OnExecutorComplete should succeed") + } +} diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index 9884778f2e..0c3ad8205b 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -12,19 +12,17 @@ import ( mproto "github.com/youtube/vitess/go/mysql/proto" ) -// DataSourcer defines how the autoschema system get schema change commands -type DataSourcer interface { +// Controller is responsible for getting schema change for a +// certain keyspace and also handling various events happened during schema +// change. 
+type Controller interface { Open() error - Read() ([]string, error) - Close() error -} - -// EventHandler defines callbacks for events happen during schema management -type EventHandler interface { - OnDataSourcerReadSuccess([]string) error - OnDataSourcerReadFail(error) error - OnValidationSuccess([]string) error - OnValidationFail(error) error + Read() (sqls []string, err error) + Close() + OnReadSuccess() error + OnReadFail(err error) error + OnValidationSuccess() error + OnValidationFail(err error) error OnExecutorComplete(*ExecuteResult) error } @@ -33,7 +31,7 @@ type Executor interface { Open() error Validate(sqls []string) error Execute(sqls []string) *ExecuteResult - Close() error + Close() } // ExecuteResult contains information about schema management state @@ -58,34 +56,32 @@ type ShardResult struct { } // Run schema changes on Vitess through VtGate -func Run(sourcer DataSourcer, - exec Executor, - handler EventHandler) error { - if err := sourcer.Open(); err != nil { +func Run(controller Controller, executor Executor) error { + if err := controller.Open(); err != nil { log.Errorf("failed to open data sourcer: %v", err) return err } - defer sourcer.Close() - sqls, err := sourcer.Read() + defer controller.Close() + sqls, err := controller.Read() if err != nil { log.Errorf("failed to read data from data sourcer: %v", err) - handler.OnDataSourcerReadFail(err) + controller.OnReadFail(err) return err } - handler.OnDataSourcerReadSuccess(sqls) - if err := exec.Open(); err != nil { + controller.OnReadSuccess() + if err := executor.Open(); err != nil { log.Errorf("failed to open executor: %v", err) return err } - defer exec.Close() - if err := exec.Validate(sqls); err != nil { + defer executor.Close() + if err := executor.Validate(sqls); err != nil { log.Errorf("validation fail: %v", err) - handler.OnValidationFail(err) + controller.OnValidationFail(err) return err } - handler.OnValidationSuccess(sqls) - result := exec.Execute(sqls) - handler.OnExecutorComplete(result) + controller.OnValidationSuccess() + result := executor.Execute(sqls) + controller.OnExecutorComplete(result) if result.ExecutorErr != "" || len(result.FailedShards) > 0 { out, _ := json.MarshalIndent(result, "", " ") return fmt.Errorf("Schema change failed, ExecuteResult: %v\n", string(out)) diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 4d071de61f..470325d657 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -18,76 +18,72 @@ import ( ) var ( - errDataSourcerOpen = errors.New("Open Fail") - errDataSourcerRead = errors.New("Read Fail") - errDataSourcerClose = errors.New("Close Fail") + errControllerOpen = errors.New("Open Fail") + errControllerRead = errors.New("Read Fail") ) -func TestRunSchemaChangesDataSourcerOpenFail(t *testing.T) { - dataSourcer := newFakeDataSourcer([]string{"select * from test_db"}, true, false, false) - handler := newFakeHandler() - exec := newFakeExecutor() - err := Run(dataSourcer, exec, handler) - if err != errDataSourcerOpen { - t.Fatalf("data sourcer open fail, shoud get error: %v, but get error: %v", - errDataSourcerOpen, err) +func TestSchemaManagerControllerOpenFail(t *testing.T) { + controller := newFakeController( + []string{"select * from test_db"}, true, false, false) + err := Run(controller, newFakeExecutor()) + if err != errControllerOpen { + t.Fatalf("controller.Open fail, shoud get error: %v, but get error: %v", + errControllerOpen, err) } } -func 
TestRunSchemaChangesDataSourcerReadFail(t *testing.T) { - dataSourcer := newFakeDataSourcer([]string{"select * from test_db"}, false, true, false) - handler := newFakeHandler() - exec := newFakeExecutor() - err := Run(dataSourcer, exec, handler) - if err != errDataSourcerRead { - t.Fatalf("data sourcer read fail, shoud get error: %v, but get error: %v", - errDataSourcerRead, err) +func TestSchemaManagerControllerReadFail(t *testing.T) { + controller := newFakeController( + []string{"select * from test_db"}, false, true, false) + err := Run(controller, newFakeExecutor()) + if err != errControllerRead { + t.Fatalf("controller.Read fail, shoud get error: %v, but get error: %v", + errControllerRead, err) } - if !handler.onDataSourcerReadFailTriggered { - t.Fatalf("event handler should call OnDataSourcerReadFail but it didn't") + if !controller.onReadFailTriggered { + t.Fatalf("OnReadFail should be called") } } -func TestRunSchemaChangesValidationFail(t *testing.T) { - dataSourcer := newFakeDataSourcer([]string{"invalid sql"}, false, false, false) - handler := newFakeHandler() - exec := newFakeExecutor() - err := Run(dataSourcer, exec, handler) +func TestSchemaManagerValidationFail(t *testing.T) { + controller := newFakeController( + []string{"invalid sql"}, false, false, false) + err := Run(controller, newFakeExecutor()) if err == nil { t.Fatalf("run schema change should fail due to executor.Validate fail") } } -func TestRunSchemaChangesExecutorOpenFail(t *testing.T) { - dataSourcer := newFakeDataSourcer([]string{"create table test_table (pk int);"}, false, false, false) - handler := newFakeHandler() - exec := NewTabletExecutor( +func TestSchemaManagerExecutorOpenFail(t *testing.T) { + controller := newFakeController( + []string{"create table test_table (pk int);"}, false, false, false) + executor := NewTabletExecutor( newFakeTabletManagerClient(), newFakeTopo(), "unknown_keyspace") - err := Run(dataSourcer, exec, handler) + err := Run(controller, executor) if err == nil { t.Fatalf("run schema change should fail due to executor.Open fail") } } -func TestRunSchemaChangesExecutorExecuteFail(t *testing.T) { - dataSourcer := newFakeDataSourcer([]string{"create table test_table (pk int);"}, false, false, false) - handler := newFakeHandler() - exec := NewTabletExecutor( +func TestSchemaManagerExecutorExecuteFail(t *testing.T) { + controller := newFakeController( + []string{"create table test_table (pk int);"}, false, false, false) + executor := NewTabletExecutor( newFakeTabletManagerClient(), newFakeTopo(), "test_keyspace") - err := Run(dataSourcer, exec, handler) + err := Run(controller, executor) if err == nil { t.Fatalf("run schema change should fail due to executor.Execute fail") } } -func TestRunSchemaChanges(t *testing.T) { +func TestSchemaManagerRun(t *testing.T) { sql := "create table test_table (pk int)" - dataSourcer := NewSimpleDataSourcer(sql) - handler := newFakeHandler() + controller := newFakeController( + []string{sql}, false, false, false) fakeTmc := newFakeTabletManagerClient() fakeTmc.AddSchemaChange(sql, &proto.SchemaChangeResult{ BeforeSchema: &proto.SchemaDefinition{}, @@ -105,29 +101,29 @@ func TestRunSchemaChanges(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &proto.SchemaDefinition{}) - exec := NewTabletExecutor( + executor := NewTabletExecutor( fakeTmc, newFakeTopo(), "test_keyspace") - err := Run(dataSourcer, exec, handler) + err := Run(controller, executor) if err != nil { t.Fatalf("schema change should success but get error: %v", err) } - if 
!handler.onDataSourcerReadSuccessTriggered { - t.Fatalf("event handler should call OnDataSourcerReadSuccess but it didn't") + if !controller.onReadSuccessTriggered { + t.Fatalf("OnReadSuccess should be called") } - if handler.onDataSourcerReadFailTriggered { - t.Fatalf("event handler should not call OnDataSourcerReadFail but it did") + if controller.onReadFailTriggered { + t.Fatalf("OnReadFail should not be called") } - if !handler.onValidationSuccessTriggered { - t.Fatalf("event handler should call OnDataSourcerValidateSuccess but it didn't") + if !controller.onValidationSuccessTriggered { + t.Fatalf("OnValidateSuccess should be called") } - if handler.onValidationFailTriggered { - t.Fatalf("event handler should not call OnValidationFail but it did") + if controller.onValidationFailTriggered { + t.Fatalf("OnValidationFail should not be called") } - if !handler.onExecutorCompleteTriggered { - t.Fatalf("event handler should call OnExecutorComplete but it didn't") + if !controller.onExecutorCompleteTriggered { + t.Fatalf("OnExecutorComplete should be called") } } @@ -358,74 +354,68 @@ func (topoServer *fakeTopo) UnlockShardForAction(keyspace, shard, lockPath, resu return fmt.Errorf("not implemented") } -type fakeDataSourcer struct { - sqls []string - openFail bool - readFail bool - closeFail bool +type fakeController struct { + sqls []string + openFail bool + readFail bool + closeFail bool + onReadSuccessTriggered bool + onReadFailTriggered bool + onValidationSuccessTriggered bool + onValidationFailTriggered bool + onExecutorCompleteTriggered bool } -func newFakeDataSourcer(sqls []string, openFail bool, readFail bool, closeFail bool) *fakeDataSourcer { - return &fakeDataSourcer{sqls, openFail, readFail, closeFail} +func newFakeController( + sqls []string, openFail bool, readFail bool, closeFail bool) *fakeController { + return &fakeController{ + sqls: sqls, + openFail: openFail, + readFail: readFail, + closeFail: closeFail, + } } -func (sourcer fakeDataSourcer) Open() error { - if sourcer.openFail { - return errDataSourcerOpen +func (controller *fakeController) Open() error { + if controller.openFail { + return errControllerOpen } return nil } -func (sourcer fakeDataSourcer) Read() ([]string, error) { - if sourcer.readFail { - return nil, errDataSourcerRead +func (controller *fakeController) Read() ([]string, error) { + if controller.readFail { + return nil, errControllerRead } - return sourcer.sqls, nil + return controller.sqls, nil } -func (sourcer fakeDataSourcer) Close() error { - if sourcer.closeFail { - return errDataSourcerClose - } +func (controller *fakeController) Close() { +} + +func (controller *fakeController) OnReadSuccess() error { + controller.onReadSuccessTriggered = true return nil } -type fakeEventHandler struct { - onDataSourcerReadSuccessTriggered bool - onDataSourcerReadFailTriggered bool - onValidationSuccessTriggered bool - onValidationFailTriggered bool - onExecutorCompleteTriggered bool -} - -func newFakeHandler() *fakeEventHandler { - return &fakeEventHandler{} -} - -func (handler *fakeEventHandler) OnDataSourcerReadSuccess([]string) error { - handler.onDataSourcerReadSuccessTriggered = true - return nil -} - -func (handler *fakeEventHandler) OnDataSourcerReadFail(err error) error { - handler.onDataSourcerReadFailTriggered = true +func (controller *fakeController) OnReadFail(err error) error { + controller.onReadFailTriggered = true return err } -func (handler *fakeEventHandler) OnValidationSuccess([]string) error { - handler.onValidationSuccessTriggered = true 
+func (controller *fakeController) OnValidationSuccess() error { + controller.onValidationSuccessTriggered = true return nil } -func (handler *fakeEventHandler) OnValidationFail(err error) error { - handler.onValidationFailTriggered = true +func (controller *fakeController) OnValidationFail(err error) error { + controller.onValidationFailTriggered = true return err } -func (handler *fakeEventHandler) OnExecutorComplete(*ExecuteResult) error { - handler.onExecutorCompleteTriggered = true +func (controller *fakeController) OnExecutorComplete(*ExecuteResult) error { + controller.onExecutorCompleteTriggered = true return nil } -var _ EventHandler = (*fakeEventHandler)(nil) -var _ DataSourcer = (*fakeDataSourcer)(nil) +var _ Controller = (*fakeController)(nil) diff --git a/go/vt/schemamanager/simple_data_sourcer.go b/go/vt/schemamanager/simple_data_sourcer.go deleted file mode 100644 index 51665f75f8..0000000000 --- a/go/vt/schemamanager/simple_data_sourcer.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package schemamanager - -import "strings" - -// SimpleDataSourcer is really simple -type SimpleDataSourcer struct { - sqls []string -} - -// NewSimpleDataSourcer creates a new SimpleDataSourcer instance -func NewSimpleDataSourcer(sqlStr string) *SimpleDataSourcer { - result := &SimpleDataSourcer{sqls: make([]string, 0, 32)} - for _, sql := range strings.Split(sqlStr, ";") { - s := strings.TrimSpace(sql) - if s != "" { - result.sqls = append(result.sqls, s) - } - } - return result -} - -// Open is a no-op -func (ds *SimpleDataSourcer) Open() error { - return nil -} - -// Read reads schema changes -func (ds *SimpleDataSourcer) Read() ([]string, error) { - return ds.sqls, nil -} - -// Close is a no-op -func (ds *SimpleDataSourcer) Close() error { - return nil -} - -var _ DataSourcer = (*SimpleDataSourcer)(nil) diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 39d737dade..1c330c1c80 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -213,12 +213,11 @@ func (exec *TabletExecutor) executeOneTablet( } // Close clears tablet executor states -func (exec *TabletExecutor) Close() error { +func (exec *TabletExecutor) Close() { if !exec.isClosed { exec.tabletInfos = nil exec.isClosed = true } - return nil } var _ Executor = (*TabletExecutor)(nil) From c1c33838341fe87cac9cd61931258cb94194b435 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 20 May 2015 21:37:43 -0700 Subject: [PATCH 062/128] remove uihandler and create UIController --- go/cmd/vtctld/vtctld.go | 12 +-- go/vt/schemamanager/plain_controller.go | 6 +- go/vt/schemamanager/ui_controller.go | 91 +++++++++++++++++++++++ go/vt/schemamanager/ui_controller_test.go | 77 +++++++++++++++++++ go/vt/schemamanager/uihandler/handler.go | 61 --------------- go/vt/wrangler/schema.go | 3 +- 6 files changed, 176 insertions(+), 74 deletions(-) create mode 100644 go/vt/schemamanager/ui_controller.go create mode 100644 go/vt/schemamanager/ui_controller_test.go delete mode 100644 go/vt/schemamanager/uihandler/handler.go diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go index b7472acb12..92a2a7af40 100644 --- a/go/cmd/vtctld/vtctld.go +++ b/go/cmd/vtctld/vtctld.go @@ -10,15 +10,12 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/acl" - schmgr 
"github.com/youtube/vitess/go/vt/schemamanager" - "github.com/youtube/vitess/go/vt/schemamanager/uihandler" + "github.com/youtube/vitess/go/vt/schemamanager" "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topotools" "github.com/youtube/vitess/go/vt/wrangler" - // register gorpc vtgate client - _ "github.com/youtube/vitess/go/vt/vtgate/gorpcvtgateconn" ) var ( @@ -484,15 +481,14 @@ func main() { } sqlStr := r.FormValue("data") keyspace := r.FormValue("keyspace") - executor := schmgr.NewTabletExecutor( + executor := schemamanager.NewTabletExecutor( tmclient.NewTabletManagerClient(), ts, keyspace) - schmgr.Run( - schmgr.NewSimpleDataSourcer(sqlStr), + schemamanager.Run( + schemamanager.NewUIController(sqlStr, w), executor, - uihandler.NewUIEventHandler(w), ) }) servenv.RunDefault() diff --git a/go/vt/schemamanager/plain_controller.go b/go/vt/schemamanager/plain_controller.go index 6aba581a73..ad50c0fb2a 100644 --- a/go/vt/schemamanager/plain_controller.go +++ b/go/vt/schemamanager/plain_controller.go @@ -17,16 +17,16 @@ type PlainController struct { // NewPlainController creates a new PlainController instance. func NewPlainController(sqlStr string) *PlainController { - result := &PlainController{ + controller := &PlainController{ sqls: make([]string, 0, 32), } for _, sql := range strings.Split(sqlStr, ";") { s := strings.TrimSpace(sql) if s != "" { - result.sqls = append(result.sqls, s) + controller.sqls = append(controller.sqls, s) } } - return result + return controller } // Open is a no-op. diff --git a/go/vt/schemamanager/ui_controller.go b/go/vt/schemamanager/ui_controller.go new file mode 100644 index 0000000000..5f08ba2a9d --- /dev/null +++ b/go/vt/schemamanager/ui_controller.go @@ -0,0 +1,91 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + log "github.com/golang/glog" +) + +// UIController handles schema events. +type UIController struct { + sqls []string + writer http.ResponseWriter +} + +// NewUIController creates a UIController instance +func NewUIController(sqlStr string, writer http.ResponseWriter) *UIController { + controller := &UIController{ + sqls: make([]string, 0, 32), + writer: writer, + } + for _, sql := range strings.Split(sqlStr, ";") { + s := strings.TrimSpace(sql) + if s != "" { + controller.sqls = append(controller.sqls, s) + } + } + + return controller +} + +// Open is a no-op. +func (controller *UIController) Open() error { + return nil +} + +// Read reads schema changes +func (controller *UIController) Read() ([]string, error) { + return controller.sqls, nil +} + +// Close is a no-op. 
+func (controller *UIController) Close() { +} + +// OnReadSuccess is no-op +func (controller *UIController) OnReadSuccess() error { + controller.writer.Write( + []byte(fmt.Sprintf("OnReadSuccess, sqls: %v\n", controller.sqls))) + return nil +} + +// OnReadFail is no-op +func (controller *UIController) OnReadFail(err error) error { + controller.writer.Write( + []byte(fmt.Sprintf("OnReadFail, error: %v\n", err))) + return err +} + +// OnValidationSuccess is no-op +func (controller *UIController) OnValidationSuccess() error { + controller.writer.Write( + []byte(fmt.Sprintf("OnValidationSuccess, sqls: %v\n", controller.sqls))) + return nil +} + +// OnValidationFail is no-op +func (controller *UIController) OnValidationFail(err error) error { + controller.writer.Write( + []byte(fmt.Sprintf("OnValidationFail, error: %v\n", err))) + return err +} + +// OnExecutorComplete is no-op +func (controller *UIController) OnExecutorComplete(result *ExecuteResult) error { + data, err := json.Marshal(result) + if err != nil { + log.Errorf("Failed to serialize ExecuteResult: %v", err) + return err + } + controller.writer.Write([]byte(fmt.Sprintf("Executor succeeds: %s", string(data)))) + return nil +} + +var _ Controller = (*UIController)(nil) diff --git a/go/vt/schemamanager/ui_controller_test.go b/go/vt/schemamanager/ui_controller_test.go new file mode 100644 index 0000000000..1b9635c199 --- /dev/null +++ b/go/vt/schemamanager/ui_controller_test.go @@ -0,0 +1,77 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "fmt" + "net/http/httptest" + "strings" + "testing" +) + +func TestUIController(t *testing.T) { + sql := "CREATE TABLE test_table (pk int)" + response := httptest.NewRecorder() + controller := NewUIController(sql, response) + err := controller.Open() + if err != nil { + t.Fatalf("controller.Open should succeed, but got error: %v", err) + } + sqls, err := controller.Read() + if err != nil { + t.Fatalf("controller.Read should succeed, but got error: %v", err) + } + if len(sqls) != 1 { + t.Fatalf("controller should only get one sql, but got: %v", sqls) + } + if sqls[0] != sql { + t.Fatalf("expect to get sql: '%s', but got: '%s'", sql, sqls[0]) + } + defer controller.Close() + err = controller.OnReadSuccess() + if err != nil { + t.Fatalf("OnDataSourcerReadSuccess should succeed") + } + if !strings.Contains(response.Body.String(), "OnReadSuccess, sqls") { + t.Fatalf("controller.OnReadSuccess should write to http response") + } + errReadFail := fmt.Errorf("read fail") + err = controller.OnReadFail(errReadFail) + if err != errReadFail { + t.Fatalf("should get error:%v, but get: %v", errReadFail, err) + } + + if !strings.Contains(response.Body.String(), "OnReadFail, error") { + t.Fatalf("controller.OnReadFail should write to http response") + } + + err = controller.OnValidationSuccess() + if err != nil { + t.Fatalf("OnValidationSuccess should succeed") + } + + if !strings.Contains(response.Body.String(), "OnValidationSuccess, sqls") { + t.Fatalf("controller.OnValidationSuccess should write to http response") + } + + errValidationFail := fmt.Errorf("validation fail") + err = controller.OnValidationFail(errValidationFail) + if err != errValidationFail { + t.Fatalf("should get error:%v, but get: %v", errValidationFail, err) + } + + if !strings.Contains(response.Body.String(), "OnValidationFail, error") { + t.Fatalf("controller.OnValidationFail should write to 
http response") + } + + err = controller.OnExecutorComplete(&ExecuteResult{}) + if err != nil { + t.Fatalf("OnExecutorComplete should succeed") + } + + if !strings.Contains(response.Body.String(), "Executor succeeds") { + t.Fatalf("controller.OnExecutorComplete should write to http response") + } +} diff --git a/go/vt/schemamanager/uihandler/handler.go b/go/vt/schemamanager/uihandler/handler.go deleted file mode 100644 index 9be9720908..0000000000 --- a/go/vt/schemamanager/uihandler/handler.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uihandler - -import ( - "encoding/json" - "fmt" - "net/http" - - log "github.com/golang/glog" - "github.com/youtube/vitess/go/vt/schemamanager" -) - -// UIEventHandler handles schema events -type UIEventHandler struct { - writer http.ResponseWriter -} - -// NewUIEventHandler creates a UIEventHandler instance -func NewUIEventHandler(writer http.ResponseWriter) *UIEventHandler { - return &UIEventHandler{writer: writer} -} - -// OnDataSourcerReadSuccess is no-op -func (handler *UIEventHandler) OnDataSourcerReadSuccess(sqls []string) error { - handler.writer.Write([]byte(fmt.Sprintf("OnDataSourcerReadSuccess, sqls: %v\n", sqls))) - return nil -} - -// OnDataSourcerReadFail is no-op -func (handler *UIEventHandler) OnDataSourcerReadFail(err error) error { - handler.writer.Write([]byte(fmt.Sprintf("OnDataSourcerReadFail, error: %v\n", err))) - return err -} - -// OnValidationSuccess is no-op -func (handler *UIEventHandler) OnValidationSuccess(sqls []string) error { - handler.writer.Write([]byte(fmt.Sprintf("OnValidationSuccess, sqls: %v\n", sqls))) - return nil -} - -// OnValidationFail is no-op -func (handler *UIEventHandler) OnValidationFail(err error) error { - handler.writer.Write([]byte(fmt.Sprintf("OnValidationFail, error: %v\n", err))) - return err -} - -// OnExecutorComplete is no-op -func (handler *UIEventHandler) OnExecutorComplete(result *schemamanager.ExecuteResult) error { - str, err := json.Marshal(result) - if err != nil { - log.Errorf("Failed to serialize ExecuteResult: %v", err) - return err - } - handler.writer.Write(str) - return nil -} - -var _ schemamanager.EventHandler = (*UIEventHandler)(nil) diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index 7215f6d5f7..182c69511c 100644 --- a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -409,9 +409,8 @@ func (wr *Wrangler) ApplySchemaKeyspace(ctx context.Context, keyspace string, ch } err = schemamanager.Run( - schemamanager.NewSimpleDataSourcer(change), + schemamanager.NewPlainController(change), schemamanager.NewTabletExecutor(wr.tmc, wr.ts, keyspace), - schemamanager.NewConsoleEventHandler(), ) return nil, wr.unlockKeyspace(ctx, keyspace, actionNode, lockPath, err) From 318ebfc310e7a5cb25ec4258b925adfbb5c48d5a Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 20 May 2015 23:03:40 -0700 Subject: [PATCH 063/128] add GetKeyspace in the schemamanager.Controller interface The idea is that sometimes a Controller implementation will take care all keyspaces. In such case, schemamanager.Executor is not able to know keyspace ahead. 
--- go/cmd/vtctld/vtctld.go | 5 ++--- go/vt/schemamanager/plain_controller.go | 13 ++++++++--- go/vt/schemamanager/plain_controller_test.go | 8 ++++++- go/vt/schemamanager/schemamanager.go | 6 +++-- go/vt/schemamanager/schemamanager_test.go | 23 +++++++++++++------- go/vt/schemamanager/tablet_executor.go | 21 ++++++++---------- go/vt/schemamanager/tablet_executor_test.go | 11 +++++----- go/vt/schemamanager/ui_controller.go | 18 ++++++++++----- go/vt/schemamanager/ui_controller_test.go | 8 ++++++- go/vt/wrangler/schema.go | 4 ++-- 10 files changed, 74 insertions(+), 43 deletions(-) diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go index 92a2a7af40..51d310f14d 100644 --- a/go/cmd/vtctld/vtctld.go +++ b/go/cmd/vtctld/vtctld.go @@ -483,11 +483,10 @@ func main() { keyspace := r.FormValue("keyspace") executor := schemamanager.NewTabletExecutor( tmclient.NewTabletManagerClient(), - ts, - keyspace) + ts) schemamanager.Run( - schemamanager.NewUIController(sqlStr, w), + schemamanager.NewUIController(sqlStr, keyspace, w), executor, ) }) diff --git a/go/vt/schemamanager/plain_controller.go b/go/vt/schemamanager/plain_controller.go index ad50c0fb2a..5b97b132ed 100644 --- a/go/vt/schemamanager/plain_controller.go +++ b/go/vt/schemamanager/plain_controller.go @@ -12,13 +12,15 @@ import ( // PlainController implements Controller interface. type PlainController struct { - sqls []string + sqls []string + keyspace string } // NewPlainController creates a new PlainController instance. -func NewPlainController(sqlStr string) *PlainController { +func NewPlainController(sqlStr string, keyspace string) *PlainController { controller := &PlainController{ - sqls: make([]string, 0, 32), + sqls: make([]string, 0, 32), + keyspace: keyspace, } for _, sql := range strings.Split(sqlStr, ";") { s := strings.TrimSpace(sql) @@ -43,6 +45,11 @@ func (controller *PlainController) Read() ([]string, error) { func (controller *PlainController) Close() { } +// GetKeyspace returns keyspace to apply schema. +func (controller *PlainController) GetKeyspace() string { + return controller.keyspace +} + // OnReadSuccess is called when schemamanager successfully // reads all sql statements. 
func (controller *PlainController) OnReadSuccess() error { diff --git a/go/vt/schemamanager/plain_controller_test.go b/go/vt/schemamanager/plain_controller_test.go index ecf5e25b51..7816d23ad1 100644 --- a/go/vt/schemamanager/plain_controller_test.go +++ b/go/vt/schemamanager/plain_controller_test.go @@ -11,11 +11,17 @@ import ( func TestPlainController(t *testing.T) { sql := "CREATE TABLE test_table (pk int)" - controller := NewPlainController(sql) + controller := NewPlainController(sql, "test_keyspace") err := controller.Open() if err != nil { t.Fatalf("controller.Open should succeed, but got error: %v", err) } + + keyspace := controller.GetKeyspace() + if keyspace != "test_keyspace" { + t.Fatalf("expect to get keyspace: 'test_keyspace', but got keyspace: '%s'", keyspace) + } + sqls, err := controller.Read() if err != nil { t.Fatalf("controller.Read should succeed, but got error: %v", err) diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index 0c3ad8205b..9c70f07c0d 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -19,6 +19,7 @@ type Controller interface { Open() error Read() (sqls []string, err error) Close() + GetKeyspace() string OnReadSuccess() error OnReadFail(err error) error OnValidationSuccess() error @@ -28,7 +29,7 @@ type Controller interface { // Executor applies schema changes to underlying system type Executor interface { - Open() error + Open(keyspace string) error Validate(sqls []string) error Execute(sqls []string) *ExecuteResult Close() @@ -69,7 +70,8 @@ func Run(controller Controller, executor Executor) error { return err } controller.OnReadSuccess() - if err := executor.Open(); err != nil { + keyspace := controller.GetKeyspace() + if err := executor.Open(keyspace); err != nil { log.Errorf("failed to open executor: %v", err) return err } diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 470325d657..e8e3649cfb 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -57,10 +57,10 @@ func TestSchemaManagerValidationFail(t *testing.T) { func TestSchemaManagerExecutorOpenFail(t *testing.T) { controller := newFakeController( []string{"create table test_table (pk int);"}, false, false, false) + controller.SetKeyspace("unknown_keyspace") executor := NewTabletExecutor( newFakeTabletManagerClient(), - newFakeTopo(), - "unknown_keyspace") + newFakeTopo()) err := Run(controller, executor) if err == nil { t.Fatalf("run schema change should fail due to executor.Open fail") @@ -72,8 +72,7 @@ func TestSchemaManagerExecutorExecuteFail(t *testing.T) { []string{"create table test_table (pk int);"}, false, false, false) executor := NewTabletExecutor( newFakeTabletManagerClient(), - newFakeTopo(), - "test_keyspace") + newFakeTopo()) err := Run(controller, executor) if err == nil { t.Fatalf("run schema change should fail due to executor.Execute fail") @@ -103,8 +102,7 @@ func TestSchemaManagerRun(t *testing.T) { executor := NewTabletExecutor( fakeTmc, - newFakeTopo(), - "test_keyspace") + newFakeTopo()) err := Run(controller, executor) if err != nil { @@ -130,8 +128,7 @@ func TestSchemaManagerRun(t *testing.T) { func newFakeExecutor() *TabletExecutor { return NewTabletExecutor( newFakeTabletManagerClient(), - newFakeTopo(), - "test_keyspace") + newFakeTopo()) } func newFakeTabletManagerClient() *fakeTabletManagerClient { @@ -356,6 +353,7 @@ func (topoServer *fakeTopo) UnlockShardForAction(keyspace, 
shard, lockPath, resu type fakeController struct { sqls []string + keyspace string openFail bool readFail bool closeFail bool @@ -370,12 +368,17 @@ func newFakeController( sqls []string, openFail bool, readFail bool, closeFail bool) *fakeController { return &fakeController{ sqls: sqls, + keyspace: "test_keyspace", openFail: openFail, readFail: readFail, closeFail: closeFail, } } +func (controller *fakeController) SetKeyspace(keyspace string) { + controller.keyspace = keyspace +} + func (controller *fakeController) Open() error { if controller.openFail { return errControllerOpen @@ -393,6 +396,10 @@ func (controller *fakeController) Read() ([]string, error) { func (controller *fakeController) Close() { } +func (controller *fakeController) GetKeyspace() string { + return controller.keyspace +} + func (controller *fakeController) OnReadSuccess() error { controller.onReadSuccessTriggered = true return nil diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 1c330c1c80..089e15ee59 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -18,7 +18,6 @@ import ( // TabletExecutor applies schema changes to all tablets. type TabletExecutor struct { - keyspace string tmClient tmclient.TabletManagerClient topoServer topo.Server tabletInfos []*topo.TabletInfo @@ -29,10 +28,8 @@ type TabletExecutor struct { // NewTabletExecutor creates a new TabletExecutor instance func NewTabletExecutor( tmClient tmclient.TabletManagerClient, - topoServer topo.Server, - keyspace string) *TabletExecutor { + topoServer topo.Server) *TabletExecutor { return &TabletExecutor{ - keyspace: keyspace, tmClient: tmClient, topoServer: topoServer, isClosed: true, @@ -40,32 +37,32 @@ func NewTabletExecutor( } // Open opens a connection to the master for every shard -func (exec *TabletExecutor) Open() error { +func (exec *TabletExecutor) Open(keyspace string) error { if !exec.isClosed { return nil } - shardNames, err := exec.topoServer.GetShardNames(exec.keyspace) + shardNames, err := exec.topoServer.GetShardNames(keyspace) if err != nil { - return fmt.Errorf("unable to get shard names for keyspace: %s, error: %v", exec.keyspace, err) + return fmt.Errorf("unable to get shard names for keyspace: %s, error: %v", keyspace, err) } - log.Infof("Keyspace: %v, Shards: %v\n", exec.keyspace, shardNames) + log.Infof("Keyspace: %v, Shards: %v\n", keyspace, shardNames) exec.tabletInfos = make([]*topo.TabletInfo, len(shardNames)) for i, shardName := range shardNames { - shardInfo, err := exec.topoServer.GetShard(exec.keyspace, shardName) + shardInfo, err := exec.topoServer.GetShard(keyspace, shardName) log.Infof("\tShard: %s, ShardInfo: %v\n", shardName, shardInfo) if err != nil { - return fmt.Errorf("unable to get shard info, keyspace: %s, shard: %s, error: %v", exec.keyspace, shardName, err) + return fmt.Errorf("unable to get shard info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err) } tabletInfo, err := exec.topoServer.GetTablet(shardInfo.MasterAlias) if err != nil { - return fmt.Errorf("unable to get master tablet info, keyspace: %s, shard: %s, error: %v", exec.keyspace, shardName, err) + return fmt.Errorf("unable to get master tablet info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err) } exec.tabletInfos[i] = tabletInfo log.Infof("\t\tTabletInfo: %+v\n", tabletInfo) } if len(exec.tabletInfos) == 0 { - return fmt.Errorf("keyspace: %s does not contain any master tablets", exec.keyspace) + return fmt.Errorf("keyspace: %s 
does not contain any master tablets", keyspace) } exec.isClosed = false return nil diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index 394edae46f..140e879365 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -12,13 +12,13 @@ import ( func TestTabletExecutorOpen(t *testing.T) { executor := newFakeExecutor() - if err := executor.Open(); err != nil { + if err := executor.Open("test_keyspace"); err != nil { t.Fatalf("executor.Open() should succeed") } defer executor.Close() - if err := executor.Open(); err != nil { + if err := executor.Open("test_keyspace"); err != nil { t.Fatalf("open an opened executor should also succeed") } } @@ -51,8 +51,7 @@ func TestTabletExecutorValidate(t *testing.T) { executor := NewTabletExecutor( fakeTmc, - newFakeTopo(), - "test_keyspace") + newFakeTopo()) sqls := []string{ "ALTER TABLE test_table ADD COLUMN new_id bigint(20)", @@ -63,7 +62,7 @@ func TestTabletExecutorValidate(t *testing.T) { t.Fatalf("validate should fail because executor is closed") } - executor.Open() + executor.Open("test_keyspace") defer executor.Close() // schema changes with DMLs should fail @@ -108,7 +107,7 @@ func TestTabletExecutorExecute(t *testing.T) { t.Fatalf("execute should fail, call execute.Open first") } - executor.Open() + executor.Open("test_keyspace") defer executor.Close() result = executor.Execute(sqls) diff --git a/go/vt/schemamanager/ui_controller.go b/go/vt/schemamanager/ui_controller.go index 5f08ba2a9d..651190afc3 100644 --- a/go/vt/schemamanager/ui_controller.go +++ b/go/vt/schemamanager/ui_controller.go @@ -15,15 +15,18 @@ import ( // UIController handles schema events. type UIController struct { - sqls []string - writer http.ResponseWriter + sqls []string + keyspace string + writer http.ResponseWriter } // NewUIController creates a UIController instance -func NewUIController(sqlStr string, writer http.ResponseWriter) *UIController { +func NewUIController( + sqlStr string, keyspace string, writer http.ResponseWriter) *UIController { controller := &UIController{ - sqls: make([]string, 0, 32), - writer: writer, + sqls: make([]string, 0, 32), + keyspace: keyspace, + writer: writer, } for _, sql := range strings.Split(sqlStr, ";") { s := strings.TrimSpace(sql) @@ -49,6 +52,11 @@ func (controller *UIController) Read() ([]string, error) { func (controller *UIController) Close() { } +// GetKeyspace returns keyspace to apply schema. 
+func (controller *UIController) GetKeyspace() string {
+	return controller.keyspace
+}
+
 // OnReadSuccess is no-op
 func (controller *UIController) OnReadSuccess() error {
 	controller.writer.Write(
diff --git a/go/vt/schemamanager/ui_controller_test.go b/go/vt/schemamanager/ui_controller_test.go
index 1b9635c199..9be646ed7d 100644
--- a/go/vt/schemamanager/ui_controller_test.go
+++ b/go/vt/schemamanager/ui_controller_test.go
@@ -14,11 +14,17 @@ import (
 func TestUIController(t *testing.T) {
 	sql := "CREATE TABLE test_table (pk int)"
 	response := httptest.NewRecorder()
-	controller := NewUIController(sql, response)
+	controller := NewUIController(sql, "test_keyspace", response)
 	err := controller.Open()
 	if err != nil {
 		t.Fatalf("controller.Open should succeed, but got error: %v", err)
 	}
+
+	keyspace := controller.GetKeyspace()
+	if keyspace != "test_keyspace" {
+		t.Fatalf("expect to get keyspace: 'test_keyspace', but got keyspace: '%s'", keyspace)
+	}
+
 	sqls, err := controller.Read()
 	if err != nil {
 		t.Fatalf("controller.Read should succeed, but got error: %v", err)
diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go
index 182c69511c..0f96b380c9 100644
--- a/go/vt/wrangler/schema.go
+++ b/go/vt/wrangler/schema.go
@@ -409,8 +409,8 @@ func (wr *Wrangler) ApplySchemaKeyspace(ctx context.Context, keyspace string, ch
 	}
 
 	err = schemamanager.Run(
-		schemamanager.NewPlainController(change),
-		schemamanager.NewTabletExecutor(wr.tmc, wr.ts, keyspace),
+		schemamanager.NewPlainController(change, keyspace),
+		schemamanager.NewTabletExecutor(wr.tmc, wr.ts),
 	)
 
 	return nil, wr.unlockKeyspace(ctx, keyspace, actionNode, lockPath, err)

From f777f49938de5a2e1d7adc81a951816db78bab8b Mon Sep 17 00:00:00 2001
From: Shengzhe Yao
Date: Tue, 19 May 2015 16:36:05 -0700
Subject: [PATCH 064/128] monitor schema change dir in vtctld

1. Add two flags in vtctld: schemaChangeDir and schemaChangeController.
   schemaChangeDir specifies a parent directory that contains schema
   changes for all keyspaces. schemaChangeController controls how to get
   schema change sqls from schemaChangeDir and how to handle relevant
   schema change events.
2. Add RegisterControllerFactory to schemamanager, which allows
   developers to register different Controller implementations.
3. Add a test case for when a schema change fails on some shards.
---
 go/cmd/vtctld/vtctld.go                     | 33 ++++++++-
 go/vt/schemamanager/schemamanager.go        | 29 ++++++++
 go/vt/schemamanager/schemamanager_test.go   | 75 ++++++++++++++++++++-
 go/vt/schemamanager/tablet_executor_test.go |  2 +-
 4 files changed, 134 insertions(+), 5 deletions(-)

diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go
index 51d310f14d..7237cb7207 100644
--- a/go/cmd/vtctld/vtctld.go
+++ b/go/cmd/vtctld/vtctld.go
@@ -5,11 +5,13 @@ import (
 	"fmt"
 	"net/http"
 	"strings"
+	"time"
 
 	"golang.org/x/net/context"
 
 	log "github.com/golang/glog"
 	"github.com/youtube/vitess/go/acl"
+	"github.com/youtube/vitess/go/timer"
 	"github.com/youtube/vitess/go/vt/schemamanager"
 	"github.com/youtube/vitess/go/vt/servenv"
 	"github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
@@ -19,8 +21,10 @@ import (
 )
 
 var (
-	templateDir = flag.String("templates", "", "directory containing templates")
-	debug       = flag.Bool("debug", false, "recompile templates for every request")
+	templateDir            = flag.String("templates", "", "directory containing templates")
+	debug                  = flag.Bool("debug", false, "recompile templates for every request")
+	schemaChangeDir        = flag.String("schema-change-dir", "", "directory that contains schema changes for all keyspaces. 
Each keyspace has its own directory and schema changes are expected to live in '$KEYSPACE/input' dir. e.g. test_keyspace/input/*sql, each sql file represents a schema change")
+	schemaChangeController = flag.String("schema-change-controller", "", "schema change controller is responsible for finding schema changes and responding to schema change events")
 )
 
 func init() {
@@ -490,5 +494,30 @@ func main() {
 			executor,
 		)
 	})
+	if *schemaChangeDir != "" {
+		timer := timer.NewTimer(1 * time.Minute)
+		controllerFactory, err :=
+			schemamanager.GetControllerFactory(*schemaChangeController)
+		if err != nil {
+			log.Fatalf("unable to get a controller factory, error: %v", err)
+		}
+
+		timer.Start(func() {
+			controller, err := controllerFactory(map[string]string{
+				schemamanager.SchemaChangeDirName: *schemaChangeDir,
+			})
+			if err != nil {
+				log.Errorf("failed to get controller, error: %v", err)
+				return
+			}
+
+			schemamanager.Run(
+				controller,
+				schemamanager.NewTabletExecutor(
+					tmclient.NewTabletManagerClient(), ts),
+			)
+		})
+		servenv.OnClose(func() { timer.Stop() })
+	}
 	servenv.RunDefault()
 }
diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go
index 9c70f07c0d..d039ccf48b 100644
--- a/go/vt/schemamanager/schemamanager.go
+++ b/go/vt/schemamanager/schemamanager.go
@@ -12,6 +12,17 @@ import (
 	mproto "github.com/youtube/vitess/go/mysql/proto"
 )
 
+const (
+	SchemaChangeDirName = "schema_change_dir"
+)
+
+// ControllerFactory takes a set of params and constructs a Controller instance.
+type ControllerFactory func(params map[string]string) (Controller, error)
+
+var (
+	controllerFactories = make(map[string]ControllerFactory)
+)
+
 // Controller is responsible for getting schema change for a
 // certain keyspace and also handling various events happened during schema
 // change.
@@ -69,6 +80,7 @@ func Run(controller Controller, executor Executor) error {
 		controller.OnReadFail(err)
 		return err
 	}
+
 	controller.OnReadSuccess()
 	keyspace := controller.GetKeyspace()
 	if err := executor.Open(keyspace); err != nil {
@@ -90,3 +102,20 @@ func Run(controller Controller, executor Executor) error {
 	}
 	return nil
 }
+
+// RegisterControllerFactory registers a controller factory.
+func RegisterControllerFactory(name string, factory ControllerFactory) {
+	if _, ok := controllerFactories[name]; ok {
+		panic(fmt.Sprintf("registering an already registered key: %s", name))
+	}
+	controllerFactories[name] = factory
+}
+
+// GetControllerFactory gets a ControllerFactory.
+func GetControllerFactory(name string) (ControllerFactory, error) { + factory, ok := controllerFactories[name] + if !ok { + return nil, fmt.Errorf("there is no data sourcer factory with name: %s", name) + } + return factory, nil +} diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index e8e3649cfb..e61baea401 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -9,6 +9,7 @@ import ( "fmt" "testing" + mproto "github.com/youtube/vitess/go/mysql/proto" "github.com/youtube/vitess/go/vt/mysqlctl/proto" "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" _ "github.com/youtube/vitess/go/vt/tabletmanager/gorpctmclient" @@ -125,6 +126,67 @@ func TestSchemaManagerRun(t *testing.T) { } } +func TestSchemaManagerExecutorFail(t *testing.T) { + sql := "create table test_table (pk int)" + controller := newFakeController([]string{sql}, false, false, false) + fakeTmc := newFakeTabletManagerClient() + fakeTmc.AddSchemaChange(sql, &proto.SchemaChangeResult{ + BeforeSchema: &proto.SchemaDefinition{}, + AfterSchema: &proto.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", + TableDefinitions: []*proto.TableDefinition{ + &proto.TableDefinition{ + Name: "test_table", + Schema: sql, + Type: proto.TableBaseTable, + }, + }, + }, + }) + + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &proto.SchemaDefinition{}) + fakeTmc.EnableExecuteFetchAsDbaError = true + executor := NewTabletExecutor(fakeTmc, newFakeTopo()) + + err := Run(controller, executor) + if err == nil { + t.Fatalf("schema change should fail") + } +} + +func TestSchemaManagerRegisterControllerFactory(t *testing.T) { + sql := "create table test_table (pk int)" + RegisterControllerFactory( + "test_controller", + func(params map[string]string) (Controller, error) { + return newFakeController([]string{sql}, false, false, false), nil + + }) + + _, err := GetControllerFactory("unknown") + if err == nil { + t.Fatalf("controller factory is not registered, GetControllerFactory should return an error") + } + _, err = GetControllerFactory("test_controller") + if err != nil { + t.Fatalf("GetControllerFactory should succeed, but get an error: %v", err) + } + func() { + defer func() { + err := recover() + if err == nil { + t.Fatalf("RegisterControllerFactory should fail, it registers a registered ControllerFactory") + } + }() + RegisterControllerFactory( + "test_controller", + func(params map[string]string) (Controller, error) { + return newFakeController([]string{sql}, false, false, false), nil + + }) + }() +} + func newFakeExecutor() *TabletExecutor { return NewTabletExecutor( newFakeTabletManagerClient(), @@ -141,8 +203,9 @@ func newFakeTabletManagerClient() *fakeTabletManagerClient { type fakeTabletManagerClient struct { tmclient.TabletManagerClient - preflightSchemas map[string]*proto.SchemaChangeResult - schemaDefinitions map[string]*proto.SchemaDefinition + EnableExecuteFetchAsDbaError bool + preflightSchemas map[string]*proto.SchemaChangeResult + schemaDefinitions map[string]*proto.SchemaDefinition } func (client *fakeTabletManagerClient) AddSchemaChange( @@ -172,6 +235,14 @@ func (client *fakeTabletManagerClient) GetSchema(ctx context.Context, tablet *to return result, nil } +func (client *fakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, tablet *topo.TabletInfo, query string, maxRows int, wantFields, disableBinlogs, reloadSchema bool) (*mproto.QueryResult, error) { + if 
client.EnableExecuteFetchAsDbaError { + var result mproto.QueryResult + return &result, fmt.Errorf("ExecuteFetchAsDba occur an unknown error") + } + return client.TabletManagerClient.ExecuteFetchAsDba(ctx, tablet, query, maxRows, wantFields, disableBinlogs, reloadSchema) +} + type fakeTopo struct{} func newFakeTopo() *fakeTopo { diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index 140e879365..481bbf6ccf 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -13,7 +13,7 @@ import ( func TestTabletExecutorOpen(t *testing.T) { executor := newFakeExecutor() if err := executor.Open("test_keyspace"); err != nil { - t.Fatalf("executor.Open() should succeed") + t.Fatalf("executor.Open should succeed") } defer executor.Close() From d0cc07ae69e14552698c29631babe06385d32a40 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 08:54:52 -0700 Subject: [PATCH 065/128] Simplifying file backup storage (implementation and plugins). --- go/cmd/vtctl/plugin_filebackupstorage.go | 10 +++------ go/cmd/vttablet/plugin_filebackupstorage.go | 16 -------------- go/vt/mysqlctl/backupstorage/file.go | 24 +++++++-------------- go/vt/mysqlctl/backupstorage/file_test.go | 7 +++--- go/vt/wrangler/testlib/backup_test.go | 1 - 5 files changed, 14 insertions(+), 44 deletions(-) delete mode 100644 go/cmd/vttablet/plugin_filebackupstorage.go diff --git a/go/cmd/vtctl/plugin_filebackupstorage.go b/go/cmd/vtctl/plugin_filebackupstorage.go index fedb3f853f..25b4da38bf 100644 --- a/go/cmd/vtctl/plugin_filebackupstorage.go +++ b/go/cmd/vtctl/plugin_filebackupstorage.go @@ -4,10 +4,6 @@ package main -import "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" - -func init() { - initFuncs = append(initFuncs, func() { - backupstorage.RegisterFileBackupStorage() - }) -} +import ( + _ "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" +) diff --git a/go/cmd/vttablet/plugin_filebackupstorage.go b/go/cmd/vttablet/plugin_filebackupstorage.go deleted file mode 100644 index ae1bec8e4a..0000000000 --- a/go/cmd/vttablet/plugin_filebackupstorage.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" - "github.com/youtube/vitess/go/vt/servenv" -) - -func init() { - servenv.OnRun(func() { - backupstorage.RegisterFileBackupStorage() - }) -} diff --git a/go/vt/mysqlctl/backupstorage/file.go b/go/vt/mysqlctl/backupstorage/file.go index 479afc754c..e293301031 100644 --- a/go/vt/mysqlctl/backupstorage/file.go +++ b/go/vt/mysqlctl/backupstorage/file.go @@ -45,7 +45,7 @@ func (fbh *FileBackupHandle) AddFile(filename string) (io.WriteCloser, error) { if fbh.readOnly { return nil, fmt.Errorf("AddFile cannot be called on read-only backup") } - p := path.Join(fbh.fbs.root, fbh.bucket, fbh.name, filename) + p := path.Join(*FileBackupStorageRoot, fbh.bucket, fbh.name, filename) return os.Create(p) } @@ -70,19 +70,17 @@ func (fbh *FileBackupHandle) ReadFile(filename string) (io.ReadCloser, error) { if !fbh.readOnly { return nil, fmt.Errorf("ReadFile cannot be called on read-write backup") } - p := path.Join(fbh.fbs.root, fbh.bucket, fbh.name, filename) + p := path.Join(*FileBackupStorageRoot, fbh.bucket, fbh.name, filename) return os.Open(p) } // FileBackupStorage implements BackupStorage for local file system. -type FileBackupStorage struct { - root string -} +type FileBackupStorage struct{} // ListBackups is part of the BackupStorage interface func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) { // ReadDir already sorts the results - p := path.Join(fbs.root, bucket) + p := path.Join(*FileBackupStorageRoot, bucket) fi, err := ioutil.ReadDir(p) if err != nil { if os.IsNotExist(err) { @@ -112,7 +110,7 @@ func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) // StartBackup is part of the BackupStorage interface func (fbs *FileBackupStorage) StartBackup(bucket, name string) (BackupHandle, error) { // make sure the bucket directory exists - p := path.Join(fbs.root, bucket) + p := path.Join(*FileBackupStorageRoot, bucket) if err := os.MkdirAll(p, os.ModePerm); err != nil { return nil, err } @@ -133,16 +131,10 @@ func (fbs *FileBackupStorage) StartBackup(bucket, name string) (BackupHandle, er // RemoveBackup is part of the BackupStorage interface func (fbs *FileBackupStorage) RemoveBackup(bucket, name string) error { - p := path.Join(fbs.root, bucket, name) + p := path.Join(*FileBackupStorageRoot, bucket, name) return os.RemoveAll(p) } -// RegisterFileBackupStorage should be called after Flags has been -// initialized, to register the FileBackupStorage implementation -func RegisterFileBackupStorage() { - if *FileBackupStorageRoot != "" { - BackupStorageMap["file"] = &FileBackupStorage{ - root: *FileBackupStorageRoot, - } - } +func init() { + BackupStorageMap["file"] = &FileBackupStorage{} } diff --git a/go/vt/mysqlctl/backupstorage/file_test.go b/go/vt/mysqlctl/backupstorage/file_test.go index 1d0d58279c..25f5eae6f9 100644 --- a/go/vt/mysqlctl/backupstorage/file_test.go +++ b/go/vt/mysqlctl/backupstorage/file_test.go @@ -25,14 +25,13 @@ func setupFileBackupStorage(t *testing.T) *FileBackupStorage { if err != nil { t.Fatalf("os.TempDir failed: %v", err) } - return &FileBackupStorage{ - root: root, - } + *FileBackupStorageRoot = root + return &FileBackupStorage{} } // cleanupFileBackupStorage removes the entire directory func cleanupFileBackupStorage(fbs *FileBackupStorage) { - os.RemoveAll(fbs.root) + os.RemoveAll(*FileBackupStorageRoot) } func TestListBackups(t *testing.T) { diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go 
index 95b7c04072..02c01d13f3 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -40,7 +40,6 @@ func TestBackupRestore(t *testing.T) { fbsRoot := path.Join(root, "fbs") *backupstorage.FileBackupStorageRoot = fbsRoot *backupstorage.BackupStorageImplementation = "file" - backupstorage.RegisterFileBackupStorage() // Initialize the fake mysql root directories sourceInnodbDataDir := path.Join(root, "source_innodb_data") From 3fd4524b3af5eddea1e4d7482308a1535584fa04 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 09:03:15 -0700 Subject: [PATCH 066/128] Fixing contexts in these files. Short context is now always named shortCtx, and never overrides the ctx variable. Fixes vertical_split.py. --- go/vt/worker/clone_utils.go | 20 ++++++++++---------- go/vt/worker/topo_utils.go | 4 ++-- go/vt/worker/vertical_split_diff.go | 28 ++++++++++++++-------------- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/go/vt/worker/clone_utils.go b/go/vt/worker/clone_utils.go index 410d44ddd0..ca7b941e8d 100644 --- a/go/vt/worker/clone_utils.go +++ b/go/vt/worker/clone_utils.go @@ -30,8 +30,8 @@ import ( // Does a topo lookup for a single shard, and returns the tablet record of the master tablet. func resolveDestinationShardMaster(ctx context.Context, keyspace, shard string, wr *wrangler.Wrangler) (*topo.TabletInfo, error) { var ti *topo.TabletInfo - newCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - si, err := topo.GetShard(newCtx, wr.TopoServer(), keyspace, shard) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + si, err := topo.GetShard(shortCtx, wr.TopoServer(), keyspace, shard) cancel() if err != nil { return ti, fmt.Errorf("unable to resolve destination shard %v/%v", keyspace, shard) @@ -43,8 +43,8 @@ func resolveDestinationShardMaster(ctx context.Context, keyspace, shard string, wr.Logger().Infof("Found target master alias %v in shard %v/%v", si.MasterAlias, keyspace, shard) - newCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - ti, err = topo.GetTablet(newCtx, wr.TopoServer(), si.MasterAlias) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + ti, err = topo.GetTablet(shortCtx, wr.TopoServer(), si.MasterAlias) cancel() if err != nil { return ti, fmt.Errorf("unable to get master tablet from alias %v in shard %v/%v", @@ -58,16 +58,16 @@ func resolveDestinationShardMaster(ctx context.Context, keyspace, shard string, // 2. Map of tablet alias : tablet record for all tablets. func resolveReloadTabletsForShard(ctx context.Context, keyspace, shard string, wr *wrangler.Wrangler) (reloadAliases []topo.TabletAlias, reloadTablets map[topo.TabletAlias]*topo.TabletInfo, err error) { // Keep a long timeout, because we really don't want the copying to succeed, and then the worker to fail at the end.
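The bug class this patch removes is easy to miss: `ctx, cancel := context.WithTimeout(ctx, ...)` shadows the caller's context, so every call after the bounded one silently inherits the short deadline too. Giving the bounded context its own shortCtx name keeps the original ctx intact. A self-contained sketch of the convention follows; doLookup and doCopy are made-up stand-ins, not functions from the tree:

    package main

    import (
    	"context"
    	"time"
    )

    // doLookup and doCopy stand in for a quick topo RPC and a long-running
    // copy; only their context handling matters for this sketch.
    func doLookup(ctx context.Context) error { return ctx.Err() }
    func doCopy(ctx context.Context) error   { return ctx.Err() }

    func work(ctx context.Context) error {
    	// Writing `ctx, cancel := context.WithTimeout(ctx, time.Minute)`
    	// here would re-bind ctx, putting doCopy below under the
    	// one-minute deadline as well. The shortCtx name avoids that.
    	shortCtx, cancel := context.WithTimeout(ctx, time.Minute)
    	err := doLookup(shortCtx)
    	cancel() // release the timeout's resources right away
    	if err != nil {
    		return err
    	}
    	// Bounded only by whatever deadline the caller put on ctx.
    	return doCopy(ctx)
    }

    func main() { _ = work(context.Background()) }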
- newCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - reloadAliases, err = topo.FindAllTabletAliasesInShard(newCtx, wr.TopoServer(), keyspace, shard) + shortCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + reloadAliases, err = topo.FindAllTabletAliasesInShard(shortCtx, wr.TopoServer(), keyspace, shard) cancel() if err != nil { return nil, nil, fmt.Errorf("cannot find all reload target tablets in %v/%v: %v", keyspace, shard, err) } wr.Logger().Infof("Found %v reload target aliases in shard %v/%v", len(reloadAliases), keyspace, shard) - newCtx, cancel = context.WithTimeout(ctx, 5*time.Minute) - reloadTablets, err = topo.GetTabletMap(newCtx, wr.TopoServer(), reloadAliases) + shortCtx, cancel = context.WithTimeout(ctx, 5*time.Minute) + reloadTablets, err = topo.GetTabletMap(shortCtx, wr.TopoServer(), reloadAliases) cancel() if err != nil { return nil, nil, fmt.Errorf("cannot read all reload target tablets in %v/%v: %v", @@ -280,8 +280,8 @@ func findChunks(ctx context.Context, wr *wrangler.Wrangler, ti *topo.TabletInfo, // get the min and max of the leading column of the primary key query := fmt.Sprintf("SELECT MIN(%v), MAX(%v) FROM %v.%v", td.PrimaryKeyColumns[0], td.PrimaryKeyColumns[0], ti.DbName(), td.Name) - ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - qr, err := wr.TabletManagerClient().ExecuteFetchAsApp(ctx, ti, query, 1, true) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + qr, err := wr.TabletManagerClient().ExecuteFetchAsApp(shortCtx, ti, query, 1, true) cancel() if err != nil { return nil, fmt.Errorf("ExecuteFetchAsApp: %v", err) diff --git a/go/vt/worker/topo_utils.go b/go/vt/worker/topo_utils.go index ee9be34d39..abd1a3c53b 100644 --- a/go/vt/worker/topo_utils.go +++ b/go/vt/worker/topo_utils.go @@ -75,8 +75,8 @@ func findChecker(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.C defer wrangler.RecordTabletTagAction(cleaner, tabletAlias, "worker", "") wr.Logger().Infof("Changing tablet %v to 'checker'", tabletAlias) - ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - err = wr.ChangeType(ctx, tabletAlias, topo.TYPE_CHECKER, false /*force*/) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + err = wr.ChangeType(shortCtx, tabletAlias, topo.TYPE_CHECKER, false /*force*/) cancel() if err != nil { return topo.TabletAlias{}, err diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index 11f2371860..92e15ec3de 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -231,8 +231,8 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // 1 - stop the master binlog replication, get its current position vsdw.wr.Logger().Infof("Stopping master binlog replication on %v", vsdw.shardInfo.MasterAlias) - ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - blpPositionList, err := vsdw.wr.TabletManagerClient().StopBlp(ctx, masterInfo) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + blpPositionList, err := vsdw.wr.TabletManagerClient().StopBlp(shortCtx, masterInfo) cancel() if err != nil { return fmt.Errorf("StopBlp on master %v failed: %v", vsdw.shardInfo.MasterAlias, err) @@ -257,8 +257,8 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) if err != nil { return err } - ctx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - stoppedAt, err := vsdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, sourceTablet, 
pos.Position, *remoteActionsTimeout) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + stoppedAt, err := vsdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, sourceTablet, pos.Position, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("cannot stop slave %v at right binlog position %v: %v", vsdw.sourceAlias, pos.Position, err) @@ -278,8 +278,8 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // 3 - ask the master of the destination shard to resume filtered // replication up to the new list of positions vsdw.wr.Logger().Infof("Restarting master %v until it catches up to %v", vsdw.shardInfo.MasterAlias, stopPositionList) - ctx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - masterPos, err := vsdw.wr.TabletManagerClient().RunBlpUntil(ctx, masterInfo, &stopPositionList, *remoteActionsTimeout) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + masterPos, err := vsdw.wr.TabletManagerClient().RunBlpUntil(shortCtx, masterInfo, &stopPositionList, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("RunBlpUntil on %v until %v failed: %v", vsdw.shardInfo.MasterAlias, stopPositionList, err) @@ -292,8 +292,8 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) if err != nil { return err } - ctx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - _, err = vsdw.wr.TabletManagerClient().StopSlaveMinimum(ctx, destinationTablet, masterPos, *remoteActionsTimeout) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + _, err = vsdw.wr.TabletManagerClient().StopSlaveMinimum(shortCtx, destinationTablet, masterPos, *remoteActionsTimeout) cancel() if err != nil { return fmt.Errorf("StopSlaveMinimum on %v at %v failed: %v", vsdw.destinationAlias, masterPos, err) @@ -307,8 +307,8 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // 5 - restart filtered replication on destination master vsdw.wr.Logger().Infof("Restarting filtered replication on master %v", vsdw.shardInfo.MasterAlias) - ctx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) - err = vsdw.wr.TabletManagerClient().StartBlp(ctx, masterInfo) + shortCtx, cancel = context.WithTimeout(ctx, *remoteActionsTimeout) + err = vsdw.wr.TabletManagerClient().StartBlp(shortCtx, masterInfo) if err := vsdw.cleaner.RemoveActionByName(wrangler.StartBlpActionName, vsdw.shardInfo.MasterAlias.String()); err != nil { vsdw.wr.Logger().Warningf("Cannot find cleaning action %v/%v: %v", wrangler.StartBlpActionName, vsdw.shardInfo.MasterAlias.String(), err) } @@ -334,9 +334,9 @@ func (vsdw *VerticalSplitDiffWorker) diff(ctx context.Context) error { wg.Add(1) go func() { var err error - ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) vsdw.destinationSchemaDefinition, err = vsdw.wr.GetSchema( - ctx, vsdw.destinationAlias, nil /* tables */, vsdw.excludeTables, false /* includeViews */) + shortCtx, vsdw.destinationAlias, nil /* tables */, vsdw.excludeTables, false /* includeViews */) cancel() rec.RecordError(err) vsdw.wr.Logger().Infof("Got schema from destination %v", vsdw.destinationAlias) @@ -345,9 +345,9 @@ func (vsdw *VerticalSplitDiffWorker) diff(ctx context.Context) error { wg.Add(1) go func() { var err error - ctx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) + shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) vsdw.sourceSchemaDefinition, err = 
vsdw.wr.GetSchema( - ctx, vsdw.sourceAlias, nil /* tables */, vsdw.excludeTables, false /* includeViews */) + shortCtx, vsdw.sourceAlias, nil /* tables */, vsdw.excludeTables, false /* includeViews */) cancel() rec.RecordError(err) vsdw.wr.Logger().Infof("Got schema from source %v", vsdw.sourceAlias) From a5fd0206c44155a891c217b1b2169419b99949ee Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 09:28:45 -0700 Subject: [PATCH 067/128] Making GetBackupStorage return an error, instead of Fatalf-ing out. --- go/vt/mysqlctl/backup.go | 10 ++++++++-- go/vt/mysqlctl/backupstorage/interface.go | 9 ++++----- go/vt/vtctl/backup.go | 10 ++++++++-- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 1921545d9a..649b560c22 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -183,7 +183,10 @@ func findFilesTobackup(cnf *Mycnf) ([]FileEntry, error) { func Backup(mysqld MysqlDaemon, logger logutil.Logger, bucket, name string, backupConcurrency int, hookExtraEnv map[string]string) error { // start the backup with the BackupStorage - bs := backupstorage.GetBackupStorage() + bs, err := backupstorage.GetBackupStorage() + if err != nil { + return err + } bh, err := bs.StartBackup(bucket, name) if err != nil { return fmt.Errorf("StartBackup failed: %v", err) @@ -494,7 +497,10 @@ func restoreFiles(cnf *Mycnf, bh backupstorage.BackupHandle, fes []FileEntry, re func Restore(mysqld MysqlDaemon, bucket string, restoreConcurrency int, hookExtraEnv map[string]string) (proto.ReplicationPosition, error) { // find the right backup handle: most recent one, with a MANIFEST log.Infof("Restore: looking for a suitable backup to restore") - bs := backupstorage.GetBackupStorage() + bs, err := backupstorage.GetBackupStorage() + if err != nil { + return proto.ReplicationPosition{}, err + } bhs, err := bs.ListBackups(bucket) if err != nil { return proto.ReplicationPosition{}, fmt.Errorf("ListBackups failed: %v", err) diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go index 542be8fec2..e60f21abca 100644 --- a/go/vt/mysqlctl/backupstorage/interface.go +++ b/go/vt/mysqlctl/backupstorage/interface.go @@ -8,9 +8,8 @@ package backupstorage import ( "flag" + "fmt" "io" - - log "github.com/golang/glog" ) var ( @@ -75,10 +74,10 @@ var BackupStorageMap = make(map[string]BackupStorage) // GetBackupStorage returns the current BackupStorage implementation. // Should be called after flags have been initialized.
-func GetBackupStorage() BackupStorage { +func GetBackupStorage() (BackupStorage, error) { bs, ok := BackupStorageMap[*BackupStorageImplementation] if !ok { - log.Fatalf("no registered implementation of BackupStorage") + return nil, fmt.Errorf("no registered implementation of BackupStorage") } - return bs + return bs, nil } diff --git a/go/vt/vtctl/backup.go b/go/vt/vtctl/backup.go index a3a1bc7771..eaa7e77604 100644 --- a/go/vt/vtctl/backup.go +++ b/go/vt/vtctl/backup.go @@ -41,7 +41,10 @@ func commandListBackups(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl } bucket := fmt.Sprintf("%v/%v", keyspace, shard) - bs := backupstorage.GetBackupStorage() + bs, err := backupstorage.GetBackupStorage() + if err != nil { + return err + } bhs, err := bs.ListBackups(bucket) if err != nil { return err @@ -67,6 +70,9 @@ func commandRemoveBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *f bucket := fmt.Sprintf("%v/%v", keyspace, shard) name := subFlags.Arg(1) - bs := backupstorage.GetBackupStorage() + bs, err := backupstorage.GetBackupStorage() + if err != nil { + return err + } return bs.RemoveBackup(bucket, name) } From d89f042c5a9a32c40a614555490f9710a34b6762 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 09:39:28 -0700 Subject: [PATCH 068/128] Fixing backup timestamp format to be UTC, human readable, and correctly comparable. Example backup name: test_nj-0000062345.2015-05-21.163734 --- go/vt/mysqlctl/backupstorage/interface.go | 4 +++- go/vt/tabletmanager/agent_rpc_actions.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go index e60f21abca..ae9aff277a 100644 --- a/go/vt/mysqlctl/backupstorage/interface.go +++ b/go/vt/mysqlctl/backupstorage/interface.go @@ -54,7 +54,9 @@ type BackupHandle interface { type BackupStorage interface { // ListBackups returns all the backups in a bucket. The // returned backups are read-only (ReadFile can be called, but - // AddFile/EndBackup/AbortBackup cannot) + // AddFile/EndBackup/AbortBackup cannot). + // The backups are string-sorted by Name(), ascending (ends up + // being the oldest backup first). ListBackups(bucket string) ([]BackupHandle, error) // StartBackup creates a new backup with the given name. If a diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index 579115c498..1c4544de9f 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -727,7 +727,7 @@ func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger lo // now we can run the backup bucket := fmt.Sprintf("%v/%v", tablet.Keyspace, tablet.Shard) - name := fmt.Sprintf("%v-%v", tablet.Alias, time.Now().Unix()) + name := fmt.Sprintf("%v.%v", tablet.Alias, time.Now().UTC().Format("2006-01-02.150405")) returnErr := mysqlctl.Backup(agent.MysqlDaemon, l, bucket, name, concurrency, agent.hookExtraEnv()) // and change our type back to the appropriate value: From 794e034a087424ab1ae0275d5a56d4c098616f6f Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 11:15:29 -0700 Subject: [PATCH 069/128] Moving filebackupstorage into its own package. 
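The split works through the registration pattern visible in the diffs below: the implementation package adds itself to backupstorage.BackupStorageMap from an init() function, and each binary opts in with a side-effect-only blank import in its plugin_*.go file. A minimal sketch of what another implementation would look like under the same pattern; the mybackupstorage package and the "my" key are hypothetical, and the sketch assumes the three interface methods shown in file.go below:

    // Hypothetical package, shown only to illustrate the registration pattern.
    package mybackupstorage

    import (
    	"fmt"

    	"github.com/youtube/vitess/go/vt/mysqlctl/backupstorage"
    )

    // MyBackupStorage is a stub implementation of backupstorage.BackupStorage.
    type MyBackupStorage struct{}

    func (*MyBackupStorage) ListBackups(bucket string) ([]backupstorage.BackupHandle, error) {
    	return nil, fmt.Errorf("not implemented")
    }

    func (*MyBackupStorage) StartBackup(bucket, name string) (backupstorage.BackupHandle, error) {
    	return nil, fmt.Errorf("not implemented")
    }

    func (*MyBackupStorage) RemoveBackup(bucket, name string) error {
    	return fmt.Errorf("not implemented")
    }

    func init() {
    	// GetBackupStorage finds this entry when the
    	// BackupStorageImplementation flag is set to "my".
    	backupstorage.BackupStorageMap["my"] = &MyBackupStorage{}
    }

A binary would then enable it exactly the way the vtctl and vttablet plugin files below do, with import _ ".../mybackupstorage".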
--- go/cmd/vtctl/plugin_filebackupstorage.go | 2 +- go/cmd/vttablet/plugin_filebackupstorage.go | 9 +++++++++ .../file.go | 17 +++++++++-------- .../file_test.go | 2 +- go/vt/wrangler/testlib/backup_test.go | 3 ++- 5 files changed, 22 insertions(+), 11 deletions(-) create mode 100644 go/cmd/vttablet/plugin_filebackupstorage.go rename go/vt/mysqlctl/{backupstorage => filebackupstorage}/file.go (87%) rename go/vt/mysqlctl/{backupstorage => filebackupstorage}/file_test.go (99%) diff --git a/go/cmd/vtctl/plugin_filebackupstorage.go b/go/cmd/vtctl/plugin_filebackupstorage.go index 25b4da38bf..cf6c963131 100644 --- a/go/cmd/vtctl/plugin_filebackupstorage.go +++ b/go/cmd/vtctl/plugin_filebackupstorage.go @@ -5,5 +5,5 @@ package main import ( - _ "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" + _ "github.com/youtube/vitess/go/vt/mysqlctl/filebackupstorage" ) diff --git a/go/cmd/vttablet/plugin_filebackupstorage.go b/go/cmd/vttablet/plugin_filebackupstorage.go new file mode 100644 index 0000000000..cf6c963131 --- /dev/null +++ b/go/cmd/vttablet/plugin_filebackupstorage.go @@ -0,0 +1,9 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + _ "github.com/youtube/vitess/go/vt/mysqlctl/filebackupstorage" +) diff --git a/go/vt/mysqlctl/backupstorage/file.go b/go/vt/mysqlctl/filebackupstorage/file.go similarity index 87% rename from go/vt/mysqlctl/backupstorage/file.go rename to go/vt/mysqlctl/filebackupstorage/file.go index e293301031..1da8753df4 100644 --- a/go/vt/mysqlctl/backupstorage/file.go +++ b/go/vt/mysqlctl/filebackupstorage/file.go @@ -2,7 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package backupstorage +// Package filebackupstorage implements the BackupStorage interface +// for a local filesystem (which can be an NFS mount). +package filebackupstorage import ( "flag" @@ -11,10 +13,9 @@ import ( "io/ioutil" "os" "path" -) -// This file contains the flocal file system implementation of the -// BackupStorage interface. + "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" +) var ( // FileBackupStorageRoot is where the backups will go.
@@ -78,7 +79,7 @@ func (fbh *FileBackupHandle) ReadFile(filename string) (io.ReadCloser, error) { type FileBackupStorage struct{} // ListBackups is part of the BackupStorage interface -func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) { +func (fbs *FileBackupStorage) ListBackups(bucket string) ([]backupstorage.BackupHandle, error) { // ReadDir already sorts the results p := path.Join(*FileBackupStorageRoot, bucket) fi, err := ioutil.ReadDir(p) @@ -89,7 +90,7 @@ func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) return nil, err } - result := make([]BackupHandle, 0, len(fi)) + result := make([]backupstorage.BackupHandle, 0, len(fi)) for _, info := range fi { if !info.IsDir() { continue @@ -108,7 +109,7 @@ func (fbs *FileBackupStorage) ListBackups(bucket string) ([]BackupHandle, error) } // StartBackup is part of the BackupStorage interface -func (fbs *FileBackupStorage) StartBackup(bucket, name string) (BackupHandle, error) { +func (fbs *FileBackupStorage) StartBackup(bucket, name string) (backupstorage.BackupHandle, error) { // make sure the bucket directory exists p := path.Join(*FileBackupStorageRoot, bucket) if err := os.MkdirAll(p, os.ModePerm); err != nil { @@ -136,5 +137,5 @@ func (fbs *FileBackupStorage) RemoveBackup(bucket, name string) error { } func init() { - BackupStorageMap["file"] = &FileBackupStorage{} + backupstorage.BackupStorageMap["file"] = &FileBackupStorage{} } diff --git a/go/vt/mysqlctl/backupstorage/file_test.go b/go/vt/mysqlctl/filebackupstorage/file_test.go similarity index 99% rename from go/vt/mysqlctl/backupstorage/file_test.go rename to go/vt/mysqlctl/filebackupstorage/file_test.go index 25f5eae6f9..be02cf1b8d 100644 --- a/go/vt/mysqlctl/backupstorage/file_test.go +++ b/go/vt/mysqlctl/filebackupstorage/file_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package backupstorage +package filebackupstorage import ( "io" diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index 02c01d13f3..b4c6e3f02d 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -15,6 +15,7 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" + "github.com/youtube/vitess/go/vt/mysqlctl/filebackupstorage" myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" @@ -38,7 +39,7 @@ func TestBackupRestore(t *testing.T) { // Initialize BackupStorage fbsRoot := path.Join(root, "fbs") - *backupstorage.FileBackupStorageRoot = fbsRoot + *filebackupstorage.FileBackupStorageRoot = fbsRoot *backupstorage.BackupStorageImplementation = "file" // Initialize the fake mysql root directories From 8f2a4c71aacb5138d3a39e677c937cfa9e9c9946 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 14:41:48 -0700 Subject: [PATCH 070/128] Removing extra log line. 
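Stepping back to the backup-name format from patch 068: it composes with the ListBackups contract ("string-sorted by Name(), ascending") because every field of the "2006-01-02.150405" layout is zero-padded and ordered from most to least significant, so lexicographic order coincides with chronological order, and UTC avoids discontinuities across DST changes. A quick illustrative check of that property, not code from the tree:

    package main

    import (
    	"fmt"
    	"sort"
    	"time"
    )

    func main() {
    	// Same reference layout the patch passes to Format.
    	const layout = "2006-01-02.150405"
    	t1 := time.Date(2015, 5, 21, 16, 37, 34, 0, time.UTC)
    	t2 := t1.Add(48 * time.Hour)

    	names := []string{
    		"test_nj-0000062345." + t2.Format(layout),
    		"test_nj-0000062345." + t1.Format(layout),
    	}
    	// Zero-padded, most-significant-first fields make the string sort
    	// agree with time order: the older backup ends up first.
    	sort.Strings(names)
    	fmt.Println(names)
    }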
--- go/cmd/vtgate/vtgate.go | 1 - 1 file changed, 1 deletion(-) diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go index 745f9d61cd..c8fc408d34 100644 --- a/go/cmd/vtgate/vtgate.go +++ b/go/cmd/vtgate/vtgate.go @@ -48,7 +48,6 @@ func main() { defer topo.CloseServers() var schema *planbuilder.Schema - log.Info(*cell, *schemaFile) if *schemaFile != "" { var err error if schema, err = planbuilder.LoadFile(*schemaFile); err != nil { From fb10d05bfd31879e7099650804b02064dc710648 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 14:46:15 -0700 Subject: [PATCH 071/128] Exporting a couple methods for other modules to use. --- go/vt/worker/clone_utils.go | 4 ++-- go/vt/worker/split_clone.go | 4 ++-- go/vt/worker/split_diff.go | 4 ++-- go/vt/worker/sqldiffer.go | 4 ++-- go/vt/worker/topo_utils.go | 10 +++++----- go/vt/worker/vertical_split_clone.go | 4 ++-- go/vt/worker/vertical_split_diff.go | 4 ++-- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/go/vt/worker/clone_utils.go b/go/vt/worker/clone_utils.go index ca7b941e8d..179f7b44b3 100644 --- a/go/vt/worker/clone_utils.go +++ b/go/vt/worker/clone_utils.go @@ -258,14 +258,14 @@ func runSqlCommands(ctx context.Context, wr *wrangler.Wrangler, r Resolver, shar return nil } -// findChunks returns an array of chunks to use for splitting up a table +// FindChunks returns an array of chunks to use for splitting up a table // into multiple data chunks. It only works for tables with a primary key // (and the primary key first column is an integer type). // The array will always look like: // "", "value1", "value2", "" // A non-split tablet will just return: // "", "" -func findChunks(ctx context.Context, wr *wrangler.Wrangler, ti *topo.TabletInfo, td *myproto.TableDefinition, minTableSizeForSplit uint64, sourceReaderCount int) ([]string, error) { +func FindChunks(ctx context.Context, wr *wrangler.Wrangler, ti *topo.TabletInfo, td *myproto.TableDefinition, minTableSizeForSplit uint64, sourceReaderCount int) ([]string, error) { result := []string{"", ""} // eliminate a few cases we don't split tables for diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index 88e5dbe355..4b4f89e21a 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -273,7 +273,7 @@ func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in the source shards scw.sourceAliases = make([]topo.TabletAlias, len(scw.sourceShards)) for i, si := range scw.sourceShards { - scw.sourceAliases[i], err = findChecker(ctx, scw.wr, scw.cleaner, scw.cell, si.Keyspace(), si.ShardName()) + scw.sourceAliases[i], err = FindChecker(ctx, scw.wr, scw.cleaner, scw.cell, si.Keyspace(), si.ShardName()) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", scw.cell, si.Keyspace(), si.ShardName(), err) } @@ -476,7 +476,7 @@ func (scw *SplitCloneWorker) copy(ctx context.Context) error { rowSplitter := NewRowSplitter(scw.destinationShards, scw.keyspaceInfo.ShardingColumnType, columnIndexes[tableIndex]) - chunks, err := findChunks(ctx, scw.wr, scw.sourceTablets[shardIndex], td, scw.minTableSizeForSplit, scw.sourceReaderCount) + chunks, err := FindChunks(ctx, scw.wr, scw.sourceTablets[shardIndex], td, scw.minTableSizeForSplit, scw.sourceReaderCount) if err != nil { return err } diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go index 0c008946c4..0e6a604a95 100644 --- a/go/vt/worker/split_diff.go +++ b/go/vt/worker/split_diff.go @@ -180,7 +180,7 @@ func 
(sdw *SplitDiffWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in destination shard var err error - sdw.destinationAlias, err = findChecker(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, sdw.shard) + sdw.destinationAlias, err = FindChecker(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, sdw.shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", sdw.cell, sdw.keyspace, sdw.shard, err) } @@ -188,7 +188,7 @@ func (sdw *SplitDiffWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in the source shards sdw.sourceAliases = make([]topo.TabletAlias, len(sdw.shardInfo.SourceShards)) for i, ss := range sdw.shardInfo.SourceShards { - sdw.sourceAliases[i], err = findChecker(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, ss.Shard) + sdw.sourceAliases[i], err = FindChecker(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, ss.Shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", sdw.cell, sdw.keyspace, ss.Shard, err) } diff --git a/go/vt/worker/sqldiffer.go b/go/vt/worker/sqldiffer.go index ef3e50c529..632b0ad939 100644 --- a/go/vt/worker/sqldiffer.go +++ b/go/vt/worker/sqldiffer.go @@ -146,13 +146,13 @@ func (worker *SQLDiffWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in superset var err error - worker.superset.alias, err = findChecker(ctx, worker.wr, worker.cleaner, worker.cell, worker.superset.Keyspace, worker.superset.Shard) + worker.superset.alias, err = FindChecker(ctx, worker.wr, worker.cleaner, worker.cell, worker.superset.Keyspace, worker.superset.Shard) if err != nil { return err } // find an appropriate endpoint in subset - worker.subset.alias, err = findChecker(ctx, worker.wr, worker.cleaner, worker.cell, worker.subset.Keyspace, worker.subset.Shard) + worker.subset.alias, err = FindChecker(ctx, worker.wr, worker.cleaner, worker.cell, worker.subset.Keyspace, worker.subset.Shard) if err != nil { return err } diff --git a/go/vt/worker/topo_utils.go b/go/vt/worker/topo_utils.go index abd1a3c53b..70899bea0d 100644 --- a/go/vt/worker/topo_utils.go +++ b/go/vt/worker/topo_utils.go @@ -20,10 +20,10 @@ var ( minHealthyEndPoints = flag.Int("min_healthy_rdonly_endpoints", 2, "minimum number of healthy rdonly endpoints required for checker") ) -// findHealthyRdonlyEndPoint returns a random healthy endpoint. +// FindHealthyRdonlyEndPoint returns a random healthy endpoint. // Since we don't want to use them all, we require at least // minHealthyEndPoints servers to be healthy. 
-func findHealthyRdonlyEndPoint(wr *wrangler.Wrangler, cell, keyspace, shard string) (topo.TabletAlias, error) { +func FindHealthyRdonlyEndPoint(wr *wrangler.Wrangler, cell, keyspace, shard string) (topo.TabletAlias, error) { endPoints, err := wr.TopoServer().GetEndPoints(cell, keyspace, shard, topo.TYPE_RDONLY) if err != nil { return topo.TabletAlias{}, fmt.Errorf("GetEndPoints(%v,%v,%v,rdonly) failed: %v", cell, keyspace, shard, err) @@ -46,12 +46,12 @@ func findHealthyRdonlyEndPoint(wr *wrangler.Wrangler, cell, keyspace, shard stri }, nil } -// findChecker: +// FindChecker will: // - find a rdonly instance in the keyspace / shard // - mark it as checker // - tag it with our worker process -func findChecker(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, cell, keyspace, shard string) (topo.TabletAlias, error) { - tabletAlias, err := findHealthyRdonlyEndPoint(wr, cell, keyspace, shard) +func FindChecker(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, cell, keyspace, shard string) (topo.TabletAlias, error) { + tabletAlias, err := FindHealthyRdonlyEndPoint(wr, cell, keyspace, shard) if err != nil { return topo.TabletAlias{}, err } diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index 113778fe78..24a76eef45 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -243,7 +243,7 @@ func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in the source shard var err error - vscw.sourceAlias, err = findChecker(ctx, vscw.wr, vscw.cleaner, vscw.cell, vscw.sourceKeyspace, "0") + vscw.sourceAlias, err = FindChecker(ctx, vscw.wr, vscw.cleaner, vscw.cell, vscw.sourceKeyspace, "0") if err != nil { return fmt.Errorf("cannot find checker for %v/%v/0: %v", vscw.cell, vscw.sourceKeyspace, err) } @@ -410,7 +410,7 @@ func (vscw *VerticalSplitCloneWorker) copy(ctx context.Context) error { continue } - chunks, err := findChunks(ctx, vscw.wr, vscw.sourceTablet, td, vscw.minTableSizeForSplit, vscw.sourceReaderCount) + chunks, err := FindChunks(ctx, vscw.wr, vscw.sourceTablet, td, vscw.minTableSizeForSplit, vscw.sourceReaderCount) if err != nil { return err } diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index 92e15ec3de..1961827fe8 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -189,13 +189,13 @@ func (vsdw *VerticalSplitDiffWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in destination shard var err error - vsdw.destinationAlias, err = findChecker(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.keyspace, vsdw.shard) + vsdw.destinationAlias, err = FindChecker(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.keyspace, vsdw.shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", vsdw.cell, vsdw.keyspace, vsdw.shard, err) } // find an appropriate endpoint in the source shard - vsdw.sourceAlias, err = findChecker(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard) + vsdw.sourceAlias, err = FindChecker(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard, err) } From f76208ec52d7a134b3ff346eed2d1fcda9d151fe Mon Sep 17 
00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 15:42:24 -0700 Subject: [PATCH 072/128] Submitting another change from Ric. Will need fixing. --- go/vt/topo/tablet.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index 345cd8dc8d..0dfe5af238 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -178,6 +178,12 @@ const ( // lagging in replication. TYPE_CHECKER = TabletType("checker") + // FIXME(szopa): Make TYPE_EXPORT a truly separate type. + + // TYPE_EXPORT is tablet that is running an export process. It + // is probably lagging in replication. + TYPE_EXPORT = TYPE_CHECKER + // a machine with data that needs to be wiped TYPE_SCRAP = TabletType("scrap") ) From 7ac9766e46af139a3446859db5cf3d660e8d586f Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 16:36:16 -0700 Subject: [PATCH 073/128] Fixing TabletType: - removing SNAPSHOT_SOURCE - renaming CHECKER to WORKER --- go/vt/topo/tablet.go | 50 ++++++---------------------- go/vt/topotools/tablet.go | 2 +- go/vt/vtctl/vtctl.go | 2 +- go/vt/worker/split_clone.go | 2 +- go/vt/worker/split_diff.go | 4 +-- go/vt/worker/sqldiffer.go | 4 +-- go/vt/worker/topo_utils.go | 8 ++--- go/vt/worker/vertical_split_clone.go | 2 +- go/vt/worker/vertical_split_diff.go | 4 +-- 9 files changed, 24 insertions(+), 54 deletions(-) diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index 0dfe5af238..247540ad55 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -164,25 +164,13 @@ const ( // replication sql thread may be stopped TYPE_BACKUP = TabletType("backup") - // a slaved copy of the data, where mysqld is *not* running, - // and we are serving our data files to clone slaves - // use 'vtctl Snapshot -server-mode ...' to get in this mode - // use 'vtctl SnapshotSourceEnd ...' to get out of this mode - TYPE_SNAPSHOT_SOURCE = TabletType("snapshot_source") - // A tablet that has not been in the replication graph and is restoring - // from a snapshot. idle -> restore -> spare + // from a snapshot. TYPE_RESTORE = TabletType("restore") - // A tablet that is running a checker process. It is probably + // A tablet that is used by a worker process. It is probably // lagging in replication. - TYPE_CHECKER = TabletType("checker") - - // FIXME(szopa): Make TYPE_EXPORT a truly separate type. - - // TYPE_EXPORT is tablet that is running an export process. It - // is probably lagging in replication. - TYPE_EXPORT = TYPE_CHECKER + TYPE_WORKER = TabletType("worker") // a machine with data that needs to be wiped TYPE_SCRAP = TabletType("scrap") @@ -198,9 +186,8 @@ var AllTabletTypes = []TabletType{TYPE_IDLE, TYPE_EXPERIMENTAL, TYPE_SCHEMA_UPGRADE, TYPE_BACKUP, - TYPE_SNAPSHOT_SOURCE, TYPE_RESTORE, - TYPE_CHECKER, + TYPE_WORKER, TYPE_SCRAP, } @@ -213,9 +200,8 @@ var SlaveTabletTypes = []TabletType{ TYPE_EXPERIMENTAL, TYPE_SCHEMA_UPGRADE, TYPE_BACKUP, - TYPE_SNAPSHOT_SOURCE, TYPE_RESTORE, - TYPE_CHECKER, + TYPE_WORKER, } // IsTypeInList returns true if the given type is in the list. 
@@ -248,9 +234,9 @@ func MakeStringTypeList(types []TabletType) []string { // without changes to the replication graph func IsTrivialTypeChange(oldTabletType, newTabletType TabletType) bool { switch oldTabletType { - case TYPE_REPLICA, TYPE_RDONLY, TYPE_BATCH, TYPE_SPARE, TYPE_BACKUP, TYPE_SNAPSHOT_SOURCE, TYPE_EXPERIMENTAL, TYPE_SCHEMA_UPGRADE, TYPE_CHECKER: + case TYPE_REPLICA, TYPE_RDONLY, TYPE_BATCH, TYPE_SPARE, TYPE_BACKUP, TYPE_EXPERIMENTAL, TYPE_SCHEMA_UPGRADE, TYPE_WORKER: switch newTabletType { - case TYPE_REPLICA, TYPE_RDONLY, TYPE_BATCH, TYPE_SPARE, TYPE_BACKUP, TYPE_SNAPSHOT_SOURCE, TYPE_EXPERIMENTAL, TYPE_SCHEMA_UPGRADE, TYPE_CHECKER: + case TYPE_REPLICA, TYPE_RDONLY, TYPE_BATCH, TYPE_SPARE, TYPE_BACKUP, TYPE_EXPERIMENTAL, TYPE_SCHEMA_UPGRADE, TYPE_WORKER: return true } case TYPE_SCRAP: @@ -264,22 +250,6 @@ func IsTrivialTypeChange(oldTabletType, newTabletType TabletType) bool { return false } -// IsValidTypeChange returns if we should we allow this transition at -// all. Most transitions are allowed, but some don't make sense under -// any circumstances. If a transition could be forced, don't disallow -// it here. -func IsValidTypeChange(oldTabletType, newTabletType TabletType) bool { - switch oldTabletType { - case TYPE_SNAPSHOT_SOURCE: - switch newTabletType { - case TYPE_BACKUP, TYPE_SNAPSHOT_SOURCE: - return false - } - } - - return true -} - // IsInServingGraph returns if a tablet appears in the serving graph func IsInServingGraph(tt TabletType) bool { switch tt { @@ -292,7 +262,7 @@ func IsInServingGraph(tt TabletType) bool { // IsRunningQueryService returns if a tablet is running the query service func IsRunningQueryService(tt TabletType) bool { switch tt { - case TYPE_MASTER, TYPE_REPLICA, TYPE_RDONLY, TYPE_BATCH, TYPE_CHECKER: + case TYPE_MASTER, TYPE_REPLICA, TYPE_RDONLY, TYPE_BATCH, TYPE_WORKER: return true } return false @@ -325,10 +295,10 @@ func IsInReplicationGraph(tt TabletType) bool { // and actively replicating? 
// MASTER is not obviously (only support one level replication graph) // IDLE and SCRAP are not either -// BACKUP, RESTORE, TYPE_CHECKER may or may not be, but we don't know for sure +// BACKUP, RESTORE, TYPE_WORKER may or may not be, but we don't know for sure func IsSlaveType(tt TabletType) bool { switch tt { - case TYPE_MASTER, TYPE_IDLE, TYPE_SCRAP, TYPE_BACKUP, TYPE_RESTORE, TYPE_CHECKER: + case TYPE_MASTER, TYPE_IDLE, TYPE_SCRAP, TYPE_BACKUP, TYPE_RESTORE, TYPE_WORKER: return false } return true diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go index 13438f6577..82cae2885f 100644 --- a/go/vt/topotools/tablet.go +++ b/go/vt/topotools/tablet.go @@ -105,7 +105,7 @@ func ChangeType(ctx context.Context, ts topo.Server, tabletAlias topo.TabletAlia return err } - if !topo.IsTrivialTypeChange(tablet.Type, newType) || !topo.IsValidTypeChange(tablet.Type, newType) { + if !topo.IsTrivialTypeChange(tablet.Type, newType) { return fmt.Errorf("cannot change tablet type %v -> %v %v", tablet.Type, newType, tabletAlias) } diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index c81ee87f4f..9583493874 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -763,7 +763,7 @@ func commandChangeSlaveType(ctx context.Context, wr *wrangler.Wrangler, subFlags if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } - if !topo.IsTrivialTypeChange(ti.Type, newType) || !topo.IsValidTypeChange(ti.Type, newType) { + if !topo.IsTrivialTypeChange(ti.Type, newType) { return fmt.Errorf("invalid type transition %v: %v -> %v", tabletAlias, ti.Type, newType) } wr.Logger().Printf("- %v\n", fmtTabletAwkable(ti)) diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index 4b4f89e21a..f4386636bf 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -273,7 +273,7 @@ func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in the source shards scw.sourceAliases = make([]topo.TabletAlias, len(scw.sourceShards)) for i, si := range scw.sourceShards { - scw.sourceAliases[i], err = FindChecker(ctx, scw.wr, scw.cleaner, scw.cell, si.Keyspace(), si.ShardName()) + scw.sourceAliases[i], err = FindWorkerTablet(ctx, scw.wr, scw.cleaner, scw.cell, si.Keyspace(), si.ShardName()) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", scw.cell, si.Keyspace(), si.ShardName(), err) } diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go index 0e6a604a95..c21552548f 100644 --- a/go/vt/worker/split_diff.go +++ b/go/vt/worker/split_diff.go @@ -180,7 +180,7 @@ func (sdw *SplitDiffWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in destination shard var err error - sdw.destinationAlias, err = FindChecker(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, sdw.shard) + sdw.destinationAlias, err = FindWorkerTablet(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, sdw.shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", sdw.cell, sdw.keyspace, sdw.shard, err) } @@ -188,7 +188,7 @@ func (sdw *SplitDiffWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in the source shards sdw.sourceAliases = make([]topo.TabletAlias, len(sdw.shardInfo.SourceShards)) for i, ss := range sdw.shardInfo.SourceShards { - sdw.sourceAliases[i], err = FindChecker(ctx, sdw.wr, sdw.cleaner, sdw.cell, sdw.keyspace, ss.Shard) + sdw.sourceAliases[i], err = FindWorkerTablet(ctx, sdw.wr, sdw.cleaner, sdw.cell, 
sdw.keyspace, ss.Shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", sdw.cell, sdw.keyspace, ss.Shard, err) } diff --git a/go/vt/worker/sqldiffer.go b/go/vt/worker/sqldiffer.go index 632b0ad939..1aa9141bcb 100644 --- a/go/vt/worker/sqldiffer.go +++ b/go/vt/worker/sqldiffer.go @@ -146,13 +146,13 @@ func (worker *SQLDiffWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in superset var err error - worker.superset.alias, err = FindChecker(ctx, worker.wr, worker.cleaner, worker.cell, worker.superset.Keyspace, worker.superset.Shard) + worker.superset.alias, err = FindWorkerTablet(ctx, worker.wr, worker.cleaner, worker.cell, worker.superset.Keyspace, worker.superset.Shard) if err != nil { return err } // find an appropriate endpoint in subset - worker.subset.alias, err = FindChecker(ctx, worker.wr, worker.cleaner, worker.cell, worker.subset.Keyspace, worker.subset.Shard) + worker.subset.alias, err = FindWorkerTablet(ctx, worker.wr, worker.cleaner, worker.cell, worker.subset.Keyspace, worker.subset.Shard) if err != nil { return err } diff --git a/go/vt/worker/topo_utils.go b/go/vt/worker/topo_utils.go index 70899bea0d..81d14b9ea5 100644 --- a/go/vt/worker/topo_utils.go +++ b/go/vt/worker/topo_utils.go @@ -46,11 +46,11 @@ func FindHealthyRdonlyEndPoint(wr *wrangler.Wrangler, cell, keyspace, shard stri }, nil } -// FindChecker will: +// FindWorkerTablet will: // - find a rdonly instance in the keyspace / shard -// - mark it as checker +// - mark it as worker // - tag it with our worker process -func FindChecker(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, cell, keyspace, shard string) (topo.TabletAlias, error) { +func FindWorkerTablet(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, cell, keyspace, shard string) (topo.TabletAlias, error) { tabletAlias, err := FindHealthyRdonlyEndPoint(wr, cell, keyspace, shard) if err != nil { return topo.TabletAlias{}, err @@ -76,7 +76,7 @@ func FindChecker(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.C wr.Logger().Infof("Changing tablet %v to 'checker'", tabletAlias) shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout) - err = wr.ChangeType(shortCtx, tabletAlias, topo.TYPE_CHECKER, false /*force*/) + err = wr.ChangeType(shortCtx, tabletAlias, topo.TYPE_WORKER, false /*force*/) cancel() if err != nil { return topo.TabletAlias{}, err diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index 24a76eef45..25cfe7680a 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -243,7 +243,7 @@ func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in the source shard var err error - vscw.sourceAlias, err = FindChecker(ctx, vscw.wr, vscw.cleaner, vscw.cell, vscw.sourceKeyspace, "0") + vscw.sourceAlias, err = FindWorkerTablet(ctx, vscw.wr, vscw.cleaner, vscw.cell, vscw.sourceKeyspace, "0") if err != nil { return fmt.Errorf("cannot find checker for %v/%v/0: %v", vscw.cell, vscw.sourceKeyspace, err) } diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index 1961827fe8..3d29bfeda1 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -189,13 +189,13 @@ func (vsdw *VerticalSplitDiffWorker) findTargets(ctx context.Context) error { // find an appropriate endpoint in destination shard var err error - vsdw.destinationAlias, err = 
FindChecker(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.keyspace, vsdw.shard) + vsdw.destinationAlias, err = FindWorkerTablet(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.keyspace, vsdw.shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", vsdw.cell, vsdw.keyspace, vsdw.shard, err) } // find an appropriate endpoint in the source shard - vsdw.sourceAlias, err = FindChecker(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard) + vsdw.sourceAlias, err = FindWorkerTablet(ctx, vsdw.wr, vsdw.cleaner, vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard) if err != nil { return fmt.Errorf("cannot find checker for %v/%v/%v: %v", vsdw.cell, vsdw.shardInfo.SourceShards[0].Keyspace, vsdw.shardInfo.SourceShards[0].Shard, err) } From 80678fa46b8b3b000f6ef916e23c75429d1cb49f Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 19:01:53 -0700 Subject: [PATCH 074/128] Removing now empty {Enable,Disable}BinlogPlayback methods. (they were useful before we switched to using a MySQL connection). --- go/vt/mysqlctl/mysql_flavor.go | 10 ---------- go/vt/mysqlctl/mysql_flavor_mariadb.go | 10 ---------- go/vt/mysqlctl/mysql_flavor_mysql56.go | 10 ---------- go/vt/mysqlctl/mysql_flavor_test.go | 2 -- go/vt/mysqlctl/replication.go | 20 -------------------- go/vt/tabletmanager/binlog.go | 22 ---------------------- 6 files changed, 74 deletions(-) diff --git a/go/vt/mysqlctl/mysql_flavor.go b/go/vt/mysqlctl/mysql_flavor.go index 5c47d15adf..4345f54b93 100644 --- a/go/vt/mysqlctl/mysql_flavor.go +++ b/go/vt/mysqlctl/mysql_flavor.go @@ -73,16 +73,6 @@ type MysqlFlavor interface { // WaitMasterPos waits until slave replication reaches at // least targetPos. WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPosition, waitTimeout time.Duration) error - - // EnableBinlogPlayback prepares the server to play back - // events from a binlog stream. Whatever it does for a given - // flavor, it must be idempotent. - EnableBinlogPlayback(mysqld *Mysqld) error - - // DisableBinlogPlayback returns the server to the normal - // state after playback is done. Whatever it does for a given - // flavor, it must be idempotent. - DisableBinlogPlayback(mysqld *Mysqld) error } var mysqlFlavors = make(map[string]MysqlFlavor) diff --git a/go/vt/mysqlctl/mysql_flavor_mariadb.go b/go/vt/mysqlctl/mysql_flavor_mariadb.go index 013b5f1a41..9799d7be50 100644 --- a/go/vt/mysqlctl/mysql_flavor_mariadb.go +++ b/go/vt/mysqlctl/mysql_flavor_mariadb.go @@ -177,16 +177,6 @@ func (*mariaDB10) MakeBinlogEvent(buf []byte) blproto.BinlogEvent { return NewMariadbBinlogEvent(buf) } -// EnableBinlogPlayback implements MysqlFlavor.EnableBinlogPlayback(). -func (*mariaDB10) EnableBinlogPlayback(mysqld *Mysqld) error { - return nil -} - -// DisableBinlogPlayback implements MysqlFlavor.DisableBinlogPlayback(). -func (*mariaDB10) DisableBinlogPlayback(mysqld *Mysqld) error { - return nil -} - // mariadbBinlogEvent wraps a raw packet buffer and provides methods to examine // it by implementing blproto.BinlogEvent. Some methods are pulled in from // binlogEvent.
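For context on how these per-flavor methods are reached at all: mysqlctl keeps the registry declared above (var mysqlFlavors = make(map[string]MysqlFlavor)), each flavor file adds itself to it, and the active flavor is selected by name, for example through the MYSQL_FLAVOR environment variable that the test file below manipulates. A boiled-down sketch of that dispatch, using a stand-in interface rather than the real MysqlFlavor:

    package main

    import (
    	"fmt"
    	"os"
    )

    // flavor is a stand-in for the much larger MysqlFlavor interface.
    type flavor interface {
    	Name() string
    }

    type mariaDB struct{}

    func (mariaDB) Name() string { return "MariaDB" }

    // registry mirrors the role of mysqlctl's mysqlFlavors map.
    var registry = map[string]flavor{"MariaDB": mariaDB{}}

    // pick mirrors the name-based lookup; MYSQL_FLAVOR is the environment
    // variable the mysqlctl tests set.
    func pick() (flavor, error) {
    	name := os.Getenv("MYSQL_FLAVOR")
    	f, ok := registry[name]
    	if !ok {
    		return nil, fmt.Errorf("unknown MySQL flavor %q", name)
    	}
    	return f, nil
    }

    func main() {
    	os.Setenv("MYSQL_FLAVOR", "MariaDB")
    	f, err := pick()
    	fmt.Println(f, err)
    }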
diff --git a/go/vt/mysqlctl/mysql_flavor_mysql56.go b/go/vt/mysqlctl/mysql_flavor_mysql56.go index 780cd2d7bc..d832613ba1 100644 --- a/go/vt/mysqlctl/mysql_flavor_mysql56.go +++ b/go/vt/mysqlctl/mysql_flavor_mysql56.go @@ -158,16 +158,6 @@ func (*mysql56) MakeBinlogEvent(buf []byte) blproto.BinlogEvent { return NewMysql56BinlogEvent(buf) } -// EnableBinlogPlayback implements MysqlFlavor.EnableBinlogPlayback(). -func (*mysql56) EnableBinlogPlayback(mysqld *Mysqld) error { - return nil -} - -// DisableBinlogPlayback implements MysqlFlavor.DisableBinlogPlayback(). -func (*mysql56) DisableBinlogPlayback(mysqld *Mysqld) error { - return nil -} - // mysql56BinlogEvent wraps a raw packet buffer and provides methods to examine // it by implementing blproto.BinlogEvent. Some methods are pulled in from // binlogEvent. diff --git a/go/vt/mysqlctl/mysql_flavor_test.go b/go/vt/mysqlctl/mysql_flavor_test.go index c760886872..4cc4405a99 100644 --- a/go/vt/mysqlctl/mysql_flavor_test.go +++ b/go/vt/mysqlctl/mysql_flavor_test.go @@ -42,8 +42,6 @@ func (fakeMysqlFlavor) StartReplicationCommands(params *sqldb.ConnParams, status func (fakeMysqlFlavor) SetMasterCommands(params *sqldb.ConnParams, masterHost string, masterPort int, masterConnectRetry int) ([]string, error) { return nil, nil } -func (fakeMysqlFlavor) EnableBinlogPlayback(mysqld *Mysqld) error { return nil } -func (fakeMysqlFlavor) DisableBinlogPlayback(mysqld *Mysqld) error { return nil } func TestMysqlFlavorEnvironmentVariable(t *testing.T) { os.Setenv("MYSQL_FLAVOR", "fake flavor") diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 24b0c1005d..b5cd197f34 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -331,23 +331,3 @@ func (mysqld *Mysqld) WaitBlpPosition(bp *blproto.BlpPosition, waitTimeout time. return fmt.Errorf("WaitBlpPosition(%v) timed out", bp.Uid) } - -// EnableBinlogPlayback prepares the server to play back events from a binlog stream. -// Whatever it does for a given flavor, it must be idempotent. -func (mysqld *Mysqld) EnableBinlogPlayback() error { - flavor, err := mysqld.flavor() - if err != nil { - return fmt.Errorf("EnableBinlogPlayback needs flavor: %v", err) - } - return flavor.EnableBinlogPlayback(mysqld) -} - -// DisableBinlogPlayback returns the server to the normal state after streaming. -// Whatever it does for a given flavor, it must be idempotent. -func (mysqld *Mysqld) DisableBinlogPlayback() error { - flavor, err := mysqld.flavor() - if err != nil { - return fmt.Errorf("DisableBinlogPlayback needs flavor: %v", err) - } - return flavor.DisableBinlogPlayback(mysqld) -} diff --git a/go/vt/tabletmanager/binlog.go b/go/vt/tabletmanager/binlog.go index f40b98265e..b0e07f481b 100644 --- a/go/vt/tabletmanager/binlog.go +++ b/go/vt/tabletmanager/binlog.go @@ -210,13 +210,6 @@ func (bpc *BinlogPlayerController) Iteration() (err error) { } }() - // Apply any special settings necessary for playback of binlogs. - // We do it on every iteration to be sure, in case MySQL was restarted. - if err := bpc.mysqld.EnableBinlogPlayback(); err != nil { - // We failed to apply the required settings, so we shouldn't keep going. - return err - } - // create the db connection, connect it vtClient := binlogplayer.NewDbClient(bpc.dbConfig) if err := vtClient.Connect(); err != nil { @@ -388,21 +381,14 @@ func (blm *BinlogPlayerMap) addPlayer(cell string, keyspaceIdType key.KeyspaceId // StopAllPlayersAndReset stops all the binlog players, and reset the map of players. 
func (blm *BinlogPlayerMap) StopAllPlayersAndReset() { - hadPlayers := false blm.mu.Lock() for _, bpc := range blm.players { if blm.state == BpmStateRunning { bpc.Stop() } - hadPlayers = true } blm.players = make(map[uint32]*BinlogPlayerController) blm.mu.Unlock() - - if hadPlayers { - // We're done streaming, so turn off special playback settings. - blm.mysqld.DisableBinlogPlayback() - } } // RefreshMap reads the right data from topo.Server and makes sure @@ -426,10 +412,8 @@ func (blm *BinlogPlayerMap) RefreshMap(tablet *topo.Tablet, keyspaceInfo *topo.K // get the existing sources and build a map of sources to remove toRemove := make(map[uint32]bool) - hadPlayers := false for source := range blm.players { toRemove[source] = true - hadPlayers = true } // for each source, add it if not there, and delete from toRemove @@ -437,7 +421,6 @@ func (blm *BinlogPlayerMap) RefreshMap(tablet *topo.Tablet, keyspaceInfo *topo.K blm.addPlayer(tablet.Alias.Cell, keyspaceInfo.ShardingColumnType, tablet.KeyRange, sourceShard, tablet.DbName()) delete(toRemove, sourceShard.Uid) } - hasPlayers := len(shardInfo.SourceShards) > 0 // remove all entries from toRemove for source := range toRemove { @@ -446,11 +429,6 @@ func (blm *BinlogPlayerMap) RefreshMap(tablet *topo.Tablet, keyspaceInfo *topo.K } blm.mu.Unlock() - - if hadPlayers && !hasPlayers { - // We're done streaming, so turn off special playback settings. - blm.mysqld.DisableBinlogPlayback() - } } // Stop stops the current players, but does not remove them from the map. From a5b3f2aa53d2fae8b537c2b84b1c8c3a3962f343 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 19:12:46 -0700 Subject: [PATCH 075/128] Removing unused mysqld parameter for method. --- go/vt/mysqlctl/mysql_flavor.go | 2 +- go/vt/mysqlctl/mysql_flavor_mariadb.go | 2 +- go/vt/mysqlctl/mysql_flavor_mysql56.go | 2 +- go/vt/mysqlctl/mysql_flavor_test.go | 2 +- go/vt/mysqlctl/slave_connection.go | 2 +- go/vt/tabletmanager/binlog.go | 8 ++++---- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go/vt/mysqlctl/mysql_flavor.go b/go/vt/mysqlctl/mysql_flavor.go index 4345f54b93..080aba07d5 100644 --- a/go/vt/mysqlctl/mysql_flavor.go +++ b/go/vt/mysqlctl/mysql_flavor.go @@ -63,7 +63,7 @@ type MysqlFlavor interface { // SendBinlogDumpCommand sends the flavor-specific version of // the COM_BINLOG_DUMP command to start dumping raw binlog // events over a slave connection, starting at a given GTID. - SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.ReplicationPosition) error + SendBinlogDumpCommand(conn *SlaveConnection, startPos proto.ReplicationPosition) error // MakeBinlogEvent takes a raw packet from the MySQL binlog // stream connection and returns a BinlogEvent through which diff --git a/go/vt/mysqlctl/mysql_flavor_mariadb.go b/go/vt/mysqlctl/mysql_flavor_mariadb.go index 9799d7be50..83cbeac93d 100644 --- a/go/vt/mysqlctl/mysql_flavor_mariadb.go +++ b/go/vt/mysqlctl/mysql_flavor_mariadb.go @@ -138,7 +138,7 @@ func (*mariaDB10) ParseReplicationPosition(s string) (proto.ReplicationPosition, } // SendBinlogDumpCommand implements MysqlFlavor.SendBinlogDumpCommand(). 
-func (*mariaDB10) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.ReplicationPosition) error { +func (*mariaDB10) SendBinlogDumpCommand(conn *SlaveConnection, startPos proto.ReplicationPosition) error { const ComBinlogDump = 0x12 // Tell the server that we understand GTIDs by setting our slave capability diff --git a/go/vt/mysqlctl/mysql_flavor_mysql56.go b/go/vt/mysqlctl/mysql_flavor_mysql56.go index d832613ba1..6bb1c03265 100644 --- a/go/vt/mysqlctl/mysql_flavor_mysql56.go +++ b/go/vt/mysqlctl/mysql_flavor_mysql56.go @@ -134,7 +134,7 @@ func (*mysql56) ParseReplicationPosition(s string) (proto.ReplicationPosition, e } // SendBinlogDumpCommand implements MysqlFlavor.SendBinlogDumpCommand(). -func (flavor *mysql56) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.ReplicationPosition) error { +func (flavor *mysql56) SendBinlogDumpCommand(conn *SlaveConnection, startPos proto.ReplicationPosition) error { const ComBinlogDumpGTID = 0x1E // COM_BINLOG_DUMP_GTID gtidSet, ok := startPos.GTIDSet.(proto.Mysql56GTIDSet) diff --git a/go/vt/mysqlctl/mysql_flavor_test.go b/go/vt/mysqlctl/mysql_flavor_test.go index 4cc4405a99..7b287dfe92 100644 --- a/go/vt/mysqlctl/mysql_flavor_test.go +++ b/go/vt/mysqlctl/mysql_flavor_test.go @@ -24,7 +24,7 @@ func (fakeMysqlFlavor) MakeBinlogEvent(buf []byte) blproto.BinlogEvent { return func (fakeMysqlFlavor) ParseReplicationPosition(string) (proto.ReplicationPosition, error) { return proto.ReplicationPosition{}, nil } -func (fakeMysqlFlavor) SendBinlogDumpCommand(mysqld *Mysqld, conn *SlaveConnection, startPos proto.ReplicationPosition) error { +func (fakeMysqlFlavor) SendBinlogDumpCommand(conn *SlaveConnection, startPos proto.ReplicationPosition) error { return nil } func (fakeMysqlFlavor) WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPosition, waitTimeout time.Duration) error { diff --git a/go/vt/mysqlctl/slave_connection.go b/go/vt/mysqlctl/slave_connection.go index 97ecfe1415..48c2b8cbad 100644 --- a/go/vt/mysqlctl/slave_connection.go +++ b/go/vt/mysqlctl/slave_connection.go @@ -72,7 +72,7 @@ func (sc *SlaveConnection) StartBinlogDump(startPos proto.ReplicationPosition) ( } log.Infof("sending binlog dump command: startPos=%v, slaveID=%v", startPos, sc.slaveID) - if err = flavor.SendBinlogDumpCommand(sc.mysqld, sc, startPos); err != nil { + if err = flavor.SendBinlogDumpCommand(sc, startPos); err != nil { log.Errorf("couldn't send binlog dump command: %v", err) return nil, err } diff --git a/go/vt/tabletmanager/binlog.go b/go/vt/tabletmanager/binlog.go index b0e07f481b..c81bf1c674 100644 --- a/go/vt/tabletmanager/binlog.go +++ b/go/vt/tabletmanager/binlog.go @@ -37,7 +37,7 @@ type BinlogPlayerController struct { // Configuration parameters (set at construction, immutable). ts topo.Server dbConfig *sqldb.ConnParams - mysqld *mysqlctl.Mysqld + mysqld mysqlctl.MysqlDaemon // Information about us (set at construction, immutable). 
cell string @@ -72,7 +72,7 @@ type BinlogPlayerController struct { lastError error } -func newBinlogPlayerController(ts topo.Server, dbConfig *sqldb.ConnParams, mysqld *mysqlctl.Mysqld, cell string, keyspaceIdType key.KeyspaceIdType, keyRange key.KeyRange, sourceShard topo.SourceShard, dbName string) *BinlogPlayerController { +func newBinlogPlayerController(ts topo.Server, dbConfig *sqldb.ConnParams, mysqld mysqlctl.MysqlDaemon, cell string, keyspaceIdType key.KeyspaceIdType, keyRange key.KeyRange, sourceShard topo.SourceShard, dbName string) *BinlogPlayerController { blc := &BinlogPlayerController{ ts: ts, dbConfig: dbConfig, @@ -283,7 +283,7 @@ type BinlogPlayerMap struct { // Immutable, set at construction time. ts topo.Server dbConfig *sqldb.ConnParams - mysqld *mysqlctl.Mysqld + mysqld mysqlctl.MysqlDaemon // This mutex protects the map and the state. mu sync.Mutex @@ -299,7 +299,7 @@ const ( ) // NewBinlogPlayerMap creates a new map of players. -func NewBinlogPlayerMap(ts topo.Server, dbConfig *sqldb.ConnParams, mysqld *mysqlctl.Mysqld) *BinlogPlayerMap { +func NewBinlogPlayerMap(ts topo.Server, dbConfig *sqldb.ConnParams, mysqld mysqlctl.MysqlDaemon) *BinlogPlayerMap { return &BinlogPlayerMap{ ts: ts, dbConfig: dbConfig, From b575f2f89fa27b0ffd1686cc488bf86bfce75b20 Mon Sep 17 00:00:00 2001 From: Ammar Aijazi Date: Thu, 21 May 2015 19:19:59 -0700 Subject: [PATCH 076/128] Make test/worker.py do batch insert at setup, so that worker copy and initial insert take similar amounts of time --- test/utils.py | 11 +++++++++-- test/worker.py | 24 ++++++++++++++++-------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/test/utils.py b/test/utils.py index 14963f332a..dc8b5b82f9 100644 --- a/test/utils.py +++ b/test/utils.py @@ -328,7 +328,7 @@ def wait_for_vars(name, port, var=None): break timeout = wait_step('waiting for /debug/vars of %s' % name, timeout) -def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None): +def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None, require_vars=False): """Polls for debug variables to exist, or match specific conditions, within a timeout. This function polls in a tight loop, with no sleeps. This is useful for @@ -343,9 +343,14 @@ def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None): timeout - number of seconds that we should attempt to poll for. condition_fn - a function that takes the debug vars dict as input, and returns a truthy value if it matches the success conditions. + require_vars - True iff we expect the vars to always exist. If True, and the + vars don't exist, we'll raise a TestError. This can be used to differentiate + between a timeout waiting for a particular condition vs if the process that + you're polling has already exited. Raises: TestError, if the conditions aren't met within the given timeout + TestError, if vars are required and don't exist Returns: dict of debug variables @@ -356,6 +361,8 @@ def poll_for_vars(name, port, condition_msg, timeout=60.0, condition_fn=None): raise TestError('Timed out polling for vars from %s; condition "%s" not met' % (name, condition_msg)) _vars = get_vars(port) if _vars is None: + if require_vars: + raise TestError('Expected vars to exist on %s, but they do not; process probably exited earlier than expected.'
% (name,)) continue if condition_fn is None: return _vars @@ -396,7 +403,7 @@ def wait_for_replication_pos(tablet_a, tablet_b, timeout=60.0): timeout = wait_step( "%s's replication position to catch up %s's; currently at: %s, waiting to catch up to: %s" % ( tablet_b.tablet_alias, tablet_a.tablet_alias, replication_pos_b, replication_pos_a), - timeout + timeout, sleep_time=0.1 ) # vtgate helpers, assuming it always restarts on the same port diff --git a/test/worker.py b/test/worker.py index 3f5beb9d97..ba63dcbd9e 100755 --- a/test/worker.py +++ b/test/worker.py @@ -192,8 +192,8 @@ class TestBaseSplitCloneResiliency(unittest.TestCase): 'test_keyspace'], auto_log=True) - def _insert_value(self, tablet, id, msg, keyspace_id): - """Inserts a value in the MySQL database along with the required routing comments. + def _insert_values(self, tablet, id_offset, msg, keyspace_id, num_values): + """Inserts values in the MySQL database along with the required routing comments. Args: tablet - the Tablet instance to insert into @@ -202,9 +202,14 @@ class TestBaseSplitCloneResiliency(unittest.TestCase): keyspace_id - the value of `keyspace_id` column """ k = "%u" % keyspace_id + values_str = '' + for i in xrange(num_values): + if i != 0: + values_str += ',' + values_str += '(%u, "%s", 0x%x)' % (id_offset + i, msg, keyspace_id) tablet.mquery('vt_test_keyspace', [ 'begin', - 'insert into worker_test(id, msg, keyspace_id) values(%u, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%u */' % (id, msg, keyspace_id, k, id), + 'insert into worker_test(id, msg, keyspace_id) values%s /* EMD keyspace_id:%s*/' % (values_str, k), 'commit' ], write=True) @@ -224,11 +229,12 @@ class TestBaseSplitCloneResiliency(unittest.TestCase): """ shard_width = keyspace_id_range / num_shards shard_offsets = [i * shard_width for i in xrange(num_shards)] - for i in xrange(num_values): - for shard_num in xrange(num_shards): - self._insert_value(tablet, shard_offsets[shard_num] + offset + i, + for shard_num in xrange(num_shards): + self._insert_values(tablet, + shard_offsets[shard_num] + offset, 'msg-shard-%u' % shard_num, - shard_offsets[shard_num] + i) + shard_offsets[shard_num], + num_values) def assert_shard_data_equal(self, shard_num, source_tablet, destination_tablet): """Asserts that a shard's data is identical on source and destination tablets. @@ -369,7 +375,9 @@ class TestBaseSplitCloneResiliency(unittest.TestCase): # for your environment (trial-and-error...) worker_vars = utils.poll_for_vars('vtworker', worker_port, 'WorkerState == cleaning up', - condition_fn=lambda v: v.get('WorkerState') == 'cleaning up') + condition_fn=lambda v: v.get('WorkerState') == 'cleaning up', + # We know that vars should already be ready, since we read them earlier + require_vars=True) # Verify that we were forced to reresolve and retry. self.assertGreater(worker_vars['WorkerDestinationActualResolves'], 1) From b0c2337210fa15efeb5df9718a1f07f96becd31b Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 21 May 2015 19:52:43 -0700 Subject: [PATCH 077/128] Converting one more call to MysqlDaemon. 
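This patch follows the same refactoring pattern as the rest of the series: WaitBlpPosition stops being a method on the concrete *Mysqld type and becomes a package-level function that accepts the MysqlDaemon interface, so production code and tests can supply different implementations. A minimal, self-contained sketch of the pattern, with hypothetical names chosen only for illustration (this is not the actual Vitess code):

    package main

    import "fmt"

    // Daemon is a hypothetical stand-in for the MysqlDaemon interface.
    type Daemon interface {
        FetchSuperQuery(query string) (string, error)
    }

    // realDaemon plays the role of the concrete *Mysqld type.
    type realDaemon struct{}

    func (realDaemon) FetchSuperQuery(q string) (string, error) {
        return "real answer to " + q, nil
    }

    // fakeDaemon plays the role of FakeMysqlDaemon in unit tests.
    type fakeDaemon struct{ canned string }

    func (f fakeDaemon) FetchSuperQuery(q string) (string, error) {
        return f.canned, nil
    }

    // waitPosition shows the refactored shape: a package-level function
    // over the interface instead of a method on the concrete type.
    func waitPosition(d Daemon) (string, error) {
        return d.FetchSuperQuery("SHOW SLAVE STATUS")
    }

    func main() {
        r, _ := waitPosition(realDaemon{})
        f, _ := waitPosition(fakeDaemon{canned: "canned status"})
        fmt.Println(r)
        fmt.Println(f)
    }

The call site changes accordingly, from agent.Mysqld.WaitBlpPosition(...) to mysqlctl.WaitBlpPosition(agent.MysqlDaemon, ...), as the agent_rpc_actions.go hunk below shows.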
--- go/vt/mysqlctl/replication.go | 2 +- go/vt/tabletmanager/agent_rpc_actions.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index b5cd197f34..0c623718bd 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -299,7 +299,7 @@ func (mysqld *Mysqld) FindSlaves() ([]string, error) { // WaitBlpPosition will wait for the filtered replication to reach at least // the provided position. -func (mysqld *Mysqld) WaitBlpPosition(bp *blproto.BlpPosition, waitTimeout time.Duration) error { +func WaitBlpPosition(mysqld MysqlDaemon, bp *blproto.BlpPosition, waitTimeout time.Duration) error { timeOut := time.Now().Add(waitTimeout) for { if time.Now().After(timeOut) { diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index 1c4544de9f..076ce331c3 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -355,7 +355,7 @@ func (agent *ActionAgent) GetSlaves(ctx context.Context) ([]string, error) { // reached. // Should be called under RPCWrapLock. func (agent *ActionAgent) WaitBlpPosition(ctx context.Context, blpPosition *blproto.BlpPosition, waitTime time.Duration) error { - return agent.Mysqld.WaitBlpPosition(blpPosition, waitTime) + return mysqlctl.WaitBlpPosition(agent.MysqlDaemon, blpPosition, waitTime) } // StopBlp stops the binlog players, and return their positions. From cc148e81d9aac169bf5aa3b31e37d8e1d9211c1b Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 22 May 2015 09:20:50 -0700 Subject: [PATCH 078/128] Converting FindSlaves to MysqlDaemon. --- go/vt/mysqlctl/replication.go | 2 +- go/vt/tabletmanager/agent_rpc_actions.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 0c623718bd..a85495a83f 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -277,7 +277,7 @@ const ( ) // FindSlaves gets IP addresses for all currently connected slaves. -func (mysqld *Mysqld) FindSlaves() ([]string, error) { +func FindSlaves(mysqld MysqlDaemon) ([]string, error) { qr, err := mysqld.FetchSuperQuery("SHOW PROCESSLIST") if err != nil { return nil, err diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index 076ce331c3..c93ab3eef7 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -348,7 +348,7 @@ func (agent *ActionAgent) StartSlave(ctx context.Context) error { // GetSlaves returns the address of all the slaves // Should be called under RPCWrap. func (agent *ActionAgent) GetSlaves(ctx context.Context) ([]string, error) { - return agent.Mysqld.FindSlaves() + return mysqlctl.FindSlaves(agent.MysqlDaemon) } // WaitBlpPosition waits until a specific filtered replication position is From 2fbb54133d66d154c91766375ca0d65b74e62f53 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 22 May 2015 09:33:23 -0700 Subject: [PATCH 079/128] Converting a couple more methods to MysqlDaemon. 
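The fake implementations added in this patch follow a canned-result convention: each method returns a result field that the test configures up front, and errors out if the field was left nil. A minimal sketch of that convention, using hypothetical names rather than the real Vitess types:

    package main

    import (
        "errors"
        "fmt"
    )

    // schemaChangeResult is a hypothetical stand-in for
    // *proto.SchemaChangeResult.
    type schemaChangeResult struct{ note string }

    type fakeDaemon struct {
        // preflightResult is what preflight returns; nil means the
        // test forgot to configure it, which is reported as an error.
        preflightResult *schemaChangeResult
    }

    func (f *fakeDaemon) preflight(change string) (*schemaChangeResult, error) {
        if f.preflightResult == nil {
            return nil, errors.New("no preflight result defined")
        }
        return f.preflightResult, nil
    }

    func main() {
        d := &fakeDaemon{}
        if _, err := d.preflight("ALTER TABLE t ADD COLUMN c INT"); err != nil {
            fmt.Println("unconfigured:", err)
        }
        d.preflightResult = &schemaChangeResult{note: "ok"}
        r, _ := d.preflight("ALTER TABLE t ADD COLUMN c INT")
        fmt.Println("configured:", r.note)
    }

FakeMysqlDaemon's new PreflightSchemaChangeResult and ApplySchemaChangeResult fields in the hunks below follow exactly this shape.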
--- go/vt/mysqlctl/mysql_daemon.go | 28 +++++++++++++++++++++++- go/vt/mysqlctl/permissions.go | 2 +- go/vt/tabletmanager/agent_rpc_actions.go | 6 ++--- 3 files changed, 31 insertions(+), 5 deletions(-) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 728dbfe67e..ddf74c16f7 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -55,6 +55,8 @@ type MysqlDaemon interface { // Schema related methods GetSchema(dbName string, tables, excludeTables []string, includeViews bool) (*proto.SchemaDefinition, error) + PreflightSchemaChange(dbName string, change string) (*proto.SchemaChangeResult, error) + ApplySchemaChange(dbName string, change *proto.SchemaChange) (*proto.SchemaChangeResult, error) // GetAppConnection returns a app connection to be able to talk to the database. GetAppConnection() (dbconnpool.PoolConnection, error) @@ -137,10 +139,18 @@ type FakeMysqlDaemon struct { // PromoteSlaveResult is returned by PromoteSlave PromoteSlaveResult proto.ReplicationPosition - // Schema that will be returned by GetSchema. If nil we'll + // Schema will be returned by GetSchema. If nil we'll // return an error. Schema *proto.SchemaDefinition + // PreflightSchemaChangeResult will be returned by PreflightSchemaChange. + // If nil we'll return an error. + PreflightSchemaChangeResult *proto.SchemaChangeResult + + // ApplySchemaChangeResult will be returned by ApplySchemaChange. + // If nil we'll return an error. + ApplySchemaChangeResult *proto.SchemaChangeResult + // DbaConnectionFactory is the factory for making fake dba connections DbaConnectionFactory func() (dbconnpool.PoolConnection, error) @@ -343,6 +353,22 @@ func (fmd *FakeMysqlDaemon) GetSchema(dbName string, tables, excludeTables []str return fmd.Schema.FilterTables(tables, excludeTables, includeViews) } +// PreflightSchemaChange is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) PreflightSchemaChange(dbName string, change string) (*proto.SchemaChangeResult, error) { + if fmd.PreflightSchemaChangeResult == nil { + return nil, fmt.Errorf("no preflight result defined") + } + return fmd.PreflightSchemaChangeResult, nil +} + +// ApplySchemaChange is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) ApplySchemaChange(dbName string, change *proto.SchemaChange) (*proto.SchemaChangeResult, error) { + if fmd.ApplySchemaChangeResult == nil { + return nil, fmt.Errorf("no apply schema defined") + } + return fmd.ApplySchemaChangeResult, nil +} + // GetAppConnection is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) GetAppConnection() (dbconnpool.PoolConnection, error) { if fmd.DbAppConnectionFactory == nil { diff --git a/go/vt/mysqlctl/permissions.go b/go/vt/mysqlctl/permissions.go index 1467b361d2..47123fdfab 100644 --- a/go/vt/mysqlctl/permissions.go +++ b/go/vt/mysqlctl/permissions.go @@ -9,7 +9,7 @@ import ( ) // GetPermissions lists the permissions on the mysqld -func (mysqld *Mysqld) GetPermissions() (*proto.Permissions, error) { +func GetPermissions(mysqld MysqlDaemon) (*proto.Permissions, error) { permissions := &proto.Permissions{} // get Users diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index c93ab3eef7..3d2dd688bd 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -145,7 +145,7 @@ func (agent *ActionAgent) GetSchema(ctx context.Context, tables, excludeTables [ // GetPermissions returns the db permissions. // Should be called under RPCWrap. 
func (agent *ActionAgent) GetPermissions(ctx context.Context) (*myproto.Permissions, error) { - return agent.Mysqld.GetPermissions() + return mysqlctl.GetPermissions(agent.MysqlDaemon) } // SetReadOnly makes the mysql instance read-only or read-write @@ -231,7 +231,7 @@ func (agent *ActionAgent) PreflightSchema(ctx context.Context, change string) (* tablet := agent.Tablet() // and preflight the change - return agent.Mysqld.PreflightSchemaChange(tablet.DbName(), change) + return agent.MysqlDaemon.PreflightSchemaChange(tablet.DbName(), change) } // ApplySchema will apply a schema change @@ -241,7 +241,7 @@ func (agent *ActionAgent) ApplySchema(ctx context.Context, change *myproto.Schem tablet := agent.Tablet() // apply the change - scr, err := agent.Mysqld.ApplySchemaChange(tablet.DbName(), change) + scr, err := agent.MysqlDaemon.ApplySchemaChange(tablet.DbName(), change) if err != nil { return nil, err } From bf3777c6b0b08fd85e8eec32b9230f81f67da6fa Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 22 May 2015 09:52:41 -0700 Subject: [PATCH 080/128] Now converting binlog player to MysqlDaemon. --- go/vt/binlog/binlog_streamer.go | 6 +++--- go/vt/binlog/event_streamer.go | 2 +- go/vt/binlog/updatestreamctl.go | 6 +++--- go/vt/mysqlctl/mysql_daemon.go | 8 ++++++++ go/vt/mysqlctl/slave_connection.go | 2 +- go/vt/tabletmanager/after_action.go | 2 +- 6 files changed, 17 insertions(+), 9 deletions(-) diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index d3b89d52c6..aaee06a08a 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -64,7 +64,7 @@ func getStatementCategory(sql []byte) int { type BinlogStreamer struct { // dbname and mysqld are set at creation. dbname string - mysqld *mysqlctl.Mysqld + mysqld mysqlctl.MysqlDaemon clientCharset *mproto.Charset startPos myproto.ReplicationPosition sendTransaction sendTransactionFunc @@ -79,7 +79,7 @@ type BinlogStreamer struct { // charset is the default character set on the BinlogPlayer side. // startPos is the position to start streaming at. // sendTransaction is called each time a transaction is committed or rolled back. 
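This step moves the SlaveConnection constructor onto the MysqlDaemon interface, so BinlogStreamer only needs the interface rather than the concrete type. FakeMysqlDaemon cannot produce a real replication connection, so its implementation panics. A sketch of that fail-loudly pattern for fakes (illustrative names only, not the actual Vitess code):

    package main

    import "fmt"

    // slaveConn is a hypothetical stand-in for *SlaveConnection.
    type slaveConn struct{}

    type daemon interface {
        NewSlaveConnection() (*slaveConn, error)
    }

    type fakeDaemon struct{}

    // NewSlaveConnection panics: any test that reaches this path is
    // using the fake beyond what it can support, and the panic makes
    // that immediately visible instead of silently returning nil.
    func (fakeDaemon) NewSlaveConnection() (*slaveConn, error) {
        panic(fmt.Errorf("not implemented on fakeDaemon"))
    }

    func main() {
        defer func() {
            if r := recover(); r != nil {
                fmt.Println("recovered:", r)
            }
        }()
        var d daemon = fakeDaemon{}
        d.NewSlaveConnection()
    }

In the actual change below, FakeMysqlDaemon.NewSlaveConnection panics with "not implemented on FakeMysqlDaemon" in just this way.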
-func NewBinlogStreamer(dbname string, mysqld *mysqlctl.Mysqld, clientCharset *mproto.Charset, startPos myproto.ReplicationPosition, sendTransaction sendTransactionFunc) *BinlogStreamer { +func NewBinlogStreamer(dbname string, mysqld mysqlctl.MysqlDaemon, clientCharset *mproto.Charset, startPos myproto.ReplicationPosition, sendTransaction sendTransactionFunc) *BinlogStreamer { return &BinlogStreamer{ dbname: dbname, mysqld: mysqld, @@ -99,7 +99,7 @@ func (bls *BinlogStreamer) Stream(ctx *sync2.ServiceContext) (err error) { log.Infof("stream ended @ %v, err = %v", stopPos, err) }() - if bls.conn, err = mysqlctl.NewSlaveConnection(bls.mysqld); err != nil { + if bls.conn, err = bls.mysqld.NewSlaveConnection(); err != nil { return err } defer bls.conn.Close() diff --git a/go/vt/binlog/event_streamer.go b/go/vt/binlog/event_streamer.go index 1c681cf1ad..c3b4c800be 100644 --- a/go/vt/binlog/event_streamer.go +++ b/go/vt/binlog/event_streamer.go @@ -39,7 +39,7 @@ type EventStreamer struct { sendEvent sendEventFunc } -func NewEventStreamer(dbname string, mysqld *mysqlctl.Mysqld, startPos myproto.ReplicationPosition, sendEvent sendEventFunc) *EventStreamer { +func NewEventStreamer(dbname string, mysqld mysqlctl.MysqlDaemon, startPos myproto.ReplicationPosition, sendEvent sendEventFunc) *EventStreamer { evs := &EventStreamer{ sendEvent: sendEvent, } diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index d3894304f9..b84621f622 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -45,7 +45,7 @@ type UpdateStream struct { actionLock sync.Mutex state sync2.AtomicInt64 - mysqld *mysqlctl.Mysqld + mysqld mysqlctl.MysqlDaemon stateWaitGroup sync.WaitGroup dbname string streams streamList @@ -121,7 +121,7 @@ func logError() { } // EnableUpdateStreamService enables the RPC service for UpdateStream -func EnableUpdateStreamService(dbname string, mysqld *mysqlctl.Mysqld) { +func EnableUpdateStreamService(dbname string, mysqld mysqlctl.MysqlDaemon) { defer logError() UpdateStreamRpcService.enable(dbname, mysqld) } @@ -148,7 +148,7 @@ func GetReplicationPosition() (myproto.ReplicationPosition, error) { return UpdateStreamRpcService.getReplicationPosition() } -func (updateStream *UpdateStream) enable(dbname string, mysqld *mysqlctl.Mysqld) { +func (updateStream *UpdateStream) enable(dbname string, mysqld mysqlctl.MysqlDaemon) { updateStream.actionLock.Lock() defer updateStream.actionLock.Unlock() if updateStream.isEnabled() { diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index ddf74c16f7..31720ebe71 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -69,6 +69,9 @@ type MysqlDaemon interface { // FetchSuperQuery executes one query, returns the result FetchSuperQuery(query string) (*mproto.QueryResult, error) + // NewSlaveConnection returns a SlaveConnection to the database. + NewSlaveConnection() (*SlaveConnection, error) + // Close will close this instance of Mysqld. It will wait for all dba // queries to be finished. 
Close() @@ -332,6 +335,11 @@ func (fmd *FakeMysqlDaemon) FetchSuperQuery(query string) (*mproto.QueryResult, return qr, nil } +// NewSlaveConnection is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) NewSlaveConnection() (*SlaveConnection, error) { + panic(fmt.Errorf("not implemented on FakeMysqlDaemon")) +} + // Close is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) Close() { } diff --git a/go/vt/mysqlctl/slave_connection.go b/go/vt/mysqlctl/slave_connection.go index 48c2b8cbad..666dc9df23 100644 --- a/go/vt/mysqlctl/slave_connection.go +++ b/go/vt/mysqlctl/slave_connection.go @@ -37,7 +37,7 @@ type SlaveConnection struct { // 1) No other processes are making fake slave connections to our mysqld. // 2) No real slave servers will have IDs in the range 1-N where N is the peak // number of concurrent fake slave connections we will ever make. -func NewSlaveConnection(mysqld *Mysqld) (*SlaveConnection, error) { +func (mysqld *Mysqld) NewSlaveConnection() (*SlaveConnection, error) { params, err := dbconfigs.MysqlParams(mysqld.dba) if err != nil { return nil, err diff --git a/go/vt/tabletmanager/after_action.go b/go/vt/tabletmanager/after_action.go index 78d0662c1c..5729087930 100644 --- a/go/vt/tabletmanager/after_action.go +++ b/go/vt/tabletmanager/after_action.go @@ -212,7 +212,7 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl // update stream needs to be started or stopped too if agent.DBConfigs != nil { if topo.IsRunningUpdateStream(newTablet.Type) { - binlog.EnableUpdateStreamService(agent.DBConfigs.App.DbName, agent.Mysqld) + binlog.EnableUpdateStreamService(agent.DBConfigs.App.DbName, agent.MysqlDaemon) } else { binlog.DisableUpdateStreamService() } From 7307085cd984d29432f646007efdba4e525a9003 Mon Sep 17 00:00:00 2001 From: Ammar Aijazi Date: Fri, 22 May 2015 10:10:03 -0700 Subject: [PATCH 081/128] Add checks in worker test to make sure that there's no extra data on destination shards --- test/worker.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/worker.py b/test/worker.py index ba63dcbd9e..027ffb08cf 100755 --- a/test/worker.py +++ b/test/worker.py @@ -246,10 +246,16 @@ class TestBaseSplitCloneResiliency(unittest.TestCase): """ select_query = 'select * from worker_test where msg="msg-shard-%s" order by id asc' % shard_num + # Make sure all the right rows made it from the source to the destination source_rows = source_tablet.mquery('vt_test_keyspace', select_query) destination_rows = destination_tablet.mquery('vt_test_keyspace', select_query) self.assertEqual(source_rows, destination_rows) + # Make sure that there are no extra rows on the destination + count_query = 'select count(*) from worker_test' + destination_count = destination_tablet.mquery('vt_test_keyspace', count_query)[0][0] + self.assertEqual(destination_count, len(destination_rows)) + def run_split_diff(self, keyspace_shard, source_tablets, destination_tablets): """Runs a vtworker SplitDiff on the given keyspace/shard, and then sets all former rdonly slaves back to rdonly. From ce8aed335eccf0def06dc57fcc742541e00e9b0a Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 22 May 2015 11:09:36 -0700 Subject: [PATCH 082/128] tabletserver now uses MysqlDaemon too. 
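With the tabletserver signatures widened from *mysqlctl.Mysqld to the mysqlctl.MysqlDaemon interface, components like the rowcache invalidator can be driven by a fake in unit tests. A sketch of the kind of test this enables, with hypothetical names and the standard Go testing idiom (not the actual Vitess test code):

    package tabletserver

    import "testing"

    // daemon is a hypothetical stand-in for mysqlctl.MysqlDaemon.
    type daemon interface {
        MasterPosition() (string, error)
    }

    type fakeDaemon struct{ pos string }

    func (f fakeDaemon) MasterPosition() (string, error) { return f.pos, nil }

    // openInvalidator mimics the shape of RowcacheInvalidator.Open: it
    // takes the interface, so tests can inject a fake position.
    func openInvalidator(d daemon) (string, error) {
        return d.MasterPosition()
    }

    func TestOpenInvalidatorUsesDaemonPosition(t *testing.T) {
        got, err := openInvalidator(fakeDaemon{pos: "MariaDB/0-1-42"})
        if err != nil || got != "MariaDB/0-1-42" {
            t.Fatalf("openInvalidator() = %q, %v; want MariaDB/0-1-42", got, err)
        }
    }

RowcacheInvalidator.Open, QueryEngine.Open, and SqlQuery.allowQueries all switch to the interface in the hunks below.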
--- go/vt/tabletmanager/after_action.go | 2 +- go/vt/tabletserver/query_engine.go | 2 +- go/vt/tabletserver/queryctl.go | 6 +++--- go/vt/tabletserver/rowcache_invalidator.go | 4 ++-- go/vt/tabletserver/sqlquery.go | 2 +- go/vt/tabletserver/testutils_test.go | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go/vt/tabletmanager/after_action.go b/go/vt/tabletmanager/after_action.go index 5729087930..6a99bc43ea 100644 --- a/go/vt/tabletmanager/after_action.go +++ b/go/vt/tabletmanager/after_action.go @@ -67,7 +67,7 @@ func (agent *ActionAgent) allowQueries(tablet *topo.Tablet, blacklistedTables [] return err } - return agent.QueryServiceControl.AllowQueries(agent.DBConfigs, agent.SchemaOverrides, agent.Mysqld) + return agent.QueryServiceControl.AllowQueries(agent.DBConfigs, agent.SchemaOverrides, agent.MysqlDaemon) } // loadKeyspaceAndBlacklistRules does what the name suggests: diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/tabletserver/query_engine.go index 7dc5ebb375..f95aaf06e3 100644 --- a/go/vt/tabletserver/query_engine.go +++ b/go/vt/tabletserver/query_engine.go @@ -186,7 +186,7 @@ func NewQueryEngine(config Config) *QueryEngine { } // Open must be called before sending requests to QueryEngine. -func (qe *QueryEngine) Open(dbconfigs *dbconfigs.DBConfigs, schemaOverrides []SchemaOverride, mysqld *mysqlctl.Mysqld) { +func (qe *QueryEngine) Open(dbconfigs *dbconfigs.DBConfigs, schemaOverrides []SchemaOverride, mysqld mysqlctl.MysqlDaemon) { qe.dbconfigs = dbconfigs appParams := dbconfigs.App.ConnParams // Create dba params based on App connection params diff --git a/go/vt/tabletserver/queryctl.go b/go/vt/tabletserver/queryctl.go index 82c18c9337..253bdcb433 100644 --- a/go/vt/tabletserver/queryctl.go +++ b/go/vt/tabletserver/queryctl.go @@ -168,7 +168,7 @@ type QueryServiceControl interface { AddStatusPart() // AllowQueries enables queries. - AllowQueries(*dbconfigs.DBConfigs, []SchemaOverride, *mysqlctl.Mysqld) error + AllowQueries(*dbconfigs.DBConfigs, []SchemaOverride, mysqlctl.MysqlDaemon) error // DisallowQueries shuts down the query service. DisallowQueries() @@ -225,7 +225,7 @@ func (tqsc *TestQueryServiceControl) AddStatusPart() { } // AllowQueries is part of the QueryServiceControl interface -func (tqsc *TestQueryServiceControl) AllowQueries(*dbconfigs.DBConfigs, []SchemaOverride, *mysqlctl.Mysqld) error { +func (tqsc *TestQueryServiceControl) AllowQueries(*dbconfigs.DBConfigs, []SchemaOverride, mysqlctl.MysqlDaemon) error { tqsc.QueryServiceEnabled = tqsc.AllowQueriesError == nil return tqsc.AllowQueriesError } @@ -296,7 +296,7 @@ func (rqsc *realQueryServiceControl) Register() { } // AllowQueries starts the query service. 
-func (rqsc *realQueryServiceControl) AllowQueries(dbconfigs *dbconfigs.DBConfigs, schemaOverrides []SchemaOverride, mysqld *mysqlctl.Mysqld) error { +func (rqsc *realQueryServiceControl) AllowQueries(dbconfigs *dbconfigs.DBConfigs, schemaOverrides []SchemaOverride, mysqld mysqlctl.MysqlDaemon) error { return rqsc.sqlQueryRPCService.allowQueries(dbconfigs, schemaOverrides, mysqld) } diff --git a/go/vt/tabletserver/rowcache_invalidator.go b/go/vt/tabletserver/rowcache_invalidator.go index 545d822886..ce2cbbb3b4 100644 --- a/go/vt/tabletserver/rowcache_invalidator.go +++ b/go/vt/tabletserver/rowcache_invalidator.go @@ -29,7 +29,7 @@ import ( type RowcacheInvalidator struct { qe *QueryEngine dbname string - mysqld *mysqlctl.Mysqld + mysqld mysqlctl.MysqlDaemon svm sync2.ServiceManager @@ -79,7 +79,7 @@ func NewRowcacheInvalidator(statsPrefix string, qe *QueryEngine, enablePublishSt } // Open runs the invalidation loop. -func (rci *RowcacheInvalidator) Open(dbname string, mysqld *mysqlctl.Mysqld) { +func (rci *RowcacheInvalidator) Open(dbname string, mysqld mysqlctl.MysqlDaemon) { rp, err := mysqld.MasterPosition() if err != nil { panic(NewTabletError(ErrFatal, "Rowcache invalidator aborting: cannot determine replication position: %v", err)) diff --git a/go/vt/tabletserver/sqlquery.go b/go/vt/tabletserver/sqlquery.go index 1980afde42..720e8c0b92 100644 --- a/go/vt/tabletserver/sqlquery.go +++ b/go/vt/tabletserver/sqlquery.go @@ -126,7 +126,7 @@ func (sq *SqlQuery) setState(state int64) { // If waitForMysql is set to true, allowQueries will not return // until it's able to connect to mysql. // No other operations are allowed when allowQueries is running. -func (sq *SqlQuery) allowQueries(dbconfigs *dbconfigs.DBConfigs, schemaOverrides []SchemaOverride, mysqld *mysqlctl.Mysqld) (err error) { +func (sq *SqlQuery) allowQueries(dbconfigs *dbconfigs.DBConfigs, schemaOverrides []SchemaOverride, mysqld mysqlctl.MysqlDaemon) (err error) { sq.mu.Lock() if sq.state == StateServing { sq.mu.Unlock() diff --git a/go/vt/tabletserver/testutils_test.go b/go/vt/tabletserver/testutils_test.go index bc423015e9..effc8b5c04 100644 --- a/go/vt/tabletserver/testutils_test.go +++ b/go/vt/tabletserver/testutils_test.go @@ -90,7 +90,7 @@ func (util *testUtils) getTabletErrorString(tabletErrorType int) string { return "" } -func (util *testUtils) newMysqld(dbconfigs *dbconfigs.DBConfigs) *mysqlctl.Mysqld { +func (util *testUtils) newMysqld(dbconfigs *dbconfigs.DBConfigs) mysqlctl.MysqlDaemon { return mysqlctl.NewMysqld( "", "", From 0979197a8cc417e9cae4014b968f1c056a455858 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 22 May 2015 11:12:33 -0700 Subject: [PATCH 083/128] Removing *Mysqld from Agent now. --- go/vt/tabletmanager/agent.go | 5 +---- go/vt/tabletmanager/init_tablet_test.go | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/go/vt/tabletmanager/agent.go b/go/vt/tabletmanager/agent.go index 9b1fc0d57f..d928b844b0 100644 --- a/go/vt/tabletmanager/agent.go +++ b/go/vt/tabletmanager/agent.go @@ -60,7 +60,6 @@ type ActionAgent struct { HealthReporter health.Reporter TopoServer topo.Server TabletAlias topo.TabletAlias - Mysqld *mysqlctl.Mysqld MysqlDaemon mysqlctl.MysqlDaemon DBConfigs *dbconfigs.DBConfigs SchemaOverrides []tabletserver.SchemaOverride @@ -126,7 +125,7 @@ func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride { // it spawns. 
func NewActionAgent( batchCtx context.Context, - mysqld *mysqlctl.Mysqld, + mysqld mysqlctl.MysqlDaemon, queryServiceControl tabletserver.QueryServiceControl, tabletAlias topo.TabletAlias, dbcfgs *dbconfigs.DBConfigs, @@ -145,7 +144,6 @@ func NewActionAgent( batchCtx: batchCtx, TopoServer: topoServer, TabletAlias: tabletAlias, - Mysqld: mysqld, MysqlDaemon: mysqld, DBConfigs: dbcfgs, SchemaOverrides: schemaOverrides, @@ -221,7 +219,6 @@ func NewTestActionAgent(batchCtx context.Context, ts topo.Server, tabletAlias to batchCtx: batchCtx, TopoServer: ts, TabletAlias: tabletAlias, - Mysqld: nil, MysqlDaemon: mysqlDaemon, DBConfigs: nil, SchemaOverrides: nil, diff --git a/go/vt/tabletmanager/init_tablet_test.go b/go/vt/tabletmanager/init_tablet_test.go index 402b712360..9f307259c1 100644 --- a/go/vt/tabletmanager/init_tablet_test.go +++ b/go/vt/tabletmanager/init_tablet_test.go @@ -35,7 +35,6 @@ func TestInitTablet(t *testing.T) { agent := &ActionAgent{ TopoServer: ts, TabletAlias: tabletAlias, - Mysqld: nil, MysqlDaemon: mysqlDaemon, DBConfigs: nil, SchemaOverrides: nil, From 5b039af896d3a43c4c0594f1a045900ebb6f82b3 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 22 May 2015 12:55:06 -0700 Subject: [PATCH 084/128] Revert "Removing now empty {Enable,Disable}BinlogServer methods." This reverts commit 80678fa46b8b3b000f6ef916e23c75429d1cb49f. --- go/vt/mysqlctl/mysql_flavor.go | 10 ++++++++++ go/vt/mysqlctl/mysql_flavor_mariadb.go | 10 ++++++++++ go/vt/mysqlctl/mysql_flavor_mysql56.go | 10 ++++++++++ go/vt/mysqlctl/mysql_flavor_test.go | 2 ++ go/vt/mysqlctl/replication.go | 20 ++++++++++++++++++++ go/vt/tabletmanager/binlog.go | 22 ++++++++++++++++++++++ 6 files changed, 74 insertions(+) diff --git a/go/vt/mysqlctl/mysql_flavor.go b/go/vt/mysqlctl/mysql_flavor.go index 080aba07d5..5ae79d67f8 100644 --- a/go/vt/mysqlctl/mysql_flavor.go +++ b/go/vt/mysqlctl/mysql_flavor.go @@ -73,6 +73,16 @@ type MysqlFlavor interface { // WaitMasterPos waits until slave replication reaches at // least targetPos. WaitMasterPos(mysqld *Mysqld, targetPos proto.ReplicationPosition, waitTimeout time.Duration) error + + // EnableBinlogPlayback prepares the server to play back + // events from a binlog stream. Whatever it does for a given + // flavor, it must be idempotent. + EnableBinlogPlayback(mysqld *Mysqld) error + + // DisableBinlogPlayback returns the server to the normal + // state after playback is done. Whatever it does for a given + // flavor, it must be idempotent. + DisableBinlogPlayback(mysqld *Mysqld) error } var mysqlFlavors = make(map[string]MysqlFlavor) diff --git a/go/vt/mysqlctl/mysql_flavor_mariadb.go b/go/vt/mysqlctl/mysql_flavor_mariadb.go index 83cbeac93d..3687b91bab 100644 --- a/go/vt/mysqlctl/mysql_flavor_mariadb.go +++ b/go/vt/mysqlctl/mysql_flavor_mariadb.go @@ -177,6 +177,16 @@ func (*mariaDB10) MakeBinlogEvent(buf []byte) blproto.BinlogEvent { return NewMariadbBinlogEvent(buf) } +// EnableBinlogPlayback implements MysqlFlavor.EnableBinlogPlayback(). +func (*mariaDB10) EnableBinlogPlayback(mysqld *Mysqld) error { + return nil +} + +// DisableBinlogPlayback implements MysqlFlavor.DisableBinlogPlayback(). +func (*mariaDB10) DisableBinlogPlayback(mysqld *Mysqld) error { + return nil +} + // mariadbBinlogEvent wraps a raw packet buffer and provides methods to examine // it by implementing blproto.BinlogEvent. Some methods are pulled in from // binlogEvent. 
diff --git a/go/vt/mysqlctl/mysql_flavor_mysql56.go b/go/vt/mysqlctl/mysql_flavor_mysql56.go index 6bb1c03265..d75eb6bc7b 100644 --- a/go/vt/mysqlctl/mysql_flavor_mysql56.go +++ b/go/vt/mysqlctl/mysql_flavor_mysql56.go @@ -158,6 +158,16 @@ func (*mysql56) MakeBinlogEvent(buf []byte) blproto.BinlogEvent { return NewMysql56BinlogEvent(buf) } +// EnableBinlogPlayback implements MysqlFlavor.EnableBinlogPlayback(). +func (*mysql56) EnableBinlogPlayback(mysqld *Mysqld) error { + return nil +} + +// DisableBinlogPlayback implements MysqlFlavor.DisableBinlogPlayback(). +func (*mysql56) DisableBinlogPlayback(mysqld *Mysqld) error { + return nil +} + // mysql56BinlogEvent wraps a raw packet buffer and provides methods to examine // it by implementing blproto.BinlogEvent. Some methods are pulled in from // binlogEvent. diff --git a/go/vt/mysqlctl/mysql_flavor_test.go b/go/vt/mysqlctl/mysql_flavor_test.go index 7b287dfe92..ea74350e54 100644 --- a/go/vt/mysqlctl/mysql_flavor_test.go +++ b/go/vt/mysqlctl/mysql_flavor_test.go @@ -42,6 +42,8 @@ func (fakeMysqlFlavor) StartReplicationCommands(params *sqldb.ConnParams, status func (fakeMysqlFlavor) SetMasterCommands(params *sqldb.ConnParams, masterHost string, masterPort int, masterConnectRetry int) ([]string, error) { return nil, nil } +func (fakeMysqlFlavor) EnableBinlogPlayback(mysqld *Mysqld) error { return nil } +func (fakeMysqlFlavor) DisableBinlogPlayback(mysqld *Mysqld) error { return nil } func TestMysqlFlavorEnvironmentVariable(t *testing.T) { os.Setenv("MYSQL_FLAVOR", "fake flavor") diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index a85495a83f..a261aa888b 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -331,3 +331,23 @@ func WaitBlpPosition(mysqld MysqlDaemon, bp *blproto.BlpPosition, waitTimeout ti return fmt.Errorf("WaitBlpPosition(%v) timed out", bp.Uid) } + +// EnableBinlogPlayback prepares the server to play back events from a binlog stream. +// Whatever it does for a given flavor, it must be idempotent. +func (mysqld *Mysqld) EnableBinlogPlayback() error { + flavor, err := mysqld.flavor() + if err != nil { + return fmt.Errorf("EnableBinlogPlayback needs flavor: %v", err) + } + return flavor.EnableBinlogPlayback(mysqld) +} + +// DisableBinlogPlayback returns the server to the normal state after streaming. +// Whatever it does for a given flavor, it must be idempotent. +func (mysqld *Mysqld) DisableBinlogPlayback() error { + flavor, err := mysqld.flavor() + if err != nil { + return fmt.Errorf("DisableBinlogPlayback needs flavor: %v", err) + } + return flavor.DisableBinlogPlayback(mysqld) +} diff --git a/go/vt/tabletmanager/binlog.go b/go/vt/tabletmanager/binlog.go index c81bf1c674..7219131146 100644 --- a/go/vt/tabletmanager/binlog.go +++ b/go/vt/tabletmanager/binlog.go @@ -210,6 +210,13 @@ func (bpc *BinlogPlayerController) Iteration() (err error) { } }() + // Apply any special settings necessary for playback of binlogs. + // We do it on every iteration to be sure, in case MySQL was restarted. + if err := bpc.mysqld.EnableBinlogPlayback(); err != nil { + // We failed to apply the required settings, so we shouldn't keep going. + return err + } + // create the db connection, connect it vtClient := binlogplayer.NewDbClient(bpc.dbConfig) if err := vtClient.Connect(); err != nil { @@ -381,14 +388,21 @@ func (blm *BinlogPlayerMap) addPlayer(cell string, keyspaceIdType key.KeyspaceId // StopAllPlayersAndReset stops all the binlog players, and reset the map of players. 
func (blm *BinlogPlayerMap) StopAllPlayersAndReset() { + hadPlayers := false blm.mu.Lock() for _, bpc := range blm.players { if blm.state == BpmStateRunning { bpc.Stop() } + hadPlayers = true } blm.players = make(map[uint32]*BinlogPlayerController) blm.mu.Unlock() + + if hadPlayers { + // We're done streaming, so turn off special playback settings. + blm.mysqld.DisableBinlogPlayback() + } } // RefreshMap reads the right data from topo.Server and makes sure @@ -412,8 +426,10 @@ func (blm *BinlogPlayerMap) RefreshMap(tablet *topo.Tablet, keyspaceInfo *topo.K // get the existing sources and build a map of sources to remove toRemove := make(map[uint32]bool) + hadPlayers := false for source := range blm.players { toRemove[source] = true + hadPlayers = true } // for each source, add it if not there, and delete from toRemove @@ -421,6 +437,7 @@ func (blm *BinlogPlayerMap) RefreshMap(tablet *topo.Tablet, keyspaceInfo *topo.K blm.addPlayer(tablet.Alias.Cell, keyspaceInfo.ShardingColumnType, tablet.KeyRange, sourceShard, tablet.DbName()) delete(toRemove, sourceShard.Uid) } + hasPlayers := len(shardInfo.SourceShards) > 0 // remove all entries from toRemove for source := range toRemove { @@ -429,6 +446,11 @@ func (blm *BinlogPlayerMap) RefreshMap(tablet *topo.Tablet, keyspaceInfo *topo.K } blm.mu.Unlock() + + if hadPlayers && !hasPlayers { + // We're done streaming, so turn off special playback settings. + blm.mysqld.DisableBinlogPlayback() + } } // Stop stops the current players, but does not remove them from the map. From f49b4877eb0aba4a23b06d848f94399238294e74 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 22 May 2015 13:04:40 -0700 Subject: [PATCH 085/128] Adding {Enable,Disable}BinlogPlayback to MysqlDaemon. --- go/vt/mysqlctl/mysql_daemon.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 31720ebe71..de32676dd4 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -72,6 +72,12 @@ type MysqlDaemon interface { // NewSlaveConnection returns a SlaveConnection to the database. NewSlaveConnection() (*SlaveConnection, error) + // EnableBinlogPlayback enables playback of binlog events + EnableBinlogPlayback() error + + // DisableBinlogPlayback disables playback of binlog events + DisableBinlogPlayback() error + // Close will close this instance of Mysqld. It will wait for all dba // queries to be finished.
Close() @@ -174,6 +180,9 @@ type FakeMysqlDaemon struct { // FetchSuperQueryResults is used by FetchSuperQuery FetchSuperQueryMap map[string]*mproto.QueryResult + + // BinlogPlayerEnabled is used by {Enable,Disable}BinlogPlayback + BinlogPlayerEnabled bool } // NewFakeMysqlDaemon returns a FakeMysqlDaemon where mysqld appears @@ -340,6 +349,24 @@ func (fmd *FakeMysqlDaemon) NewSlaveConnection() (*SlaveConnection, error) { panic(fmt.Errorf("not implemented on FakeMysqlDaemon")) } +// EnableBinlogPlayback is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) EnableBinlogPlayback() error { + if fmd.BinlogPlayerEnabled { + return fmt.Errorf("binlog player already enabled") + } + fmd.BinlogPlayerEnabled = true + return nil +} + +// DisableBinlogPlayback is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) DisableBinlogPlayback() error { + if !fmd.BinlogPlayerEnabled { + return fmt.Errorf("binlog player already disabled") + } + fmd.BinlogPlayerEnabled = false + return nil +} + // Close is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) Close() { } From cd593d62cc95f87bcfa3e8db2af126decd24bbfc Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 26 May 2015 07:21:51 -0700 Subject: [PATCH 086/128] Adding doc for OS Backup and Restore. --- doc/BackupAndRestore.md | 54 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 doc/BackupAndRestore.md diff --git a/doc/BackupAndRestore.md b/doc/BackupAndRestore.md new file mode 100644 index 0000000000..f1ce8a4c17 --- /dev/null +++ b/doc/BackupAndRestore.md @@ -0,0 +1,54 @@ +# Backup and Restore + +This document describes the Vitess Backup and Restore strategy. + +### Overview + +Backups are used in Vitess for two purposes: to provide a point-in-time backup of the data, and to bootstrap new instances. + +### Backup Storage + +Backups are stored on a Backup Storage service. The Vitess core software contains an implementation that uses a local filesystem to store the files; any network-mounted drive can then be used as the repository for backups. + +We have plans to implement a version of the Backup Storage service for Google Cloud Storage (contact us if you are interested). + +(The interface definition for the Backup Storage service is in [interface.go](https://github.com/youtube/vitess/blob/master/go/vt/mysqlctl/backupstorage/interface.go); see the comments there for more details.) + +Concretely, the following command line flags are used for Backup Storage: +* -backup\_storage\_implementation: which implementation of the Backup Storage interface to use. +* -file\_backup\_storage\_root: the root of the backups if 'file' is used as the Backup Storage. + +### Taking a Backup + +Taking a backup is very straightforward: just run the 'vtctl Backup <tablet alias>' command. The designated tablet will take itself out of the healthy serving tablets, shut down its mysqld process, copy the necessary files to the Backup Storage, restart mysql, restart replication, and rejoin the cluster. + +With health-check enabled (the recommended default), the tablet goes back to the spare state. Once it catches up on replication, it will go back to a serving state. + +Note that for this to work correctly, the tablet must be started with the right parameters to point it to the Backup Storage system (see the previous section). + +### Life of a Shard + +To illustrate how backups are used in Vitess to bootstrap new instances, let's go through the creation and life of a shard: +* A shard is initially brought up with no existing backup.
All instances are started as replicas. With health-check enabled (the recommended default), each instance will realize replication is not running, and just stay unhealthy as spare. +* Once a few replicas are up, InitShardMaster is run: one host becomes the master, the others become replicas. The master becomes healthy; the replicas do not, as no database exists yet. +* The initial schema can then be applied to the master. Either use the usual schema change tools, or use CopySchemaShard for shards created as targets for resharding. +* After replicating the schema creation, all replicas become healthy. At this point, we have a working and functional shard. +* The initial backup is taken (it stores the data and the current replication position), and the backup data is copied to network storage. +* When a replica comes up (either a new replica, or one whose instance was just restarted), it restores the latest backup, resets its master to the current shard master, and starts replicating. +* A cron job to back up the data on a regular basis should then be run. The frequency of the backups should be high enough (compared to the MySQL binlog retention), so that we always have a backup to fall back upon. + +Restoring a backup is enabled by the --restore\_from\_backup command line option in vttablet. It can be enabled all the time for all the tablets in a shard, as it doesn't prevent vttablet from starting if no backup can be found. + +### Backup Management + +Two vtctl commands exist to manage the backups: +* 'vtctl ListBackups <keyspace/shard>' will display the existing backups for a keyspace/shard in the order they were taken (oldest first). +* 'vtctl RemoveBackup <keyspace/shard> <backup name>' will remove a backup from Backup Storage. + +### Details + +Both Backup and Restore copy and compress / decompress multiple files simultaneously to increase throughput. The concurrency can be controlled by command-line flags (-concurrency for 'vtctl Backup', and -restore\_concurrency for vttablet). If the network link is fast enough, the concurrency will match the CPU usage of the process during backup / restore. + + + + From 3c560fca01347448868722c18e95bd98a1c83be5 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Tue, 26 May 2015 12:21:44 -0700 Subject: [PATCH 087/128] java/vtgate-client: Reenable integration test which was temporarily disabled for debugging.
--- .../java/com/youtube/vitess/vtgate/integration/FailuresIT.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/FailuresIT.java b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/FailuresIT.java index aa3acab9a9..a0225c5505 100644 --- a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/FailuresIT.java +++ b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/FailuresIT.java @@ -19,7 +19,6 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -52,7 +51,6 @@ public class FailuresIT { } @Test - @Ignore("causing other tests to fail") public void testIntegrityException() throws Exception { VtGate vtgate = VtGate.connect("localhost:" + testEnv.port, 0); String insertSql = "insert into vtgate_test(id, keyspace_id) values (:id, :keyspace_id)"; From da11ffeb54f2ce752df052df4222ce266e944073 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Tue, 26 May 2015 12:28:01 -0700 Subject: [PATCH 088/128] Import various Java changes. --- .../src/main/java/com/youtube/vitess/vtgate/Exceptions.java | 2 +- .../src/main/java/com/youtube/vitess/vtgate/Row.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Exceptions.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Exceptions.java index 1bf9839391..1bda9fc2bd 100644 --- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Exceptions.java +++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Exceptions.java @@ -36,7 +36,7 @@ public class Exceptions { * Exception raised due to fetching a non-existent field or with the wrong type */ @SuppressWarnings("serial") - public static class InvalidFieldException extends Exception { + public static class InvalidFieldException extends RuntimeException { public InvalidFieldException(String message) { super(message); } diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Row.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Row.java index 2e6e2600bd..dfece75e91 100644 --- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Row.java +++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Row.java @@ -10,13 +10,13 @@ import org.joda.time.DateTime; import java.math.BigDecimal; import java.util.Iterator; -import java.util.LinkedList; +import java.util.List; public class Row implements Iterator, Iterable { private ImmutableMap contents; private Iterator iterator; - public Row(LinkedList cells) { + public Row(List cells) { ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); for (Cell cell : cells) { builder.put(cell.getName(), cell); From 8f395aca43083037a53b58a9431ad51a20c0d4b7 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 26 May 2015 13:46:45 -0700 Subject: [PATCH 089/128] Adding context to all topo.Server APIs. 
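Threading a Context as the first argument through every topo.Server call lets callers bound or cancel topology lookups instead of blocking indefinitely. A minimal sketch of the calling convention being introduced, with a hypothetical function and the standard library context package in place of the golang.org/x/net/context package the patch itself imports:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // getKeyspace mimics the new signature style: ctx comes first, so
    // the caller controls cancellation and deadlines.
    func getKeyspace(ctx context.Context, name string) (string, error) {
        select {
        case <-time.After(50 * time.Millisecond): // pretend topo lookup
            return "keyspace " + name, nil
        case <-ctx.Done():
            return "", ctx.Err()
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
        defer cancel()
        if _, err := getKeyspace(ctx, "ks1"); err != nil {
            fmt.Println("lookup aborted:", err) // context deadline exceeded
        }
    }

In the patch itself, entry points simply create ctx := context.Background() and pass it down, as the vtctld handlers and topo cache changes below show.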
--- go/cmd/topo2topo/topo2topo.go | 10 +- go/cmd/vtctld/tablet_data.go | 4 +- go/cmd/vtctld/topo_data.go | 35 +-- go/cmd/vtctld/topo_data_test.go | 50 +++-- go/cmd/vtctld/vtctld.go | 37 +-- go/cmd/vtgate/vtgate.go | 4 +- go/cmd/vtworker/command.go | 3 +- go/cmd/vtworker/interactive.go | 4 +- go/cmd/vtworker/split_clone.go | 11 +- go/cmd/vtworker/split_diff.go | 13 +- go/cmd/vtworker/vertical_split_clone.go | 11 +- go/cmd/vtworker/vertical_split_diff.go | 13 +- go/vt/client2/sharded.go | 7 +- go/vt/etcdtopo/explorer_test.go | 16 +- go/vt/etcdtopo/keyspace.go | 13 +- go/vt/etcdtopo/lock.go | 6 +- go/vt/etcdtopo/replication_graph.go | 7 +- go/vt/etcdtopo/server.go | 3 +- go/vt/etcdtopo/server_test.go | 30 ++- go/vt/etcdtopo/serving_graph.go | 25 ++- go/vt/etcdtopo/shard.go | 15 +- go/vt/etcdtopo/tablet.go | 23 +- go/vt/etcdtopo/vschema.go | 5 +- go/vt/schemamanager/schemamanager_test.go | 72 +++--- go/vt/schemamanager/tablet_executor.go | 7 +- go/vt/tabletmanager/actionnode/utils.go | 6 +- go/vt/tabletmanager/after_action.go | 4 +- go/vt/tabletmanager/agent.go | 26 +-- go/vt/tabletmanager/agent_rpc_actions.go | 20 +- go/vt/tabletmanager/binlog.go | 33 +-- go/vt/tabletmanager/healthcheck.go | 2 +- go/vt/tabletmanager/healthcheck_test.go | 48 ++-- go/vt/tabletmanager/init_tablet.go | 4 +- go/vt/tabletmanager/init_tablet_test.go | 21 +- go/vt/tabletmanager/restore.go | 11 +- go/vt/topo/helpers/copy.go | 49 ++-- go/vt/topo/helpers/copy_test.go | 42 ++-- go/vt/topo/helpers/tee.go | 212 +++++++++--------- go/vt/topo/helpers/tee_test.go | 18 +- go/vt/topo/helpers/tee_topo_test.go | 26 ++- go/vt/topo/keyspace.go | 11 +- go/vt/topo/naming.go | 5 +- go/vt/topo/replication.go | 16 +- go/vt/topo/server.go | 76 +++---- go/vt/topo/serving_graph.go | 2 +- go/vt/topo/shard.go | 12 +- go/vt/topo/tablet.go | 24 +- go/vt/topo/test/faketopo/faketopo.go | 72 +++--- go/vt/topo/test/faketopo/fixture.go | 2 +- go/vt/topo/test/keyspace.go | 30 +-- go/vt/topo/test/lock.go | 88 ++++---- go/vt/topo/test/replication.go | 19 +- go/vt/topo/test/serving.go | 54 ++--- go/vt/topo/test/shard.go | 20 +- go/vt/topo/test/tablet.go | 24 +- go/vt/topo/test/testing.go | 5 +- go/vt/topo/test/vschema.go | 16 +- go/vt/topo/wildcards.go | 21 +- go/vt/topo/wildcards_test.go | 14 +- go/vt/topotools/rebuild.go | 12 +- go/vt/topotools/rebuild_test.go | 8 +- go/vt/topotools/shard.go | 10 +- go/vt/topotools/shard_test.go | 2 +- go/vt/topotools/split.go | 5 +- go/vt/topotools/tablet.go | 6 +- go/vt/topotools/topology.go | 10 +- go/vt/topotools/utils.go | 4 +- go/vt/vtctl/plugin_zktopo.go | 2 +- go/vt/vtctl/reparent.go | 2 +- go/vt/vtctl/vtctl.go | 76 +++---- go/vt/vtctl/vtctlclienttest/client.go | 5 +- go/vt/vtgate/sandbox_test.go | 24 +- go/vt/vtgate/srv_topo_server.go | 176 +++++++-------- go/vt/vtgate/srv_topo_server_test.go | 10 +- go/vt/worker/diff_utils.go | 2 +- go/vt/worker/split_clone.go | 10 +- go/vt/worker/split_clone_test.go | 2 +- go/vt/worker/split_diff.go | 14 +- go/vt/worker/split_diff_test.go | 2 +- go/vt/worker/sqldiffer.go | 4 +- go/vt/worker/topo_utils.go | 8 +- go/vt/worker/vertical_split_clone.go | 8 +- go/vt/worker/vertical_split_clone_test.go | 4 +- go/vt/worker/vertical_split_diff.go | 14 +- go/vt/worker/vertical_split_diff_test.go | 2 +- go/vt/wrangler/cleaner.go | 2 +- go/vt/wrangler/hook.go | 2 +- go/vt/wrangler/keyspace.go | 46 ++-- go/vt/wrangler/permissions.go | 8 +- go/vt/wrangler/rebuild.go | 14 +- go/vt/wrangler/reparent.go | 16 +- go/vt/wrangler/schema.go | 26 +-- go/vt/wrangler/shard.go | 26 +-- 
go/vt/wrangler/split.go | 2 +- go/vt/wrangler/tablet.go | 24 +- go/vt/wrangler/testlib/backup_test.go | 4 +- .../testlib/init_shard_master_test.go | 6 +- .../testlib/reparent_external_test.go | 23 +- go/vt/wrangler/testlib/reparent_utils_test.go | 8 +- go/vt/wrangler/validator.go | 16 +- go/vt/wrangler/version.go | 22 +- go/vt/wrangler/zkns.go | 12 +- go/vt/zktopo/cell.go | 4 +- go/vt/zktopo/keyspace.go | 11 +- go/vt/zktopo/lock.go | 12 +- go/vt/zktopo/replication_graph.go | 7 +- go/vt/zktopo/serving_graph.go | 25 ++- go/vt/zktopo/shard.go | 13 +- go/vt/zktopo/tablet.go | 19 +- go/vt/zktopo/testserver.go | 18 +- go/vt/zktopo/vschema.go | 5 +- go/vt/zktopo/zktopo_test.go | 30 ++- 112 files changed, 1195 insertions(+), 1085 deletions(-) diff --git a/go/cmd/topo2topo/topo2topo.go b/go/cmd/topo2topo/topo2topo.go index d379ba1bc8..c08b45c78f 100644 --- a/go/cmd/topo2topo/topo2topo.go +++ b/go/cmd/topo2topo/topo2topo.go @@ -13,6 +13,7 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/helpers" + "golang.org/x/net/context" ) var fromTopo = flag.String("from", "", "topology to copy data from") @@ -41,19 +42,20 @@ func main() { exit.Return(1) } + ctx := context.Background() fromTS := topo.GetServerByName(*fromTopo) toTS := topo.GetServerByName(*toTopo) if *doKeyspaces { - helpers.CopyKeyspaces(fromTS, toTS) + helpers.CopyKeyspaces(ctx, fromTS, toTS) } if *doShards { - helpers.CopyShards(fromTS, toTS, *deleteKeyspaceShards) + helpers.CopyShards(ctx, fromTS, toTS, *deleteKeyspaceShards) } if *doShardReplications { - helpers.CopyShardReplications(fromTS, toTS) + helpers.CopyShardReplications(ctx, fromTS, toTS) } if *doTablets { - helpers.CopyTablets(fromTS, toTS) + helpers.CopyTablets(ctx, fromTS, toTS) } } diff --git a/go/cmd/vtctld/tablet_data.go b/go/cmd/vtctld/tablet_data.go index ed789dbf41..e5b4ebdb58 100644 --- a/go/cmd/vtctld/tablet_data.go +++ b/go/cmd/vtctld/tablet_data.go @@ -48,12 +48,12 @@ func newTabletHealth(thc *tabletHealthCache, tabletAlias topo.TabletAlias) (*Tab func (th *TabletHealth) update(thc *tabletHealthCache, tabletAlias topo.TabletAlias) { defer thc.delete(tabletAlias) - ti, err := thc.ts.GetTablet(tabletAlias) + ctx := context.Background() + ti, err := thc.ts.GetTablet(ctx, tabletAlias) if err != nil { return } - ctx := context.Background() c, errFunc, err := thc.tmc.HealthStream(ctx, ti) if err != nil { return diff --git a/go/cmd/vtctld/topo_data.go b/go/cmd/vtctld/topo_data.go index be5220ddb8..969a574a26 100644 --- a/go/cmd/vtctld/topo_data.go +++ b/go/cmd/vtctld/topo_data.go @@ -9,6 +9,7 @@ import ( "time" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) // This file includes the support for serving topo data to an ajax-based @@ -47,7 +48,7 @@ func (bvo *BaseVersionedObject) SetVersion(version int) { // GetVersionedObjectFunc is the function the cache will call to get // the object itself. -type GetVersionedObjectFunc func() (VersionedObject, error) +type GetVersionedObjectFunc func(ctx context.Context) (VersionedObject, error) // VersionedObjectCache is the main cache object. Just needs a method to get // the content. @@ -68,7 +69,7 @@ func NewVersionedObjectCache(getObject GetVersionedObjectFunc) *VersionedObjectC } // Get returns the versioned value from the cache. 
-func (voc *VersionedObjectCache) Get() ([]byte, error) { +func (voc *VersionedObjectCache) Get(ctx context.Context) ([]byte, error) { voc.mu.Lock() defer voc.mu.Unlock() @@ -77,7 +78,7 @@ func (voc *VersionedObjectCache) Get() ([]byte, error) { return voc.result, nil } - newObject, err := voc.getObject() + newObject, err := voc.getObject(ctx) if err != nil { return nil, err } @@ -142,7 +143,7 @@ func NewVersionedObjectCacheMap(factory VersionedObjectCacheFactory) *VersionedO } // Get finds the right VersionedObjectCache and returns its value -func (vocm *VersionedObjectCacheMap) Get(key string) ([]byte, error) { +func (vocm *VersionedObjectCacheMap) Get(ctx context.Context, key string) ([]byte, error) { vocm.mu.Lock() voc, ok := vocm.cacheMap[key] if !ok { @@ -151,7 +152,7 @@ func (vocm *VersionedObjectCacheMap) Get(key string) ([]byte, error) { } vocm.mu.Unlock() - return voc.Get() + return voc.Get(ctx) } // Flush will flush the entire cache @@ -177,8 +178,8 @@ func (kc *KnownCells) Reset() { } func newKnownCellsCache(ts topo.Server) *VersionedObjectCache { - return NewVersionedObjectCache(func() (VersionedObject, error) { - cells, err := ts.GetKnownCells() + return NewVersionedObjectCache(func(ctx context.Context) (VersionedObject, error) { + cells, err := ts.GetKnownCells(ctx) if err != nil { return nil, err } @@ -202,8 +203,8 @@ func (k *Keyspaces) Reset() { } func newKeyspacesCache(ts topo.Server) *VersionedObjectCache { - return NewVersionedObjectCache(func() (VersionedObject, error) { - keyspaces, err := ts.GetKeyspaces() + return NewVersionedObjectCache(func(ctx context.Context) (VersionedObject, error) { + keyspaces, err := ts.GetKeyspaces(ctx) if err != nil { return nil, err } @@ -232,8 +233,8 @@ func (k *Keyspace) Reset() { func newKeyspaceCache(ts topo.Server) *VersionedObjectCacheMap { return NewVersionedObjectCacheMap(func(key string) *VersionedObjectCache { - return NewVersionedObjectCache(func() (VersionedObject, error) { - k, err := ts.GetKeyspace(key) + return NewVersionedObjectCache(func(ctx context.Context) (VersionedObject, error) { + k, err := ts.GetKeyspace(ctx, key) if err != nil { return nil, err } @@ -264,8 +265,8 @@ func (s *ShardNames) Reset() { func newShardNamesCache(ts topo.Server) *VersionedObjectCacheMap { return NewVersionedObjectCacheMap(func(key string) *VersionedObjectCache { - return NewVersionedObjectCache(func() (VersionedObject, error) { - sn, err := ts.GetShardNames(key) + return NewVersionedObjectCache(func(ctx context.Context) (VersionedObject, error) { + sn, err := ts.GetShardNames(ctx, key) if err != nil { return nil, err } @@ -301,13 +302,13 @@ func (s *Shard) Reset() { func newShardCache(ts topo.Server) *VersionedObjectCacheMap { return NewVersionedObjectCacheMap(func(key string) *VersionedObjectCache { - return NewVersionedObjectCache(func() (VersionedObject, error) { + return NewVersionedObjectCache(func(ctx context.Context) (VersionedObject, error) { keyspace, shard, err := topo.ParseKeyspaceShardString(key) if err != nil { return nil, err } - s, err := ts.GetShard(keyspace, shard) + s, err := ts.GetShard(ctx, keyspace, shard) if err != nil { return nil, err } @@ -349,12 +350,12 @@ func (cst *CellShardTablets) Reset() { func newCellShardTabletsCache(ts topo.Server) *VersionedObjectCacheMap { return NewVersionedObjectCacheMap(func(key string) *VersionedObjectCache { - return NewVersionedObjectCache(func() (VersionedObject, error) { + return NewVersionedObjectCache(func(ctx context.Context) (VersionedObject, error) { parts := 
strings.Split(key, "/") if len(parts) != 3 { return nil, fmt.Errorf("Invalid shard tablets path: %v", key) } - sr, err := ts.GetShardReplication(parts[0], parts[1], parts[2]) + sr, err := ts.GetShardReplication(ctx, parts[0], parts[1], parts[2]) if err != nil { return nil, err } diff --git a/go/cmd/vtctld/topo_data_test.go b/go/cmd/vtctld/topo_data_test.go index 8e34c8d4a6..3b5c1dbd7f 100644 --- a/go/cmd/vtctld/topo_data_test.go +++ b/go/cmd/vtctld/topo_data_test.go @@ -8,10 +8,12 @@ import ( "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/zktopo" + "golang.org/x/net/context" ) func testVersionedObjectCache(t *testing.T, voc *VersionedObjectCache, vo VersionedObject, expectedVO VersionedObject) { - result, err := voc.Get() + ctx := context.Background() + result, err := voc.Get(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -26,7 +28,7 @@ func testVersionedObjectCache(t *testing.T, voc *VersionedObjectCache, vo Versio t.Fatalf("Got bad result: %#v expected: %#v", vo, expectedVO) } - result2, err := voc.Get() + result2, err := voc.Get(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -36,7 +38,7 @@ func testVersionedObjectCache(t *testing.T, voc *VersionedObjectCache, vo Versio // force a re-get with same content, version shouldn't change voc.timestamp = time.Time{} - result2, err = voc.Get() + result2, err = voc.Get(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -47,7 +49,7 @@ func testVersionedObjectCache(t *testing.T, voc *VersionedObjectCache, vo Versio // force a reget with different content, version should change voc.timestamp = time.Time{} voc.versionedObject.Reset() // poking inside the object here - result, err = voc.Get() + result, err = voc.Get(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -64,7 +66,7 @@ func testVersionedObjectCache(t *testing.T, voc *VersionedObjectCache, vo Versio // force a flush and see the version increase again voc.Flush() - result, err = voc.Get() + result, err = voc.Get(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -81,7 +83,8 @@ func testVersionedObjectCache(t *testing.T, voc *VersionedObjectCache, vo Versio } func testVersionedObjectCacheMap(t *testing.T, vocm *VersionedObjectCacheMap, key string, vo VersionedObject, expectedVO VersionedObject) { - result, err := vocm.Get(key) + ctx := context.Background() + result, err := vocm.Get(ctx, key) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -96,7 +99,7 @@ func testVersionedObjectCacheMap(t *testing.T, vocm *VersionedObjectCacheMap, ke t.Fatalf("Got bad result: %#v expected: %#v", vo, expectedVO) } - result2, err := vocm.Get(key) + result2, err := vocm.Get(ctx, key) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -106,7 +109,7 @@ func testVersionedObjectCacheMap(t *testing.T, vocm *VersionedObjectCacheMap, ke // force a re-get with same content, version shouldn't change vocm.cacheMap[key].timestamp = time.Time{} - result2, err = vocm.Get(key) + result2, err = vocm.Get(ctx, key) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -117,7 +120,7 @@ func testVersionedObjectCacheMap(t *testing.T, vocm *VersionedObjectCacheMap, ke // force a reget with different content, version should change vocm.cacheMap[key].timestamp = time.Time{} vocm.cacheMap[key].versionedObject.Reset() // poking inside the object here - result, err = vocm.Get(key) + result, err = vocm.Get(ctx, key) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -134,7 +137,7 @@ func 
testVersionedObjectCacheMap(t *testing.T, vocm *VersionedObjectCacheMap, ke // force a flush and see the version increase again vocm.Flush() - result, err = vocm.Get(key) + result, err = vocm.Get(ctx, key) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -162,11 +165,12 @@ func TestKnownCellsCache(t *testing.T) { } func TestKeyspacesCache(t *testing.T) { + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) - if err := ts.CreateKeyspace("ks1", &topo.Keyspace{}); err != nil { + if err := ts.CreateKeyspace(ctx, "ks1", &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } - if err := ts.CreateKeyspace("ks2", &topo.Keyspace{}); err != nil { + if err := ts.CreateKeyspace(ctx, "ks2", &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } kc := newKeyspacesCache(ts) @@ -179,13 +183,14 @@ func TestKeyspacesCache(t *testing.T) { } func TestKeyspaceCache(t *testing.T) { + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) - if err := ts.CreateKeyspace("ks1", &topo.Keyspace{ + if err := ts.CreateKeyspace(ctx, "ks1", &topo.Keyspace{ ShardingColumnName: "sharding_key", }); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } - if err := ts.CreateKeyspace("ks2", &topo.Keyspace{ + if err := ts.CreateKeyspace(ctx, "ks2", &topo.Keyspace{ SplitShardCount: 10, }); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) @@ -211,18 +216,19 @@ func TestKeyspaceCache(t *testing.T) { } func TestShardNamesCache(t *testing.T) { + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) - if err := ts.CreateKeyspace("ks1", &topo.Keyspace{ + if err := ts.CreateKeyspace(ctx, "ks1", &topo.Keyspace{ ShardingColumnName: "sharding_key", }); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } - if err := ts.CreateShard("ks1", "s1", &topo.Shard{ + if err := ts.CreateShard(ctx, "ks1", "s1", &topo.Shard{ Cells: []string{"cell1", "cell2"}, }); err != nil { t.Fatalf("CreateShard failed: %v", err) } - if err := ts.CreateShard("ks1", "s2", &topo.Shard{ + if err := ts.CreateShard(ctx, "ks1", "s2", &topo.Shard{ MasterAlias: topo.TabletAlias{ Cell: "cell1", Uid: 12, @@ -241,18 +247,19 @@ func TestShardNamesCache(t *testing.T) { } func TestShardCache(t *testing.T) { + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) - if err := ts.CreateKeyspace("ks1", &topo.Keyspace{ + if err := ts.CreateKeyspace(ctx, "ks1", &topo.Keyspace{ ShardingColumnName: "sharding_key", }); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } - if err := ts.CreateShard("ks1", "s1", &topo.Shard{ + if err := ts.CreateShard(ctx, "ks1", "s1", &topo.Shard{ Cells: []string{"cell1", "cell2"}, }); err != nil { t.Fatalf("CreateShard failed: %v", err) } - if err := ts.CreateShard("ks1", "s2", &topo.Shard{ + if err := ts.CreateShard(ctx, "ks1", "s2", &topo.Shard{ MasterAlias: topo.TabletAlias{ Cell: "cell1", Uid: 12, @@ -286,8 +293,9 @@ func TestShardCache(t *testing.T) { } func TestCellShardTabletsCache(t *testing.T) { + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) - if err := ts.UpdateShardReplicationFields("cell1", "ks1", "s1", func(sr *topo.ShardReplication) error { + if err := ts.UpdateShardReplicationFields(ctx, "cell1", "ks1", "s1", func(sr *topo.ShardReplication) error { sr.ReplicationLinks = []topo.ReplicationLink{ topo.ReplicationLink{ TabletAlias: topo.TabletAlias{ diff --git a/go/cmd/vtctld/vtctld.go 
diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go
index 7237cb7207..b57d96ff31 100644
--- a/go/cmd/vtctld/vtctld.go
+++ b/go/cmd/vtctld/vtctld.go
@@ -114,7 +114,7 @@ func main() {
 	// tablet actions
 	actionRepo.RegisterTabletAction("Ping", "",
 		func(ctx context.Context, wr *wrangler.Wrangler, tabletAlias topo.TabletAlias, r *http.Request) (string, error) {
-			ti, err := wr.TopoServer().GetTablet(tabletAlias)
+			ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias)
 			if err != nil {
 				return "", err
 			}
@@ -124,7 +124,7 @@ func main() {
 	actionRepo.RegisterTabletAction("ScrapTablet", acl.ADMIN,
 		func(ctx context.Context, wr *wrangler.Wrangler, tabletAlias topo.TabletAlias, r *http.Request) (string, error) {
 			// refuse to scrap tablets that are not spare
-			ti, err := wr.TopoServer().GetTablet(tabletAlias)
+			ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias)
 			if err != nil {
 				return "", err
 			}
@@ -137,7 +137,7 @@ func main() {
 	actionRepo.RegisterTabletAction("ScrapTabletForce", acl.ADMIN,
 		func(ctx context.Context, wr *wrangler.Wrangler, tabletAlias topo.TabletAlias, r *http.Request) (string, error) {
 			// refuse to scrap tablets that are not spare
-			ti, err := wr.TopoServer().GetTablet(tabletAlias)
+			ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias)
 			if err != nil {
 				return "", err
 			}
@@ -149,7 +149,7 @@ func main() {
 	actionRepo.RegisterTabletAction("DeleteTablet", acl.ADMIN,
 		func(ctx context.Context, wr *wrangler.Wrangler, tabletAlias topo.TabletAlias, r *http.Request) (string, error) {
-			return "", wr.DeleteTablet(tabletAlias)
+			return "", wr.DeleteTablet(ctx, tabletAlias)
 		})

 	// keyspace actions
@@ -254,7 +254,8 @@ func main() {
 	cell := parts[len(parts)-1]
 	if cell == "" {
-		cells, err := ts.GetKnownCells()
+		ctx := context.Background()
+		cells, err := ts.GetKnownCells(ctx)
 		if err != nil {
 			httpError(w, "cannot get known cells: %v", err)
 			return
@@ -263,7 +264,8 @@ func main() {
 		return
 	}

-	servingGraph := topotools.DbServingGraph(ts, cell)
+	ctx := context.Background()
+	servingGraph := topotools.DbServingGraph(ctx, ts, cell)
 	if modifyDbServingGraph != nil {
 		modifyDbServingGraph(ts, servingGraph)
 	}
@@ -292,12 +294,13 @@ func main() {
 		Error         error
 		Input, Output string
 	}
+	ctx := context.Background()
 	switch r.Method {
 	case "POST":
 		data.Input = r.FormValue("vschema")
-		data.Error = schemafier.SaveVSchema(data.Input)
+		data.Error = schemafier.SaveVSchema(ctx, data.Input)
 	}
-	vschema, err := schemafier.GetVSchema()
+	vschema, err := schemafier.GetVSchema(ctx)
 	if err != nil {
 		if data.Error == nil {
 			data.Error = fmt.Errorf("Error fetching schema: %s", err)
@@ -330,7 +333,8 @@ func main() {
 	// serve some data
 	knownCellsCache := newKnownCellsCache(ts)
 	http.HandleFunc("/json/KnownCells", func(w http.ResponseWriter, r *http.Request) {
-		result, err := knownCellsCache.Get()
+		ctx := context.Background()
+		result, err := knownCellsCache.Get(ctx)
 		if err != nil {
 			httpError(w, "error getting known cells: %v", err)
 			return
@@ -340,7 +344,8 @@ func main() {
 	keyspacesCache := newKeyspacesCache(ts)
 	http.HandleFunc("/json/Keyspaces", func(w http.ResponseWriter, r *http.Request) {
-		result, err := keyspacesCache.Get()
+		ctx := context.Background()
+		result, err := keyspacesCache.Get(ctx)
 		if err != nil {
 			httpError(w, "error getting keyspaces: %v", err)
 			return
@@ -359,7 +364,8 @@ func main() {
 		http.Error(w, "no keyspace provided", http.StatusBadRequest)
 		return
 	}
-	result, err := keyspaceCache.Get(keyspace)
+	ctx := context.Background()
+	result, err := keyspaceCache.Get(ctx, keyspace)
 	if err != nil {
 		httpError(w, "error getting keyspace: %v", err)
 		return
@@ -378,7 +384,8 @@ func main() {
 		http.Error(w, "no keyspace provided", http.StatusBadRequest)
 		return
 	}
-	result, err := shardNamesCache.Get(keyspace)
+	ctx := context.Background()
+	result, err := shardNamesCache.Get(ctx, keyspace)
 	if err != nil {
 		httpError(w, "error getting shardNames: %v", err)
 		return
@@ -402,7 +409,8 @@ func main() {
 		http.Error(w, "no shard provided", http.StatusBadRequest)
 		return
 	}
-	result, err := shardCache.Get(keyspace + "/" + shard)
+	ctx := context.Background()
+	result, err := shardCache.Get(ctx, keyspace+"/"+shard)
 	if err != nil {
 		httpError(w, "error getting shard: %v", err)
 		return
@@ -431,7 +439,8 @@ func main() {
 		http.Error(w, "no shard provided", http.StatusBadRequest)
 		return
 	}
-	result, err := cellShardTabletsCache.Get(cell + "/" + keyspace + "/" + shard)
+	ctx := context.Background()
+	result, err := cellShardTabletsCache.Get(ctx, cell+"/"+keyspace+"/"+shard)
 	if err != nil {
 		httpError(w, "error getting shard: %v", err)
 		return
diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go
index c8fc408d34..90dc8ed9eb 100644
--- a/go/cmd/vtgate/vtgate.go
+++ b/go/cmd/vtgate/vtgate.go
@@ -14,6 +14,7 @@ import (
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/vtgate"
 	"github.com/youtube/vitess/go/vt/vtgate/planbuilder"
+	"golang.org/x/net/context"
 )

 var (
@@ -61,7 +62,8 @@ func main() {
 		log.Infof("Skipping v3 initialization: topo does not support schemafier interface")
 		goto startServer
 	}
-	schemaJSON, err := schemafier.GetVSchema()
+	ctx := context.Background()
+	schemaJSON, err := schemafier.GetVSchema(ctx)
 	if err != nil {
 		log.Warningf("Skipping v3 initialization: GetVSchema failed: %v", err)
 		goto startServer
diff --git a/go/cmd/vtworker/command.go b/go/cmd/vtworker/command.go
index 4a103b1bc0..caeda196ff 100644
--- a/go/cmd/vtworker/command.go
+++ b/go/cmd/vtworker/command.go
@@ -14,6 +14,7 @@ import (
 	log "github.com/golang/glog"
 	"github.com/youtube/vitess/go/vt/worker"
 	"github.com/youtube/vitess/go/vt/wrangler"
+	"golang.org/x/net/context"
 )

 var (
@@ -23,7 +24,7 @@ var (
 type command struct {
 	Name   string
 	method func(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (worker.Worker, error)
-	interactive func(wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request)
+	interactive func(ctx context.Context, wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request)
 	params string
 	Help   string // if help is empty, won't list the command
 }
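The signature change above is what lets each vtworker command's interactive page observe cancellation. At the time of this patch, http.Request carried no context of its own (request-scoped contexts arrived in the standard library later), which is why the call sites mint context.Background() per request, as the next hunk shows. A sketch of a ctx-aware interactive callback (hypothetical, not from the patch):

    package example

    import (
    	"net/http"

    	"golang.org/x/net/context"
    )

    func interactiveExample(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    	select {
    	case <-ctx.Done(): // caller gave up; skip the expensive topo work
    		http.Error(w, ctx.Err().Error(), http.StatusRequestTimeout)
    	default:
    		w.Write([]byte("ok"))
    	}
    }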
diff --git a/go/cmd/vtworker/interactive.go b/go/cmd/vtworker/interactive.go
index 6527b27523..e10acd41ad 100644
--- a/go/cmd/vtworker/interactive.go
+++ b/go/cmd/vtworker/interactive.go
@@ -10,6 +10,7 @@ import (
 	"net/http"

 	log "github.com/golang/glog"
+	"golang.org/x/net/context"
 )

 const indexHTML = `
@@ -83,7 +84,8 @@ func initInteractiveMode() {
 			// closure.
 			pc := c
 			http.HandleFunc("/"+cg.Name+"/"+c.Name, func(w http.ResponseWriter, r *http.Request) {
-				pc.interactive(wr, w, r)
+				ctx := context.Background()
+				pc.interactive(ctx, wr, w, r)
 			})
 		}
 	}
diff --git a/go/cmd/vtworker/split_clone.go b/go/cmd/vtworker/split_clone.go
index 079762c028..cd45929191 100644
--- a/go/cmd/vtworker/split_clone.go
+++ b/go/cmd/vtworker/split_clone.go
@@ -18,6 +18,7 @@ import (
 	"github.com/youtube/vitess/go/vt/topotools"
 	"github.com/youtube/vitess/go/vt/worker"
 	"github.com/youtube/vitess/go/vt/wrangler"
+	"golang.org/x/net/context"
 )

 const splitCloneHTML = `
@@ -107,8 +108,8 @@ func commandSplitClone(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []str
 	return worker, nil
 }

-func keyspacesWithOverlappingShards(wr *wrangler.Wrangler) ([]map[string]string, error) {
-	keyspaces, err := wr.TopoServer().GetKeyspaces()
+func keyspacesWithOverlappingShards(ctx context.Context, wr *wrangler.Wrangler) ([]map[string]string, error) {
+	keyspaces, err := wr.TopoServer().GetKeyspaces(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -121,7 +122,7 @@ func keyspacesWithOverlappingShards(wr *wrangler.Wrangler) ([]map[string]string,
 		wg.Add(1)
 		go func(keyspace string) {
 			defer wg.Done()
-			osList, err := topotools.FindOverlappingShards(wr.TopoServer(), keyspace)
+			osList, err := topotools.FindOverlappingShards(ctx, wr.TopoServer(), keyspace)
 			if err != nil {
 				rec.RecordError(err)
 				return
@@ -147,7 +148,7 @@ func keyspacesWithOverlappingShards(wr *wrangler.Wrangler) ([]map[string]string,
 	return result, nil
 }

-func interactiveSplitClone(wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) {
+func interactiveSplitClone(ctx context.Context, wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) {
 	if err := r.ParseForm(); err != nil {
 		httpError(w, "cannot parse form: %s", err)
 		return
@@ -159,7 +160,7 @@ func interactiveSplitClone(wr *wrangler.Wrangler, w http.ResponseWriter, r *http
 	// display the list of possible splits to choose from
 	// (just find all the overlapping guys)
 	result := make(map[string]interface{})
-	choices, err := keyspacesWithOverlappingShards(wr)
+	choices, err := keyspacesWithOverlappingShards(ctx, wr)
 	if err != nil {
 		result["Error"] = err.Error()
 	} else {
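keyspacesWithOverlappingShards above fans out one goroutine per keyspace, joins them with a sync.WaitGroup, and collects failures through rec. A self-contained sketch of that shape (illustrative names; a mutex-guarded error slot stands in for Vitess's error recorder):

    package example

    import (
    	"sync"

    	"golang.org/x/net/context"
    )

    func scanKeyspaces(ctx context.Context, keyspaces []string, scan func(ctx context.Context, ks string) error) error {
    	var wg sync.WaitGroup
    	var mu sync.Mutex
    	var firstErr error
    	for _, keyspace := range keyspaces {
    		wg.Add(1)
    		go func(keyspace string) {
    			defer wg.Done()
    			if err := scan(ctx, keyspace); err != nil { // one ctx shared by all workers
    				mu.Lock()
    				if firstErr == nil {
    					firstErr = err
    				}
    				mu.Unlock()
    			}
    		}(keyspace)
    	}
    	wg.Wait()
    	return firstErr
    }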
diff --git a/go/cmd/vtworker/split_diff.go b/go/cmd/vtworker/split_diff.go
index 6dd7a69c6e..56b8576795 100644
--- a/go/cmd/vtworker/split_diff.go
+++ b/go/cmd/vtworker/split_diff.go
@@ -16,6 +16,7 @@ import (
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/worker"
 	"github.com/youtube/vitess/go/vt/wrangler"
+	"golang.org/x/net/context"
 )

 const splitDiffHTML = `
@@ -76,8 +77,8 @@ func commandSplitDiff(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []stri

 // shardsWithSources returns all the shards that have SourceShards set
 // with no Tables list.
-func shardsWithSources(wr *wrangler.Wrangler) ([]map[string]string, error) {
-	keyspaces, err := wr.TopoServer().GetKeyspaces()
+func shardsWithSources(ctx context.Context, wr *wrangler.Wrangler) ([]map[string]string, error) {
+	keyspaces, err := wr.TopoServer().GetKeyspaces(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -90,7 +91,7 @@ func shardsWithSources(wr *wrangler.Wrangler) ([]map[string]string, error) {
 		wg.Add(1)
 		go func(keyspace string) {
 			defer wg.Done()
-			shards, err := wr.TopoServer().GetShardNames(keyspace)
+			shards, err := wr.TopoServer().GetShardNames(ctx, keyspace)
 			if err != nil {
 				rec.RecordError(err)
 				return
@@ -99,7 +100,7 @@ func shardsWithSources(wr *wrangler.Wrangler) ([]map[string]string, error) {
 			wg.Add(1)
 			go func(keyspace, shard string) {
 				defer wg.Done()
-				si, err := wr.TopoServer().GetShard(keyspace, shard)
+				si, err := wr.TopoServer().GetShard(ctx, keyspace, shard)
 				if err != nil {
 					rec.RecordError(err)
 					return
@@ -128,7 +129,7 @@ func shardsWithSources(wr *wrangler.Wrangler) ([]map[string]string, error) {
 	return result, nil
 }

-func interactiveSplitDiff(wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) {
+func interactiveSplitDiff(ctx context.Context, wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) {
 	if err := r.ParseForm(); err != nil {
 		httpError(w, "cannot parse form: %s", err)
 		return
@@ -139,7 +140,7 @@ func interactiveSplitDiff(wr *wrangler.Wrangler, w http.ResponseWriter, r *http.
 	if keyspace == "" || shard == "" {
 		// display the list of possible shards to choose from
 		result := make(map[string]interface{})
-		shards, err := shardsWithSources(wr)
+		shards, err := shardsWithSources(ctx, wr)
 		if err != nil {
 			result["Error"] = err.Error()
 		} else {
diff --git a/go/cmd/vtworker/vertical_split_clone.go b/go/cmd/vtworker/vertical_split_clone.go
index a6f9e8cd0c..f0d10b0979 100644
--- a/go/cmd/vtworker/vertical_split_clone.go
+++ b/go/cmd/vtworker/vertical_split_clone.go
@@ -17,6 +17,7 @@ import (
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/worker"
 	"github.com/youtube/vitess/go/vt/wrangler"
+	"golang.org/x/net/context"
 )

 const (
@@ -114,8 +115,8 @@ func commandVerticalSplitClone(wr *wrangler.Wrangler, subFlags *flag.FlagSet, ar

 // keyspacesWithServedFrom returns all the keyspaces that have ServedFrom set
 // to one value.
-func keyspacesWithServedFrom(wr *wrangler.Wrangler) ([]string, error) {
-	keyspaces, err := wr.TopoServer().GetKeyspaces()
+func keyspacesWithServedFrom(ctx context.Context, wr *wrangler.Wrangler) ([]string, error) {
+	keyspaces, err := wr.TopoServer().GetKeyspaces(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -128,7 +129,7 @@ func keyspacesWithServedFrom(wr *wrangler.Wrangler) ([]string, error) {
 		wg.Add(1)
 		go func(keyspace string) {
 			defer wg.Done()
-			ki, err := wr.TopoServer().GetKeyspace(keyspace)
+			ki, err := wr.TopoServer().GetKeyspace(ctx, keyspace)
 			if err != nil {
 				rec.RecordError(err)
 				return
@@ -151,7 +152,7 @@ func keyspacesWithServedFrom(wr *wrangler.Wrangler) ([]string, error) {
 	return result, nil
 }

-func interactiveVerticalSplitClone(wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) {
+func interactiveVerticalSplitClone(ctx context.Context, wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) {
 	if err := r.ParseForm(); err != nil {
 		httpError(w, "cannot parse form: %s", err)
 		return
@@ -161,7 +162,7 @@ func interactiveVerticalSplitClone(wr *wrangler.Wrangler, w http.ResponseWriter,
 	if keyspace == "" {
 		// display the list of possible keyspaces to choose from
 		result := make(map[string]interface{})
-		keyspaces, err := keyspacesWithServedFrom(wr)
+		keyspaces, err := keyspacesWithServedFrom(ctx, wr)
 		if err != nil {
 			result["Error"] = err.Error()
 		} else {
diff --git a/go/cmd/vtworker/vertical_split_diff.go b/go/cmd/vtworker/vertical_split_diff.go
index a45ca7883d..1988f32d99 100644
--- a/go/cmd/vtworker/vertical_split_diff.go
+++ b/go/cmd/vtworker/vertical_split_diff.go
@@ -16,6 +16,7 @@ import (
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/worker"
 	"github.com/youtube/vitess/go/vt/wrangler"
+	"golang.org/x/net/context"
 )

 const verticalSplitDiffHTML = `
@@ -75,8 +76,8 @@ func commandVerticalSplitDiff(wr *wrangler.Wrangler, subFlags *flag.FlagSet, arg

 // shardsWithTablesSources returns all the shards that have SourceShards set
 // to one value, with an array of Tables.
-func shardsWithTablesSources(wr *wrangler.Wrangler) ([]map[string]string, error) {
-	keyspaces, err := wr.TopoServer().GetKeyspaces()
+func shardsWithTablesSources(ctx context.Context, wr *wrangler.Wrangler) ([]map[string]string, error) {
+	keyspaces, err := wr.TopoServer().GetKeyspaces(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -89,7 +90,7 @@ func shardsWithTablesSources(wr *wrangler.Wrangler) ([]map[string]string, error)
 		wg.Add(1)
 		go func(keyspace string) {
 			defer wg.Done()
-			shards, err := wr.TopoServer().GetShardNames(keyspace)
+			shards, err := wr.TopoServer().GetShardNames(ctx, keyspace)
 			if err != nil {
 				rec.RecordError(err)
 				return
@@ -98,7 +99,7 @@ func shardsWithTablesSources(wr *wrangler.Wrangler) ([]map[string]string, error)
 			wg.Add(1)
 			go func(keyspace, shard string) {
 				defer wg.Done()
-				si, err := wr.TopoServer().GetShard(keyspace, shard)
+				si, err := wr.TopoServer().GetShard(ctx, keyspace, shard)
 				if err != nil {
 					rec.RecordError(err)
 					return
@@ -127,7 +128,7 @@ func shardsWithTablesSources(wr *wrangler.Wrangler) ([]map[string]string, error)
 	return result, nil
 }

-func interactiveVerticalSplitDiff(wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) {
+func interactiveVerticalSplitDiff(ctx context.Context, wr *wrangler.Wrangler, w http.ResponseWriter, r *http.Request) {
 	if err := r.ParseForm(); err != nil {
 		httpError(w, "cannot parse form: %s", err)
 		return
@@ -138,7 +139,7 @@ func interactiveVerticalSplitDiff(wr *wrangler.Wrangler, w http.ResponseWriter,
 	if keyspace == "" || shard == "" {
 		// display the list of possible shards to choose from
 		result := make(map[string]interface{})
-		shards, err := shardsWithTablesSources(wr)
+		shards, err := shardsWithTablesSources(ctx, wr)
 		if err != nil {
 			result["Error"] = err.Error()
 		} else {
diff --git a/go/vt/client2/sharded.go b/go/vt/client2/sharded.go
index 45abd5773d..2ef7f08383 100644
--- a/go/vt/client2/sharded.go
+++ b/go/vt/client2/sharded.go
@@ -19,6 +19,7 @@ import (
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/zktopo"
 	"github.com/youtube/vitess/go/zk"
+	"golang.org/x/net/context"
 )

 // The sharded client handles writing to multiple shards across the
@@ -117,9 +118,10 @@ func (sc *ShardedConn) Close() error {
 }

 func (sc *ShardedConn) readKeyspace() error {
+	ctx := context.TODO()
 	sc.Close()
 	var err error
-	sc.srvKeyspace, err = sc.ts.GetSrvKeyspace(sc.cell, sc.keyspace)
+	sc.srvKeyspace, err = sc.ts.GetSrvKeyspace(ctx, sc.cell, sc.keyspace)
 	if err != nil {
 		return fmt.Errorf("vt: GetSrvKeyspace failed %v", err)
 	}
@@ -526,8 +528,9 @@ func (sc *ShardedConn) ExecuteBatch(queryList []ClientQuery, keyVal interface{})
 */

 func (sc *ShardedConn) dial(shardIdx int) (conn *tablet.VtConn, err error) {
+	ctx := context.TODO()
 	shardReference := &(sc.srvKeyspace.Partitions[sc.tabletType].ShardReferences[shardIdx])
-	addrs, err := sc.ts.GetEndPoints(sc.cell, sc.keyspace, shardReference.Name, sc.tabletType)
+	addrs, err := sc.ts.GetEndPoints(ctx, sc.cell, sc.keyspace, shardReference.Name, sc.tabletType)
 	if err != nil {
 		return nil, fmt.Errorf("vt: GetEndPoints failed %v", err)
 	}
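Note the deliberate choice of context.TODO() in sharded.go above: it is the documented placeholder for call sites whose own callers do not yet pass a context, whereas context.Background() marks an intentional root. In sketch form:

    package example

    import "golang.org/x/net/context"

    func rootOfRequest() context.Context {
    	return context.Background() // deliberate root, e.g. in main() or a handler
    }

    func notPlumbedYet() context.Context {
    	return context.TODO() // marker: thread a caller's ctx through here later
    }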
diff --git a/go/vt/etcdtopo/explorer_test.go b/go/vt/etcdtopo/explorer_test.go
index 36812b9f34..6207edac32 100644
--- a/go/vt/etcdtopo/explorer_test.go
+++ b/go/vt/etcdtopo/explorer_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/youtube/vitess/go/jscfg"
 	"github.com/youtube/vitess/go/vt/topo"
+	"golang.org/x/net/context"
 )

 func TestSplitCellPath(t *testing.T) {
@@ -78,14 +79,15 @@ func TestHandlePathKeyspace(t *testing.T) {
 	shard := &topo.Shard{}
 	want := jscfg.ToJSON(keyspace)

+	ctx := context.Background()
 	ts := newTestServer(t, cells)
-	if err := ts.CreateKeyspace("test_keyspace", keyspace); err != nil {
+	if err := ts.CreateKeyspace(ctx, "test_keyspace", keyspace); err != nil {
 		t.Fatalf("CreateKeyspace error: %v", err)
 	}
-	if err := ts.CreateShard("test_keyspace", "10-20", shard); err != nil {
+	if err := ts.CreateShard(ctx, "test_keyspace", "10-20", shard); err != nil {
 		t.Fatalf("CreateShard error: %v", err)
 	}
-	if err := ts.CreateShard("test_keyspace", "20-30", shard); err != nil {
+	if err := ts.CreateShard(ctx, "test_keyspace", "20-30", shard); err != nil {
 		t.Fatalf("CreateShard error: %v", err)
 	}
@@ -114,11 +116,12 @@ func TestHandlePathShard(t *testing.T) {
 	shard := &topo.Shard{}
 	want := jscfg.ToJSON(shard)

+	ctx := context.Background()
 	ts := newTestServer(t, cells)
-	if err := ts.CreateKeyspace("test_keyspace", keyspace); err != nil {
+	if err := ts.CreateKeyspace(ctx, "test_keyspace", keyspace); err != nil {
 		t.Fatalf("CreateKeyspace error: %v", err)
 	}
-	if err := ts.CreateShard("test_keyspace", "-80", shard); err != nil {
+	if err := ts.CreateShard(ctx, "test_keyspace", "-80", shard); err != nil {
 		t.Fatalf("CreateShard error: %v", err)
 	}
@@ -150,8 +153,9 @@ func TestHandlePathTablet(t *testing.T) {
 	}
 	want := jscfg.ToJSON(tablet)

+	ctx := context.Background()
 	ts := newTestServer(t, cells)
-	if err := ts.CreateTablet(tablet); err != nil {
+	if err := ts.CreateTablet(ctx, tablet); err != nil {
 		t.Fatalf("CreateTablet error: %v", err)
 	}
diff --git a/go/vt/etcdtopo/keyspace.go b/go/vt/etcdtopo/keyspace.go
index 9fe1e62094..cd6122527e 100644
--- a/go/vt/etcdtopo/keyspace.go
+++ b/go/vt/etcdtopo/keyspace.go
@@ -14,10 +14,11 @@ import (
 	"github.com/youtube/vitess/go/vt/concurrency"
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/topo/events"
+	"golang.org/x/net/context"
 )

 // CreateKeyspace implements topo.Server.
-func (s *Server) CreateKeyspace(keyspace string, value *topo.Keyspace) error {
+func (s *Server) CreateKeyspace(ctx context.Context, keyspace string, value *topo.Keyspace) error {
 	data := jscfg.ToJSON(value)
 	global := s.getGlobal()
@@ -44,7 +45,7 @@ func (s *Server) CreateKeyspace(keyspace string, value *topo.Keyspace) error {
 }

 // UpdateKeyspace implements topo.Server.
-func (s *Server) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) {
+func (s *Server) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) {
 	data := jscfg.ToJSON(ki.Keyspace)

 	resp, err := s.getGlobal().CompareAndSwap(keyspaceFilePath(ki.KeyspaceName()),
@@ -64,7 +65,7 @@ func (s *Server) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (i
 }

 // GetKeyspace implements topo.Server.
-func (s *Server) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) {
+func (s *Server) GetKeyspace(ctx context.Context, keyspace string) (*topo.KeyspaceInfo, error) {
 	resp, err := s.getGlobal().Get(keyspaceFilePath(keyspace), false /* sort */, false /* recursive */)
 	if err != nil {
 		return nil, convertError(err)
@@ -82,7 +83,7 @@ func (s *Server) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) {
 }
 // GetKeyspaces implements topo.Server.
-func (s *Server) GetKeyspaces() ([]string, error) {
+func (s *Server) GetKeyspaces(ctx context.Context) ([]string, error) {
 	resp, err := s.getGlobal().Get(keyspacesDirPath, true /* sort */, false /* recursive */)
 	if err != nil {
 		err = convertError(err)
@@ -95,8 +96,8 @@ func (s *Server) GetKeyspaces() ([]string, error) {
 }

 // DeleteKeyspaceShards implements topo.Server.
-func (s *Server) DeleteKeyspaceShards(keyspace string) error {
-	shards, err := s.GetShardNames(keyspace)
+func (s *Server) DeleteKeyspaceShards(ctx context.Context, keyspace string) error {
+	shards, err := s.GetShardNames(ctx, keyspace)
 	if err != nil {
 		return err
 	}
diff --git a/go/vt/etcdtopo/lock.go b/go/vt/etcdtopo/lock.go
index b3752b0538..5be0edfbfd 100644
--- a/go/vt/etcdtopo/lock.go
+++ b/go/vt/etcdtopo/lock.go
@@ -185,7 +185,7 @@ func (s *Server) LockSrvShardForAction(ctx context.Context, cellName, keyspace,
 }

 // UnlockSrvShardForAction implements topo.Server.
-func (s *Server) UnlockSrvShardForAction(cellName, keyspace, shard, actionPath, results string) error {
+func (s *Server) UnlockSrvShardForAction(ctx context.Context, cellName, keyspace, shard, actionPath, results string) error {
 	log.Infof("results of %v: %v", actionPath, results)

 	cell, err := s.getCell(cellName)
@@ -204,7 +204,7 @@ func (s *Server) LockKeyspaceForAction(ctx context.Context, keyspace, contents s
 }

 // UnlockKeyspaceForAction implements topo.Server.
-func (s *Server) UnlockKeyspaceForAction(keyspace, actionPath, results string) error {
+func (s *Server) UnlockKeyspaceForAction(ctx context.Context, keyspace, actionPath, results string) error {
 	log.Infof("results of %v: %v", actionPath, results)

 	return unlock(s.getGlobal(), keyspaceDirPath(keyspace), actionPath,
@@ -218,7 +218,7 @@ func (s *Server) LockShardForAction(ctx context.Context, keyspace, shard, conten
 }

 // UnlockShardForAction implements topo.Server.
-func (s *Server) UnlockShardForAction(keyspace, shard, actionPath, results string) error {
+func (s *Server) UnlockShardForAction(ctx context.Context, keyspace, shard, actionPath, results string) error {
 	log.Infof("results of %v: %v", actionPath, results)

 	return unlock(s.getGlobal(), shardDirPath(keyspace, shard), actionPath,
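With the hunks above, each Unlock* now mirrors its Lock* counterpart, so a single ctx can bound the whole lock/work/unlock sequence. A sketch of the resulting calling pattern (withKeyspaceLock and the string literals are illustrative, not from the patch):

    package example

    import "golang.org/x/net/context"

    type locker interface {
    	LockKeyspaceForAction(ctx context.Context, keyspace, contents string) (string, error)
    	UnlockKeyspaceForAction(ctx context.Context, keyspace, actionPath, results string) error
    }

    func withKeyspaceLock(ctx context.Context, ts locker, keyspace string, f func() error) error {
    	actionPath, err := ts.LockKeyspaceForAction(ctx, keyspace, "{}")
    	if err != nil {
    		return err
    	}
    	ferr := f()
    	// Unlock with the same ctx; its deadline bounds the cleanup too.
    	if uerr := ts.UnlockKeyspaceForAction(ctx, keyspace, actionPath, "done"); ferr == nil {
    		return uerr
    	}
    	return ferr
    }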
diff --git a/go/vt/etcdtopo/replication_graph.go b/go/vt/etcdtopo/replication_graph.go
index 6b4e444571..f63fe4eac0 100644
--- a/go/vt/etcdtopo/replication_graph.go
+++ b/go/vt/etcdtopo/replication_graph.go
@@ -10,10 +10,11 @@ import (
 	"github.com/youtube/vitess/go/jscfg"
 	"github.com/youtube/vitess/go/vt/topo"
+	"golang.org/x/net/context"
 )

 // UpdateShardReplicationFields implements topo.Server.
-func (s *Server) UpdateShardReplicationFields(cell, keyspace, shard string, updateFunc func(*topo.ShardReplication) error) error {
+func (s *Server) UpdateShardReplicationFields(ctx context.Context, cell, keyspace, shard string, updateFunc func(*topo.ShardReplication) error) error {
 	var sri *topo.ShardReplicationInfo
 	var version int64
 	var err error
@@ -82,7 +83,7 @@ func (s *Server) createShardReplication(sri *topo.ShardReplicationInfo) (int64,
 }

 // GetShardReplication implements topo.Server.
-func (s *Server) GetShardReplication(cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) {
+func (s *Server) GetShardReplication(ctx context.Context, cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) {
 	sri, _, err := s.getShardReplication(cell, keyspace, shard)
 	return sri, err
 }
@@ -110,7 +111,7 @@ func (s *Server) getShardReplication(cellName, keyspace, shard string) (*topo.Sh
 }

 // DeleteShardReplication implements topo.Server.
-func (s *Server) DeleteShardReplication(cellName, keyspace, shard string) error {
+func (s *Server) DeleteShardReplication(ctx context.Context, cellName, keyspace, shard string) error {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return err
diff --git a/go/vt/etcdtopo/server.go b/go/vt/etcdtopo/server.go
index e57bbbda4d..ca9a7fe283 100644
--- a/go/vt/etcdtopo/server.go
+++ b/go/vt/etcdtopo/server.go
@@ -25,6 +25,7 @@ import (
 	"sync"

 	"github.com/youtube/vitess/go/vt/topo"
+	"golang.org/x/net/context"
 )

 // Server is the implementation of topo.Server for etcd.
@@ -52,7 +53,7 @@ func (s *Server) Close() {
 }

 // GetKnownCells implements topo.Server.
-func (s *Server) GetKnownCells() ([]string, error) {
+func (s *Server) GetKnownCells(ctx context.Context) ([]string, error) {
 	resp, err := s.getGlobal().Get(cellsDirPath, true /* sort */, false /* recursive */)
 	if err != nil {
 		return nil, convertError(err)
diff --git a/go/vt/etcdtopo/server_test.go b/go/vt/etcdtopo/server_test.go
index e4dcf35ad8..b5f8637005 100644
--- a/go/vt/etcdtopo/server_test.go
+++ b/go/vt/etcdtopo/server_test.go
@@ -31,73 +31,83 @@ func newTestServer(t *testing.T, cells []string) *Server {
 }

 func TestKeyspace(t *testing.T) {
+	ctx := context.Background()
 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckKeyspace(t, ts)
+	test.CheckKeyspace(ctx, t, ts)
 }

 func TestShard(t *testing.T) {
+	ctx := context.Background()
 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckShard(context.Background(), t, ts)
+	test.CheckShard(ctx, t, ts)
 }

 func TestTablet(t *testing.T) {
+	ctx := context.Background()
 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckTablet(context.Background(), t, ts)
+	test.CheckTablet(ctx, t, ts)
 }

 func TestShardReplication(t *testing.T) {
+	ctx := context.Background()
 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckShardReplication(t, ts)
+	test.CheckShardReplication(ctx, t, ts)
 }

 func TestServingGraph(t *testing.T) {
+	ctx := context.Background()
 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckServingGraph(context.Background(), t, ts)
+	test.CheckServingGraph(ctx, t, ts)
 }

 func TestWatchEndPoints(t *testing.T) {
+	ctx := context.Background()
 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckWatchEndPoints(context.Background(), t, ts)
+	test.CheckWatchEndPoints(ctx, t, ts)
 }

 func TestKeyspaceLock(t *testing.T) {
+	ctx := context.Background()
 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckKeyspaceLock(t, ts)
+	test.CheckKeyspaceLock(ctx, t, ts)
 }

 func TestShardLock(t *testing.T) {
+	ctx := context.Background()
 	if testing.Short() {
 		t.Skip("skipping wait-based test in short mode.")
 	}

 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckShardLock(t, ts)
+	test.CheckShardLock(ctx, t, ts)
 }

 func TestSrvShardLock(t *testing.T) {
+	ctx := context.Background()
 	if testing.Short() {
 		t.Skip("skipping wait-based test in short mode.")
 	}

 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckSrvShardLock(t, ts)
+	test.CheckSrvShardLock(ctx, t, ts)
 }

 func TestVSchema(t *testing.T) {
+	ctx := context.Background()
 	if testing.Short() {
 		t.Skip("skipping wait-based test in short mode.")
 	}

 	ts := newTestServer(t, []string{"test"})
 	defer ts.Close()
-	test.CheckVSchema(t, ts)
+	test.CheckVSchema(ctx, t, ts)
 }
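Every test above now repeats the same three setup lines. A hypothetical helper that would factor them out (newServer stands in for the file's newTestServer; not part of this patch):

    package example

    import (
    	"testing"

    	"golang.org/x/net/context"
    )

    type topoServer interface{ Close() }

    // runTopoTest owns the ctx/server/teardown boilerplate shared by the tests.
    func runTopoTest(t *testing.T, newServer func(t *testing.T) topoServer,
    	check func(ctx context.Context, t *testing.T, ts topoServer)) {
    	ctx := context.Background()
    	ts := newServer(t)
    	defer ts.Close()
    	check(ctx, t, ts)
    }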
diff --git a/go/vt/etcdtopo/serving_graph.go b/go/vt/etcdtopo/serving_graph.go
index 6484dd39b8..f47aade4a9 100644
--- a/go/vt/etcdtopo/serving_graph.go
+++ b/go/vt/etcdtopo/serving_graph.go
@@ -14,6 +14,7 @@ import (
 	log "github.com/golang/glog"
 	"github.com/youtube/vitess/go/jscfg"
 	"github.com/youtube/vitess/go/vt/topo"
+	"golang.org/x/net/context"
 )

 // WatchSleepDuration is how many seconds interval to poll for in case
@@ -22,7 +23,7 @@ import (
 var WatchSleepDuration = 30 * time.Second

 // GetSrvTabletTypesPerShard implements topo.Server.
-func (s *Server) GetSrvTabletTypesPerShard(cellName, keyspace, shard string) ([]topo.TabletType, error) {
+func (s *Server) GetSrvTabletTypesPerShard(ctx context.Context, cellName, keyspace, shard string) ([]topo.TabletType, error) {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return nil, err
@@ -44,7 +45,7 @@ func (s *Server) GetSrvTabletTypesPerShard(cellName, keyspace, shard string) ([]
 }

 // UpdateEndPoints implements topo.Server.
-func (s *Server) UpdateEndPoints(cellName, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error {
+func (s *Server) UpdateEndPoints(ctx context.Context, cellName, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return err
@@ -71,7 +72,7 @@ func (s *Server) updateEndPoints(cellName, keyspace, shard string, tabletType to
 }

 // GetEndPoints implements topo.Server.
-func (s *Server) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) {
+func (s *Server) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) {
 	value, _, err := s.getEndPoints(cell, keyspace, shard, tabletType)
 	return value, err
 }
@@ -100,7 +101,7 @@ func (s *Server) getEndPoints(cellName, keyspace, shard string, tabletType topo.
 }

 // DeleteEndPoints implements topo.Server.
-func (s *Server) DeleteEndPoints(cellName, keyspace, shard string, tabletType topo.TabletType) error {
+func (s *Server) DeleteEndPoints(ctx context.Context, cellName, keyspace, shard string, tabletType topo.TabletType) error {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return err
@@ -111,7 +112,7 @@ func (s *Server) DeleteEndPoints(cellName, keyspace, shard string, tabletType to
 }

 // UpdateSrvShard implements topo.Server.
-func (s *Server) UpdateSrvShard(cellName, keyspace, shard string, srvShard *topo.SrvShard) error {
+func (s *Server) UpdateSrvShard(ctx context.Context, cellName, keyspace, shard string, srvShard *topo.SrvShard) error {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return err
@@ -124,7 +125,7 @@ func (s *Server) UpdateSrvShard(cellName, keyspace, shard string, srvShard *topo
 }

 // GetSrvShard implements topo.Server.
-func (s *Server) GetSrvShard(cellName, keyspace, shard string) (*topo.SrvShard, error) {
+func (s *Server) GetSrvShard(ctx context.Context, cellName, keyspace, shard string) (*topo.SrvShard, error) {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return nil, err
@@ -146,7 +147,7 @@ func (s *Server) GetSrvShard(cellName, keyspace, shard string) (*topo.SrvShard,
 }

 // DeleteSrvShard implements topo.Server.
-func (s *Server) DeleteSrvShard(cellName, keyspace, shard string) error {
+func (s *Server) DeleteSrvShard(ctx context.Context, cellName, keyspace, shard string) error {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return err
@@ -157,7 +158,7 @@ func (s *Server) DeleteSrvShard(cellName, keyspace, shard string) error {
 }

 // UpdateSrvKeyspace implements topo.Server.
-func (s *Server) UpdateSrvKeyspace(cellName, keyspace string, srvKeyspace *topo.SrvKeyspace) error {
+func (s *Server) UpdateSrvKeyspace(ctx context.Context, cellName, keyspace string, srvKeyspace *topo.SrvKeyspace) error {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return err
@@ -170,7 +171,7 @@ func (s *Server) UpdateSrvKeyspace(cellName, keyspace string, srvKeyspace *topo.
 }

 // GetSrvKeyspace implements topo.Server.
-func (s *Server) GetSrvKeyspace(cellName, keyspace string) (*topo.SrvKeyspace, error) {
+func (s *Server) GetSrvKeyspace(ctx context.Context, cellName, keyspace string) (*topo.SrvKeyspace, error) {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return nil, err
@@ -192,7 +193,7 @@ func (s *Server) GetSrvKeyspace(cellName, keyspace string) (*topo.SrvKeyspace, e
 }

 // GetSrvKeyspaceNames implements topo.Server.
-func (s *Server) GetSrvKeyspaceNames(cellName string) ([]string, error) {
+func (s *Server) GetSrvKeyspaceNames(ctx context.Context, cellName string) ([]string, error) {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return nil, err
@@ -206,7 +207,7 @@ func (s *Server) GetSrvKeyspaceNames(cellName string) ([]string, error) {
 }

 // UpdateTabletEndpoint implements topo.Server.
-func (s *Server) UpdateTabletEndpoint(cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error {
+func (s *Server) UpdateTabletEndpoint(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error {
 	for {
 		addrs, version, err := s.getEndPoints(cell, keyspace, shard, tabletType)
 		if err == topo.ErrNoNode {
@@ -239,7 +240,7 @@ func (s *Server) UpdateTabletEndpoint(cell, keyspace, shard string, tabletType t
 }

 // WatchEndPoints is part of the topo.Server interface
-func (s *Server) WatchEndPoints(cellName, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) {
+func (s *Server) WatchEndPoints(ctx context.Context, cellName, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return nil, nil, fmt.Errorf("WatchEndPoints cannot get cell: %v", err)
diff --git a/go/vt/etcdtopo/shard.go b/go/vt/etcdtopo/shard.go
index a6bd3e08a6..6d48a1d5b4 100644
--- a/go/vt/etcdtopo/shard.go
+++ b/go/vt/etcdtopo/shard.go
@@ -12,10 +12,11 @@ import (
 	"github.com/youtube/vitess/go/jscfg"
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/topo/events"
+	"golang.org/x/net/context"
 )

 // CreateShard implements topo.Server.
-func (s *Server) CreateShard(keyspace, shard string, value *topo.Shard) error {
+func (s *Server) CreateShard(ctx context.Context, keyspace, shard string, value *topo.Shard) error {
 	data := jscfg.ToJSON(value)
 	global := s.getGlobal()
@@ -42,7 +43,7 @@ func (s *Server) CreateShard(keyspace, shard string, value *topo.Shard) error {
 }
 // UpdateShard implements topo.Server.
-func (s *Server) UpdateShard(si *topo.ShardInfo, existingVersion int64) (int64, error) {
+func (s *Server) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVersion int64) (int64, error) {
 	data := jscfg.ToJSON(si.Shard)

 	resp, err := s.getGlobal().CompareAndSwap(shardFilePath(si.Keyspace(), si.ShardName()),
@@ -62,13 +63,13 @@ func (s *Server) UpdateShard(si *topo.ShardInfo, existingVersion int64) (int64,
 }

 // ValidateShard implements topo.Server.
-func (s *Server) ValidateShard(keyspace, shard string) error {
-	_, err := s.GetShard(keyspace, shard)
+func (s *Server) ValidateShard(ctx context.Context, keyspace, shard string) error {
+	_, err := s.GetShard(ctx, keyspace, shard)
 	return err
 }

 // GetShard implements topo.Server.
-func (s *Server) GetShard(keyspace, shard string) (*topo.ShardInfo, error) {
+func (s *Server) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) {
 	resp, err := s.getGlobal().Get(shardFilePath(keyspace, shard), false /* sort */, false /* recursive */)
 	if err != nil {
 		return nil, convertError(err)
@@ -86,7 +87,7 @@ func (s *Server) GetShard(keyspace, shard string) (*topo.ShardInfo, error) {
 }

 // GetShardNames implements topo.Server.
-func (s *Server) GetShardNames(keyspace string) ([]string, error) {
+func (s *Server) GetShardNames(ctx context.Context, keyspace string) ([]string, error) {
 	resp, err := s.getGlobal().Get(shardsDirPath(keyspace), true /* sort */, false /* recursive */)
 	if err != nil {
 		return nil, convertError(err)
@@ -95,7 +96,7 @@ func (s *Server) GetShardNames(keyspace string) ([]string, error) {
 }

 // DeleteShard implements topo.Server.
-func (s *Server) DeleteShard(keyspace, shard string) error {
+func (s *Server) DeleteShard(ctx context.Context, keyspace, shard string) error {
 	_, err := s.getGlobal().Delete(shardDirPath(keyspace, shard), true /* recursive */)
 	if err != nil {
 		return convertError(err)
diff --git a/go/vt/etcdtopo/tablet.go b/go/vt/etcdtopo/tablet.go
index 0e8a99f224..1a440792a9 100644
--- a/go/vt/etcdtopo/tablet.go
+++ b/go/vt/etcdtopo/tablet.go
@@ -12,10 +12,11 @@ import (
 	"github.com/youtube/vitess/go/jscfg"
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/topo/events"
+	"golang.org/x/net/context"
 )

 // CreateTablet implements topo.Server.
-func (s *Server) CreateTablet(tablet *topo.Tablet) error {
+func (s *Server) CreateTablet(ctx context.Context, tablet *topo.Tablet) error {
 	cell, err := s.getCell(tablet.Alias.Cell)
 	if err != nil {
 		return err
@@ -35,7 +36,7 @@ func (s *Server) CreateTablet(tablet *topo.Tablet) error {
 }

 // UpdateTablet implements topo.Server.
-func (s *Server) UpdateTablet(ti *topo.TabletInfo, existingVersion int64) (int64, error) {
+func (s *Server) UpdateTablet(ctx context.Context, ti *topo.TabletInfo, existingVersion int64) (int64, error) {
 	cell, err := s.getCell(ti.Alias.Cell)
 	if err != nil {
 		return -1, err
@@ -59,18 +60,18 @@ func (s *Server) UpdateTablet(ti *topo.TabletInfo, existingVersion int64) (int64
 }
 // UpdateTabletFields implements topo.Server.
-func (s *Server) UpdateTabletFields(tabletAlias topo.TabletAlias, updateFunc func(*topo.Tablet) error) error {
+func (s *Server) UpdateTabletFields(ctx context.Context, tabletAlias topo.TabletAlias, updateFunc func(*topo.Tablet) error) error {
 	var ti *topo.TabletInfo
 	var err error

 	for {
-		if ti, err = s.GetTablet(tabletAlias); err != nil {
+		if ti, err = s.GetTablet(ctx, tabletAlias); err != nil {
 			return err
 		}
 		if err = updateFunc(ti.Tablet); err != nil {
 			return err
 		}
-		if _, err = s.UpdateTablet(ti, ti.Version()); err != topo.ErrBadVersion {
+		if _, err = s.UpdateTablet(ctx, ti, ti.Version()); err != topo.ErrBadVersion {
 			break
 		}
 	}
@@ -86,14 +87,14 @@ func (s *Server) UpdateTabletFields(tabletAlias topo.TabletAlias, updateFunc fun
 }

 // DeleteTablet implements topo.Server.
-func (s *Server) DeleteTablet(tabletAlias topo.TabletAlias) error {
+func (s *Server) DeleteTablet(ctx context.Context, tabletAlias topo.TabletAlias) error {
 	cell, err := s.getCell(tabletAlias.Cell)
 	if err != nil {
 		return err
 	}

 	// Get the keyspace and shard names for the TabletChange event.
-	ti, tiErr := s.GetTablet(tabletAlias)
+	ti, tiErr := s.GetTablet(ctx, tabletAlias)

 	_, err = cell.Delete(tabletDirPath(tabletAlias.String()), true /* recursive */)
 	if err != nil {
@@ -116,13 +117,13 @@ func (s *Server) DeleteTablet(tabletAlias topo.TabletAlias) error {
 }

 // ValidateTablet implements topo.Server.
-func (s *Server) ValidateTablet(tabletAlias topo.TabletAlias) error {
-	_, err := s.GetTablet(tabletAlias)
+func (s *Server) ValidateTablet(ctx context.Context, tabletAlias topo.TabletAlias) error {
+	_, err := s.GetTablet(ctx, tabletAlias)
 	return err
 }

 // GetTablet implements topo.Server.
-func (s *Server) GetTablet(tabletAlias topo.TabletAlias) (*topo.TabletInfo, error) {
+func (s *Server) GetTablet(ctx context.Context, tabletAlias topo.TabletAlias) (*topo.TabletInfo, error) {
 	cell, err := s.getCell(tabletAlias.Cell)
 	if err != nil {
 		return nil, err
@@ -145,7 +146,7 @@ func (s *Server) GetTablet(tabletAlias topo.TabletAlias) (*topo.TabletInfo, erro
 }

 // GetTabletsByCell implements topo.Server.
-func (s *Server) GetTabletsByCell(cellName string) ([]topo.TabletAlias, error) {
+func (s *Server) GetTabletsByCell(ctx context.Context, cellName string) ([]topo.TabletAlias, error) {
 	cell, err := s.getCell(cellName)
 	if err != nil {
 		return nil, err
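UpdateTabletFields above is a read-modify-CompareAndSwap loop that retries as long as the swap fails with topo.ErrBadVersion; the patch threads ctx through both the read and the write. A generic sketch of that optimistic-concurrency pattern (illustrative names; errBadVersion plays the role of topo.ErrBadVersion):

    package example

    import (
    	"errors"

    	"golang.org/x/net/context"
    )

    var errBadVersion = errors.New("bad version")

    type store interface {
    	Get(ctx context.Context) (value string, version int64, err error)
    	CompareAndSwap(ctx context.Context, value string, version int64) error
    }

    // updateField retries until the CAS wins or a non-retryable error occurs.
    func updateField(ctx context.Context, s store, mutate func(string) string) error {
    	for {
    		v, version, err := s.Get(ctx)
    		if err != nil {
    			return err
    		}
    		err = s.CompareAndSwap(ctx, mutate(v), version)
    		if err != errBadVersion {
    			return err // nil on success, or a real error
    		}
    		// lost the race: re-read and try again
    	}
    }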
diff --git a/go/vt/etcdtopo/vschema.go b/go/vt/etcdtopo/vschema.go
index 7a37234f4f..9145996c63 100644
--- a/go/vt/etcdtopo/vschema.go
+++ b/go/vt/etcdtopo/vschema.go
@@ -3,6 +3,7 @@ package etcdtopo
 import (
 	"github.com/youtube/vitess/go/vt/topo"
 	"github.com/youtube/vitess/go/vt/vtgate/planbuilder"
+	"golang.org/x/net/context"
 	// vindexes needs to be imported so that they register
 	// themselves against vtgate/planbuilder. This will allow
 	// us to sanity check the schema being uploaded.
@@ -14,7 +15,7 @@
 This file contains the vschema management code for etcdtopo.Server
 */

 // SaveVSchema saves the JSON vschema into the topo.
-func (s *Server) SaveVSchema(vschema string) error {
+func (s *Server) SaveVSchema(ctx context.Context, vschema string) error {
 	_, err := planbuilder.NewSchema([]byte(vschema))
 	if err != nil {
 		return err
@@ -28,7 +29,7 @@
 }

 // GetVSchema fetches the JSON vschema from the topo.
-func (s *Server) GetVSchema() (string, error) {
+func (s *Server) GetVSchema(ctx context.Context) (string, error) {
 	resp, err := s.getGlobal().Get(vschemaPath, false /* sort */, false /* recursive */)
 	if err != nil {
 		err = convertError(err)
diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go
index e61baea401..aeb593b9ed 100644
--- a/go/vt/schemamanager/schemamanager_test.go
+++ b/go/vt/schemamanager/schemamanager_test.go
@@ -249,7 +249,7 @@ func newFakeTopo() *fakeTopo {
 	return &fakeTopo{}
 }

-func (topoServer *fakeTopo) GetShardNames(keyspace string) ([]string, error) {
+func (topoServer *fakeTopo) GetShardNames(ctx context.Context, keyspace string) ([]string, error) {
 	if keyspace != "test_keyspace" {
 		return nil, fmt.Errorf("expect to get keyspace: test_keyspace, but got: %s", keyspace)
@@ -257,7 +257,7 @@ func (topoServer *fakeTopo) GetShardNames(keyspace string) ([]string, error) {
 	return []string{"0", "1", "2"}, nil
 }

-func (topoServer *fakeTopo) GetShard(keyspace string, shard string) (*topo.ShardInfo, error) {
+func (topoServer *fakeTopo) GetShard(ctx context.Context, keyspace string, shard string) (*topo.ShardInfo, error) {
 	value := &topo.Shard{
 		MasterAlias: topo.TabletAlias{
 			Cell: "test_cell",
@@ -267,7 +267,7 @@
 	return topo.NewShardInfo(keyspace, shard, value, 0), nil
 }

-func (topoServer *fakeTopo) GetTablet(tabletAlias topo.TabletAlias) (*topo.TabletInfo, error) {
+func (topoServer *fakeTopo) GetTablet(ctx context.Context, tabletAlias topo.TabletAlias) (*topo.TabletInfo, error) {
 	return &topo.TabletInfo{
 		Tablet: &topo.Tablet{
 			Alias: tabletAlias,
@@ -276,89 +276,89 @@
 	}, nil
 }

-func (topoServer *fakeTopo) GetSrvKeyspaceNames(cell string) ([]string, error) {
+func (topoServer *fakeTopo) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error) {
+func (topoServer *fakeTopo) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) {
+func (topoServer *fakeTopo) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) {
 	return nil, fmt.Errorf("not implemented")
 }

 func (topoServer *fakeTopo) Close() {}

-func (topoServer *fakeTopo) GetKnownCells() ([]string, error) {
+func (topoServer *fakeTopo) GetKnownCells(ctx context.Context) ([]string, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) CreateKeyspace(keyspace string, value *topo.Keyspace) error {
+func (topoServer *fakeTopo) CreateKeyspace(ctx context.Context, keyspace string, value *topo.Keyspace) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) {
+func (topoServer *fakeTopo) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) {
 	return 0, fmt.Errorf("not implemented")
 }
-func (topoServer *fakeTopo) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) {
+func (topoServer *fakeTopo) GetKeyspace(ctx context.Context, keyspace string) (*topo.KeyspaceInfo, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) GetKeyspaces() ([]string, error) {
+func (topoServer *fakeTopo) GetKeyspaces(ctx context.Context) ([]string, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) DeleteKeyspaceShards(keyspace string) error {
+func (topoServer *fakeTopo) DeleteKeyspaceShards(ctx context.Context, keyspace string) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) CreateShard(keyspace, shard string, value *topo.Shard) error {
+func (topoServer *fakeTopo) CreateShard(ctx context.Context, keyspace, shard string, value *topo.Shard) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateShard(si *topo.ShardInfo, existingVersion int64) (int64, error) {
+func (topoServer *fakeTopo) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVersion int64) (int64, error) {
 	return 0, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) ValidateShard(keyspace, shard string) error {
+func (topoServer *fakeTopo) ValidateShard(ctx context.Context, keyspace, shard string) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) DeleteShard(keyspace, shard string) error {
+func (topoServer *fakeTopo) DeleteShard(ctx context.Context, keyspace, shard string) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) CreateTablet(tablet *topo.Tablet) error {
+func (topoServer *fakeTopo) CreateTablet(ctx context.Context, tablet *topo.Tablet) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateTablet(tablet *topo.TabletInfo, existingVersion int64) (newVersion int64, err error) {
+func (topoServer *fakeTopo) UpdateTablet(ctx context.Context, tablet *topo.TabletInfo, existingVersion int64) (newVersion int64, err error) {
 	return 0, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateTabletFields(tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error {
+func (topoServer *fakeTopo) UpdateTabletFields(ctx context.Context, tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) DeleteTablet(alias topo.TabletAlias) error {
+func (topoServer *fakeTopo) DeleteTablet(ctx context.Context, alias topo.TabletAlias) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) GetTabletsByCell(cell string) ([]topo.TabletAlias, error) {
+func (topoServer *fakeTopo) GetTabletsByCell(ctx context.Context, cell string) ([]topo.TabletAlias, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateShardReplicationFields(cell, keyspace, shard string, update func(*topo.ShardReplication) error) error {
+func (topoServer *fakeTopo) UpdateShardReplicationFields(ctx context.Context, cell, keyspace, shard string, update func(*topo.ShardReplication) error) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) GetShardReplication(cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) {
+func (topoServer *fakeTopo) GetShardReplication(ctx context.Context, cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) DeleteShardReplication(cell, keyspace, shard string) error {
+func (topoServer *fakeTopo) DeleteShardReplication(ctx context.Context, cell, keyspace, shard string) error {
 	return fmt.Errorf("not implemented")
 }
@@ -366,43 +366,43 @@ func (topoServer *fakeTopo) LockSrvShardForAction(ctx context.Context, cell, key
 	return "", fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results string) error {
+func (topoServer *fakeTopo) UnlockSrvShardForAction(ctx context.Context, cell, keyspace, shard, lockPath, results string) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) GetSrvTabletTypesPerShard(cell, keyspace, shard string) ([]topo.TabletType, error) {
+func (topoServer *fakeTopo) GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]topo.TabletType, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateEndPoints(cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error {
+func (topoServer *fakeTopo) UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) DeleteEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) error {
+func (topoServer *fakeTopo) DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) WatchEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) {
+func (topoServer *fakeTopo) WatchEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) {
 	return nil, nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateSrvShard(cell, keyspace, shard string, srvShard *topo.SrvShard) error {
+func (topoServer *fakeTopo) UpdateSrvShard(ctx context.Context, cell, keyspace, shard string, srvShard *topo.SrvShard) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) GetSrvShard(cell, keyspace, shard string) (*topo.SrvShard, error) {
+func (topoServer *fakeTopo) GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) {
 	return nil, fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) DeleteSrvShard(cell, keyspace, shard string) error {
+func (topoServer *fakeTopo) DeleteSrvShard(ctx context.Context, cell, keyspace, shard string) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateSrvKeyspace(cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error {
+func (topoServer *fakeTopo) UpdateSrvKeyspace(ctx context.Context, cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error {
 	return fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UpdateTabletEndpoint(cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error {
+func (topoServer *fakeTopo) UpdateTabletEndpoint(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error {
 	return fmt.Errorf("not implemented")
 }

@@ -410,7 +410,7 @@ func (topoServer *fakeTopo) LockKeyspaceForAction(ctx context.Context, keyspace,
 	return "", fmt.Errorf("not implemented")
 }

-func (topoServer *fakeTopo) UnlockKeyspaceForAction(keyspace, lockPath, results string) error {
+func (topoServer *fakeTopo) UnlockKeyspaceForAction(ctx context.Context, keyspace, lockPath, results string) error {
 	return fmt.Errorf("not implemented")
 }

@@ -418,7 +418,7 @@ func (topoServer *fakeTopo) LockShardForAction(ctx context.Context, keyspace, sh
return "", fmt.Errorf("not implemented") } -func (topoServer *fakeTopo) UnlockShardForAction(keyspace, shard, lockPath, results string) error { +func (topoServer *fakeTopo) UnlockShardForAction(ctx context.Context, keyspace, shard, lockPath, results string) error { return fmt.Errorf("not implemented") } diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 089e15ee59..6369c14c50 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -38,22 +38,23 @@ func NewTabletExecutor( // Open opens a connection to the master for every shard func (exec *TabletExecutor) Open(keyspace string) error { + ctx := context.TODO() if !exec.isClosed { return nil } - shardNames, err := exec.topoServer.GetShardNames(keyspace) + shardNames, err := exec.topoServer.GetShardNames(ctx, keyspace) if err != nil { return fmt.Errorf("unable to get shard names for keyspace: %s, error: %v", keyspace, err) } log.Infof("Keyspace: %v, Shards: %v\n", keyspace, shardNames) exec.tabletInfos = make([]*topo.TabletInfo, len(shardNames)) for i, shardName := range shardNames { - shardInfo, err := exec.topoServer.GetShard(keyspace, shardName) + shardInfo, err := exec.topoServer.GetShard(ctx, keyspace, shardName) log.Infof("\tShard: %s, ShardInfo: %v\n", shardName, shardInfo) if err != nil { return fmt.Errorf("unable to get shard info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err) } - tabletInfo, err := exec.topoServer.GetTablet(shardInfo.MasterAlias) + tabletInfo, err := exec.topoServer.GetTablet(ctx, shardInfo.MasterAlias) if err != nil { return fmt.Errorf("unable to get master tablet info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err) } diff --git a/go/vt/tabletmanager/actionnode/utils.go b/go/vt/tabletmanager/actionnode/utils.go index 733af00821..4deec0441a 100644 --- a/go/vt/tabletmanager/actionnode/utils.go +++ b/go/vt/tabletmanager/actionnode/utils.go @@ -54,7 +54,7 @@ func (n *ActionNode) UnlockKeyspace(ctx context.Context, ts topo.Server, keyspac n.Error = "" n.State = ActionStateDone } - err := ts.UnlockKeyspaceForAction(keyspace, lockPath, n.ToJSON()) + err := ts.UnlockKeyspaceForAction(ctx, keyspace, lockPath, n.ToJSON()) if actionError != nil { if err != nil { // this will be masked @@ -99,7 +99,7 @@ func (n *ActionNode) UnlockShard(ctx context.Context, ts topo.Server, keyspace, n.Error = "" n.State = ActionStateDone } - err := ts.UnlockShardForAction(keyspace, shard, lockPath, n.ToJSON()) + err := ts.UnlockShardForAction(ctx, keyspace, shard, lockPath, n.ToJSON()) if actionError != nil { if err != nil { // this will be masked @@ -146,7 +146,7 @@ func (n *ActionNode) UnlockSrvShard(ctx context.Context, ts topo.Server, cell, k n.Error = "" n.State = ActionStateDone } - err := ts.UnlockSrvShardForAction(cell, keyspace, shard, lockPath, n.ToJSON()) + err := ts.UnlockSrvShardForAction(ctx, cell, keyspace, shard, lockPath, n.ToJSON()) if actionError != nil { if err != nil { // this will be masked diff --git a/go/vt/tabletmanager/after_action.go b/go/vt/tabletmanager/after_action.go index 6a99bc43ea..8c9fd87bf5 100644 --- a/go/vt/tabletmanager/after_action.go +++ b/go/vt/tabletmanager/after_action.go @@ -174,7 +174,7 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl // for binlog replication, only if source shards are set. 
diff --git a/go/vt/tabletmanager/after_action.go b/go/vt/tabletmanager/after_action.go
index 6a99bc43ea..8c9fd87bf5 100644
--- a/go/vt/tabletmanager/after_action.go
+++ b/go/vt/tabletmanager/after_action.go
@@ -174,7 +174,7 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl
 	// for binlog replication, only if source shards are set.
 	var keyspaceInfo *topo.KeyspaceInfo
 	if newTablet.Type == topo.TYPE_MASTER && shardInfo != nil && len(shardInfo.SourceShards) > 0 {
-		keyspaceInfo, err = agent.TopoServer.GetKeyspace(newTablet.Keyspace)
+		keyspaceInfo, err = agent.TopoServer.GetKeyspace(ctx, newTablet.Keyspace)
 		if err != nil {
 			log.Errorf("Cannot read keyspace for this tablet %v: %v", newTablet.Alias, err)
 			keyspaceInfo = nil
@@ -227,7 +227,7 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl
 	// See if we need to start or stop any binlog player
 	if agent.BinlogPlayerMap != nil {
 		if newTablet.Type == topo.TYPE_MASTER {
-			agent.BinlogPlayerMap.RefreshMap(newTablet, keyspaceInfo, shardInfo)
+			agent.BinlogPlayerMap.RefreshMap(ctx, newTablet, keyspaceInfo, shardInfo)
 		} else {
 			agent.BinlogPlayerMap.StopAllPlayersAndReset()
 		}
diff --git a/go/vt/tabletmanager/agent.go b/go/vt/tabletmanager/agent.go
index d928b844b0..a1c4d39430 100644
--- a/go/vt/tabletmanager/agent.go
+++ b/go/vt/tabletmanager/agent.go
@@ -179,7 +179,7 @@ func NewActionAgent(
 		}
 	}

-	if err := agent.Start(mysqlPort, port, securePort); err != nil {
+	if err := agent.Start(batchCtx, mysqlPort, port, securePort); err != nil {
 		return nil, err
 	}
@@ -194,7 +194,7 @@ func NewActionAgent(
 	go func() {
 		// restoreFromBackup will just be a regular action
 		// (same as if it was triggered remotely)
-		if err := agent.RestoreFromBackup(); err != nil {
+		if err := agent.RestoreFromBackup(batchCtx); err != nil {
 			println(fmt.Sprintf("RestoreFromBackup failed: %v", err))
 			log.Fatalf("RestoreFromBackup failed: %v", err)
 		}
@@ -228,7 +228,7 @@ func NewTestActionAgent(batchCtx context.Context, ts topo.Server, tabletAlias to
 		_healthy:        fmt.Errorf("healthcheck not run yet"),
 		healthStreamMap: make(map[int]chan<- *actionnode.HealthStreamReply),
 	}
-	if err := agent.Start(0, port, 0); err != nil {
+	if err := agent.Start(batchCtx, 0, port, 0); err != nil {
 		panic(fmt.Errorf("agent.Start(%v) failed: %v", tabletAlias, err))
 	}
 	return agent
@@ -359,13 +359,13 @@ func (agent *ActionAgent) refreshTablet(ctx context.Context, reason string) erro
 	return nil
 }

-func (agent *ActionAgent) verifyTopology() error {
+func (agent *ActionAgent) verifyTopology(ctx context.Context) error {
 	tablet := agent.Tablet()
 	if tablet == nil {
 		return fmt.Errorf("agent._tablet is nil")
 	}

-	if err := topo.Validate(agent.TopoServer, agent.TabletAlias); err != nil {
+	if err := topo.Validate(ctx, agent.TopoServer, agent.TabletAlias); err != nil {
 		// Don't stop, it's not serious enough, this is likely transient.
 		log.Warningf("tablet validate failed: %v %v", agent.TabletAlias, err)
 	}
@@ -373,7 +373,7 @@ func (agent *ActionAgent) verifyTopology() error {
 	return nil
 }

-func (agent *ActionAgent) verifyServingAddrs() error {
+func (agent *ActionAgent) verifyServingAddrs(ctx context.Context) error {
 	if !agent.Tablet().IsRunningQueryService() {
 		return nil
 	}
@@ -383,12 +383,12 @@ func (agent *ActionAgent) verifyServingAddrs() error {
 	if err != nil {
 		return err
 	}
-	return agent.TopoServer.UpdateTabletEndpoint(agent.Tablet().Tablet.Alias.Cell, agent.Tablet().Keyspace, agent.Tablet().Shard, agent.Tablet().Type, addr)
+	return agent.TopoServer.UpdateTabletEndpoint(ctx, agent.Tablet().Tablet.Alias.Cell, agent.Tablet().Keyspace, agent.Tablet().Shard, agent.Tablet().Type, addr)
}
-func (agent *ActionAgent) Start(mysqlPort, vtPort, vtsPort int) error { +func (agent *ActionAgent) Start(ctx context.Context, mysqlPort, vtPort, vtsPort int) error { var err error if _, err = agent.readTablet(context.TODO()); err != nil { return err @@ -428,25 +428,25 @@ func (agent *ActionAgent) Start(mysqlPort, vtPort, vtsPort int) error { } return nil } - if err := agent.TopoServer.UpdateTabletFields(agent.Tablet().Alias, f); err != nil { + if err := agent.TopoServer.UpdateTabletFields(ctx, agent.Tablet().Alias, f); err != nil { return err } // Reread to get the changes we just made - if _, err := agent.readTablet(context.TODO()); err != nil { + if _, err := agent.readTablet(ctx); err != nil { return err } - if err = agent.verifyTopology(); err != nil { + if err = agent.verifyTopology(ctx); err != nil { return err } - if err = agent.verifyServingAddrs(); err != nil { + if err = agent.verifyServingAddrs(ctx); err != nil { return err } oldTablet := &topo.Tablet{} - if err = agent.updateState(context.TODO(), oldTablet, "Start"); err != nil { + if err = agent.updateState(ctx, oldTablet, "Start"); err != nil { log.Warningf("Initial updateState failed, will need a state change before running properly: %v", err) } return nil diff --git a/go/vt/tabletmanager/agent_rpc_actions.go b/go/vt/tabletmanager/agent_rpc_actions.go index 3d2dd688bd..0da8474914 100644 --- a/go/vt/tabletmanager/agent_rpc_actions.go +++ b/go/vt/tabletmanager/agent_rpc_actions.go @@ -374,7 +374,7 @@ func (agent *ActionAgent) StartBlp(ctx context.Context) error { if agent.BinlogPlayerMap == nil { return fmt.Errorf("No BinlogPlayerMap configured") } - agent.BinlogPlayerMap.Start() + agent.BinlogPlayerMap.Start(agent.batchCtx) return nil } @@ -384,7 +384,7 @@ func (agent *ActionAgent) RunBlpUntil(ctx context.Context, bpl *blproto.BlpPosit if agent.BinlogPlayerMap == nil { return nil, fmt.Errorf("No BinlogPlayerMap configured") } - if err := agent.BinlogPlayerMap.RunUntil(bpl, waitTime); err != nil { + if err := agent.BinlogPlayerMap.RunUntil(ctx, bpl, waitTime); err != nil { return nil, err } rp, err := agent.MysqlDaemon.MasterPosition() @@ -454,7 +454,7 @@ func (agent *ActionAgent) PopulateReparentJournal(ctx context.Context, timeCreat // InitSlave sets replication master and position, and waits for the // reparent_journal table entry up to context timeout func (agent *ActionAgent) InitSlave(ctx context.Context, parent topo.TabletAlias, replicationPosition myproto.ReplicationPosition, timeCreatedNS int64) error { - ti, err := agent.TopoServer.GetTablet(parent) + ti, err := agent.TopoServer.GetTablet(ctx, parent) if err != nil { return err } @@ -503,7 +503,7 @@ func (agent *ActionAgent) DemoteMaster(ctx context.Context) (myproto.Replication // replication up to the provided point, and then makes the slave the // shard master. func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, pos myproto.ReplicationPosition) (myproto.ReplicationPosition, error) { - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) + tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return myproto.ReplicationPosition{}, err } @@ -536,7 +536,7 @@ func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, pos mypr // SlaveWasPromoted promotes a slave to master, no questions asked. // Should be called under RPCWrapLockAction. 
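The agent_rpc_actions.go changes above all follow one idea: a topo read issued on behalf of an RPC now inherits that RPC's context, so a client-set deadline bounds the topology lookup as well. A small sketch under that assumption; getTablet below is a hypothetical stand-in, not the real topo.Server implementation.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func getTablet(ctx context.Context, alias string) (string, error) {
	select {
	case <-time.After(2 * time.Second): // simulate a slow topology server
		return "tablet record for " + alias, nil
	case <-ctx.Done():
		return "", ctx.Err() // deadline or cancellation from the RPC layer
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	if _, err := getTablet(ctx, "cell1-0000000042"); err != nil {
		fmt.Printf("GetTablet aborted by context: %v\n", err)
	}
}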
func (agent *ActionAgent) SlaveWasPromoted(ctx context.Context) error { - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) + tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return err } @@ -547,7 +547,7 @@ func (agent *ActionAgent) SlaveWasPromoted(ctx context.Context) error { // SetMaster sets replication master, and waits for the // reparent_journal table entry up to context timeout func (agent *ActionAgent) SetMaster(ctx context.Context, parent topo.TabletAlias, timeCreatedNS int64, forceStartSlave bool) error { - ti, err := agent.TopoServer.GetTablet(parent) + ti, err := agent.TopoServer.GetTablet(ctx, parent) if err != nil { return err } @@ -582,7 +582,7 @@ func (agent *ActionAgent) SetMaster(ctx context.Context, parent topo.TabletAlias } // change our type to spare if we used to be the master - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) + tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return err } @@ -605,7 +605,7 @@ func (agent *ActionAgent) SetMaster(ctx context.Context, parent topo.TabletAlias // SlaveWasRestarted updates the parent record for a tablet. // Should be called under RPCWrapLockAction. func (agent *ActionAgent) SlaveWasRestarted(ctx context.Context, swrd *actionnode.SlaveWasRestartedArgs) error { - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) + tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return err } @@ -654,7 +654,7 @@ func (agent *ActionAgent) StopReplicationAndGetStatus(ctx context.Context) (mypr // PromoteSlave makes the current tablet the master func (agent *ActionAgent) PromoteSlave(ctx context.Context) (myproto.ReplicationPosition, error) { - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) + tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return myproto.ReplicationPosition{}, err } @@ -705,7 +705,7 @@ func (agent *ActionAgent) updateReplicationGraphForPromotedSlave(ctx context.Con // Should be called under RPCWrapLockAction. func (agent *ActionAgent) Backup(ctx context.Context, concurrency int, logger logutil.Logger) error { // update our type to TYPE_BACKUP - tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias) + tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return err } diff --git a/go/vt/tabletmanager/binlog.go b/go/vt/tabletmanager/binlog.go index 7219131146..9a2c6986d7 100644 --- a/go/vt/tabletmanager/binlog.go +++ b/go/vt/tabletmanager/binlog.go @@ -26,6 +26,7 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl" myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) func init() { @@ -92,7 +93,7 @@ func (bpc *BinlogPlayerController) String() string { } // Start will start the player in the background and run forever. -func (bpc *BinlogPlayerController) Start() { +func (bpc *BinlogPlayerController) Start(ctx context.Context) { bpc.playerMutex.Lock() defer bpc.playerMutex.Unlock() if bpc.interrupted != nil { @@ -103,11 +104,11 @@ func (bpc *BinlogPlayerController) Start() { bpc.interrupted = make(chan struct{}, 1) bpc.done = make(chan struct{}, 1) bpc.stopPosition = myproto.ReplicationPosition{} // run forever - go bpc.Loop() + go bpc.Loop(ctx) } // StartUntil will start the Player until we reach the given position. 
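StartUntil, like Start before it, hands the caller's context to the background Loop goroutine while keeping the pre-existing interrupted channel for Stop: the context serves the per-iteration topo calls such as GetEndPoints, not cancellation of the loop itself. A simplified sketch of that split, with a hypothetical controller type.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

type controller struct {
	interrupted chan struct{}
}

func (c *controller) Start(ctx context.Context) {
	c.interrupted = make(chan struct{})
	go c.loop(ctx)
}

func (c *controller) loop(ctx context.Context) {
	for {
		// ctx would flow into per-iteration work, e.g. GetEndPoints lookups.
		_ = ctx
		fmt.Println("iteration")
		select {
		case <-c.interrupted: // Stop() still uses the channel, not the context
			return
		case <-time.After(10 * time.Millisecond):
		}
	}
}

func (c *controller) Stop() { close(c.interrupted) }

func main() {
	c := &controller{}
	c.Start(context.Background())
	time.Sleep(25 * time.Millisecond)
	c.Stop()
	time.Sleep(5 * time.Millisecond)
}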
-func (bpc *BinlogPlayerController) StartUntil(stopPos myproto.ReplicationPosition) error { +func (bpc *BinlogPlayerController) StartUntil(ctx context.Context, stopPos myproto.ReplicationPosition) error { bpc.playerMutex.Lock() defer bpc.playerMutex.Unlock() if bpc.interrupted != nil { @@ -117,7 +118,7 @@ func (bpc *BinlogPlayerController) StartUntil(stopPos myproto.ReplicationPositio bpc.interrupted = make(chan struct{}, 1) bpc.done = make(chan struct{}, 1) bpc.stopPosition = stopPos - go bpc.Loop() + go bpc.Loop(ctx) return nil } @@ -177,9 +178,9 @@ func (bpc *BinlogPlayerController) Stop() { // Loop runs the main player loop: try to play, and in case of error, // sleep for 5 seconds and try again. -func (bpc *BinlogPlayerController) Loop() { +func (bpc *BinlogPlayerController) Loop(ctx context.Context) { for { - err := bpc.Iteration() + err := bpc.Iteration(ctx) if err == nil { // this happens when we get interrupted break @@ -202,7 +203,7 @@ func (bpc *BinlogPlayerController) Loop() { // Iteration is a single iteration for the player: get the current status, // try to play, and plays until interrupted, or until an error occurs. -func (bpc *BinlogPlayerController) Iteration() (err error) { +func (bpc *BinlogPlayerController) Iteration(ctx context.Context) (err error) { defer func() { if x := recover(); x != nil { log.Errorf("%v: caught panic: %v", bpc, x) @@ -236,7 +237,7 @@ func (bpc *BinlogPlayerController) Iteration() (err error) { } // Find the server list for the source shard in our cell - addrs, err := bpc.ts.GetEndPoints(bpc.cell, bpc.sourceShard.Keyspace, bpc.sourceShard.Shard, topo.TYPE_REPLICA) + addrs, err := bpc.ts.GetEndPoints(ctx, bpc.cell, bpc.sourceShard.Keyspace, bpc.sourceShard.Shard, topo.TYPE_REPLICA) if err != nil { return fmt.Errorf("can't find any source tablet for %v %v %v: %v", bpc.cell, bpc.sourceShard.String(), topo.TYPE_REPLICA, err) } @@ -372,7 +373,7 @@ func (blm *BinlogPlayerMap) size() int64 { } // addPlayer adds a new player to the map. It assumes we have the lock. -func (blm *BinlogPlayerMap) addPlayer(cell string, keyspaceIdType key.KeyspaceIdType, keyRange key.KeyRange, sourceShard topo.SourceShard, dbName string) { +func (blm *BinlogPlayerMap) addPlayer(ctx context.Context, cell string, keyspaceIdType key.KeyspaceIdType, keyRange key.KeyRange, sourceShard topo.SourceShard, dbName string) { bpc, ok := blm.players[sourceShard.Uid] if ok { log.Infof("Already playing logs for %v", sourceShard) @@ -382,7 +383,7 @@ func (blm *BinlogPlayerMap) addPlayer(cell string, keyspaceIdType key.KeyspaceId bpc = newBinlogPlayerController(blm.ts, blm.dbConfig, blm.mysqld, cell, keyspaceIdType, keyRange, sourceShard, dbName) blm.players[sourceShard.Uid] = bpc if blm.state == BpmStateRunning { - bpc.Start() + bpc.Start(ctx) } } @@ -407,7 +408,7 @@ func (blm *BinlogPlayerMap) StopAllPlayersAndReset() { // RefreshMap reads the right data from topo.Server and makes sure // we're playing the right logs. 
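RefreshMap shows the fan-out half of the plumbing: a single caller context is distributed, under the map lock, to every player the map starts. A compact sketch with hypothetical types; the real BinlogPlayerMap carries considerably more state.

package main

import (
	"fmt"
	"sync"

	"golang.org/x/net/context"
)

type playerMap struct {
	mu      sync.Mutex
	players map[uint32]func(context.Context)
}

// startAll mirrors the shape of BinlogPlayerMap.Start/RefreshMap: the same
// ctx reaches every controller started while the lock is held.
func (m *playerMap) startAll(ctx context.Context) {
	m.mu.Lock()
	defer m.mu.Unlock()
	for uid, start := range m.players {
		fmt.Println("starting player", uid)
		start(ctx)
	}
}

func main() {
	m := &playerMap{players: map[uint32]func(context.Context){
		1: func(ctx context.Context) {},
		2: func(ctx context.Context) {},
	}}
	m.startAll(context.Background())
}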
-func (blm *BinlogPlayerMap) RefreshMap(tablet *topo.Tablet, keyspaceInfo *topo.KeyspaceInfo, shardInfo *topo.ShardInfo) { +func (blm *BinlogPlayerMap) RefreshMap(ctx context.Context, tablet *topo.Tablet, keyspaceInfo *topo.KeyspaceInfo, shardInfo *topo.ShardInfo) { log.Infof("Refreshing map of binlog players") if shardInfo == nil { log.Warningf("Could not read shardInfo, not changing anything") @@ -434,7 +435,7 @@ func (blm *BinlogPlayerMap) RefreshMap(tablet *topo.Tablet, keyspaceInfo *topo.K // for each source, add it if not there, and delete from toRemove for _, sourceShard := range shardInfo.SourceShards { - blm.addPlayer(tablet.Alias.Cell, keyspaceInfo.ShardingColumnType, tablet.KeyRange, sourceShard, tablet.DbName()) + blm.addPlayer(ctx, tablet.Alias.Cell, keyspaceInfo.ShardingColumnType, tablet.KeyRange, sourceShard, tablet.DbName()) delete(toRemove, sourceShard.Uid) } hasPlayers := len(shardInfo.SourceShards) > 0 @@ -470,7 +471,7 @@ func (blm *BinlogPlayerMap) Stop() { } // Start restarts the current players. -func (blm *BinlogPlayerMap) Start() { +func (blm *BinlogPlayerMap) Start(ctx context.Context) { blm.mu.Lock() defer blm.mu.Unlock() if blm.state == BpmStateRunning { @@ -479,7 +480,7 @@ func (blm *BinlogPlayerMap) Start() { } log.Infof("Starting map of binlog players") for _, bpc := range blm.players { - bpc.Start() + bpc.Start(ctx) } blm.state = BpmStateRunning } @@ -509,7 +510,7 @@ func (blm *BinlogPlayerMap) BlpPositionList() (*blproto.BlpPositionList, error) // RunUntil will run all the players until they reach the given position. // Holds the map lock during that exercise, shouldn't take long at all. -func (blm *BinlogPlayerMap) RunUntil(blpPositionList *blproto.BlpPositionList, waitTimeout time.Duration) error { +func (blm *BinlogPlayerMap) RunUntil(ctx context.Context, blpPositionList *blproto.BlpPositionList, waitTimeout time.Duration) error { // lock and check state blm.mu.Lock() defer blm.mu.Unlock() @@ -531,7 +532,7 @@ func (blm *BinlogPlayerMap) RunUntil(blpPositionList *blproto.BlpPositionList, w // start all the players giving them where to stop for _, bpc := range blm.players { - if err := bpc.StartUntil(posMap[bpc.sourceShard.Uid]); err != nil { + if err := bpc.StartUntil(ctx, posMap[bpc.sourceShard.Uid]); err != nil { return err } } diff --git a/go/vt/tabletmanager/healthcheck.go b/go/vt/tabletmanager/healthcheck.go index 2c761fb364..2fc5706434 100644 --- a/go/vt/tabletmanager/healthcheck.go +++ b/go/vt/tabletmanager/healthcheck.go @@ -208,7 +208,7 @@ func (agent *ActionAgent) runHealthCheck(targetTabletType topo.TabletType) { agent.mutex.Unlock() } else { log.Infof("Updating tablet mysql port to %v", mysqlPort) - if err := agent.TopoServer.UpdateTabletFields(tablet.Alias, func(tablet *topo.Tablet) error { + if err := agent.TopoServer.UpdateTabletFields(agent.batchCtx, tablet.Alias, func(tablet *topo.Tablet) error { tablet.Portmap["mysql"] = mysqlPort return nil }); err != nil { diff --git a/go/vt/tabletmanager/healthcheck_test.go b/go/vt/tabletmanager/healthcheck_test.go index 7cd7cfed9a..910093fc1c 100644 --- a/go/vt/tabletmanager/healthcheck_test.go +++ b/go/vt/tabletmanager/healthcheck_test.go @@ -111,14 +111,14 @@ func (fhc *fakeHealthCheck) HTMLName() template.HTML { return template.HTML("fakeHealthCheck") } -func createTestAgent(t *testing.T) *ActionAgent { +func createTestAgent(ctx context.Context, t *testing.T) *ActionAgent { ts := zktopo.NewTestServer(t, []string{cell}) - if err := ts.CreateKeyspace(keyspace, &topo.Keyspace{}); err != nil { + if 
err := ts.CreateKeyspace(ctx, keyspace, &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } - if err := topo.CreateShard(ts, keyspace, shard); err != nil { + if err := topo.CreateShard(ctx, ts, keyspace, shard); err != nil { t.Fatalf("CreateShard failed: %v", err) } @@ -134,12 +134,12 @@ func createTestAgent(t *testing.T) *ActionAgent { Shard: shard, Type: topo.TYPE_SPARE, } - if err := topo.CreateTablet(context.Background(), ts, tablet); err != nil { + if err := topo.CreateTablet(ctx, ts, tablet); err != nil { t.Fatalf("CreateTablet failed: %v", err) } mysqlDaemon := &mysqlctl.FakeMysqlDaemon{MysqlPort: 3306} - agent := NewTestActionAgent(context.Background(), ts, tabletAlias, port, mysqlDaemon) + agent := NewTestActionAgent(ctx, ts, tabletAlias, port, mysqlDaemon) agent.BinlogPlayerMap = NewBinlogPlayerMap(ts, nil, nil) agent.HealthReporter = &fakeHealthCheck{} @@ -149,14 +149,15 @@ func createTestAgent(t *testing.T) *ActionAgent { // TestHealthCheckControlsQueryService verifies that a tablet going healthy // starts the query service, and going unhealthy stops it. func TestHealthCheckControlsQueryService(t *testing.T) { - agent := createTestAgent(t) + ctx := context.Background() + agent := createTestAgent(ctx, t) targetTabletType := topo.TYPE_REPLICA // first health check, should change us to replica, and update the // mysql port to 3306 before := time.Now() agent.runHealthCheck(targetTabletType) - ti, err := agent.TopoServer.GetTablet(tabletAlias) + ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -177,7 +178,7 @@ func TestHealthCheckControlsQueryService(t *testing.T) { agent.HealthReporter.(*fakeHealthCheck).reportError = fmt.Errorf("tablet is unhealthy") before = time.Now() agent.runHealthCheck(targetTabletType) - ti, err = agent.TopoServer.GetTablet(tabletAlias) + ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -195,13 +196,14 @@ func TestHealthCheckControlsQueryService(t *testing.T) { // TestQueryServiceNotStarting verifies that if a tablet cannot start the // query service, it should not go healthy func TestQueryServiceNotStarting(t *testing.T) { - agent := createTestAgent(t) + ctx := context.Background() + agent := createTestAgent(ctx, t) targetTabletType := topo.TYPE_REPLICA agent.QueryServiceControl.(*tabletserver.TestQueryServiceControl).AllowQueriesError = fmt.Errorf("test cannot start query service") before := time.Now() agent.runHealthCheck(targetTabletType) - ti, err := agent.TopoServer.GetTablet(tabletAlias) + ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -219,13 +221,14 @@ func TestQueryServiceNotStarting(t *testing.T) { // TestQueryServiceStopped verifies that if a healthy tablet's query // service is shut down, the tablet goes unhealthy func TestQueryServiceStopped(t *testing.T) { - agent := createTestAgent(t) + ctx := context.Background() + agent := createTestAgent(ctx, t) targetTabletType := topo.TYPE_REPLICA // first health check, should change us to replica before := time.Now() agent.runHealthCheck(targetTabletType) - ti, err := agent.TopoServer.GetTablet(tabletAlias) + ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -246,7 +249,7 @@ func TestQueryServiceStopped(t *testing.T) { // health check should now fail before = time.Now() agent.runHealthCheck(targetTabletType) - ti, err =
agent.TopoServer.GetTablet(tabletAlias) + ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -264,13 +267,14 @@ func TestQueryServiceStopped(t *testing.T) { // TestTabletControl verifies the shard's TabletControl record can disable // query service in a tablet. func TestTabletControl(t *testing.T) { - agent := createTestAgent(t) + ctx := context.Background() + agent := createTestAgent(ctx, t) targetTabletType := topo.TYPE_REPLICA // first health check, should change us to replica before := time.Now() agent.runHealthCheck(targetTabletType) - ti, err := agent.TopoServer.GetTablet(tabletAlias) + ti, err := agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -285,7 +289,7 @@ func TestTabletControl(t *testing.T) { } // now update the shard - si, err := agent.TopoServer.GetShard(keyspace, shard) + si, err := agent.TopoServer.GetShard(ctx, keyspace, shard) if err != nil { t.Fatalf("GetShard failed: %v", err) } @@ -294,12 +298,11 @@ func TestTabletControl(t *testing.T) { DisableQueryService: true, }, } - if err := topo.UpdateShard(context.Background(), agent.TopoServer, si); err != nil { + if err := topo.UpdateShard(ctx, agent.TopoServer, si); err != nil { t.Fatalf("UpdateShard failed: %v", err) } // now refresh the tablet state, as the resharding process would do - ctx := context.Background() agent.RPCWrapLockAction(ctx, actionnode.TabletActionRefreshState, "", "", true, func() error { agent.RefreshState(ctx) return nil @@ -313,7 +316,7 @@ func TestTabletControl(t *testing.T) { // check running a health check will not start it again before = time.Now() agent.runHealthCheck(targetTabletType) - ti, err = agent.TopoServer.GetTablet(tabletAlias) + ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -331,7 +334,7 @@ func TestTabletControl(t *testing.T) { agent.HealthReporter.(*fakeHealthCheck).reportError = fmt.Errorf("tablet is unhealthy") before = time.Now() agent.runHealthCheck(targetTabletType) - ti, err = agent.TopoServer.GetTablet(tabletAlias) + ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -349,7 +352,7 @@ func TestTabletControl(t *testing.T) { agent.HealthReporter.(*fakeHealthCheck).reportError = nil before = time.Now() agent.runHealthCheck(targetTabletType) - ti, err = agent.TopoServer.GetTablet(tabletAlias) + ti, err = agent.TopoServer.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -367,7 +370,8 @@ func TestTabletControl(t *testing.T) { // TestOldHealthCheck verifies that a healthcheck that is too old will // return an error func TestOldHealthCheck(t *testing.T) { - agent := createTestAgent(t) + ctx := context.Background() + agent := createTestAgent(ctx, t) *healthCheckInterval = 20 * time.Second agent._healthy = nil diff --git a/go/vt/tabletmanager/init_tablet.go b/go/vt/tabletmanager/init_tablet.go index cdfc7c8879..ef7875bd45 100644 --- a/go/vt/tabletmanager/init_tablet.go +++ b/go/vt/tabletmanager/init_tablet.go @@ -111,7 +111,7 @@ func (agent *ActionAgent) InitTablet(port, securePort int) error { } // re-read the shard with the lock - si, err = agent.TopoServer.GetShard(*initKeyspace, shard) + si, err = agent.TopoServer.GetShard(ctx, *initKeyspace, shard) if err != nil { return actionNode.UnlockShard(ctx, agent.TopoServer, *initKeyspace, shard, lockPath, err) } @@ -179,7 +179,7 @@ func (agent *ActionAgent) 
InitTablet(port, securePort int) error { case topo.ErrNodeExists: // The node already exists, will just try to update // it. So we read it first. - oldTablet, err := agent.TopoServer.GetTablet(tablet.Alias) + oldTablet, err := agent.TopoServer.GetTablet(ctx, tablet.Alias) if err != nil { fmt.Errorf("InitTablet failed to read existing tablet record: %v", err) } diff --git a/go/vt/tabletmanager/init_tablet_test.go b/go/vt/tabletmanager/init_tablet_test.go index 9f307259c1..cdf25aa76b 100644 --- a/go/vt/tabletmanager/init_tablet_test.go +++ b/go/vt/tabletmanager/init_tablet_test.go @@ -22,6 +22,7 @@ import ( // tablet node correctly. Note we modify global parameters (the flags) // so this has to be in one test. func TestInitTablet(t *testing.T) { + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) tabletAlias := topo.TabletAlias{ Cell: "cell1", @@ -40,7 +41,7 @@ func TestInitTablet(t *testing.T) { SchemaOverrides: nil, BinlogPlayerMap: nil, LockTimeout: 10 * time.Second, - batchCtx: context.Background(), + batchCtx: ctx, History: history.New(historyLength), lastHealthMapCount: new(stats.Int), _healthy: fmt.Errorf("healthcheck not run yet"), @@ -50,7 +51,7 @@ func TestInitTablet(t *testing.T) { if err := agent.InitTablet(port, securePort); err != nil { t.Fatalf("NewTestActionAgent(idle) failed: %v", err) } - ti, err := ts.GetTablet(tabletAlias) + ti, err := ts.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -73,7 +74,7 @@ func TestInitTablet(t *testing.T) { if err := agent.InitTablet(port, securePort); err != nil { t.Fatalf("NewTestActionAgent(idle again) failed: %v", err) } - ti, err = ts.GetTablet(tabletAlias) + ti, err = ts.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -103,14 +104,14 @@ func TestInitTablet(t *testing.T) { if err := agent.InitTablet(port, securePort); err != nil { t.Fatalf("InitTablet(type) failed: %v", err) } - si, err := ts.GetShard("test_keyspace", "-80") + si, err := ts.GetShard(ctx, "test_keyspace", "-80") if err != nil { t.Fatalf("GetShard failed: %v", err) } if len(si.Cells) != 1 || si.Cells[0] != "cell1" { t.Errorf("shard.Cells not updated properly: %v", si) } - ti, err = ts.GetTablet(tabletAlias) + ti, err = ts.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -124,7 +125,7 @@ func TestInitTablet(t *testing.T) { if err := agent.InitTablet(port, securePort); err != nil { t.Fatalf("InitTablet(type, healthcheck) failed: %v", err) } - ti, err = ts.GetTablet(tabletAlias) + ti, err = ts.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -133,18 +134,18 @@ func TestInitTablet(t *testing.T) { } // update shard's master to our alias, then try to init again - si, err = ts.GetShard("test_keyspace", "-80") + si, err = ts.GetShard(ctx, "test_keyspace", "-80") if err != nil { t.Fatalf("GetShard failed: %v", err) } si.MasterAlias = tabletAlias - if err := topo.UpdateShard(context.Background(), ts, si); err != nil { + if err := topo.UpdateShard(ctx, ts, si); err != nil { t.Fatalf("UpdateShard failed: %v", err) } if err := agent.InitTablet(port, securePort); err != nil { t.Fatalf("InitTablet(type, healthcheck) failed: %v", err) } - ti, err = ts.GetTablet(tabletAlias) + ti, err = ts.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -161,7 +162,7 @@ func TestInitTablet(t *testing.T) { if err := agent.InitTablet(port, securePort); err != nil { 
t.Fatalf("InitTablet(type, healthcheck) failed: %v", err) } - ti, err = ts.GetTablet(tabletAlias) + ti, err = ts.GetTablet(ctx, tabletAlias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } diff --git a/go/vt/tabletmanager/restore.go b/go/vt/tabletmanager/restore.go index a867ddf5b7..1d23cb7383 100644 --- a/go/vt/tabletmanager/restore.go +++ b/go/vt/tabletmanager/restore.go @@ -11,6 +11,7 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl" myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) // This file handles the initial backup restore upon startup. @@ -25,7 +26,7 @@ var ( // It will either work, fail gracefully, or return // an error in case of a non-recoverable error. // It takes the action lock so no RPC interferes. -func (agent *ActionAgent) RestoreFromBackup() error { +func (agent *ActionAgent) RestoreFromBackup(ctx context.Context) error { agent.actionMutex.Lock() defer agent.actionMutex.Unlock() @@ -33,7 +34,7 @@ func (agent *ActionAgent) RestoreFromBackup() error { // always authorized) tablet := agent.Tablet() originalType := tablet.Type - if err := agent.TopoServer.UpdateTabletFields(tablet.Alias, func(tablet *topo.Tablet) error { + if err := agent.TopoServer.UpdateTabletFields(ctx, tablet.Alias, func(tablet *topo.Tablet) error { tablet.Type = topo.TYPE_RESTORE return nil }); err != nil { @@ -50,11 +51,11 @@ func (agent *ActionAgent) RestoreFromBackup() error { if err == nil { // now read the shard to find the current master, and its location - si, err := agent.TopoServer.GetShard(tablet.Keyspace, tablet.Shard) + si, err := agent.TopoServer.GetShard(ctx, tablet.Keyspace, tablet.Shard) if err != nil { return fmt.Errorf("Cannot read shard: %v", err) } - ti, err := agent.TopoServer.GetTablet(si.MasterAlias) + ti, err := agent.TopoServer.GetTablet(ctx, si.MasterAlias) if err != nil { return fmt.Errorf("Cannot read master tablet %v: %v", si.MasterAlias, err) } @@ -75,7 +76,7 @@ func (agent *ActionAgent) RestoreFromBackup() error { } // change type back to original type - if err := agent.TopoServer.UpdateTabletFields(tablet.Alias, func(tablet *topo.Tablet) error { + if err := agent.TopoServer.UpdateTabletFields(ctx, tablet.Alias, func(tablet *topo.Tablet) error { tablet.Type = originalType return nil }); err != nil { diff --git a/go/vt/topo/helpers/copy.go b/go/vt/topo/helpers/copy.go index aa921ca22e..7ea9e018a2 100644 --- a/go/vt/topo/helpers/copy.go +++ b/go/vt/topo/helpers/copy.go @@ -13,11 +13,12 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) // CopyKeyspaces will create the keyspaces in the destination topo -func CopyKeyspaces(fromTS, toTS topo.Server) { - keyspaces, err := fromTS.GetKeyspaces() +func CopyKeyspaces(ctx context.Context, fromTS, toTS topo.Server) { + keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { log.Fatalf("GetKeyspaces: %v", err) } @@ -29,13 +30,13 @@ func CopyKeyspaces(fromTS, toTS topo.Server) { go func(keyspace string) { defer wg.Done() - k, err := fromTS.GetKeyspace(keyspace) + k, err := fromTS.GetKeyspace(ctx, keyspace) if err != nil { rec.RecordError(fmt.Errorf("GetKeyspace(%v): %v", keyspace, err)) return } - if err := toTS.CreateKeyspace(keyspace, k.Keyspace); err != nil { + if err := toTS.CreateKeyspace(ctx, keyspace, k.Keyspace); err != nil { if err == topo.ErrNodeExists { log.Warningf("keyspace %v already exists", keyspace) } else { @@ -51,8 +52,8 
@@ func CopyKeyspaces(fromTS, toTS topo.Server) { } // CopyShards will create the shards in the destination topo -func CopyShards(fromTS, toTS topo.Server, deleteKeyspaceShards bool) { - keyspaces, err := fromTS.GetKeyspaces() +func CopyShards(ctx context.Context, fromTS, toTS topo.Server, deleteKeyspaceShards bool) { + keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { log.Fatalf("fromTS.GetKeyspaces: %v", err) } @@ -63,14 +64,14 @@ func CopyShards(fromTS, toTS topo.Server, deleteKeyspaceShards bool) { wg.Add(1) go func(keyspace string) { defer wg.Done() - shards, err := fromTS.GetShardNames(keyspace) + shards, err := fromTS.GetShardNames(ctx, keyspace) if err != nil { rec.RecordError(fmt.Errorf("GetShardNames(%v): %v", keyspace, err)) return } if deleteKeyspaceShards { - if err := toTS.DeleteKeyspaceShards(keyspace); err != nil { + if err := toTS.DeleteKeyspaceShards(ctx, keyspace); err != nil { rec.RecordError(fmt.Errorf("DeleteKeyspaceShards(%v): %v", keyspace, err)) return } @@ -80,7 +81,7 @@ func CopyShards(fromTS, toTS topo.Server, deleteKeyspaceShards bool) { wg.Add(1) go func(keyspace, shard string) { defer wg.Done() - if err := topo.CreateShard(toTS, keyspace, shard); err != nil { + if err := topo.CreateShard(ctx, toTS, keyspace, shard); err != nil { if err == topo.ErrNodeExists { log.Warningf("shard %v/%v already exists", keyspace, shard) } else { @@ -89,19 +90,19 @@ func CopyShards(fromTS, toTS topo.Server, deleteKeyspaceShards bool) { } } - si, err := fromTS.GetShard(keyspace, shard) + si, err := fromTS.GetShard(ctx, keyspace, shard) if err != nil { rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err)) return } - toSi, err := toTS.GetShard(keyspace, shard) + toSi, err := toTS.GetShard(ctx, keyspace, shard) if err != nil { rec.RecordError(fmt.Errorf("toTS.GetShard(%v, %v): %v", keyspace, shard, err)) return } - if _, err := toTS.UpdateShard(si, toSi.Version()); err != nil { + if _, err := toTS.UpdateShard(ctx, si, toSi.Version()); err != nil { rec.RecordError(fmt.Errorf("UpdateShard(%v, %v): %v", keyspace, shard, err)) } }(keyspace, shard) @@ -115,8 +116,8 @@ func CopyShards(fromTS, toTS topo.Server, deleteKeyspaceShards bool) { } // CopyTablets will create the tablets in the destination topo -func CopyTablets(fromTS, toTS topo.Server) { - cells, err := fromTS.GetKnownCells() +func CopyTablets(ctx context.Context, fromTS, toTS topo.Server) { + cells, err := fromTS.GetKnownCells(ctx) if err != nil { log.Fatalf("fromTS.GetKnownCells: %v", err) } @@ -127,7 +128,7 @@ func CopyTablets(fromTS, toTS topo.Server) { wg.Add(1) go func(cell string) { defer wg.Done() - tabletAliases, err := fromTS.GetTabletsByCell(cell) + tabletAliases, err := fromTS.GetTabletsByCell(ctx, cell) if err != nil { rec.RecordError(fmt.Errorf("GetTabletsByCell(%v): %v", cell, err)) } else { @@ -137,18 +138,18 @@ func CopyTablets(fromTS, toTS topo.Server) { defer wg.Done() // read the source tablet - ti, err := fromTS.GetTablet(tabletAlias) + ti, err := fromTS.GetTablet(ctx, tabletAlias) if err != nil { rec.RecordError(fmt.Errorf("GetTablet(%v): %v", tabletAlias, err)) return } // try to create the destination - err = toTS.CreateTablet(ti.Tablet) + err = toTS.CreateTablet(ctx, ti.Tablet) if err == topo.ErrNodeExists { // update the destination tablet log.Warningf("tablet %v already exists, updating it", tabletAlias) - err = toTS.UpdateTabletFields(ti.Alias, func(t *topo.Tablet) error { + err = toTS.UpdateTabletFields(ctx, ti.Alias, func(t *topo.Tablet) error { *t = *ti.Tablet return 
nil }) @@ -170,8 +171,8 @@ func CopyTablets(fromTS, toTS topo.Server) { // CopyShardReplications will create the ShardReplication objects in // the destination topo -func CopyShardReplications(fromTS, toTS topo.Server) { - keyspaces, err := fromTS.GetKeyspaces() +func CopyShardReplications(ctx context.Context, fromTS, toTS topo.Server) { + keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { log.Fatalf("fromTS.GetKeyspaces: %v", err) } @@ -182,7 +183,7 @@ func CopyShardReplications(fromTS, toTS topo.Server) { wg.Add(1) go func(keyspace string) { defer wg.Done() - shards, err := fromTS.GetShardNames(keyspace) + shards, err := fromTS.GetShardNames(ctx, keyspace) if err != nil { rec.RecordError(fmt.Errorf("GetShardNames(%v): %v", keyspace, err)) return @@ -194,20 +195,20 @@ func CopyShardReplications(fromTS, toTS topo.Server) { defer wg.Done() // read the source shard to get the cells - si, err := fromTS.GetShard(keyspace, shard) + si, err := fromTS.GetShard(ctx, keyspace, shard) if err != nil { rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err)) return } for _, cell := range si.Cells { - sri, err := fromTS.GetShardReplication(cell, keyspace, shard) + sri, err := fromTS.GetShardReplication(ctx, cell, keyspace, shard) if err != nil { rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v): %v", cell, keyspace, shard, err)) continue } - if err := toTS.UpdateShardReplicationFields(cell, keyspace, shard, func(oldSR *topo.ShardReplication) error { + if err := toTS.UpdateShardReplicationFields(ctx, cell, keyspace, shard, func(oldSR *topo.ShardReplication) error { *oldSR = *sri.ShardReplication return nil }); err != nil { diff --git a/go/vt/topo/helpers/copy_test.go b/go/vt/topo/helpers/copy_test.go index 3c3532f9b2..e78a44c5f2 100644 --- a/go/vt/topo/helpers/copy_test.go +++ b/go/vt/topo/helpers/copy_test.go @@ -19,7 +19,7 @@ import ( "launchpad.net/gozk/zookeeper" ) -func createSetup(t *testing.T) (topo.Server, topo.Server) { +func createSetup(ctx context.Context, t *testing.T) (topo.Server, topo.Server) { fromConn := fakezk.NewConn() fromTS := zktopo.NewServer(fromConn) @@ -33,13 +33,13 @@ func createSetup(t *testing.T) (topo.Server, topo.Server) { } // create a keyspace and a couple tablets - if err := fromTS.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil { + if err := fromTS.CreateKeyspace(ctx, "test_keyspace", &topo.Keyspace{}); err != nil { t.Fatalf("cannot create keyspace: %v", err) } - if err := fromTS.CreateShard("test_keyspace", "0", &topo.Shard{Cells: []string{"test_cell"}}); err != nil { + if err := fromTS.CreateShard(ctx, "test_keyspace", "0", &topo.Shard{Cells: []string{"test_cell"}}); err != nil { t.Fatalf("cannot create shard: %v", err) } - if err := topo.CreateTablet(context.Background(), fromTS, &topo.Tablet{ + if err := topo.CreateTablet(ctx, fromTS, &topo.Tablet{ Alias: topo.TabletAlias{ Cell: "test_cell", Uid: 123, @@ -59,7 +59,7 @@ func createSetup(t *testing.T) (topo.Server, topo.Server) { }); err != nil { t.Fatalf("cannot create master tablet: %v", err) } - if err := topo.CreateTablet(context.Background(), fromTS, &topo.Tablet{ + if err := topo.CreateTablet(ctx, fromTS, &topo.Tablet{ Alias: topo.TabletAlias{ Cell: "test_cell", Uid: 234, @@ -82,7 +82,7 @@ func createSetup(t *testing.T) (topo.Server, topo.Server) { } os.Setenv("ZK_CLIENT_CONFIG", testfiles.Locate("topo_helpers_test_zk_client.json")) - cells, err := fromTS.GetKnownCells() + cells, err := fromTS.GetKnownCells(ctx) if err != nil { t.Fatalf("fromTS.GetKnownCells: %v", 
err) } @@ -92,31 +92,31 @@ func createSetup(t *testing.T) (topo.Server, topo.Server) { } func TestBasic(t *testing.T) { - - fromTS, toTS := createSetup(t) + ctx := context.Background() + fromTS, toTS := createSetup(ctx, t) // check keyspace copy - CopyKeyspaces(fromTS, toTS) - keyspaces, err := toTS.GetKeyspaces() + CopyKeyspaces(ctx, fromTS, toTS) + keyspaces, err := toTS.GetKeyspaces(ctx) if err != nil { t.Fatalf("toTS.GetKeyspaces failed: %v", err) } if len(keyspaces) != 1 || keyspaces[0] != "test_keyspace" { t.Fatalf("unexpected keyspaces: %v", keyspaces) } - CopyKeyspaces(fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS) // check shard copy - CopyShards(fromTS, toTS, true) - shards, err := toTS.GetShardNames("test_keyspace") + CopyShards(ctx, fromTS, toTS, true) + shards, err := toTS.GetShardNames(ctx, "test_keyspace") if err != nil { t.Fatalf("toTS.GetShardNames failed: %v", err) } if len(shards) != 1 || shards[0] != "0" { t.Fatalf("unexpected shards: %v", shards) } - CopyShards(fromTS, toTS, false) - si, err := toTS.GetShard("test_keyspace", "0") + CopyShards(ctx, fromTS, toTS, false) + si, err := toTS.GetShard(ctx, "test_keyspace", "0") if err != nil { t.Fatalf("cannot read shard: %v", err) } @@ -125,12 +125,12 @@ func TestBasic(t *testing.T) { } // check ShardReplication copy - sr, err := fromTS.GetShardReplication("test_cell", "test_keyspace", "0") + sr, err := fromTS.GetShardReplication(ctx, "test_cell", "test_keyspace", "0") if err != nil { t.Fatalf("fromTS.GetShardReplication failed: %v", err) } - CopyShardReplications(fromTS, toTS) - sr, err = toTS.GetShardReplication("test_cell", "test_keyspace", "0") + CopyShardReplications(ctx, fromTS, toTS) + sr, err = toTS.GetShardReplication(ctx, "test_cell", "test_keyspace", "0") if err != nil { t.Fatalf("toTS.GetShardReplication failed: %v", err) } @@ -139,13 +139,13 @@ func TestBasic(t *testing.T) { } // check tablet copy - CopyTablets(fromTS, toTS) - tablets, err := toTS.GetTabletsByCell("test_cell") + CopyTablets(ctx, fromTS, toTS) + tablets, err := toTS.GetTabletsByCell(ctx, "test_cell") if err != nil { t.Fatalf("toTS.GetTabletsByCell failed: %v", err) } if len(tablets) != 2 || tablets[0].Uid != 123 || tablets[1].Uid != 234 { t.Fatalf("unexpected tablets: %v", tablets) } - CopyTablets(fromTS, toTS) + CopyTablets(ctx, fromTS, toTS) } diff --git a/go/vt/topo/helpers/tee.go b/go/vt/topo/helpers/tee.go index 939f1e79fa..11b9605978 100644 --- a/go/vt/topo/helpers/tee.go +++ b/go/vt/topo/helpers/tee.go @@ -92,8 +92,8 @@ func (tee *Tee) Close() { // // GetKnownCells is part of the topo.Server interface -func (tee *Tee) GetKnownCells() ([]string, error) { - return tee.readFrom.GetKnownCells() +func (tee *Tee) GetKnownCells(ctx context.Context) ([]string, error) { + return tee.readFrom.GetKnownCells(ctx) } // @@ -101,21 +101,21 @@ func (tee *Tee) GetKnownCells() ([]string, error) { // // CreateKeyspace is part of the topo.Server interface -func (tee *Tee) CreateKeyspace(keyspace string, value *topo.Keyspace) error { - if err := tee.primary.CreateKeyspace(keyspace, value); err != nil { +func (tee *Tee) CreateKeyspace(ctx context.Context, keyspace string, value *topo.Keyspace) error { + if err := tee.primary.CreateKeyspace(ctx, keyspace, value); err != nil { return err } // this is critical enough that we want to fail - if err := tee.secondary.CreateKeyspace(keyspace, value); err != nil { + if err := tee.secondary.CreateKeyspace(ctx, keyspace, value); err != nil { return err } return nil } // UpdateKeyspace is part of the topo.Server 
interface -func (tee *Tee) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (newVersion int64, err error) { - if newVersion, err = tee.primary.UpdateKeyspace(ki, existingVersion); err != nil { +func (tee *Tee) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (newVersion int64, err error) { + if newVersion, err = tee.primary.UpdateKeyspace(ctx, ki, existingVersion); err != nil { // failed on primary, not updating secondary return } @@ -130,16 +130,16 @@ func (tee *Tee) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (ne delete(tee.keyspaceVersionMapping, ki.KeyspaceName()) } tee.mu.Unlock() - if newVersion2, serr := tee.secondary.UpdateKeyspace(ki, existingVersion); serr != nil { + if newVersion2, serr := tee.secondary.UpdateKeyspace(ctx, ki, existingVersion); serr != nil { // not critical enough to fail if serr == topo.ErrNoNode { // the keyspace doesn't exist on the secondary, let's // just create it - if serr = tee.secondary.CreateKeyspace(ki.KeyspaceName(), ki.Keyspace); serr != nil { + if serr = tee.secondary.CreateKeyspace(ctx, ki.KeyspaceName(), ki.Keyspace); serr != nil { log.Warningf("secondary.CreateKeyspace(%v) failed (after UpdateKeyspace returned ErrNoNode): %v", ki.KeyspaceName(), serr) } else { log.Infof("secondary.UpdateKeyspace(%v) failed with ErrNoNode, CreateKeyspace then worked.", ki.KeyspaceName()) - ki, gerr := tee.secondary.GetKeyspace(ki.KeyspaceName()) + ki, gerr := tee.secondary.GetKeyspace(ctx, ki.KeyspaceName()) if gerr != nil { log.Warningf("Failed to re-read keyspace(%v) after creating it on secondary: %v", ki.KeyspaceName(), gerr) } else { @@ -166,13 +166,13 @@ func (tee *Tee) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (ne } // GetKeyspace is part of the topo.Server interface -func (tee *Tee) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) { - ki, err := tee.readFrom.GetKeyspace(keyspace) +func (tee *Tee) GetKeyspace(ctx context.Context, keyspace string) (*topo.KeyspaceInfo, error) { + ki, err := tee.readFrom.GetKeyspace(ctx, keyspace) if err != nil { return nil, err } - ki2, err := tee.readFromSecond.GetKeyspace(keyspace) + ki2, err := tee.readFromSecond.GetKeyspace(ctx, keyspace) if err != nil { // can't read from secondary, so we can't keep the version map return ki, nil @@ -188,17 +188,17 @@ func (tee *Tee) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) { } // GetKeyspaces is part of the topo.Server interface -func (tee *Tee) GetKeyspaces() ([]string, error) { - return tee.readFrom.GetKeyspaces() +func (tee *Tee) GetKeyspaces(ctx context.Context) ([]string, error) { + return tee.readFrom.GetKeyspaces(ctx) } // DeleteKeyspaceShards is part of the topo.Server interface -func (tee *Tee) DeleteKeyspaceShards(keyspace string) error { - if err := tee.primary.DeleteKeyspaceShards(keyspace); err != nil { +func (tee *Tee) DeleteKeyspaceShards(ctx context.Context, keyspace string) error { + if err := tee.primary.DeleteKeyspaceShards(ctx, keyspace); err != nil { return err } - if err := tee.secondary.DeleteKeyspaceShards(keyspace); err != nil { + if err := tee.secondary.DeleteKeyspaceShards(ctx, keyspace); err != nil { // not critical enough to fail log.Warningf("secondary.DeleteKeyspaceShards(%v) failed: %v", keyspace, err) } @@ -210,13 +210,13 @@ func (tee *Tee) DeleteKeyspaceShards(keyspace string) error { } // // CreateShard is part of the topo.Server interface -func (tee *Tee) CreateShard(keyspace, shard string, value *topo.Shard) error { - err :=
tee.primary.CreateShard(keyspace, shard, value) +func (tee *Tee) CreateShard(ctx context.Context, keyspace, shard string, value *topo.Shard) error { + err := tee.primary.CreateShard(ctx, keyspace, shard, value) if err != nil && err != topo.ErrNodeExists { return err } - serr := tee.secondary.CreateShard(keyspace, shard, value) + serr := tee.secondary.CreateShard(ctx, keyspace, shard, value) if serr != nil && serr != topo.ErrNodeExists { // not critical enough to fail log.Warningf("secondary.CreateShard(%v,%v) failed: %v", keyspace, shard, err) @@ -225,8 +225,8 @@ func (tee *Tee) CreateShard(keyspace, shard string, value *topo.Shard) error { } // UpdateShard is part of the topo.Server interface -func (tee *Tee) UpdateShard(si *topo.ShardInfo, existingVersion int64) (newVersion int64, err error) { - if newVersion, err = tee.primary.UpdateShard(si, existingVersion); err != nil { +func (tee *Tee) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVersion int64) (newVersion int64, err error) { + if newVersion, err = tee.primary.UpdateShard(ctx, si, existingVersion); err != nil { // failed on primary, not updating secondary return } @@ -241,16 +241,16 @@ func (tee *Tee) UpdateShard(si *topo.ShardInfo, existingVersion int64) (newVersi delete(tee.shardVersionMapping, si.Keyspace()+"/"+si.ShardName()) } tee.mu.Unlock() - if newVersion2, serr := tee.secondary.UpdateShard(si, existingVersion); serr != nil { + if newVersion2, serr := tee.secondary.UpdateShard(ctx, si, existingVersion); serr != nil { // not critical enough to fail if serr == topo.ErrNoNode { // the shard doesn't exist on the secondary, let's // just create it - if serr = tee.secondary.CreateShard(si.Keyspace(), si.ShardName(), si.Shard); serr != nil { + if serr = tee.secondary.CreateShard(ctx, si.Keyspace(), si.ShardName(), si.Shard); serr != nil { log.Warningf("secondary.CreateShard(%v,%v) failed (after UpdateShard returned ErrNoNode): %v", si.Keyspace(), si.ShardName(), serr) } else { log.Infof("secondary.UpdateShard(%v, %v) failed with ErrNoNode, CreateShard then worked.", si.Keyspace(), si.ShardName()) - si, gerr := tee.secondary.GetShard(si.Keyspace(), si.ShardName()) + si, gerr := tee.secondary.GetShard(ctx, si.Keyspace(), si.ShardName()) if gerr != nil { log.Warningf("Failed to re-read shard(%v, %v) after creating it on secondary: %v", si.Keyspace(), si.ShardName(), gerr) } else { @@ -277,13 +277,13 @@ func (tee *Tee) UpdateShard(si *topo.ShardInfo, existingVersion int64) (newVersi } // ValidateShard is part of the topo.Server interface -func (tee *Tee) ValidateShard(keyspace, shard string) error { - err := tee.primary.ValidateShard(keyspace, shard) +func (tee *Tee) ValidateShard(ctx context.Context, keyspace, shard string) error { + err := tee.primary.ValidateShard(ctx, keyspace, shard) if err != nil { return err } - if err := tee.secondary.ValidateShard(keyspace, shard); err != nil { + if err := tee.secondary.ValidateShard(ctx, keyspace, shard); err != nil { // not critical enough to fail log.Warningf("secondary.ValidateShard(%v,%v) failed: %v", keyspace, shard, err) } @@ -291,13 +291,13 @@ func (tee *Tee) ValidateShard(keyspace, shard string) error { } // GetShard is part of the topo.Server interface -func (tee *Tee) GetShard(keyspace, shard string) (*topo.ShardInfo, error) { - si, err := tee.readFrom.GetShard(keyspace, shard) +func (tee *Tee) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) { + si, err := tee.readFrom.GetShard(ctx, keyspace, shard) if err != nil { return nil, err } - 
si2, err := tee.readFromSecond.GetShard(keyspace, shard) + si2, err := tee.readFromSecond.GetShard(ctx, keyspace, shard) if err != nil { // can't read from secondary, so we can't keep the version map return si, nil @@ -313,18 +313,18 @@ func (tee *Tee) GetShard(keyspace, shard string) (*topo.ShardInfo, error) { } // GetShardNames is part of the topo.Server interface -func (tee *Tee) GetShardNames(keyspace string) ([]string, error) { - return tee.readFrom.GetShardNames(keyspace) +func (tee *Tee) GetShardNames(ctx context.Context, keyspace string) ([]string, error) { + return tee.readFrom.GetShardNames(ctx, keyspace) } // DeleteShard is part of the topo.Server interface -func (tee *Tee) DeleteShard(keyspace, shard string) error { - err := tee.primary.DeleteShard(keyspace, shard) +func (tee *Tee) DeleteShard(ctx context.Context, keyspace, shard string) error { + err := tee.primary.DeleteShard(ctx, keyspace, shard) if err != nil && err != topo.ErrNoNode { return err } - if err := tee.secondary.DeleteShard(keyspace, shard); err != nil { + if err := tee.secondary.DeleteShard(ctx, keyspace, shard); err != nil { // not critical enough to fail log.Warningf("secondary.DeleteShard(%v, %v) failed: %v", keyspace, shard, err) } @@ -336,13 +336,13 @@ func (tee *Tee) DeleteShard(keyspace, shard string) error { } // // CreateTablet is part of the topo.Server interface -func (tee *Tee) CreateTablet(tablet *topo.Tablet) error { - err := tee.primary.CreateTablet(tablet) +func (tee *Tee) CreateTablet(ctx context.Context, tablet *topo.Tablet) error { + err := tee.primary.CreateTablet(ctx, tablet) if err != nil && err != topo.ErrNodeExists { return err } - if err := tee.primary.CreateTablet(tablet); err != nil && err != topo.ErrNodeExists { + if err := tee.secondary.CreateTablet(ctx, tablet); err != nil && err != topo.ErrNodeExists { // not critical enough to fail log.Warningf("secondary.CreateTablet(%v) failed: %v", tablet.Alias, err) } @@ -350,8 +350,8 @@ func (tee *Tee) CreateTablet(tablet *topo.Tablet) error { } // UpdateTablet is part of the topo.Server interface -func (tee *Tee) UpdateTablet(tablet *topo.TabletInfo, existingVersion int64) (newVersion int64, err error) { - if newVersion, err = tee.primary.UpdateTablet(tablet, existingVersion); err != nil { +func (tee *Tee) UpdateTablet(ctx context.Context, tablet *topo.TabletInfo, existingVersion int64) (newVersion int64, err error) { + if newVersion, err = tee.primary.UpdateTablet(ctx, tablet, existingVersion); err != nil { // failed on primary, not updating secondary return } @@ -366,16 +366,16 @@ func (tee *Tee) UpdateTablet(tablet *topo.TabletInfo, existingVersion int64) (ne delete(tee.tabletVersionMapping, tablet.Alias) } tee.mu.Unlock() - if newVersion2, serr := tee.secondary.UpdateTablet(tablet, existingVersion); serr != nil { + if newVersion2, serr := tee.secondary.UpdateTablet(ctx, tablet, existingVersion); serr != nil { // not critical enough to fail if serr == topo.ErrNoNode { // the tablet doesn't exist on the secondary, let's // just create it - if serr = tee.secondary.CreateTablet(tablet.Tablet); serr != nil { + if serr = tee.secondary.CreateTablet(ctx, tablet.Tablet); serr != nil { log.Warningf("secondary.CreateTablet(%v) failed (after UpdateTablet returned ErrNoNode): %v", tablet.Alias, serr) } else { log.Infof("secondary.UpdateTablet(%v) failed with ErrNoNode, CreateTablet then worked.", tablet.Alias) - ti, gerr := tee.secondary.GetTablet(tablet.Alias) + ti, gerr := tee.secondary.GetTablet(ctx, tablet.Alias) if gerr != nil { log.Warningf("Failed to
re-read tablet(%v) after creating it on secondary: %v", tablet.Alias, gerr) } else { @@ -402,13 +402,13 @@ func (tee *Tee) UpdateTablet(tablet *topo.TabletInfo, existingVersion int64) (ne } // UpdateTabletFields is part of the topo.Server interface -func (tee *Tee) UpdateTabletFields(tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error { - if err := tee.primary.UpdateTabletFields(tabletAlias, update); err != nil { +func (tee *Tee) UpdateTabletFields(ctx context.Context, tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error { + if err := tee.primary.UpdateTabletFields(ctx, tabletAlias, update); err != nil { // failed on primary, not updating secondary return err } - if err := tee.secondary.UpdateTabletFields(tabletAlias, update); err != nil { + if err := tee.secondary.UpdateTabletFields(ctx, tabletAlias, update); err != nil { // not critical enough to fail log.Warningf("secondary.UpdateTabletFields(%v) failed: %v", tabletAlias, err) } @@ -416,12 +416,12 @@ func (tee *Tee) UpdateTabletFields(tabletAlias topo.TabletAlias, update func(*to } // DeleteTablet is part of the topo.Server interface -func (tee *Tee) DeleteTablet(alias topo.TabletAlias) error { - if err := tee.primary.DeleteTablet(alias); err != nil { +func (tee *Tee) DeleteTablet(ctx context.Context, alias topo.TabletAlias) error { + if err := tee.primary.DeleteTablet(ctx, alias); err != nil { return err } - if err := tee.secondary.DeleteTablet(alias); err != nil { + if err := tee.secondary.DeleteTablet(ctx, alias); err != nil { // not critical enough to fail log.Warningf("secondary.DeleteTablet(%v) failed: %v", alias, err) } @@ -429,13 +429,13 @@ func (tee *Tee) DeleteTablet(alias topo.TabletAlias) error { } // GetTablet is part of the topo.Server interface -func (tee *Tee) GetTablet(alias topo.TabletAlias) (*topo.TabletInfo, error) { - ti, err := tee.readFrom.GetTablet(alias) +func (tee *Tee) GetTablet(ctx context.Context, alias topo.TabletAlias) (*topo.TabletInfo, error) { + ti, err := tee.readFrom.GetTablet(ctx, alias) if err != nil { return nil, err } - ti2, err := tee.readFromSecond.GetTablet(alias) + ti2, err := tee.readFromSecond.GetTablet(ctx, alias) if err != nil { // can't read from secondary, so we can't keep the version map return ti, nil @@ -451,8 +451,8 @@ func (tee *Tee) GetTablet(alias topo.TabletAlias) (*topo.TabletInfo, error) { } // GetTabletsByCell is part of the topo.Server interface -func (tee *Tee) GetTabletsByCell(cell string) ([]topo.TabletAlias, error) { - return tee.readFrom.GetTabletsByCell(cell) +func (tee *Tee) GetTabletsByCell(ctx context.Context, cell string) ([]topo.TabletAlias, error) { + return tee.readFrom.GetTabletsByCell(ctx, cell) } // @@ -460,13 +460,13 @@ func (tee *Tee) GetTabletsByCell(cell string) ([]topo.TabletAlias, error) { // // UpdateShardReplicationFields is part of the topo.Server interface -func (tee *Tee) UpdateShardReplicationFields(cell, keyspace, shard string, update func(*topo.ShardReplication) error) error { - if err := tee.primary.UpdateShardReplicationFields(cell, keyspace, shard, update); err != nil { +func (tee *Tee) UpdateShardReplicationFields(ctx context.Context, cell, keyspace, shard string, update func(*topo.ShardReplication) error) error { + if err := tee.primary.UpdateShardReplicationFields(ctx, cell, keyspace, shard, update); err != nil { // failed on primary, not updating secondary return err } - if err :=
tee.secondary.UpdateShardReplicationFields(ctx, cell, keyspace, shard, update); err != nil { // not critical enough to fail log.Warningf("secondary.UpdateShardReplicationFields(%v, %v, %v) failed: %v", cell, keyspace, shard, err) } @@ -474,17 +474,17 @@ func (tee *Tee) UpdateShardReplicationFields(cell, keyspace, shard string, updat } // GetShardReplication is part of the topo.Server interface -func (tee *Tee) GetShardReplication(cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) { - return tee.readFrom.GetShardReplication(cell, keyspace, shard) +func (tee *Tee) GetShardReplication(ctx context.Context, cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) { + return tee.readFrom.GetShardReplication(ctx, cell, keyspace, shard) } // DeleteShardReplication is part of the topo.Server interface -func (tee *Tee) DeleteShardReplication(cell, keyspace, shard string) error { - if err := tee.primary.DeleteShardReplication(cell, keyspace, shard); err != nil { +func (tee *Tee) DeleteShardReplication(ctx context.Context, cell, keyspace, shard string) error { + if err := tee.primary.DeleteShardReplication(ctx, cell, keyspace, shard); err != nil { return err } - if err := tee.secondary.DeleteShardReplication(cell, keyspace, shard); err != nil { + if err := tee.secondary.DeleteShardReplication(ctx, cell, keyspace, shard); err != nil { // not critical enough to fail log.Warningf("secondary.DeleteShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err) } @@ -506,7 +506,7 @@ func (tee *Tee) LockSrvShardForAction(ctx context.Context, cell, keyspace, shard // lock lockSecond sLockPath, err := tee.lockSecond.LockSrvShardForAction(ctx, cell, keyspace, shard, contents) if err != nil { - if err := tee.lockFirst.UnlockSrvShardForAction(cell, keyspace, shard, pLockPath, "{}"); err != nil { + if err := tee.lockFirst.UnlockSrvShardForAction(ctx, cell, keyspace, shard, pLockPath, "{}"); err != nil { log.Warningf("Failed to unlock lockFirst shard after failed lockSecond lock for %v/%v/%v", cell, keyspace, shard) } return "", err @@ -520,7 +520,7 @@ func (tee *Tee) LockSrvShardForAction(ctx context.Context, cell, keyspace, shard } // UnlockSrvShardForAction is part of the topo.Server interface -func (tee *Tee) UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results string) error { +func (tee *Tee) UnlockSrvShardForAction(ctx context.Context, cell, keyspace, shard, lockPath, results string) error { // get from map tee.mu.Lock() // not using defer for unlock, to minimize lock time sLockPath, ok := tee.srvShardLockPaths[lockPath] @@ -532,8 +532,8 @@ func (tee *Tee) UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results tee.mu.Unlock() // unlock lockSecond, then lockFirst - serr := tee.lockSecond.UnlockSrvShardForAction(cell, keyspace, shard, sLockPath, results) - perr := tee.lockFirst.UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results) + serr := tee.lockSecond.UnlockSrvShardForAction(ctx, cell, keyspace, shard, sLockPath, results) + perr := tee.lockFirst.UnlockSrvShardForAction(ctx, cell, keyspace, shard, lockPath, results) if serr != nil { if perr != nil { @@ -545,17 +545,17 @@ func (tee *Tee) UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results } // GetSrvTabletTypesPerShard is part of the topo.Server interface -func (tee *Tee) GetSrvTabletTypesPerShard(cell, keyspace, shard string) ([]topo.TabletType, error) { - return tee.readFrom.GetSrvTabletTypesPerShard(cell, keyspace, shard) +func (tee *Tee) GetSrvTabletTypesPerShard(ctx 
context.Context, cell, keyspace, shard string) ([]topo.TabletType, error) { + return tee.readFrom.GetSrvTabletTypesPerShard(ctx, cell, keyspace, shard) } // UpdateEndPoints is part of the topo.Server interface -func (tee *Tee) UpdateEndPoints(cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error { - if err := tee.primary.UpdateEndPoints(cell, keyspace, shard, tabletType, addrs); err != nil { +func (tee *Tee) UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error { + if err := tee.primary.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, addrs); err != nil { return err } - if err := tee.secondary.UpdateEndPoints(cell, keyspace, shard, tabletType, addrs); err != nil { + if err := tee.secondary.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, addrs); err != nil { // not critical enough to fail log.Warningf("secondary.UpdateEndPoints(%v, %v, %v, %v) failed: %v", cell, keyspace, shard, tabletType, err) } @@ -563,18 +563,18 @@ func (tee *Tee) UpdateEndPoints(cell, keyspace, shard string, tabletType topo.Ta } // GetEndPoints is part of the topo.Server interface -func (tee *Tee) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { - return tee.readFrom.GetEndPoints(cell, keyspace, shard, tabletType) +func (tee *Tee) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { + return tee.readFrom.GetEndPoints(ctx, cell, keyspace, shard, tabletType) } // DeleteEndPoints is part of the topo.Server interface -func (tee *Tee) DeleteEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) error { - err := tee.primary.DeleteEndPoints(cell, keyspace, shard, tabletType) +func (tee *Tee) DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) error { + err := tee.primary.DeleteEndPoints(ctx, cell, keyspace, shard, tabletType) if err != nil && err != topo.ErrNoNode { return err } - if err := tee.secondary.DeleteEndPoints(cell, keyspace, shard, tabletType); err != nil { + if err := tee.secondary.DeleteEndPoints(ctx, cell, keyspace, shard, tabletType); err != nil { // not critical enough to fail log.Warningf("secondary.DeleteEndPoints(%v, %v, %v, %v) failed: %v", cell, keyspace, shard, tabletType, err) } @@ -582,12 +582,12 @@ func (tee *Tee) DeleteEndPoints(cell, keyspace, shard string, tabletType topo.Ta } // UpdateSrvShard is part of the topo.Server interface -func (tee *Tee) UpdateSrvShard(cell, keyspace, shard string, srvShard *topo.SrvShard) error { - if err := tee.primary.UpdateSrvShard(cell, keyspace, shard, srvShard); err != nil { +func (tee *Tee) UpdateSrvShard(ctx context.Context, cell, keyspace, shard string, srvShard *topo.SrvShard) error { + if err := tee.primary.UpdateSrvShard(ctx, cell, keyspace, shard, srvShard); err != nil { return err } - if err := tee.secondary.UpdateSrvShard(cell, keyspace, shard, srvShard); err != nil { + if err := tee.secondary.UpdateSrvShard(ctx, cell, keyspace, shard, srvShard); err != nil { // not critical enough to fail log.Warningf("secondary.UpdateSrvShard(%v, %v, %v) failed: %v", cell, keyspace, shard, err) } @@ -595,18 +595,18 @@ func (tee *Tee) UpdateSrvShard(cell, keyspace, shard string, srvShard *topo.SrvS } // GetSrvShard is part of the topo.Server interface -func (tee *Tee) GetSrvShard(cell, keyspace, shard string) (*topo.SrvShard, error) { - return tee.readFrom.GetSrvShard(cell, keyspace, 
shard) +func (tee *Tee) GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { + return tee.readFrom.GetSrvShard(ctx, cell, keyspace, shard) } // DeleteSrvShard is part of the topo.Server interface -func (tee *Tee) DeleteSrvShard(cell, keyspace, shard string) error { - err := tee.primary.DeleteSrvShard(cell, keyspace, shard) +func (tee *Tee) DeleteSrvShard(ctx context.Context, cell, keyspace, shard string) error { + err := tee.primary.DeleteSrvShard(ctx, cell, keyspace, shard) if err != nil && err != topo.ErrNoNode { return err } - if err := tee.secondary.DeleteSrvShard(cell, keyspace, shard); err != nil { + if err := tee.secondary.DeleteSrvShard(ctx, cell, keyspace, shard); err != nil { // not critical enough to fail log.Warningf("secondary.DeleteSrvShard(%v, %v, %v) failed: %v", cell, keyspace, shard, err) } @@ -614,12 +614,12 @@ func (tee *Tee) DeleteSrvShard(cell, keyspace, shard string) error { } // UpdateSrvKeyspace is part of the topo.Server interface -func (tee *Tee) UpdateSrvKeyspace(cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error { - if err := tee.primary.UpdateSrvKeyspace(cell, keyspace, srvKeyspace); err != nil { +func (tee *Tee) UpdateSrvKeyspace(ctx context.Context, cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error { + if err := tee.primary.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace); err != nil { return err } - if err := tee.secondary.UpdateSrvKeyspace(cell, keyspace, srvKeyspace); err != nil { + if err := tee.secondary.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace); err != nil { // not critical enough to fail log.Warningf("secondary.UpdateSrvKeyspace(%v, %v) failed: %v", cell, keyspace, err) } @@ -627,22 +627,22 @@ func (tee *Tee) UpdateSrvKeyspace(cell, keyspace string, srvKeyspace *topo.SrvKe } // GetSrvKeyspace is part of the topo.Server interface -func (tee *Tee) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error) { - return tee.readFrom.GetSrvKeyspace(cell, keyspace) +func (tee *Tee) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { + return tee.readFrom.GetSrvKeyspace(ctx, cell, keyspace) } // GetSrvKeyspaceNames is part of the topo.Server interface -func (tee *Tee) GetSrvKeyspaceNames(cell string) ([]string, error) { - return tee.readFrom.GetSrvKeyspaceNames(cell) +func (tee *Tee) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) { + return tee.readFrom.GetSrvKeyspaceNames(ctx, cell) } // UpdateTabletEndpoint is part of the topo.Server interface -func (tee *Tee) UpdateTabletEndpoint(cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error { - if err := tee.primary.UpdateTabletEndpoint(cell, keyspace, shard, tabletType, addr); err != nil { +func (tee *Tee) UpdateTabletEndpoint(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error { + if err := tee.primary.UpdateTabletEndpoint(ctx, cell, keyspace, shard, tabletType, addr); err != nil { return err } - if err := tee.secondary.UpdateTabletEndpoint(cell, keyspace, shard, tabletType, addr); err != nil { + if err := tee.secondary.UpdateTabletEndpoint(ctx, cell, keyspace, shard, tabletType, addr); err != nil { // not critical enough to fail log.Warningf("secondary.UpdateTabletEndpoint(%v, %v, %v, %v) failed: %v", cell, keyspace, shard, tabletType, err) } @@ -651,8 +651,8 @@ func (tee *Tee) UpdateTabletEndpoint(cell, keyspace, shard string, tabletType to // WatchEndPoints is part of the topo.Server interface. 
// We only watch for changes on the primary. -func (tee *Tee) WatchEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) { - return tee.primary.WatchEndPoints(cell, keyspace, shard, tabletType) +func (tee *Tee) WatchEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) { + return tee.primary.WatchEndPoints(ctx, cell, keyspace, shard, tabletType) } // @@ -670,7 +670,7 @@ func (tee *Tee) LockKeyspaceForAction(ctx context.Context, keyspace, contents st // lock lockSecond sLockPath, err := tee.lockSecond.LockKeyspaceForAction(ctx, keyspace, contents) if err != nil { - if err := tee.lockFirst.UnlockKeyspaceForAction(keyspace, pLockPath, "{}"); err != nil { + if err := tee.lockFirst.UnlockKeyspaceForAction(ctx, keyspace, pLockPath, "{}"); err != nil { log.Warningf("Failed to unlock lockFirst keyspace after failed lockSecond lock for %v", keyspace) } return "", err @@ -684,7 +684,7 @@ func (tee *Tee) LockKeyspaceForAction(ctx context.Context, keyspace, contents st } // UnlockKeyspaceForAction is part of the topo.Server interface -func (tee *Tee) UnlockKeyspaceForAction(keyspace, lockPath, results string) error { +func (tee *Tee) UnlockKeyspaceForAction(ctx context.Context, keyspace, lockPath, results string) error { // get from map tee.mu.Lock() // not using defer for unlock, to minimize lock time sLockPath, ok := tee.keyspaceLockPaths[lockPath] @@ -696,8 +696,8 @@ func (tee *Tee) UnlockKeyspaceForAction(keyspace, lockPath, results string) erro tee.mu.Unlock() // unlock lockSecond, then lockFirst - serr := tee.lockSecond.UnlockKeyspaceForAction(keyspace, sLockPath, results) - perr := tee.lockFirst.UnlockKeyspaceForAction(keyspace, lockPath, results) + serr := tee.lockSecond.UnlockKeyspaceForAction(ctx, keyspace, sLockPath, results) + perr := tee.lockFirst.UnlockKeyspaceForAction(ctx, keyspace, lockPath, results) if serr != nil { if perr != nil { @@ -719,7 +719,7 @@ func (tee *Tee) LockShardForAction(ctx context.Context, keyspace, shard, content // lock lockSecond sLockPath, err := tee.lockSecond.LockShardForAction(ctx, keyspace, shard, contents) if err != nil { - if err := tee.lockFirst.UnlockShardForAction(keyspace, shard, pLockPath, "{}"); err != nil { + if err := tee.lockFirst.UnlockShardForAction(ctx, keyspace, shard, pLockPath, "{}"); err != nil { log.Warningf("Failed to unlock lockFirst shard after failed lockSecond lock for %v/%v", keyspace, shard) } return "", err @@ -733,7 +733,7 @@ func (tee *Tee) LockShardForAction(ctx context.Context, keyspace, shard, content } // UnlockShardForAction is part of the topo.Server interface -func (tee *Tee) UnlockShardForAction(keyspace, shard, lockPath, results string) error { +func (tee *Tee) UnlockShardForAction(ctx context.Context, keyspace, shard, lockPath, results string) error { // get from map tee.mu.Lock() // not using defer for unlock, to minimize lock time sLockPath, ok := tee.shardLockPaths[lockPath] @@ -745,8 +745,8 @@ func (tee *Tee) UnlockShardForAction(keyspace, shard, lockPath, results string) tee.mu.Unlock() // unlock lockSecond, then lockFirst - serr := tee.lockSecond.UnlockShardForAction(keyspace, shard, sLockPath, results) - perr := tee.lockFirst.UnlockShardForAction(keyspace, shard, lockPath, results) + serr := tee.lockSecond.UnlockShardForAction(ctx, keyspace, shard, sLockPath, results) + perr := tee.lockFirst.UnlockShardForAction(ctx, keyspace, shard, lockPath, results) if serr != nil { 
if perr != nil { diff --git a/go/vt/topo/helpers/tee_test.go b/go/vt/topo/helpers/tee_test.go index 8d26ab6b99..9cb6e1d7f7 100644 --- a/go/vt/topo/helpers/tee_test.go +++ b/go/vt/topo/helpers/tee_test.go @@ -9,25 +9,27 @@ import ( "testing" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) func TestTee(t *testing.T) { + ctx := context.Background() // create the setup, copy the data - fromTS, toTS := createSetup(t) - CopyKeyspaces(fromTS, toTS) - CopyShards(fromTS, toTS, true) - CopyTablets(fromTS, toTS) + fromTS, toTS := createSetup(ctx, t) + CopyKeyspaces(ctx, fromTS, toTS) + CopyShards(ctx, fromTS, toTS, true) + CopyTablets(ctx, fromTS, toTS) // create a tee and check it implements the interface tee := NewTee(fromTS, toTS, true) var _ topo.Server = tee // create a keyspace, make sure it is on both sides - if err := tee.CreateKeyspace("keyspace2", &topo.Keyspace{}); err != nil { + if err := tee.CreateKeyspace(ctx, "keyspace2", &topo.Keyspace{}); err != nil { t.Fatalf("tee.CreateKeyspace(keyspace2) failed: %v", err) } - teeKeyspaces, err := tee.GetKeyspaces() + teeKeyspaces, err := tee.GetKeyspaces(ctx) if err != nil { t.Fatalf("tee.GetKeyspaces() failed: %v", err) } @@ -35,7 +37,7 @@ func TestTee(t *testing.T) { if !reflect.DeepEqual(expected, teeKeyspaces) { t.Errorf("teeKeyspaces mismatch, got %+v, want %+v", teeKeyspaces, expected) } - fromKeyspaces, err := fromTS.GetKeyspaces() + fromKeyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { t.Fatalf("fromTS.GetKeyspaces() failed: %v", err) } @@ -43,7 +45,7 @@ func TestTee(t *testing.T) { if !reflect.DeepEqual(expected, fromKeyspaces) { t.Errorf("fromKeyspaces mismatch, got %+v, want %+v", fromKeyspaces, expected) } - toKeyspaces, err := toTS.GetKeyspaces() + toKeyspaces, err := toTS.GetKeyspaces(ctx) if err != nil { t.Fatalf("toTS.GetKeyspaces() failed: %v", err) } diff --git a/go/vt/topo/helpers/tee_topo_test.go b/go/vt/topo/helpers/tee_topo_test.go index d21a99f2bb..f37b7a5654 100644 --- a/go/vt/topo/helpers/tee_topo_test.go +++ b/go/vt/topo/helpers/tee_topo_test.go @@ -24,7 +24,7 @@ type fakeServer struct { localCells []string } -func (s fakeServer) GetKnownCells() ([]string, error) { +func (s fakeServer) GetKnownCells(ctx context.Context) ([]string, error) { return s.localCells, nil } @@ -49,23 +49,27 @@ func newFakeTeeServer(t *testing.T) topo.Server { } func TestKeyspace(t *testing.T) { + ctx := context.Background() ts := newFakeTeeServer(t) - test.CheckKeyspace(t, ts) + test.CheckKeyspace(ctx, t, ts) } func TestShard(t *testing.T) { + ctx := context.Background() ts := newFakeTeeServer(t) - test.CheckShard(context.Background(), t, ts) + test.CheckShard(ctx, t, ts) } func TestTablet(t *testing.T) { + ctx := context.Background() ts := newFakeTeeServer(t) - test.CheckTablet(context.Background(), t, ts) + test.CheckTablet(ctx, t, ts) } func TestServingGraph(t *testing.T) { + ctx := context.Background() ts := newFakeTeeServer(t) - test.CheckServingGraph(context.Background(), t, ts) + test.CheckServingGraph(ctx, t, ts) } func TestWatchEndPoints(t *testing.T) { @@ -75,29 +79,33 @@ func TestWatchEndPoints(t *testing.T) { } func TestShardReplication(t *testing.T) { + ctx := context.Background() ts := newFakeTeeServer(t) - test.CheckShardReplication(t, ts) + test.CheckShardReplication(ctx, t, ts) } func TestKeyspaceLock(t *testing.T) { + ctx := context.Background() ts := newFakeTeeServer(t) - test.CheckKeyspaceLock(t, ts) + test.CheckKeyspaceLock(ctx, t, ts) } func TestShardLock(t *testing.T) { + ctx := 
context.Background() if testing.Short() { t.Skip("skipping wait-based test in short mode.") } ts := newFakeTeeServer(t) - test.CheckShardLock(t, ts) + test.CheckShardLock(ctx, t, ts) } func TestSrvShardLock(t *testing.T) { + ctx := context.Background() if testing.Short() { t.Skip("skipping wait-based test in short mode.") } ts := newFakeTeeServer(t) - test.CheckSrvShardLock(t, ts) + test.CheckSrvShardLock(ctx, t, ts) } diff --git a/go/vt/topo/keyspace.go b/go/vt/topo/keyspace.go index b029f2a1e5..d97cd9edf9 100644 --- a/go/vt/topo/keyspace.go +++ b/go/vt/topo/keyspace.go @@ -9,6 +9,7 @@ import ( "sync" log "github.com/golang/glog" + "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/key" @@ -175,13 +176,13 @@ func (ki *KeyspaceInfo) ComputeCellServedFrom(cell string) map[TabletType]string } // UpdateKeyspace updates the keyspace data, with the right version -func UpdateKeyspace(ts Server, ki *KeyspaceInfo) error { +func UpdateKeyspace(ctx context.Context, ts Server, ki *KeyspaceInfo) error { var version int64 = -1 if ki.version != 0 { version = ki.version } - newVersion, err := ts.UpdateKeyspace(ki, version) + newVersion, err := ts.UpdateKeyspace(ctx, ki, version) if err == nil { ki.version = newVersion } @@ -190,8 +191,8 @@ func UpdateKeyspace(ts Server, ki *KeyspaceInfo) error { // FindAllShardsInKeyspace reads and returns all the existing shards in // a keyspace. It doesn't take any lock. -func FindAllShardsInKeyspace(ts Server, keyspace string) (map[string]*ShardInfo, error) { - shards, err := ts.GetShardNames(keyspace) +func FindAllShardsInKeyspace(ctx context.Context, ts Server, keyspace string) (map[string]*ShardInfo, error) { + shards, err := ts.GetShardNames(ctx, keyspace) if err != nil { return nil, err } @@ -204,7 +205,7 @@ func FindAllShardsInKeyspace(ts Server, keyspace string) (map[string]*ShardInfo, wg.Add(1) go func(shard string) { defer wg.Done() - si, err := ts.GetShard(keyspace, shard) + si, err := ts.GetShard(ctx, keyspace, shard) if err != nil { rec.RecordError(fmt.Errorf("GetShard(%v,%v) failed: %v", keyspace, shard, err)) return diff --git a/go/vt/topo/naming.go b/go/vt/topo/naming.go index 5f2322aef6..1bb474dcca 100644 --- a/go/vt/topo/naming.go +++ b/go/vt/topo/naming.go @@ -24,6 +24,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/netutil" + "golang.org/x/net/context" ) const ( @@ -94,8 +95,8 @@ func NewEndPoints() *EndPoints { // LookupVtName gets the list of EndPoints for a // cell/keyspace/shard/tablet type and converts the list to net.SRV records -func LookupVtName(ts Server, cell, keyspace, shard string, tabletType TabletType, namedPort string) ([]*net.SRV, error) { - addrs, err := ts.GetEndPoints(cell, keyspace, shard, tabletType) +func LookupVtName(ctx context.Context, ts Server, cell, keyspace, shard string, tabletType TabletType, namedPort string) ([]*net.SRV, error) { + addrs, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType) if err != nil { return nil, fmt.Errorf("LookupVtName(%v,%v,%v,%v) failed: %v", cell, keyspace, shard, tabletType, err) } diff --git a/go/vt/topo/replication.go b/go/vt/topo/replication.go index 49859bf403..18d54c473a 100644 --- a/go/vt/topo/replication.go +++ b/go/vt/topo/replication.go @@ -80,7 +80,7 @@ func UpdateShardReplicationRecord(ctx context.Context, ts Server, keyspace, shar span.Annotate("tablet", tabletAlias.String()) defer span.Finish() - return ts.UpdateShardReplicationFields(tabletAlias.Cell, keyspace, shard, func(sr 
*ShardReplication) error { + return ts.UpdateShardReplicationFields(ctx, tabletAlias.Cell, keyspace, shard, func(sr *ShardReplication) error { // not very efficient, but easy to read links := make([]ReplicationLink, 0, len(sr.ReplicationLinks)+1) found := false @@ -104,8 +104,8 @@ func UpdateShardReplicationRecord(ctx context.Context, ts Server, keyspace, shar // RemoveShardReplicationRecord is a low level function to remove an // entry from the ShardReplication object. -func RemoveShardReplicationRecord(ts Server, cell, keyspace, shard string, tabletAlias TabletAlias) error { - err := ts.UpdateShardReplicationFields(cell, keyspace, shard, func(sr *ShardReplication) error { +func RemoveShardReplicationRecord(ctx context.Context, ts Server, cell, keyspace, shard string, tabletAlias TabletAlias) error { + err := ts.UpdateShardReplicationFields(ctx, cell, keyspace, shard, func(sr *ShardReplication) error { links := make([]ReplicationLink, 0, len(sr.ReplicationLinks)) for _, link := range sr.ReplicationLinks { if link.TabletAlias != tabletAlias { @@ -120,17 +120,17 @@ func RemoveShardReplicationRecord(ts Server, cell, keyspace, shard string, table // FixShardReplication will fix the first problem it encounters within // a ShardReplication object -func FixShardReplication(ts Server, logger logutil.Logger, cell, keyspace, shard string) error { - sri, err := ts.GetShardReplication(cell, keyspace, shard) +func FixShardReplication(ctx context.Context, ts Server, logger logutil.Logger, cell, keyspace, shard string) error { + sri, err := ts.GetShardReplication(ctx, cell, keyspace, shard) if err != nil { return err } for _, rl := range sri.ReplicationLinks { - ti, err := ts.GetTablet(rl.TabletAlias) + ti, err := ts.GetTablet(ctx, rl.TabletAlias) if err == ErrNoNode { logger.Warningf("Tablet %v is in the replication graph, but does not exist, removing it", rl.TabletAlias) - return RemoveShardReplicationRecord(ts, cell, keyspace, shard, rl.TabletAlias) + return RemoveShardReplicationRecord(ctx, ts, cell, keyspace, shard, rl.TabletAlias) } if err != nil { // unknown error, we probably don't want to continue @@ -139,7 +139,7 @@ func FixShardReplication(ts Server, logger logutil.Logger, cell, keyspace, shard if ti.Type == TYPE_SCRAP { logger.Warningf("Tablet %v is in the replication graph, but is scrapped, removing it", rl.TabletAlias) - return RemoveShardReplicationRecord(ts, cell, keyspace, shard, rl.TabletAlias) + return RemoveShardReplicationRecord(ctx, ts, cell, keyspace, shard, rl.TabletAlias) } logger.Infof("Keeping tablet %v in the replication graph", rl.TabletAlias) diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go index df3a882de3..83cacbcee4 100644 --- a/go/vt/topo/server.go +++ b/go/vt/topo/server.go @@ -62,7 +62,7 @@ type Server interface { // It is possible to find all tablets in the entire system // by then calling GetTabletsByCell on every cell, for instance. // They shall be sorted. - GetKnownCells() ([]string, error) + GetKnownCells(ctx context.Context) ([]string, error) // // Keyspace management, global. @@ -70,7 +70,7 @@ type Server interface { // CreateKeyspace creates the given keyspace, assuming it doesn't exist // yet. Can return ErrNodeExists if it already exists. - CreateKeyspace(keyspace string, value *Keyspace) error + CreateKeyspace(ctx context.Context, keyspace string, value *Keyspace) error // UpdateKeyspace updates the keyspace information // pointed at by ki.keyspace to the *ki value. 
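The versioned-update contract spelled out here is easiest to see from the caller's side. Below is a minimal sketch of the read-modify-write loop that the topo.UpdateKeyspace helper (changed in keyspace.go above) supports; the wrapper name and the retry limit are illustrative assumptions, while GetKeyspace, UpdateKeyspace, ErrBadVersion and key.KIT_BYTES all appear elsewhere in this patch.

package example // hypothetical package, sketch only

import (
	"github.com/youtube/vitess/go/vt/key"
	"github.com/youtube/vitess/go/vt/topo"
	"golang.org/x/net/context"
)

// forceBytesSharding re-reads the keyspace and retries on version
// conflicts, the pattern the existingVersion parameter exists for.
func forceBytesSharding(ctx context.Context, ts topo.Server, keyspace string) error {
	for attempt := 0; attempt < 3; attempt++ { // retry limit is an assumption
		ki, err := ts.GetKeyspace(ctx, keyspace) // fresh read picks up the latest version
		if err != nil {
			return err
		}
		ki.ShardingColumnType = key.KIT_BYTES
		err = topo.UpdateKeyspace(ctx, ts, ki) // fails with ErrBadVersion if we raced
		if err != topo.ErrBadVersion {
			return err // nil on success, or a non-retryable error
		}
	}
	return topo.ErrBadVersion
}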
@@ -79,18 +79,18 @@ type Server interface { // or ErrBadVersion if the version has changed. // // Do not use directly, but instead use topo.UpdateKeyspace. - UpdateKeyspace(ki *KeyspaceInfo, existingVersion int64) (newVersion int64, err error) + UpdateKeyspace(ctx context.Context, ki *KeyspaceInfo, existingVersion int64) (newVersion int64, err error) // GetKeyspace reads a keyspace and returns it. // Can return ErrNoNode - GetKeyspace(keyspace string) (*KeyspaceInfo, error) + GetKeyspace(ctx context.Context, keyspace string) (*KeyspaceInfo, error) // GetKeyspaces returns the known keyspace names. They shall be sorted. - GetKeyspaces() ([]string, error) + GetKeyspaces(ctx context.Context) ([]string, error) // DeleteKeyspaceShards deletes all the shards in a keyspace. // Use with caution. - DeleteKeyspaceShards(keyspace string) error + DeleteKeyspaceShards(ctx context.Context, keyspace string) error // // Shard management, global. @@ -100,7 +100,7 @@ type Server interface { // yet. The contents of the shard will be a new Shard{} object, // with KeyRange populated by the result of ValidateShardName(). // Can return ErrNodeExists if it already exists. - CreateShard(keyspace, shard string, value *Shard) error + CreateShard(ctx context.Context, keyspace, shard string, value *Shard) error // UpdateShard updates the shard information // pointed at by si.keyspace / si.shard to the *si value. @@ -109,23 +109,23 @@ type Server interface { // or ErrBadVersion if the version has changed. // // Do not use directly, but instead use topo.UpdateShard. - UpdateShard(si *ShardInfo, existingVersion int64) (newVersion int64, err error) + UpdateShard(ctx context.Context, si *ShardInfo, existingVersion int64) (newVersion int64, err error) // ValidateShard performs routine checks on the shard. - ValidateShard(keyspace, shard string) error + ValidateShard(ctx context.Context, keyspace, shard string) error // GetShard reads a shard and returns it. // Can return ErrNoNode - GetShard(keyspace, shard string) (*ShardInfo, error) + GetShard(ctx context.Context, keyspace, shard string) (*ShardInfo, error) // GetShardNames returns the known shards in a keyspace. // Can return ErrNoNode if the keyspace wasn't created, // or if DeleteKeyspaceShards was called. They shall be sorted. - GetShardNames(keyspace string) ([]string, error) + GetShardNames(ctx context.Context, keyspace string) ([]string, error) // DeleteShard deletes the provided shard. // Can return ErrNoNode if the shard doesn't exist. - DeleteShard(keyspace, shard string) error + DeleteShard(ctx context.Context, keyspace, shard string) error // // Tablet management, per cell. @@ -134,7 +134,7 @@ type Server interface { // CreateTablet creates the given tablet, assuming it doesn't exist // yet. It does *not* create the tablet replication paths. // Can return ErrNodeExists if it already exists. - CreateTablet(tablet *Tablet) error + CreateTablet(ctx context.Context, tablet *Tablet) error // UpdateTablet updates a given tablet. The version is used // for atomic updates. UpdateTablet will return ErrNoNode if @@ -142,26 +142,26 @@ type Server interface { // has changed. // // Do not use directly, but instead use topo.UpdateTablet. 
- UpdateTablet(tablet *TabletInfo, existingVersion int64) (newVersion int64, err error) + UpdateTablet(ctx context.Context, tablet *TabletInfo, existingVersion int64) (newVersion int64, err error) // UpdateTabletFields updates the current tablet record // with new values, independently of the version // Can return ErrNoNode if the tablet doesn't exist. - UpdateTabletFields(tabletAlias TabletAlias, update func(*Tablet) error) error + UpdateTabletFields(ctx context.Context, tabletAlias TabletAlias, update func(*Tablet) error) error // DeleteTablet removes a tablet from the system. // We assume no RPC is currently running to it. // TODO(alainjobart) verify this assumption, link with RPC code. // Can return ErrNoNode if the tablet doesn't exist. - DeleteTablet(alias TabletAlias) error + DeleteTablet(ctx context.Context, alias TabletAlias) error // GetTablet returns the tablet data (includes the current version). // Can return ErrNoNode if the tablet doesn't exist. - GetTablet(alias TabletAlias) (*TabletInfo, error) + GetTablet(ctx context.Context, alias TabletAlias) (*TabletInfo, error) // GetTabletsByCell returns all the tablets in the given cell. // Can return ErrNoNode if no tablet was ever created in that cell. - GetTabletsByCell(cell string) ([]TabletAlias, error) + GetTabletsByCell(ctx context.Context, cell string) ([]TabletAlias, error) // // Replication graph management, per cell. @@ -172,15 +172,15 @@ type Server interface { // ShardReplication object does not exist, an empty one will // be passed to the update function. All necessary directories // need to be created by this method, if applicable. - UpdateShardReplicationFields(cell, keyspace, shard string, update func(*ShardReplication) error) error + UpdateShardReplicationFields(ctx context.Context, cell, keyspace, shard string, update func(*ShardReplication) error) error // GetShardReplication returns the replication data. // Can return ErrNoNode if the object doesn't exist. - GetShardReplication(cell, keyspace, shard string) (*ShardReplicationInfo, error) + GetShardReplication(ctx context.Context, cell, keyspace, shard string) (*ShardReplicationInfo, error) // DeleteShardReplication deletes the replication data. // Can return ErrNoNode if the object doesn't exist. - DeleteShardReplication(cell, keyspace, shard string) error + DeleteShardReplication(ctx context.Context, cell, keyspace, shard string) error // // Serving Graph management, per cell. @@ -195,26 +195,26 @@ type Server interface { LockSrvShardForAction(ctx context.Context, cell, keyspace, shard, contents string) (string, error) // UnlockSrvShardForAction unlocks a serving shard. - UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results string) error + UnlockSrvShardForAction(ctx context.Context, cell, keyspace, shard, lockPath, results string) error // GetSrvTabletTypesPerShard returns the existing serving types // for a shard. // Can return ErrNoNode. - GetSrvTabletTypesPerShard(cell, keyspace, shard string) ([]TabletType, error) + GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]TabletType, error) // UpdateEndPoints updates the serving records for a cell, // keyspace, shard, tabletType. - UpdateEndPoints(cell, keyspace, shard string, tabletType TabletType, addrs *EndPoints) error + UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType TabletType, addrs *EndPoints) error // GetEndPoints returns the EndPoints list of serving addresses // for a TabletType inside a shard. // Can return ErrNoNode. 
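The "Can return ErrNoNode" notes matter to callers: a missing node typically means nothing has been published yet, not a broken topology. A short caller-side sketch; the wrapper name and the fallback policy are assumptions, while GetEndPoints, ErrNoNode, TYPE_MASTER and the Entries field all appear in this patch.

package example // hypothetical package, sketch only

import (
	"github.com/youtube/vitess/go/vt/topo"
	"golang.org/x/net/context"
)

// masterEndPoints returns the published master addresses for a shard,
// mapping topo.ErrNoNode to an empty result instead of a failure.
func masterEndPoints(ctx context.Context, ts topo.Server, cell, keyspace, shard string) ([]topo.EndPoint, error) {
	addrs, err := ts.GetEndPoints(ctx, cell, keyspace, shard, topo.TYPE_MASTER)
	if err == topo.ErrNoNode {
		return nil, nil // no endpoints registered for this tablet type yet
	}
	if err != nil {
		return nil, err
	}
	return addrs.Entries, nil
}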
- GetEndPoints(cell, keyspace, shard string, tabletType TabletType) (*EndPoints, error) + GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType TabletType) (*EndPoints, error) // DeleteEndPoints deletes the serving records for a cell, // keyspace, shard, tabletType. // Can return ErrNoNode. - DeleteEndPoints(cell, keyspace, shard string, tabletType TabletType) error + DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType TabletType) error // WatchEndPoints returns a channel that receives notifications // every time EndPoints for the given type changes. @@ -229,36 +229,36 @@ type Server interface { // that are never going to work. Multiple notifications with the // same contents may be sent (for instance when the serving graph // is rebuilt, but the content hasn't changed). - WatchEndPoints(cell, keyspace, shard string, tabletType TabletType) (notifications <-chan *EndPoints, stopWatching chan<- struct{}, err error) + WatchEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType TabletType) (notifications <-chan *EndPoints, stopWatching chan<- struct{}, err error) // UpdateSrvShard updates the serving records for a cell, // keyspace, shard. - UpdateSrvShard(cell, keyspace, shard string, srvShard *SrvShard) error + UpdateSrvShard(ctx context.Context, cell, keyspace, shard string, srvShard *SrvShard) error // GetSrvShard reads a SrvShard record. // Can return ErrNoNode. - GetSrvShard(cell, keyspace, shard string) (*SrvShard, error) + GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*SrvShard, error) // DeleteSrvShard deletes a SrvShard record. // Can return ErrNoNode. - DeleteSrvShard(cell, keyspace, shard string) error + DeleteSrvShard(ctx context.Context, cell, keyspace, shard string) error // UpdateSrvKeyspace updates the serving records for a cell, keyspace. - UpdateSrvKeyspace(cell, keyspace string, srvKeyspace *SrvKeyspace) error + UpdateSrvKeyspace(ctx context.Context, cell, keyspace string, srvKeyspace *SrvKeyspace) error // GetSrvKeyspace reads a SrvKeyspace record. // Can return ErrNoNode. - GetSrvKeyspace(cell, keyspace string) (*SrvKeyspace, error) + GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*SrvKeyspace, error) // GetSrvKeyspaceNames returns the list of visible Keyspaces // in this cell. They shall be sorted. - GetSrvKeyspaceNames(cell string) ([]string, error) + GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) // UpdateTabletEndpoint updates a single tablet record in the // already computed serving graph. The update has to be somewhat // atomic, so it requires Server intrinsic knowledge. // If the node doesn't exist, it is not updated, this is not an error. - UpdateTabletEndpoint(cell, keyspace, shard string, tabletType TabletType, addr *EndPoint) error + UpdateTabletEndpoint(ctx context.Context, cell, keyspace, shard string, tabletType TabletType, addr *EndPoint) error // // Keyspace and Shard locks for actions, global. @@ -273,7 +273,7 @@ type Server interface { LockKeyspaceForAction(ctx context.Context, keyspace, contents string) (string, error) // UnlockKeyspaceForAction unlocks a keyspace. - UnlockKeyspaceForAction(keyspace, lockPath, results string) error + UnlockKeyspaceForAction(ctx context.Context, keyspace, lockPath, results string) error // LockShardForAction locks the shard in order to // perform the action described by contents.
It will wait for @@ -284,14 +284,14 @@ type Server interface { LockShardForAction(ctx context.Context, keyspace, shard, contents string) (string, error) // UnlockShardForAction unlocks a shard. - UnlockShardForAction(keyspace, shard, lockPath, results string) error + UnlockShardForAction(ctx context.Context, keyspace, shard, lockPath, results string) error } // Schemafier is a temporary interface for supporting vschema // reads and writes. It will eventually be merged into Server. type Schemafier interface { - SaveVSchema(string) error - GetVSchema() (string, error) + SaveVSchema(context.Context, string) error + GetVSchema(ctx context.Context) (string, error) } // Registry for Server implementations. diff --git a/go/vt/topo/serving_graph.go b/go/vt/topo/serving_graph.go index 203731439a..1beb2b89f5 100644 --- a/go/vt/topo/serving_graph.go +++ b/go/vt/topo/serving_graph.go @@ -21,5 +21,5 @@ func UpdateEndPoints(ctx context.Context, ts Server, cell, keyspace, shard strin span.Annotate("tablet_type", string(tabletType)) defer span.Finish() - return ts.UpdateEndPoints(cell, keyspace, shard, tabletType, addrs) + return ts.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, addrs) } diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go index 692685c390..71f51c8f97 100644 --- a/go/vt/topo/shard.go +++ b/go/vt/topo/shard.go @@ -259,7 +259,7 @@ func GetShard(ctx context.Context, ts Server, keyspace, shard string) (*ShardInf span.Annotate("shard", shard) defer span.Finish() - return ts.GetShard(keyspace, shard) + return ts.GetShard(ctx, keyspace, shard) } // UpdateShard updates the shard data, with the right version @@ -275,7 +275,7 @@ func UpdateShard(ctx context.Context, ts Server, si *ShardInfo) error { version = si.version } - newVersion, err := ts.UpdateShard(si, version) + newVersion, err := ts.UpdateShard(ctx, si, version) if err == nil { si.version = newVersion } @@ -305,7 +305,7 @@ func UpdateShardFields(ctx context.Context, ts Server, keyspace, shard string, u // This should be called while holding the keyspace lock for the shard. // (call topotools.CreateShard to do that for you). // In unit tests (that are not parallel), this function can be called directly. 
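As the comment above notes, production callers take the keyspace lock before creating a shard (topotools.CreateShard does that for them). Roughly, and with the lock-contents string and the error-masking policy as assumptions, such a wrapper looks like this:

package example // hypothetical package, sketch only

import (
	"github.com/youtube/vitess/go/vt/topo"
	"golang.org/x/net/context"
)

// createShardLocked takes the keyspace lock, creates the shard, and
// always releases the lock afterwards.
func createShardLocked(ctx context.Context, ts topo.Server, keyspace, shard string) error {
	lockPath, err := ts.LockKeyspaceForAction(ctx, keyspace, "create shard "+shard)
	if err != nil {
		return err
	}
	createErr := topo.CreateShard(ctx, ts, keyspace, shard)
	// Report an unlock failure only if the creation itself succeeded,
	// so the more interesting error is not masked.
	if err := ts.UnlockKeyspaceForAction(ctx, keyspace, lockPath, "{}"); err != nil && createErr == nil {
		return err
	}
	return createErr
}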
-func CreateShard(ts Server, keyspace, shard string) error { +func CreateShard(ctx context.Context, ts Server, keyspace, shard string) error { name, keyRange, err := ValidateShardName(shard) if err != nil { @@ -323,7 +323,7 @@ func CreateShard(ts Server, keyspace, shard string) error { }, } - sis, err := FindAllShardsInKeyspace(ts, keyspace) + sis, err := FindAllShardsInKeyspace(ctx, ts, keyspace) if err != nil && err != ErrNoNode { return err } @@ -338,7 +338,7 @@ func CreateShard(ts Server, keyspace, shard string) error { s.ServedTypesMap = nil } - return ts.CreateShard(keyspace, name, s) + return ts.CreateShard(ctx, keyspace, name, s) } // UpdateSourceBlacklistedTables will add or remove the listed tables @@ -588,7 +588,7 @@ func FindAllTabletAliasesInShardByCell(ctx context.Context, ts Server, keyspace, wg.Add(1) go func(cell string) { defer wg.Done() - sri, err := ts.GetShardReplication(cell, keyspace, shard) + sri, err := ts.GetShardReplication(ctx, cell, keyspace, shard) if err != nil { rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err)) return diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index 247540ad55..4ad791e1fa 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -479,7 +479,7 @@ func GetTablet(ctx context.Context, ts Server, alias TabletAlias) (*TabletInfo, span.Annotate("tablet", alias.String()) defer span.Finish() - return ts.GetTablet(alias) + return ts.GetTablet(ctx, alias) } // UpdateTablet updates the tablet data only - not associated replication paths. @@ -494,7 +494,7 @@ func UpdateTablet(ctx context.Context, ts Server, tablet *TabletInfo) error { version = tablet.version } - newVersion, err := ts.UpdateTablet(tablet, version) + newVersion, err := ts.UpdateTablet(ctx, tablet, version) if err == nil { tablet.version = newVersion } @@ -509,13 +509,13 @@ func UpdateTabletFields(ctx context.Context, ts Server, alias TabletAlias, updat span.Annotate("tablet", alias.String()) defer span.Finish() - return ts.UpdateTabletFields(alias, update) + return ts.UpdateTabletFields(ctx, alias, update) } // Validate makes sure a tablet is represented correctly in the topology server. -func Validate(ts Server, tabletAlias TabletAlias) error { +func Validate(ctx context.Context, ts Server, tabletAlias TabletAlias) error { // read the tablet record, make sure it parses - tablet, err := ts.GetTablet(tabletAlias) + tablet, err := ts.GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -529,11 +529,11 @@ func Validate(ts Server, tabletAlias TabletAlias) error { // Idle tablets are just not in any graph at all, we don't even know // their keyspace / shard to know where to check. if tablet.IsInReplicationGraph() { - if err = ts.ValidateShard(tablet.Keyspace, tablet.Shard); err != nil { + if err = ts.ValidateShard(ctx, tablet.Keyspace, tablet.Shard); err != nil { return err } - si, err := ts.GetShardReplication(tablet.Alias.Cell, tablet.Keyspace, tablet.Shard) + si, err := ts.GetShardReplication(ctx, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard) if err != nil { return err } @@ -548,7 +548,7 @@ func Validate(ts Server, tabletAlias TabletAlias) error { // a replication graph doesn't leave a node behind. // However, while an action is running, there is some // time where this might be inconsistent. 
- si, err := ts.GetShardReplication(tablet.Alias.Cell, tablet.Keyspace, tablet.Shard) + si, err := ts.GetShardReplication(ctx, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard) if err != nil { return err } @@ -566,7 +566,7 @@ func Validate(ts Server, tabletAlias TabletAlias) error { // replication graph. func CreateTablet(ctx context.Context, ts Server, tablet *Tablet) error { // Have the Server create the tablet - err := ts.CreateTablet(tablet) + err := ts.CreateTablet(ctx, tablet) if err != nil { return err } @@ -586,8 +586,8 @@ func UpdateTabletReplicationData(ctx context.Context, ts Server, tablet *Tablet) } // DeleteTabletReplicationData deletes replication data. -func DeleteTabletReplicationData(ts Server, tablet *Tablet) error { - return RemoveShardReplicationRecord(ts, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard, tablet.Alias) +func DeleteTabletReplicationData(ctx context.Context, ts Server, tablet *Tablet) error { + return RemoveShardReplicationRecord(ctx, ts, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard, tablet.Alias) } // GetTabletMap tries to read all the tablets in the provided list, @@ -610,7 +610,7 @@ func GetTabletMap(ctx context.Context, ts Server, tabletAliases []TabletAlias) ( wg.Add(1) go func(tabletAlias TabletAlias) { defer wg.Done() - tabletInfo, err := ts.GetTablet(tabletAlias) + tabletInfo, err := ts.GetTablet(ctx, tabletAlias) mutex.Lock() if err != nil { log.Warningf("%v: %v", tabletAlias, err) diff --git a/go/vt/topo/test/faketopo/faketopo.go b/go/vt/topo/test/faketopo/faketopo.go index 565e3b965b..be847d789c 100644 --- a/go/vt/topo/test/faketopo/faketopo.go +++ b/go/vt/topo/test/faketopo/faketopo.go @@ -14,101 +14,101 @@ var errNotImplemented = errors.New("Not implemented") // FakeTopo is a topo.Server implementation that always returns errNotImplemented errors. 
type FakeTopo struct{} -func (ft FakeTopo) GetSrvKeyspaceNames(cell string) ([]string, error) { +func (ft FakeTopo) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) { return nil, errNotImplemented } -func (ft FakeTopo) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error) { +func (ft FakeTopo) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { return nil, errNotImplemented } -func (ft FakeTopo) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { +func (ft FakeTopo) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { return nil, errNotImplemented } func (ft FakeTopo) Close() {} -func (ft FakeTopo) GetKnownCells() ([]string, error) { +func (ft FakeTopo) GetKnownCells(ctx context.Context) ([]string, error) { return nil, errNotImplemented } -func (ft FakeTopo) CreateKeyspace(keyspace string, value *topo.Keyspace) error { +func (ft FakeTopo) CreateKeyspace(ctx context.Context, keyspace string, value *topo.Keyspace) error { return errNotImplemented } -func (ft FakeTopo) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) { +func (ft FakeTopo) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) { return 0, errNotImplemented } -func (ft FakeTopo) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) { +func (ft FakeTopo) GetKeyspace(ctx context.Context, keyspace string) (*topo.KeyspaceInfo, error) { return nil, errNotImplemented } -func (ft FakeTopo) GetKeyspaces() ([]string, error) { +func (ft FakeTopo) GetKeyspaces(ctx context.Context) ([]string, error) { return nil, errNotImplemented } -func (ft FakeTopo) DeleteKeyspaceShards(keyspace string) error { +func (ft FakeTopo) DeleteKeyspaceShards(ctx context.Context, keyspace string) error { return errNotImplemented } -func (ft FakeTopo) CreateShard(keyspace, shard string, value *topo.Shard) error { +func (ft FakeTopo) CreateShard(ctx context.Context, keyspace, shard string, value *topo.Shard) error { return errNotImplemented } -func (ft FakeTopo) UpdateShard(si *topo.ShardInfo, existingVersion int64) (int64, error) { +func (ft FakeTopo) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVersion int64) (int64, error) { return 0, errNotImplemented } -func (ft FakeTopo) ValidateShard(keyspace, shard string) error { +func (ft FakeTopo) ValidateShard(ctx context.Context, keyspace, shard string) error { return errNotImplemented } -func (ft FakeTopo) GetShard(keyspace, shard string) (*topo.ShardInfo, error) { +func (ft FakeTopo) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) { return nil, errNotImplemented } -func (ft FakeTopo) GetShardNames(keyspace string) ([]string, error) { +func (ft FakeTopo) GetShardNames(ctx context.Context, keyspace string) ([]string, error) { return nil, errNotImplemented } -func (ft FakeTopo) DeleteShard(keyspace, shard string) error { +func (ft FakeTopo) DeleteShard(ctx context.Context, keyspace, shard string) error { return errNotImplemented } -func (ft FakeTopo) CreateTablet(tablet *topo.Tablet) error { +func (ft FakeTopo) CreateTablet(ctx context.Context, tablet *topo.Tablet) error { return errNotImplemented } -func (ft FakeTopo) UpdateTablet(tablet *topo.TabletInfo, existingVersion int64) (newVersion int64, err error) { +func (ft FakeTopo) UpdateTablet(ctx context.Context, tablet *topo.TabletInfo, existingVersion int64) (newVersion 
int64, err error) { return 0, errNotImplemented } -func (ft FakeTopo) UpdateTabletFields(tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error { +func (ft FakeTopo) UpdateTabletFields(ctx context.Context, tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error { return errNotImplemented } -func (ft FakeTopo) DeleteTablet(alias topo.TabletAlias) error { +func (ft FakeTopo) DeleteTablet(ctx context.Context, alias topo.TabletAlias) error { return errNotImplemented } -func (ft FakeTopo) GetTablet(alias topo.TabletAlias) (*topo.TabletInfo, error) { +func (ft FakeTopo) GetTablet(ctx context.Context, alias topo.TabletAlias) (*topo.TabletInfo, error) { return nil, errNotImplemented } -func (ft FakeTopo) GetTabletsByCell(cell string) ([]topo.TabletAlias, error) { +func (ft FakeTopo) GetTabletsByCell(ctx context.Context, cell string) ([]topo.TabletAlias, error) { return nil, errNotImplemented } -func (ft FakeTopo) UpdateShardReplicationFields(cell, keyspace, shard string, update func(*topo.ShardReplication) error) error { +func (ft FakeTopo) UpdateShardReplicationFields(ctx context.Context, cell, keyspace, shard string, update func(*topo.ShardReplication) error) error { return errNotImplemented } -func (ft FakeTopo) GetShardReplication(cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) { +func (ft FakeTopo) GetShardReplication(ctx context.Context, cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) { return nil, errNotImplemented } -func (ft FakeTopo) DeleteShardReplication(cell, keyspace, shard string) error { +func (ft FakeTopo) DeleteShardReplication(ctx context.Context, cell, keyspace, shard string) error { return errNotImplemented } @@ -116,43 +116,43 @@ func (ft FakeTopo) LockSrvShardForAction(ctx context.Context, cell, keyspace, sh return "", errNotImplemented } -func (ft FakeTopo) UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results string) error { +func (ft FakeTopo) UnlockSrvShardForAction(ctx context.Context, cell, keyspace, shard, lockPath, results string) error { return errNotImplemented } -func (ft FakeTopo) GetSrvTabletTypesPerShard(cell, keyspace, shard string) ([]topo.TabletType, error) { +func (ft FakeTopo) GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]topo.TabletType, error) { return nil, errNotImplemented } -func (ft FakeTopo) UpdateEndPoints(cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error { +func (ft FakeTopo) UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error { return errNotImplemented } -func (ft FakeTopo) DeleteEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) error { +func (ft FakeTopo) DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) error { return errNotImplemented } -func (ft FakeTopo) WatchEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) { +func (ft FakeTopo) WatchEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) { return nil, nil, errNotImplemented } -func (ft FakeTopo) UpdateSrvShard(cell, keyspace, shard string, srvShard *topo.SrvShard) error { +func (ft FakeTopo) UpdateSrvShard(ctx context.Context, cell, keyspace, shard string, srvShard *topo.SrvShard) error { return errNotImplemented } -func (ft FakeTopo) GetSrvShard(cell, keyspace, shard 
string) (*topo.SrvShard, error) { +func (ft FakeTopo) GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { return nil, errNotImplemented } -func (ft FakeTopo) DeleteSrvShard(cell, keyspace, shard string) error { +func (ft FakeTopo) DeleteSrvShard(ctx context.Context, cell, keyspace, shard string) error { return errNotImplemented } -func (ft FakeTopo) UpdateSrvKeyspace(cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error { +func (ft FakeTopo) UpdateSrvKeyspace(ctx context.Context, cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error { return errNotImplemented } -func (ft FakeTopo) UpdateTabletEndpoint(cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error { +func (ft FakeTopo) UpdateTabletEndpoint(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error { return errNotImplemented } @@ -160,7 +160,7 @@ func (ft FakeTopo) LockKeyspaceForAction(ctx context.Context, keyspace, contents return "", errNotImplemented } -func (ft FakeTopo) UnlockKeyspaceForAction(keyspace, lockPath, results string) error { +func (ft FakeTopo) UnlockKeyspaceForAction(ctx context.Context, keyspace, lockPath, results string) error { return errNotImplemented } @@ -168,6 +168,6 @@ func (ft FakeTopo) LockShardForAction(ctx context.Context, keyspace, shard, cont return "", errNotImplemented } -func (ft FakeTopo) UnlockShardForAction(keyspace, shard, lockPath, results string) error { +func (ft FakeTopo) UnlockShardForAction(ctx context.Context, keyspace, shard, lockPath, results string) error { return errNotImplemented } diff --git a/go/vt/topo/test/faketopo/fixture.go b/go/vt/topo/test/faketopo/fixture.go index 633b9a8af0..88c8b6211b 100644 --- a/go/vt/topo/test/faketopo/fixture.go +++ b/go/vt/topo/test/faketopo/fixture.go @@ -101,7 +101,7 @@ func (fix *Fixture) GetTablet(uid int) *topo.TabletInfo { if !ok { panic("bad tablet uid") } - ti, err := fix.Topo.GetTablet(tablet.Alias) + ti, err := fix.Topo.GetTablet(context.Background(), tablet.Alias) if err != nil { fix.Fatalf("GetTablet %v: %v", tablet.Alias, err) } diff --git a/go/vt/topo/test/keyspace.go b/go/vt/topo/test/keyspace.go index c7532032c4..b91d640601 100644 --- a/go/vt/topo/test/keyspace.go +++ b/go/vt/topo/test/keyspace.go @@ -1,8 +1,8 @@ -// TODO(sougou): The comments below look obsolete. Need to verify. // Package test contains utilities to test topo.Server // implementations. If you are testing your implementation, you will -// want to call CheckAll in your test method. For an example, look at -// the tests in github.com/youtube/vitess/go/vt/zktopo. +// want to call all the check methods in your test methods. For an +// example, look at the tests in +// github.com/youtube/vitess/go/vt/zktopo. 
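The rewritten package comment above is the contract for plugin authors: every topo.Server implementation gets its own test file that instantiates the server and calls these check functions, in the style of tee_topo_test.go earlier in this patch. A skeleton, with newServer deliberately left as a placeholder:

package mytopo_test // hypothetical test package for a plugin

import (
	"testing"

	"github.com/youtube/vitess/go/vt/topo"
	"github.com/youtube/vitess/go/vt/topo/test"
	"golang.org/x/net/context"
)

// newServer must return the implementation under test; only a stub here.
func newServer(t *testing.T) topo.Server {
	t.Skip("replace with the topo.Server implementation under test")
	return nil
}

func TestKeyspace(t *testing.T) {
	test.CheckKeyspace(context.Background(), t, newServer(t))
}

func TestShardLock(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping wait-based test in short mode.")
	}
	test.CheckShardLock(context.Background(), t, newServer(t))
}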
package test import ( @@ -11,10 +11,12 @@ import ( "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) -func CheckKeyspace(t *testing.T, ts topo.Server) { - keyspaces, err := ts.GetKeyspaces() +// CheckKeyspace tests the keyspace part of the API +func CheckKeyspace(ctx context.Context, t *testing.T, ts topo.Server) { + keyspaces, err := ts.GetKeyspaces(ctx) if err != nil { t.Errorf("GetKeyspaces(empty): %v", err) } @@ -22,14 +24,14 @@ func CheckKeyspace(t *testing.T, ts topo.Server) { t.Errorf("len(GetKeyspaces()) != 0: %v", keyspaces) } - if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil { + if err := ts.CreateKeyspace(ctx, "test_keyspace", &topo.Keyspace{}); err != nil { t.Errorf("CreateKeyspace: %v", err) } - if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != topo.ErrNodeExists { + if err := ts.CreateKeyspace(ctx, "test_keyspace", &topo.Keyspace{}); err != topo.ErrNodeExists { t.Errorf("CreateKeyspace(again) is not ErrNodeExists: %v", err) } - keyspaces, err = ts.GetKeyspaces() + keyspaces, err = ts.GetKeyspaces(ctx) if err != nil { t.Errorf("GetKeyspaces: %v", err) } @@ -52,10 +54,10 @@ func CheckKeyspace(t *testing.T, ts topo.Server) { }, SplitShardCount: 64, } - if err := ts.CreateKeyspace("test_keyspace2", k); err != nil { + if err := ts.CreateKeyspace(ctx, "test_keyspace2", k); err != nil { t.Errorf("CreateKeyspace: %v", err) } - keyspaces, err = ts.GetKeyspaces() + keyspaces, err = ts.GetKeyspaces(ctx) if err != nil { t.Errorf("GetKeyspaces: %v", err) } @@ -66,10 +68,10 @@ func CheckKeyspace(t *testing.T, ts topo.Server) { } // Call delete shards and make sure the keyspace still exists. - if err := ts.DeleteKeyspaceShards("test_keyspace2"); err != nil { + if err := ts.DeleteKeyspaceShards(ctx, "test_keyspace2"); err != nil { t.Errorf("DeleteKeyspaceShards: %v", err) } - ki, err := ts.GetKeyspace("test_keyspace2") + ki, err := ts.GetKeyspace(ctx, "test_keyspace2") if err != nil { t.Fatalf("GetKeyspace: %v", err) } @@ -81,11 +83,11 @@ func CheckKeyspace(t *testing.T, ts topo.Server) { ki.ShardingColumnType = key.KIT_BYTES delete(ki.ServedFromMap, topo.TYPE_MASTER) ki.ServedFromMap[topo.TYPE_REPLICA].Keyspace = "test_keyspace4" - err = topo.UpdateKeyspace(ts, ki) + err = topo.UpdateKeyspace(ctx, ts, ki) if err != nil { t.Fatalf("UpdateKeyspace: %v", err) } - ki, err = ts.GetKeyspace("test_keyspace2") + ki, err = ts.GetKeyspace(ctx, "test_keyspace2") if err != nil { t.Fatalf("GetKeyspace: %v", err) } diff --git a/go/vt/topo/test/lock.go b/go/vt/topo/test/lock.go index 4bb39a95d0..6b16f37bf3 100644 --- a/go/vt/topo/test/lock.go +++ b/go/vt/topo/test/lock.go @@ -18,18 +18,18 @@ import ( var timeUntilLockIsTaken = 10 * time.Millisecond // CheckKeyspaceLock checks we can take a keyspace lock as expected. 
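Because the lock calls now take a context, callers can bound how long they wait for a contended lock; checkKeyspaceLockTimeout below exercises exactly this with context.WithCancel. A caller-side sketch using a deadline instead (the wrapper name and the 30-second figure are illustrative assumptions):

package example // hypothetical package, sketch only

import (
	"time"

	"github.com/youtube/vitess/go/vt/topo"
	"golang.org/x/net/context"
)

// lockKeyspaceWithDeadline gives up on acquiring the keyspace lock
// after 30 seconds instead of blocking indefinitely.
func lockKeyspaceWithDeadline(ts topo.Server, keyspace, contents string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return ts.LockKeyspaceForAction(ctx, keyspace, contents)
}

The caller still releases the lock with UnlockKeyspaceForAction under its own context; the deadline here only bounds the acquisition.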
-func CheckKeyspaceLock(t *testing.T, ts topo.Server) { - if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil { +func CheckKeyspaceLock(ctx context.Context, t *testing.T, ts topo.Server) { + if err := ts.CreateKeyspace(ctx, "test_keyspace", &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } - checkKeyspaceLockTimeout(t, ts) - checkKeyspaceLockMissing(t, ts) - checkKeyspaceLockUnblocks(t, ts) + checkKeyspaceLockTimeout(ctx, t, ts) + checkKeyspaceLockMissing(ctx, t, ts) + checkKeyspaceLockUnblocks(ctx, t, ts) } -func checkKeyspaceLockTimeout(t *testing.T, ts topo.Server) { - ctx, ctxCancel := context.WithCancel(context.Background()) +func checkKeyspaceLockTimeout(ctx context.Context, t *testing.T, ts topo.Server) { + ctx, ctxCancel := context.WithCancel(ctx) lockPath, err := ts.LockKeyspaceForAction(ctx, "test_keyspace", "fake-content") if err != nil { t.Fatalf("LockKeyspaceForAction: %v", err) @@ -51,19 +51,18 @@ func checkKeyspaceLockTimeout(t *testing.T, ts topo.Server) { t.Errorf("LockKeyspaceForAction(interrupted): %v", err) } - if err := ts.UnlockKeyspaceForAction("test_keyspace", lockPath, "fake-results"); err != nil { + if err := ts.UnlockKeyspaceForAction(ctx, "test_keyspace", lockPath, "fake-results"); err != nil { t.Errorf("UnlockKeyspaceForAction(): %v", err) } // test we can't unlock again - if err := ts.UnlockKeyspaceForAction("test_keyspace", lockPath, "fake-results"); err == nil { + if err := ts.UnlockKeyspaceForAction(ctx, "test_keyspace", lockPath, "fake-results"); err == nil { t.Error("UnlockKeyspaceForAction(again) worked") } } // checkKeyspaceLockMissing makes sure we can't lock a non-existing keyspace -func checkKeyspaceLockMissing(t *testing.T, ts topo.Server) { - ctx := context.Background() +func checkKeyspaceLockMissing(ctx context.Context, t *testing.T, ts topo.Server) { if _, err := ts.LockKeyspaceForAction(ctx, "test_keyspace_666", "fake-content"); err == nil { t.Errorf("LockKeyspaceForAction(test_keyspace_666) worked for non-existing keyspace") } @@ -71,26 +70,24 @@ func checkKeyspaceLockMissing(t *testing.T, ts topo.Server) { // checkKeyspaceLockUnblocks makes sure that a routine waiting on a lock // is unblocked when another routine frees the lock -func checkKeyspaceLockUnblocks(t *testing.T, ts topo.Server) { +func checkKeyspaceLockUnblocks(ctx context.Context, t *testing.T, ts topo.Server) { unblock := make(chan struct{}) finished := make(chan struct{}) // as soon as we're unblocked, we try to lock the keyspace go func() { <-unblock - ctx := context.Background() lockPath, err := ts.LockKeyspaceForAction(ctx, "test_keyspace", "fake-content") if err != nil { t.Fatalf("LockKeyspaceForAction(test_keyspace) failed: %v", err) } - if err = ts.UnlockKeyspaceForAction("test_keyspace", lockPath, "fake-results"); err != nil { + if err = ts.UnlockKeyspaceForAction(ctx, "test_keyspace", lockPath, "fake-results"); err != nil { t.Errorf("UnlockKeyspaceForAction(test_keyspace): %v", err) } close(finished) }() // lock the keyspace - ctx := context.Background() lockPath2, err := ts.LockKeyspaceForAction(ctx, "test_keyspace", "fake-content") if err != nil { t.Fatalf("LockKeyspaceForAction(test_keyspace) failed: %v", err) @@ -102,7 +99,7 @@ func checkKeyspaceLockUnblocks(t *testing.T, ts topo.Server) { // sleep for a while so we're sure the go routine is blocking time.Sleep(timeUntilLockIsTaken) - if err = ts.UnlockKeyspaceForAction("test_keyspace", lockPath2, "fake-results"); err != nil { + if err = ts.UnlockKeyspaceForAction(ctx, 
"test_keyspace", lockPath2, "fake-results"); err != nil { t.Fatalf("UnlockKeyspaceForAction(test_keyspace): %v", err) } @@ -115,21 +112,21 @@ func checkKeyspaceLockUnblocks(t *testing.T, ts topo.Server) { } // CheckShardLock checks we can take a shard lock -func CheckShardLock(t *testing.T, ts topo.Server) { - if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil { +func CheckShardLock(ctx context.Context, t *testing.T, ts topo.Server) { + if err := ts.CreateKeyspace(ctx, "test_keyspace", &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } - if err := topo.CreateShard(ts, "test_keyspace", "10-20"); err != nil { + if err := topo.CreateShard(ctx, ts, "test_keyspace", "10-20"); err != nil { t.Fatalf("CreateShard: %v", err) } - checkShardLockTimeout(t, ts) - checkShardLockMissing(t, ts) - checkShardLockUnblocks(t, ts) + checkShardLockTimeout(ctx, t, ts) + checkShardLockMissing(ctx, t, ts) + checkShardLockUnblocks(ctx, t, ts) } -func checkShardLockTimeout(t *testing.T, ts topo.Server) { - ctx, ctxCancel := context.WithCancel(context.Background()) +func checkShardLockTimeout(ctx context.Context, t *testing.T, ts topo.Server) { + ctx, ctxCancel := context.WithCancel(ctx) lockPath, err := ts.LockShardForAction(ctx, "test_keyspace", "10-20", "fake-content") if err != nil { t.Fatalf("LockShardForAction: %v", err) @@ -151,19 +148,18 @@ func checkShardLockTimeout(t *testing.T, ts topo.Server) { t.Errorf("LockShardForAction(interrupted): %v", err) } - if err := ts.UnlockShardForAction("test_keyspace", "10-20", lockPath, "fake-results"); err != nil { + if err := ts.UnlockShardForAction(ctx, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { t.Errorf("UnlockShardForAction(): %v", err) } // test we can't unlock again - if err := ts.UnlockShardForAction("test_keyspace", "10-20", lockPath, "fake-results"); err == nil { + if err := ts.UnlockShardForAction(ctx, "test_keyspace", "10-20", lockPath, "fake-results"); err == nil { t.Error("UnlockShardForAction(again) worked") } } -func checkShardLockMissing(t *testing.T, ts topo.Server) { +func checkShardLockMissing(ctx context.Context, t *testing.T, ts topo.Server) { // test we can't lock a non-existing shard - ctx := context.Background() if _, err := ts.LockShardForAction(ctx, "test_keyspace", "20-30", "fake-content"); err == nil { t.Errorf("LockShardForAction(test_keyspace/20-30) worked for non-existing shard") } @@ -171,26 +167,24 @@ func checkShardLockMissing(t *testing.T, ts topo.Server) { // checkShardLockUnblocks makes sure that a routine waiting on a lock // is unblocked when another routine frees the lock -func checkShardLockUnblocks(t *testing.T, ts topo.Server) { +func checkShardLockUnblocks(ctx context.Context, t *testing.T, ts topo.Server) { unblock := make(chan struct{}) finished := make(chan struct{}) // as soon as we're unblocked, we try to lock the shard go func() { <-unblock - ctx := context.Background() lockPath, err := ts.LockShardForAction(ctx, "test_keyspace", "10-20", "fake-content") if err != nil { t.Fatalf("LockShardForAction(test_keyspace, 10-20) failed: %v", err) } - if err = ts.UnlockShardForAction("test_keyspace", "10-20", lockPath, "fake-results"); err != nil { + if err = ts.UnlockShardForAction(ctx, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { t.Errorf("UnlockShardForAction(test_keyspace, 10-20): %v", err) } close(finished) }() // lock the shard - ctx := context.Background() lockPath2, err := ts.LockShardForAction(ctx, "test_keyspace", "10-20", 
"fake-content") if err != nil { t.Fatalf("LockShardForAction(test_keyspace, 10-20) failed: %v", err) @@ -202,7 +196,7 @@ func checkShardLockUnblocks(t *testing.T, ts topo.Server) { // sleep for a while so we're sure the go routine is blocking time.Sleep(timeUntilLockIsTaken) - if err = ts.UnlockShardForAction("test_keyspace", "10-20", lockPath2, "fake-results"); err != nil { + if err = ts.UnlockShardForAction(ctx, "test_keyspace", "10-20", lockPath2, "fake-results"); err != nil { t.Fatalf("UnlockShardForAction(test_keyspace, 10-20): %v", err) } @@ -215,22 +209,22 @@ func checkShardLockUnblocks(t *testing.T, ts topo.Server) { } // CheckSrvShardLock tests we can take a SrvShard lock -func CheckSrvShardLock(t *testing.T, ts topo.Server) { - checkSrvShardLockGeneral(t, ts) - checkSrvShardLockUnblocks(t, ts) +func CheckSrvShardLock(ctx context.Context, t *testing.T, ts topo.Server) { + checkSrvShardLockGeneral(ctx, t, ts) + checkSrvShardLockUnblocks(ctx, t, ts) } -func checkSrvShardLockGeneral(t *testing.T, ts topo.Server) { - cell := getLocalCell(t, ts) +func checkSrvShardLockGeneral(ctx context.Context, t *testing.T, ts topo.Server) { + cell := getLocalCell(ctx, t, ts) // make sure we can create the lock even if no directory exists - ctx, ctxCancel := context.WithCancel(context.Background()) + ctx, ctxCancel := context.WithCancel(ctx) lockPath, err := ts.LockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", "fake-content") if err != nil { t.Fatalf("LockSrvShardForAction: %v", err) } - if err := ts.UnlockSrvShardForAction(cell, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { + if err := ts.UnlockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { t.Errorf("UnlockShardForAction: %v", err) } @@ -257,12 +251,12 @@ func checkSrvShardLockGeneral(t *testing.T, ts topo.Server) { } // unlock now - if err := ts.UnlockSrvShardForAction(cell, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { + if err := ts.UnlockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { t.Errorf("UnlockSrvShardForAction(): %v", err) } // test we can't unlock again - if err := ts.UnlockSrvShardForAction(cell, "test_keyspace", "10-20", lockPath, "fake-results"); err == nil { + if err := ts.UnlockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", lockPath, "fake-results"); err == nil { t.Error("UnlockSrvShardForAction(again) worked") } @@ -270,27 +264,25 @@ func checkSrvShardLockGeneral(t *testing.T, ts topo.Server) { // checkSrvShardLockUnblocks makes sure that a routine waiting on a lock // is unblocked when another routine frees the lock -func checkSrvShardLockUnblocks(t *testing.T, ts topo.Server) { - cell := getLocalCell(t, ts) +func checkSrvShardLockUnblocks(ctx context.Context, t *testing.T, ts topo.Server) { + cell := getLocalCell(ctx, t, ts) unblock := make(chan struct{}) finished := make(chan struct{}) // as soon as we're unblocked, we try to lock the shard go func() { <-unblock - ctx := context.Background() lockPath, err := ts.LockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", "fake-content") if err != nil { t.Fatalf("LockSrvShardForAction(test, test_keyspace, 10-20) failed: %v", err) } - if err = ts.UnlockSrvShardForAction(cell, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { + if err = ts.UnlockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { t.Errorf("UnlockSrvShardForAction(test, test_keyspace, 10-20): %v", err) } 
close(finished) }() // lock the shard - ctx := context.Background() lockPath2, err := ts.LockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", "fake-content") if err != nil { t.Fatalf("LockSrvShardForAction(test, test_keyspace, 10-20) failed: %v", err) @@ -302,7 +294,7 @@ func checkSrvShardLockUnblocks(t *testing.T, ts topo.Server) { // sleep for a while so we're sure the go routine is blocking time.Sleep(timeUntilLockIsTaken) - if err = ts.UnlockSrvShardForAction(cell, "test_keyspace", "10-20", lockPath2, "fake-results"); err != nil { + if err = ts.UnlockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", lockPath2, "fake-results"); err != nil { t.Fatalf("UnlockSrvShardForAction(test, test_keyspace, 10-20): %v", err) } diff --git a/go/vt/topo/test/replication.go b/go/vt/topo/test/replication.go index 3011ec1cef..5250c2f769 100644 --- a/go/vt/topo/test/replication.go +++ b/go/vt/topo/test/replication.go @@ -8,12 +8,13 @@ import ( "testing" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) // CheckShardReplication tests ShardReplication objects -func CheckShardReplication(t *testing.T, ts topo.Server) { - cell := getLocalCell(t, ts) - if _, err := ts.GetShardReplication(cell, "test_keyspace", "-10"); err != topo.ErrNoNode { +func CheckShardReplication(ctx context.Context, t *testing.T, ts topo.Server) { + cell := getLocalCell(ctx, t, ts) + if _, err := ts.GetShardReplication(ctx, cell, "test_keyspace", "-10"); err != topo.ErrNoNode { t.Errorf("GetShardReplication(not there): %v", err) } @@ -27,14 +28,14 @@ func CheckShardReplication(t *testing.T, ts topo.Server) { }, }, } - if err := ts.UpdateShardReplicationFields(cell, "test_keyspace", "-10", func(oldSr *topo.ShardReplication) error { + if err := ts.UpdateShardReplicationFields(ctx, cell, "test_keyspace", "-10", func(oldSr *topo.ShardReplication) error { *oldSr = *sr return nil }); err != nil { t.Fatalf("UpdateShardReplicationFields() failed: %v", err) } - if sri, err := ts.GetShardReplication(cell, "test_keyspace", "-10"); err != nil { + if sri, err := ts.GetShardReplication(ctx, cell, "test_keyspace", "-10"); err != nil { t.Errorf("GetShardReplication(new guy) failed: %v", err) } else { if len(sri.ReplicationLinks) != 1 || @@ -44,7 +45,7 @@ func CheckShardReplication(t *testing.T, ts topo.Server) { } } - if err := ts.UpdateShardReplicationFields(cell, "test_keyspace", "-10", func(sr *topo.ShardReplication) error { + if err := ts.UpdateShardReplicationFields(ctx, cell, "test_keyspace", "-10", func(sr *topo.ShardReplication) error { sr.ReplicationLinks = append(sr.ReplicationLinks, topo.ReplicationLink{ TabletAlias: topo.TabletAlias{ Cell: "c3", @@ -56,7 +57,7 @@ func CheckShardReplication(t *testing.T, ts topo.Server) { t.Errorf("UpdateShardReplicationFields() failed: %v", err) } - if sri, err := ts.GetShardReplication(cell, "test_keyspace", "-10"); err != nil { + if sri, err := ts.GetShardReplication(ctx, cell, "test_keyspace", "-10"); err != nil { t.Errorf("GetShardReplication(after append) failed: %v", err) } else { if len(sri.ReplicationLinks) != 2 || @@ -68,10 +69,10 @@ func CheckShardReplication(t *testing.T, ts topo.Server) { } } - if err := ts.DeleteShardReplication(cell, "test_keyspace", "-10"); err != nil { + if err := ts.DeleteShardReplication(ctx, cell, "test_keyspace", "-10"); err != nil { t.Errorf("DeleteShardReplication(existing) failed: %v", err) } - if err := ts.DeleteShardReplication(cell, "test_keyspace", "-10"); err != topo.ErrNoNode { + if err := ts.DeleteShardReplication(ctx, cell, 
"test_keyspace", "-10"); err != topo.ErrNoNode { t.Errorf("DeleteShardReplication(again) returned: %v", err) } } diff --git a/go/vt/topo/test/serving.go b/go/vt/topo/test/serving.go index 9d0368e1f7..e3d1f3e182 100644 --- a/go/vt/topo/test/serving.go +++ b/go/vt/topo/test/serving.go @@ -15,13 +15,13 @@ import ( // CheckServingGraph makes sure the serving graph functions work properly. func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) { - cell := getLocalCell(t, ts) + cell := getLocalCell(ctx, t, ts) // test individual cell/keyspace/shard/type entries - if _, err := ts.GetSrvTabletTypesPerShard(cell, "test_keyspace", "-10"); err != topo.ErrNoNode { + if _, err := ts.GetSrvTabletTypesPerShard(ctx, cell, "test_keyspace", "-10"); err != topo.ErrNoNode { t.Errorf("GetSrvTabletTypesPerShard(invalid): %v", err) } - if _, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != topo.ErrNoNode { + if _, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != topo.ErrNoNode { t.Errorf("GetEndPoints(invalid): %v", err) } @@ -38,18 +38,18 @@ func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) { if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, &endPoints); err != nil { t.Fatalf("UpdateEndPoints(master): %v", err) } - if types, err := ts.GetSrvTabletTypesPerShard(cell, "test_keyspace", "-10"); err != nil || len(types) != 1 || types[0] != topo.TYPE_MASTER { + if types, err := ts.GetSrvTabletTypesPerShard(ctx, cell, "test_keyspace", "-10"); err != nil || len(types) != 1 || types[0] != topo.TYPE_MASTER { t.Errorf("GetSrvTabletTypesPerShard(1): %v %v", err, types) } // Delete the SrvShard (need to delete endpoints first). - if err := ts.DeleteEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil { + if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil { t.Errorf("DeleteEndPoints: %v", err) } - if err := ts.DeleteSrvShard(cell, "test_keyspace", "-10"); err != nil { + if err := ts.DeleteSrvShard(ctx, cell, "test_keyspace", "-10"); err != nil { t.Errorf("DeleteSrvShard: %v", err) } - if _, err := ts.GetSrvShard(cell, "test_keyspace", "-10"); err != topo.ErrNoNode { + if _, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "-10"); err != topo.ErrNoNode { t.Errorf("GetSrvShard(deleted) got %v, want ErrNoNode", err) } @@ -58,7 +58,7 @@ func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) { t.Fatalf("UpdateEndPoints(master): %v", err) } - addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER) + addrs, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER) if err != nil { t.Errorf("GetEndPoints: %v", err) } @@ -69,26 +69,26 @@ func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) { t.Errorf("GetSrcTabletType(1).NamedPortmap: want %v, got %v", endPoints.Entries[0].NamedPortMap, pm) } - if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_REPLICA, &topo.EndPoint{Uid: 2, Host: "host2"}); err != nil { + if err := ts.UpdateTabletEndpoint(ctx, cell, "test_keyspace", "-10", topo.TYPE_REPLICA, &topo.EndPoint{Uid: 2, Host: "host2"}); err != nil { t.Fatalf("UpdateTabletEndpoint(invalid): %v", err) } - if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 1, Host: "host2"}); err != nil { + if err := ts.UpdateTabletEndpoint(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, 
&topo.EndPoint{Uid: 1, Host: "host2"}); err != nil { t.Fatalf("UpdateTabletEndpoint(master): %v", err) } - if addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 { + if addrs, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 { t.Errorf("GetEndPoints(2): %v %v", err, addrs) } - if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 3, Host: "host3"}); err != nil { + if err := ts.UpdateTabletEndpoint(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 3, Host: "host3"}); err != nil { t.Fatalf("UpdateTabletEndpoint(master): %v", err) } - if addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 2 { + if addrs, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 2 { t.Errorf("GetEndPoints(2): %v %v", err, addrs) } - if err := ts.DeleteEndPoints(cell, "test_keyspace", "-10", topo.TYPE_REPLICA); err != topo.ErrNoNode { + if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_REPLICA); err != topo.ErrNoNode { t.Errorf("DeleteEndPoints(unknown): %v", err) } - if err := ts.DeleteEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil { + if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil { t.Errorf("DeleteEndPoints(master): %v", err) } @@ -98,13 +98,13 @@ func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) { KeyRange: newKeyRange("-10"), MasterCell: "test", } - if err := ts.UpdateSrvShard(cell, "test_keyspace", "-10", &srvShard); err != nil { + if err := ts.UpdateSrvShard(ctx, cell, "test_keyspace", "-10", &srvShard); err != nil { t.Fatalf("UpdateSrvShard(1): %v", err) } - if _, err := ts.GetSrvShard(cell, "test_keyspace", "666"); err != topo.ErrNoNode { + if _, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "666"); err != topo.ErrNoNode { t.Errorf("GetSrvShard(invalid): %v", err) } - if s, err := ts.GetSrvShard(cell, "test_keyspace", "-10"); err != nil || + if s, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "-10"); err != nil || s.Name != "-10" || s.KeyRange != newKeyRange("-10") || s.MasterCell != "test" { @@ -129,13 +129,13 @@ func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) { topo.TYPE_REPLICA: "other_keyspace", }, } - if err := ts.UpdateSrvKeyspace(cell, "test_keyspace", &srvKeyspace); err != nil { + if err := ts.UpdateSrvKeyspace(ctx, cell, "test_keyspace", &srvKeyspace); err != nil { t.Errorf("UpdateSrvKeyspace(1): %v", err) } - if _, err := ts.GetSrvKeyspace(cell, "test_keyspace666"); err != topo.ErrNoNode { + if _, err := ts.GetSrvKeyspace(ctx, cell, "test_keyspace666"); err != topo.ErrNoNode { t.Errorf("GetSrvKeyspace(invalid): %v", err) } - if k, err := ts.GetSrvKeyspace(cell, "test_keyspace"); err != nil || + if k, err := ts.GetSrvKeyspace(ctx, cell, "test_keyspace"); err != nil || len(k.Partitions) != 1 || len(k.Partitions[topo.TYPE_MASTER].ShardReferences) != 1 || k.Partitions[topo.TYPE_MASTER].ShardReferences[0].Name != "-80" || @@ -145,15 +145,15 @@ func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) { k.ServedFrom[topo.TYPE_REPLICA] != "other_keyspace" { t.Errorf("GetSrvKeyspace(valid): %v %v", err, k) } - if k, err := ts.GetSrvKeyspaceNames(cell); err != nil 
|| len(k) != 1 || k[0] != "test_keyspace" { + if k, err := ts.GetSrvKeyspaceNames(ctx, cell); err != nil || len(k) != 1 || k[0] != "test_keyspace" { t.Errorf("GetSrvKeyspaceNames(): %v", err) } // check that updating a SrvKeyspace out of the blue works - if err := ts.UpdateSrvKeyspace(cell, "unknown_keyspace_so_far", &srvKeyspace); err != nil { + if err := ts.UpdateSrvKeyspace(ctx, cell, "unknown_keyspace_so_far", &srvKeyspace); err != nil { t.Fatalf("UpdateSrvKeyspace(2): %v", err) } - if k, err := ts.GetSrvKeyspace(cell, "unknown_keyspace_so_far"); err != nil || + if k, err := ts.GetSrvKeyspace(ctx, cell, "unknown_keyspace_so_far"); err != nil || len(k.Partitions) != 1 || len(k.Partitions[topo.TYPE_MASTER].ShardReferences) != 1 || k.Partitions[topo.TYPE_MASTER].ShardReferences[0].Name != "-80" || @@ -167,13 +167,13 @@ func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) { // CheckWatchEndPoints makes sure WatchEndPoints works as expected func CheckWatchEndPoints(ctx context.Context, t *testing.T, ts topo.Server) { - cell := getLocalCell(t, ts) + cell := getLocalCell(ctx, t, ts) keyspace := "test_keyspace" shard := "-10" tabletType := topo.TYPE_MASTER // start watching, should get nil first - notifications, stopWatching, err := ts.WatchEndPoints(cell, keyspace, shard, tabletType) + notifications, stopWatching, err := ts.WatchEndPoints(ctx, cell, keyspace, shard, tabletType) if err != nil { t.Fatalf("WatchEndPoints failed: %v", err) } @@ -212,7 +212,7 @@ func CheckWatchEndPoints(ctx context.Context, t *testing.T, ts topo.Server) { } // delete the endpoints, should get a notification - if err := ts.DeleteEndPoints(cell, keyspace, shard, tabletType); err != nil { + if err := ts.DeleteEndPoints(ctx, cell, keyspace, shard, tabletType); err != nil { t.Fatalf("DeleteEndPoints failed: %v", err) } for { diff --git a/go/vt/topo/test/shard.go b/go/vt/topo/test/shard.go index 5e9c7514ff..ecd874ec84 100644 --- a/go/vt/topo/test/shard.go +++ b/go/vt/topo/test/shard.go @@ -27,30 +27,30 @@ func shardEqual(left, right *topo.Shard) (bool, error) { // CheckShard verifies the Shard operations work correctly func CheckShard(ctx context.Context, t *testing.T, ts topo.Server) { - if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil { + if err := ts.CreateKeyspace(ctx, "test_keyspace", &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } - if err := topo.CreateShard(ts, "test_keyspace", "b0-c0"); err != nil { + if err := topo.CreateShard(ctx, ts, "test_keyspace", "b0-c0"); err != nil { t.Fatalf("CreateShard: %v", err) } - if err := topo.CreateShard(ts, "test_keyspace", "b0-c0"); err != topo.ErrNodeExists { + if err := topo.CreateShard(ctx, ts, "test_keyspace", "b0-c0"); err != topo.ErrNodeExists { t.Errorf("CreateShard called second time, got: %v", err) } // Delete shard and see if we can re-create it. - if err := ts.DeleteShard("test_keyspace", "b0-c0"); err != nil { + if err := ts.DeleteShard(ctx, "test_keyspace", "b0-c0"); err != nil { t.Fatalf("DeleteShard: %v", err) } - if err := topo.CreateShard(ts, "test_keyspace", "b0-c0"); err != nil { + if err := topo.CreateShard(ctx, ts, "test_keyspace", "b0-c0"); err != nil { t.Fatalf("CreateShard: %v", err) } // Delete ALL shards. 
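
CheckWatchEndPoints, earlier in this hunk, pins down the watch contract: the caller gets a notifications channel plus a stop handle, receives nil first when nothing exists, then one value per update, and another notification when the endpoints are deleted. A self-contained consumer sketch of that contract, with a hypothetical local type standing in for *topo.EndPoints:

package main

import "fmt"

// endPoints is a hypothetical stand-in for *topo.EndPoints.
type endPoints struct{ entries int }

// consumeWatch drains a watch channel following the contract the test
// describes: nil means "nothing there", a value is a new snapshot.
func consumeWatch(notifications <-chan *endPoints) {
	for ep := range notifications {
		if ep == nil {
			fmt.Println("no endpoints")
			continue
		}
		fmt.Printf("endpoints updated: %d entries\n", ep.entries)
	}
}

func main() {
	ch := make(chan *endPoints, 3)
	ch <- nil                    // initial notification: nothing exists yet
	ch <- &endPoints{entries: 1} // an update arrives
	ch <- nil                    // the delete shows up as nil
	close(ch)
	consumeWatch(ch)
}
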
- if err := ts.DeleteKeyspaceShards("test_keyspace"); err != nil { + if err := ts.DeleteKeyspaceShards(ctx, "test_keyspace"); err != nil { t.Fatalf("DeleteKeyspaceShards: %v", err) } - if err := topo.CreateShard(ts, "test_keyspace", "b0-c0"); err != nil { + if err := topo.CreateShard(ctx, ts, "test_keyspace", "b0-c0"); err != nil { t.Fatalf("CreateShard: %v", err) } @@ -130,7 +130,7 @@ func CheckShard(ctx context.Context, t *testing.T, ts topo.Server) { } // test GetShardNames - shards, err := ts.GetShardNames("test_keyspace") + shards, err := ts.GetShardNames(ctx, "test_keyspace") if err != nil { t.Errorf("GetShardNames: %v", err) } @@ -138,12 +138,12 @@ func CheckShard(ctx context.Context, t *testing.T, ts topo.Server) { t.Errorf(`GetShardNames: want [ "b0-c0" ], got %v`, shards) } - if _, err := ts.GetShardNames("test_keyspace666"); err != topo.ErrNoNode { + if _, err := ts.GetShardNames(ctx, "test_keyspace666"); err != topo.ErrNoNode { t.Errorf("GetShardNames(666): %v", err) } // test ValidateShard - if err := ts.ValidateShard("test_keyspace", "b0-c0"); err != nil { + if err := ts.ValidateShard(ctx, "test_keyspace", "b0-c0"); err != nil { t.Errorf("ValidateShard(test_keyspace, b0-c0) failed: %v", err) } } diff --git a/go/vt/topo/test/tablet.go b/go/vt/topo/test/tablet.go index db54e0fbd7..9d6298ce0b 100644 --- a/go/vt/topo/test/tablet.go +++ b/go/vt/topo/test/tablet.go @@ -27,7 +27,7 @@ func tabletEqual(left, right *topo.Tablet) (bool, error) { // CheckTablet verifies the topo server API is correct for managing tablets. func CheckTablet(ctx context.Context, t *testing.T, ts topo.Server) { - cell := getLocalCell(t, ts) + cell := getLocalCell(ctx, t, ts) tablet := &topo.Tablet{ Alias: topo.TabletAlias{Cell: cell, Uid: 1}, Hostname: "localhost", @@ -42,18 +42,18 @@ func CheckTablet(ctx context.Context, t *testing.T, ts topo.Server) { Type: topo.TYPE_MASTER, KeyRange: newKeyRange("-10"), } - if err := ts.CreateTablet(tablet); err != nil { + if err := ts.CreateTablet(ctx, tablet); err != nil { t.Errorf("CreateTablet: %v", err) } - if err := ts.CreateTablet(tablet); err != topo.ErrNodeExists { + if err := ts.CreateTablet(ctx, tablet); err != topo.ErrNodeExists { t.Errorf("CreateTablet(again): %v", err) } - if _, err := ts.GetTablet(topo.TabletAlias{Cell: cell, Uid: 666}); err != topo.ErrNoNode { + if _, err := ts.GetTablet(ctx, topo.TabletAlias{Cell: cell, Uid: 666}); err != topo.ErrNoNode { t.Errorf("GetTablet(666): %v", err) } - ti, err := ts.GetTablet(tablet.Alias) + ti, err := ts.GetTablet(ctx, tablet.Alias) if err != nil { t.Errorf("GetTablet %v: %v", tablet.Alias, err) } @@ -63,11 +63,11 @@ func CheckTablet(ctx context.Context, t *testing.T, ts topo.Server) { t.Errorf("put and got tablets are not identical:\n%#v\n%#v", tablet, ti.Tablet) } - if _, err := ts.GetTabletsByCell("666"); err != topo.ErrNoNode { + if _, err := ts.GetTabletsByCell(ctx, "666"); err != topo.ErrNoNode { t.Errorf("GetTabletsByCell(666): %v", err) } - inCell, err := ts.GetTabletsByCell(cell) + inCell, err := ts.GetTabletsByCell(ctx, cell) if err != nil { t.Errorf("GetTabletsByCell: %v", err) } @@ -80,7 +80,7 @@ func CheckTablet(ctx context.Context, t *testing.T, ts topo.Server) { t.Errorf("UpdateTablet: %v", err) } - ti, err = ts.GetTablet(tablet.Alias) + ti, err = ts.GetTablet(ctx, tablet.Alias) if err != nil { t.Errorf("GetTablet %v: %v", tablet.Alias, err) } @@ -94,7 +94,7 @@ func CheckTablet(ctx context.Context, t *testing.T, ts topo.Server) { }); err != nil { t.Errorf("UpdateTabletFields: %v", err) } - ti, err 
= ts.GetTablet(tablet.Alias) + ti, err = ts.GetTablet(ctx, tablet.Alias) if err != nil { t.Errorf("GetTablet %v: %v", tablet.Alias, err) } @@ -103,14 +103,14 @@ func CheckTablet(ctx context.Context, t *testing.T, ts topo.Server) { t.Errorf("ti.Hostname: want %v, got %v", want, ti.Hostname) } - if err := ts.DeleteTablet(tablet.Alias); err != nil { + if err := ts.DeleteTablet(ctx, tablet.Alias); err != nil { t.Errorf("DeleteTablet: %v", err) } - if err := ts.DeleteTablet(tablet.Alias); err != topo.ErrNoNode { + if err := ts.DeleteTablet(ctx, tablet.Alias); err != topo.ErrNoNode { t.Errorf("DeleteTablet(again): %v", err) } - if _, err := ts.GetTablet(tablet.Alias); err != topo.ErrNoNode { + if _, err := ts.GetTablet(ctx, tablet.Alias); err != topo.ErrNoNode { t.Errorf("GetTablet: expected error, tablet was deleted: %v", err) } diff --git a/go/vt/topo/test/testing.go b/go/vt/topo/test/testing.go index 25fac0cca6..5dcea70a1b 100644 --- a/go/vt/topo/test/testing.go +++ b/go/vt/topo/test/testing.go @@ -9,6 +9,7 @@ import ( "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) func newKeyRange(value string) key.KeyRange { @@ -19,8 +20,8 @@ func newKeyRange(value string) key.KeyRange { return result } -func getLocalCell(t *testing.T, ts topo.Server) string { - cells, err := ts.GetKnownCells() +func getLocalCell(ctx context.Context, t *testing.T, ts topo.Server) string { + cells, err := ts.GetKnownCells(ctx) if err != nil { t.Fatalf("GetKnownCells: %v", err) } diff --git a/go/vt/topo/test/vschema.go b/go/vt/topo/test/vschema.go index 2fe424304a..0b66d847d0 100644 --- a/go/vt/topo/test/vschema.go +++ b/go/vt/topo/test/vschema.go @@ -9,15 +9,17 @@ import ( "testing" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) -func CheckVSchema(t *testing.T, ts topo.Server) { +// CheckVSchema runs the tests on the VSchema part of the API +func CheckVSchema(ctx context.Context, t *testing.T, ts topo.Server) { schemafier, ok := ts.(topo.Schemafier) if !ok { t.Errorf("%T is not a Schemafier", ts) return } - got, err := schemafier.GetVSchema() + got, err := schemafier.GetVSchema(ctx) if err != nil { t.Error(err) } @@ -26,12 +28,12 @@ func CheckVSchema(t *testing.T, ts topo.Server) { t.Errorf("GetVSchema: %s, want %s", got, want) } - err = schemafier.SaveVSchema(`{ "Keyspaces": {}}`) + err = schemafier.SaveVSchema(ctx, `{ "Keyspaces": {}}`) if err != nil { t.Error(err) } - got, err = schemafier.GetVSchema() + got, err = schemafier.GetVSchema(ctx) if err != nil { t.Error(err) } @@ -40,12 +42,12 @@ func CheckVSchema(t *testing.T, ts topo.Server) { t.Errorf("GetVSchema: %s, want %s", got, want) } - err = schemafier.SaveVSchema(`{ "Keyspaces": { "aa": { "Sharded": false}}}`) + err = schemafier.SaveVSchema(ctx, `{ "Keyspaces": { "aa": { "Sharded": false}}}`) if err != nil { t.Error(err) } - got, err = schemafier.GetVSchema() + got, err = schemafier.GetVSchema(ctx) if err != nil { t.Error(err) } @@ -54,7 +56,7 @@ func CheckVSchema(t *testing.T, ts topo.Server) { t.Errorf("GetVSchema: %s, want %s", got, want) } - err = schemafier.SaveVSchema("invalid") + err = schemafier.SaveVSchema(ctx, "invalid") want = "Unmarshal failed:" if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("SaveVSchema: %v, must start with %s", err, want) diff --git a/go/vt/topo/wildcards.go b/go/vt/topo/wildcards.go index fd996eb3fa..9d0a0e38bd 100644 --- a/go/vt/topo/wildcards.go +++ b/go/vt/topo/wildcards.go @@ -10,21 +10,22 @@ import ( "strings" 
"github.com/youtube/vitess/go/fileutil" + "golang.org/x/net/context" ) // WildcardBackend is a subset of Server for the methods used by the // wildcard code. This lets us test with a very simple fake topo server. type WildcardBackend interface { // GetKeyspaces returns the known keyspaces. They shall be sorted. - GetKeyspaces() ([]string, error) + GetKeyspaces(ctx context.Context) ([]string, error) // GetShard reads a shard and returns it. // Can return ErrNoNode - GetShard(keyspace, shard string) (*ShardInfo, error) + GetShard(ctx context.Context, keyspace, shard string) (*ShardInfo, error) // GetShardNames returns the known shards in a keyspace. // Can return ErrNoNode - GetShardNames(keyspace string) ([]string, error) + GetShardNames(ctx context.Context, keyspace string) ([]string, error) } // ResolveKeyspaceWildcard will resolve keyspace wildcards. @@ -32,14 +33,14 @@ type WildcardBackend interface { // doesn't exist, it is still returned). // - If the param is a wildcard, it will get all keyspaces and returns // the ones which match the wildcard (which may be an empty list). -func ResolveKeyspaceWildcard(server WildcardBackend, param string) ([]string, error) { +func ResolveKeyspaceWildcard(ctx context.Context, server WildcardBackend, param string) ([]string, error) { if !fileutil.HasWildcard(param) { return []string{param}, nil } - result := make([]string, 0) + var result []string - keyspaces, err := server.GetKeyspaces() + keyspaces, err := server.GetKeyspaces(ctx) if err != nil { return nil, fmt.Errorf("failed to read keyspaces from topo: %v", err) } @@ -69,7 +70,7 @@ type KeyspaceShard struct { // doesn't exist) // - us*/* returns all shards in all keyspaces that start with 'us'. If no such // keyspace exists, list is empty (it is not an error). 
-func ResolveShardWildcard(server WildcardBackend, param string) ([]KeyspaceShard, error) { +func ResolveShardWildcard(ctx context.Context, server WildcardBackend, param string) ([]KeyspaceShard, error) { parts := strings.Split(param, "/") if len(parts) != 2 { return nil, fmt.Errorf("invalid shard path: %v", param) @@ -78,7 +79,7 @@ func ResolveShardWildcard(server WildcardBackend, param string) ([]KeyspaceShard // get all the matched keyspaces first, remember if it was a wildcard keyspaceHasWildcards := fileutil.HasWildcard(parts[0]) - matchedKeyspaces, err := ResolveKeyspaceWildcard(server, parts[0]) + matchedKeyspaces, err := ResolveKeyspaceWildcard(ctx, server, parts[0]) if err != nil { return nil, err } @@ -88,7 +89,7 @@ func ResolveShardWildcard(server WildcardBackend, param string) ([]KeyspaceShard shard := parts[1] if fileutil.HasWildcard(shard) { // get all the shards for the keyspace - shardNames, err := server.GetShardNames(matchedKeyspace) + shardNames, err := server.GetShardNames(ctx, matchedKeyspace) switch err { case nil: // got all the shards, we can keep going @@ -119,7 +120,7 @@ func ResolveShardWildcard(server WildcardBackend, param string) ([]KeyspaceShard } if keyspaceHasWildcards { // keyspace was a wildcard, shard is not, just try it - _, err := server.GetShard(matchedKeyspace, shard) + _, err := server.GetShard(ctx, matchedKeyspace, shard) switch err { case nil: // shard exists, add it diff --git a/go/vt/topo/wildcards_test.go b/go/vt/topo/wildcards_test.go index cdabef1d85..8d85c0e94e 100644 --- a/go/vt/topo/wildcards_test.go +++ b/go/vt/topo/wildcards_test.go @@ -3,6 +3,8 @@ package topo import ( "fmt" "testing" + + "golang.org/x/net/context" ) type fakeWildcardBackend struct { @@ -10,14 +12,14 @@ type fakeWildcardBackend struct { shards map[string][]string } -func (fwb *fakeWildcardBackend) GetKeyspaces() ([]string, error) { +func (fwb *fakeWildcardBackend) GetKeyspaces(ctx context.Context) ([]string, error) { if fwb.keyspaces == nil { return nil, fmt.Errorf("fake error") } return fwb.keyspaces, nil } -func (fwb *fakeWildcardBackend) GetShard(keyspace, shard string) (*ShardInfo, error) { +func (fwb *fakeWildcardBackend) GetShard(ctx context.Context, keyspace, shard string) (*ShardInfo, error) { shards, ok := fwb.shards[keyspace] if !ok { return nil, ErrNoNode @@ -33,7 +35,7 @@ func (fwb *fakeWildcardBackend) GetShard(keyspace, shard string) (*ShardInfo, er return nil, ErrNoNode } -func (fwb *fakeWildcardBackend) GetShardNames(keyspace string) ([]string, error) { +func (fwb *fakeWildcardBackend) GetShardNames(ctx context.Context, keyspace string) ([]string, error) { shards, ok := fwb.shards[keyspace] if !ok { return nil, ErrNoNode @@ -45,7 +47,8 @@ func (fwb *fakeWildcardBackend) GetShardNames(keyspace string) ([]string, error) } func validateKeyspaceWildcard(t *testing.T, fwb *fakeWildcardBackend, param string, expected []string) { - r, err := ResolveKeyspaceWildcard(fwb, param) + ctx := context.Background() + r, err := ResolveKeyspaceWildcard(ctx, fwb, param) if err != nil { if expected != nil { t.Errorf("was not expecting an error but got: %v", err) @@ -77,7 +80,8 @@ func TestKeyspaceWildcards(t *testing.T) { } func validateShardWildcard(t *testing.T, fwb *fakeWildcardBackend, param string, expected []KeyspaceShard) { - r, err := ResolveShardWildcard(fwb, param) + ctx := context.Background() + r, err := ResolveShardWildcard(ctx, fwb, param) if err != nil { if expected != nil { t.Errorf("was not expecting an error but got: %v", err) diff --git 
a/go/vt/topotools/rebuild.go b/go/vt/topotools/rebuild.go index 8aa4fbb4b2..8dc5d3cfd1 100644 --- a/go/vt/topotools/rebuild.go +++ b/go/vt/topotools/rebuild.go @@ -33,7 +33,7 @@ func RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keysp ctx = trace.NewContext(ctx, span) // read the existing shard info. It has to exist. - shardInfo, err := ts.GetShard(keyspace, shard) + shardInfo, err := ts.GetShard(ctx, keyspace, shard) if err != nil { return nil, err } @@ -69,7 +69,7 @@ func RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keysp } // read the ShardReplication object to find tablets - sri, err := ts.GetShardReplication(cell, keyspace, shard) + sri, err := ts.GetShardReplication(ctx, cell, keyspace, shard) if err != nil { rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err)) return @@ -119,7 +119,7 @@ func rebuildCellSrvShard(ctx context.Context, log logutil.Logger, ts topo.Server // Get all existing db types so they can be removed if nothing // had been edited. - existingTabletTypes, err := ts.GetSrvTabletTypesPerShard(cell, shardInfo.Keyspace(), shardInfo.ShardName()) + existingTabletTypes, err := ts.GetSrvTabletTypesPerShard(ctx, cell, shardInfo.Keyspace(), shardInfo.ShardName()) if err != nil { if err != topo.ErrNoNode { return err @@ -180,7 +180,7 @@ func rebuildCellSrvShard(ctx context.Context, log logutil.Logger, ts topo.Server span := trace.NewSpanFromContext(ctx) span.StartClient("TopoServer.UpdateEndPoints") span.Annotate("tablet_type", string(tabletType)) - if err := ts.UpdateEndPoints(cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, addrs); err != nil { + if err := ts.UpdateEndPoints(ctx, cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, addrs); err != nil { rec.RecordError(fmt.Errorf("writing endpoints for cell %v shard %v/%v tabletType %v failed: %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, err)) } span.Finish() @@ -198,7 +198,7 @@ func rebuildCellSrvShard(ctx context.Context, log logutil.Logger, ts topo.Server span := trace.NewSpanFromContext(ctx) span.StartClient("TopoServer.DeleteEndPoints") span.Annotate("tablet_type", string(tabletType)) - if err := ts.DeleteEndPoints(cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType); err != nil { + if err := ts.DeleteEndPoints(ctx, cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType); err != nil { log.Warningf("unable to remove stale db type %v from serving graph: %v", tabletType, err) } span.Finish() @@ -222,7 +222,7 @@ func rebuildCellSrvShard(ctx context.Context, log logutil.Logger, ts topo.Server span.Annotate("keyspace", shardInfo.Keyspace()) span.Annotate("shard", shardInfo.ShardName()) span.Annotate("cell", cell) - if err := ts.UpdateSrvShard(cell, shardInfo.Keyspace(), shardInfo.ShardName(), srvShard); err != nil { + if err := ts.UpdateSrvShard(ctx, cell, shardInfo.Keyspace(), shardInfo.ShardName(), srvShard); err != nil { rec.RecordError(fmt.Errorf("writing serving data in cell %v for %v/%v failed: %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), err)) } span.Finish() diff --git a/go/vt/topotools/rebuild_test.go b/go/vt/topotools/rebuild_test.go index e45656daf3..fd2b5b433c 100644 --- a/go/vt/topotools/rebuild_test.go +++ b/go/vt/topotools/rebuild_test.go @@ -41,14 +41,14 @@ func TestRebuildShardRace(t *testing.T) { } // Check initial state. 
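
In rebuild.go above, every serving-graph write is wrapped in a client span, and with ctx now threaded through, those spans stay parented to the caller's trace. A sketch of that span-per-write shape; the trace and topo calls are the ones visible in this diff, while the import paths for trace and concurrency are assumptions:

package topotools

import (
	"fmt"

	"github.com/youtube/vitess/go/trace"
	"github.com/youtube/vitess/go/vt/concurrency"
	"github.com/youtube/vitess/go/vt/topo"
	"golang.org/x/net/context"
)

// updateEndPointsTraced shows the span-per-write pattern used in
// rebuildCellSrvShard: start a client span, annotate it, do the topo
// write with the caller's ctx, record any failure, finish the span.
func updateEndPointsTraced(ctx context.Context, ts topo.Server, rec *concurrency.AllErrorRecorder,
	cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) {
	span := trace.NewSpanFromContext(ctx)
	span.StartClient("TopoServer.UpdateEndPoints")
	span.Annotate("tablet_type", string(tabletType))
	if err := ts.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, addrs); err != nil {
		rec.RecordError(fmt.Errorf("writing endpoints for cell %v shard %v/%v failed: %v", cell, keyspace, shard, err))
	}
	span.Finish()
}
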
- ep, err := ts.GetEndPoints(cells[0], keyspace, shard, topo.TYPE_MASTER) + ep, err := ts.GetEndPoints(ctx, cells[0], keyspace, shard, topo.TYPE_MASTER) if err != nil { t.Fatalf("GetEndPoints: %v", err) } if got, want := len(ep.Entries), 1; got != want { t.Fatalf("len(Entries) = %v, want %v", got, want) } - ep, err = ts.GetEndPoints(cells[0], keyspace, shard, topo.TYPE_REPLICA) + ep, err = ts.GetEndPoints(ctx, cells[0], keyspace, shard, topo.TYPE_REPLICA) if err != nil { t.Fatalf("GetEndPoints: %v", err) } @@ -104,10 +104,10 @@ func TestRebuildShardRace(t *testing.T) { <-done // Check that the rebuild picked up both changes. - if _, err := ts.GetEndPoints(cells[0], keyspace, shard, topo.TYPE_MASTER); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { + if _, err := ts.GetEndPoints(ctx, cells[0], keyspace, shard, topo.TYPE_MASTER); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { t.Errorf("first change wasn't picked up by second rebuild") } - if _, err := ts.GetEndPoints(cells[0], keyspace, shard, topo.TYPE_REPLICA); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { + if _, err := ts.GetEndPoints(ctx, cells[0], keyspace, shard, topo.TYPE_REPLICA); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { t.Errorf("second change was overwritten by first rebuild finishing late") } } diff --git a/go/vt/topotools/shard.go b/go/vt/topotools/shard.go index 6063926955..aedb771714 100644 --- a/go/vt/topotools/shard.go +++ b/go/vt/topotools/shard.go @@ -22,7 +22,7 @@ func CreateShard(ctx context.Context, ts topo.Server, keyspace, shard string) er } // now try to create within the lock, may already exist - err = topo.CreateShard(ts, keyspace, shard) + err = topo.CreateShard(ctx, ts, keyspace, shard) // and unlock and return return node.UnlockKeyspace(ctx, ts, keyspace, lockPath, err) @@ -31,10 +31,10 @@ func CreateShard(ctx context.Context, ts topo.Server, keyspace, shard string) er // GetOrCreateShard will return the shard object, or create one if it doesn't // already exist. Note the shard creation is protected by a keyspace Lock. 
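
GetOrCreateShard, whose body follows, is a double-checked create: read, and only on ErrNoNode take the keyspace lock, create inside it, then read again, because another writer may have created the shard between the first read and the lock. The control flow, distilled into a runnable miniature with hypothetical stand-ins:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNoNode = errors.New("node doesn't exist") // stand-in for topo.ErrNoNode

// store is a hypothetical stand-in for the topo server plus keyspace lock.
type store struct {
	mu     sync.Mutex
	shards map[string]string
}

func (s *store) get(name string) (string, error) {
	if v, ok := s.shards[name]; ok {
		return v, nil
	}
	return "", errNoNode
}

// getOrCreate mirrors GetOrCreateShard's shape: read, lock, create, re-read.
func (s *store) getOrCreate(name string) (string, error) {
	v, err := s.get(name)
	if err != errNoNode {
		return v, err // found it, or hit a real error
	}
	s.mu.Lock() // stand-in for LockKeyspace
	defer s.mu.Unlock()
	if _, err := s.get(name); err == errNoNode {
		s.shards[name] = "shard:" + name // create within the lock
	}
	return s.get(name) // re-read: someone may have created it in between
}

func main() {
	s := &store{shards: map[string]string{}}
	v, _ := s.getOrCreate("b0-c0")
	fmt.Println(v)
}
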
func GetOrCreateShard(ctx context.Context, ts topo.Server, keyspace, shard string) (*topo.ShardInfo, error) { - si, finalErr := ts.GetShard(keyspace, shard) + si, finalErr := ts.GetShard(ctx, keyspace, shard) if finalErr == topo.ErrNoNode { // create the keyspace, maybe it already exists - if err := ts.CreateKeyspace(keyspace, &topo.Keyspace{}); err != nil && err != topo.ErrNodeExists { + if err := ts.CreateKeyspace(ctx, keyspace, &topo.Keyspace{}); err != nil && err != topo.ErrNodeExists { return nil, fmt.Errorf("CreateKeyspace(%v) failed: %v", keyspace, err) } @@ -46,13 +46,13 @@ func GetOrCreateShard(ctx context.Context, ts topo.Server, keyspace, shard strin } // now try to create within the lock, may already exist - if err := topo.CreateShard(ts, keyspace, shard); err != nil && err != topo.ErrNodeExists { + if err := topo.CreateShard(ctx, ts, keyspace, shard); err != nil && err != topo.ErrNodeExists { return nil, node.UnlockKeyspace(ctx, ts, keyspace, lockPath, fmt.Errorf("CreateShard(%v/%v) failed: %v", keyspace, shard, err)) } // try to read the shard again, maybe someone created it // in between the original GetShard and the LockKeyspace - si, finalErr = ts.GetShard(keyspace, shard) + si, finalErr = ts.GetShard(ctx, keyspace, shard) // and unlock if err := node.UnlockKeyspace(ctx, ts, keyspace, lockPath, finalErr); err != nil { diff --git a/go/vt/topotools/shard_test.go b/go/vt/topotools/shard_test.go index 0bd0dd4be5..445f4cd80f 100644 --- a/go/vt/topotools/shard_test.go +++ b/go/vt/topotools/shard_test.go @@ -40,7 +40,7 @@ func TestCreateShard(t *testing.T) { } // create keyspace - if err := ts.CreateKeyspace(keyspace, &topo.Keyspace{}); err != nil { + if err := ts.CreateKeyspace(ctx, keyspace, &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } diff --git a/go/vt/topotools/split.go b/go/vt/topotools/split.go index efa80b5b31..87e471de08 100644 --- a/go/vt/topotools/split.go +++ b/go/vt/topotools/split.go @@ -10,6 +10,7 @@ import ( "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/topo" + "golang.org/x/net/context" ) // OverlappingShards contains sets of shards that overlap which each-other. @@ -52,8 +53,8 @@ func OverlappingShardsForShard(os []*OverlappingShards, shardName string) *Overl // having 40-80, 40-60 and 40-50 in the same keyspace is not supported and // will return an error). // If shards don't perfectly overlap, they are not returned. -func FindOverlappingShards(ts topo.Server, keyspace string) ([]*OverlappingShards, error) { - shardMap, err := topo.FindAllShardsInKeyspace(ts, keyspace) +func FindOverlappingShards(ctx context.Context, ts topo.Server, keyspace string) ([]*OverlappingShards, error) { + shardMap, err := topo.FindAllShardsInKeyspace(ctx, ts, keyspace) if err != nil { return nil, err } diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go index 82cae2885f..eb6573fd2b 100644 --- a/go/vt/topotools/tablet.go +++ b/go/vt/topotools/tablet.go @@ -49,7 +49,7 @@ func ConfigureTabletHook(hk *hook.Hook, tabletAlias topo.TabletAlias) { // remote actions. And if 'force' is false, we also run an optional // hook. 
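
One idiom in Scrap (below) deserves a note: when tearing down replication data, topo.ErrNoNode is downgraded to a log line, because deleting something that is already gone counts as success for cleanup purposes. A tiny sketch of the idiom with a hypothetical delete helper:

package main

import (
	"errors"
	"fmt"
)

var errNoNode = errors.New("node doesn't exist") // stand-in for topo.ErrNoNode

// deleteIgnoringMissing wraps a hypothetical delete call so that
// "already gone" counts as success, as Scrap does for replication data.
func deleteIgnoringMissing(del func() error) error {
	if err := del(); err != nil && err != errNoNode {
		return err // a real failure
	}
	return nil // deleted, or was never there
}

func main() {
	err := deleteIgnoringMissing(func() error { return errNoNode })
	fmt.Println("cleanup result:", err) // cleanup result: <nil>
}
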
func Scrap(ctx context.Context, ts topo.Server, tabletAlias topo.TabletAlias, force bool) error { - tablet, err := ts.GetTablet(tabletAlias) + tablet, err := ts.GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -65,7 +65,7 @@ func Scrap(ctx context.Context, ts topo.Server, tabletAlias topo.TabletAlias, fo } if wasAssigned { - err = topo.DeleteTabletReplicationData(ts, tablet.Tablet) + err = topo.DeleteTabletReplicationData(ctx, ts, tablet.Tablet) if err != nil { if err == topo.ErrNoNode { log.V(6).Infof("no ShardReplication object for cell %v", tablet.Alias.Cell) @@ -100,7 +100,7 @@ func Scrap(ctx context.Context, ts topo.Server, tabletAlias topo.TabletAlias, fo // - if health is an empty map, we clear the Tablet's Health record. // - if health has values, we overwrite the Tablet's Health record. func ChangeType(ctx context.Context, ts topo.Server, tabletAlias topo.TabletAlias, newType topo.TabletType, health map[string]string) error { - tablet, err := ts.GetTablet(tabletAlias) + tablet, err := ts.GetTablet(ctx, tabletAlias) if err != nil { return err } diff --git a/go/vt/topotools/topology.go b/go/vt/topotools/topology.go index 8112e2fbb8..9768b0ee9c 100644 --- a/go/vt/topotools/topology.go +++ b/go/vt/topotools/topology.go @@ -234,14 +234,14 @@ type ServingGraph struct { } // DbServingGraph returns the ServingGraph for the given cell. -func DbServingGraph(ts topo.Server, cell string) (servingGraph *ServingGraph) { +func DbServingGraph(ctx context.Context, ts topo.Server, cell string) (servingGraph *ServingGraph) { servingGraph = &ServingGraph{ Cell: cell, Keyspaces: make(map[string]*KeyspaceNodes), } rec := concurrency.AllErrorRecorder{} - keyspaces, err := ts.GetSrvKeyspaceNames(cell) + keyspaces, err := ts.GetSrvKeyspaceNames(ctx, cell) if err != nil { servingGraph.Errors = append(servingGraph.Errors, fmt.Sprintf("GetSrvKeyspaceNames failed: %v", err)) return @@ -255,7 +255,7 @@ func DbServingGraph(ts topo.Server, cell string) (servingGraph *ServingGraph) { go func(keyspace string, kn *KeyspaceNodes) { defer wg.Done() - ks, err := ts.GetSrvKeyspace(cell, keyspace) + ks, err := ts.GetSrvKeyspace(ctx, cell, keyspace) if err != nil { rec.RecordError(fmt.Errorf("GetSrvKeyspace(%v, %v) failed: %v", cell, keyspace, err)) return @@ -283,13 +283,13 @@ func DbServingGraph(ts topo.Server, cell string) (servingGraph *ServingGraph) { wg.Add(1) go func(shard string, sn *ShardNodes) { defer wg.Done() - tabletTypes, err := ts.GetSrvTabletTypesPerShard(cell, keyspace, shard) + tabletTypes, err := ts.GetSrvTabletTypesPerShard(ctx, cell, keyspace, shard) if err != nil { rec.RecordError(fmt.Errorf("GetSrvTabletTypesPerShard(%v, %v, %v) failed: %v", cell, keyspace, shard, err)) return } for _, tabletType := range tabletTypes { - endPoints, err := ts.GetEndPoints(cell, keyspace, shard, tabletType) + endPoints, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType) if err != nil { rec.RecordError(fmt.Errorf("GetEndPoints(%v, %v, %v, %v) failed: %v", cell, keyspace, shard, tabletType, err)) continue diff --git a/go/vt/topotools/utils.go b/go/vt/topotools/utils.go index f20fc07a0b..f2f4a1f10c 100644 --- a/go/vt/topotools/utils.go +++ b/go/vt/topotools/utils.go @@ -27,7 +27,7 @@ func FindTabletByIPAddrAndPort(tabletMap map[topo.TabletAlias]*topo.TabletInfo, // GetAllTablets returns a sorted list of tablets. 
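
DbServingGraph above fans out one goroutine per keyspace and per shard, passing the loop variables as arguments and collecting failures in an AllErrorRecorder instead of aborting on the first error. A self-contained miniature of that fan-out shape:

package main

import (
	"fmt"
	"sync"
)

// errorRecorder is a tiny stand-in for concurrency.AllErrorRecorder.
type errorRecorder struct {
	mu   sync.Mutex
	errs []error
}

func (r *errorRecorder) RecordError(err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.errs = append(r.errs, err)
}

func main() {
	keyspaces := []string{"ks1", "ks2", "ks3"}
	rec := &errorRecorder{}
	var wg sync.WaitGroup
	for _, ks := range keyspaces {
		wg.Add(1)
		go func(keyspace string) { // pass the loop variable explicitly (required before Go 1.22)
			defer wg.Done()
			if keyspace == "ks2" {
				rec.RecordError(fmt.Errorf("GetSrvKeyspace(%v) failed", keyspace))
				return
			}
			// ... build the per-keyspace node here ...
		}(ks)
	}
	wg.Wait()
	fmt.Printf("collected %d errors\n", len(rec.errs))
}
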
func GetAllTablets(ctx context.Context, ts topo.Server, cell string) ([]*topo.TabletInfo, error) { - aliases, err := ts.GetTabletsByCell(cell) + aliases, err := ts.GetTabletsByCell(ctx, cell) if err != nil { return nil, err } @@ -56,7 +56,7 @@ func GetAllTablets(ctx context.Context, ts topo.Server, cell string) ([]*topo.Ta // GetAllTabletsAcrossCells returns all tablets from known cells. // If it returns topo.ErrPartialResult, then the list is valid, but partial. func GetAllTabletsAcrossCells(ctx context.Context, ts topo.Server) ([]*topo.TabletInfo, error) { - cells, err := ts.GetKnownCells() + cells, err := ts.GetKnownCells(ctx) if err != nil { return nil, err } diff --git a/go/vt/vtctl/plugin_zktopo.go b/go/vt/vtctl/plugin_zktopo.go index 6453dffd36..f114cfdd26 100644 --- a/go/vt/vtctl/plugin_zktopo.go +++ b/go/vt/vtctl/plugin_zktopo.go @@ -103,7 +103,7 @@ func commandExportZkns(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla if err != nil { return err } - return wr.ExportZkns(cell) + return wr.ExportZkns(ctx, cell) } func commandExportZknsForKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index 55507b58d1..d488a51c02 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -61,7 +61,7 @@ func commandDemoteMaster(ctx context.Context, wr *wrangler.Wrangler, subFlags *f if err != nil { return err } - tabletInfo, err := wr.TopoServer().GetTablet(tabletAlias) + tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return err } diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 9583493874..c60678754f 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -431,7 +431,7 @@ func getFileParam(flag, flagFile, name string) (string, error) { // For instance: // us* // using plain matching // * // using plain matching -func keyspaceParamsToKeyspaces(wr *wrangler.Wrangler, params []string) ([]string, error) { +func keyspaceParamsToKeyspaces(ctx context.Context, wr *wrangler.Wrangler, params []string) ([]string, error) { result := make([]string, 0, len(params)) for _, param := range params { if param[0] == '/' { @@ -442,7 +442,7 @@ func keyspaceParamsToKeyspaces(wr *wrangler.Wrangler, params []string) ([]string } else { // this is not a path, so assume a keyspace name, // possibly with wildcards - keyspaces, err := topo.ResolveKeyspaceWildcard(wr.TopoServer(), param) + keyspaces, err := topo.ResolveKeyspaceWildcard(ctx, wr.TopoServer(), param) if err != nil { return nil, fmt.Errorf("Failed to resolve keyspace wildcard %v: %v", param, err) } @@ -457,7 +457,7 @@ func keyspaceParamsToKeyspaces(wr *wrangler.Wrangler, params []string) ([]string // For instance: // user/* // using plain matching // */0 // using plain matching -func shardParamsToKeyspaceShards(wr *wrangler.Wrangler, params []string) ([]topo.KeyspaceShard, error) { +func shardParamsToKeyspaceShards(ctx context.Context, wr *wrangler.Wrangler, params []string) ([]topo.KeyspaceShard, error) { result := make([]topo.KeyspaceShard, 0, len(params)) for _, param := range params { if param[0] == '/' { @@ -472,7 +472,7 @@ func shardParamsToKeyspaceShards(wr *wrangler.Wrangler, params []string) ([]topo } else { // this is not a path, so assume a keyspace // name / shard name, each possibly with wildcards - keyspaceShards, err := topo.ResolveShardWildcard(wr.TopoServer(), param) + keyspaceShards, err := topo.ResolveShardWildcard(ctx, wr.TopoServer(), param) if err != nil { 
return nil, fmt.Errorf("Failed to resolve keyspace/shard wildcard %v: %v", param, err) } @@ -573,7 +573,7 @@ func commandGetTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag if err != nil { return err } - tabletInfo, err := wr.TopoServer().GetTablet(tabletAlias) + tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err == nil { wr.Logger().Printf("%v\n", jscfg.ToJSON(tabletInfo)) } @@ -601,7 +601,7 @@ func commandUpdateTabletAddrs(ctx context.Context, wr *wrangler.Wrangler, subFla if err != nil { return err } - return wr.TopoServer().UpdateTabletFields(tabletAlias, func(tablet *topo.Tablet) error { + return wr.TopoServer().UpdateTabletFields(ctx, tabletAlias, func(tablet *topo.Tablet) error { if *hostname != "" { tablet.Hostname = *hostname } @@ -656,7 +656,7 @@ func commandDeleteTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return err } for _, tabletAlias := range tabletAliases { - if err := wr.DeleteTablet(tabletAlias); err != nil { + if err := wr.DeleteTablet(ctx, tabletAlias); err != nil { return err } } @@ -675,7 +675,7 @@ func commandSetReadOnly(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl if err != nil { return err } - ti, err := wr.TopoServer().GetTablet(tabletAlias) + ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } @@ -694,7 +694,7 @@ func commandSetReadWrite(ctx context.Context, wr *wrangler.Wrangler, subFlags *f if err != nil { return err } - ti, err := wr.TopoServer().GetTablet(tabletAlias) + ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } @@ -713,7 +713,7 @@ func commandStartSlave(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla if err != nil { return err } - ti, err := wr.TopoServer().GetTablet(tabletAlias) + ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } @@ -732,7 +732,7 @@ func commandStopSlave(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag if err != nil { return err } - ti, err := wr.TopoServer().GetTablet(tabletAlias) + ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } @@ -759,7 +759,7 @@ func commandChangeSlaveType(ctx context.Context, wr *wrangler.Wrangler, subFlags return err } if *dryRun { - ti, err := wr.TopoServer().GetTablet(tabletAlias) + ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return fmt.Errorf("failed reading tablet %v: %v", tabletAlias, err) } @@ -785,7 +785,7 @@ func commandPing(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Flag if err != nil { return err } - tabletInfo, err := wr.TopoServer().GetTablet(tabletAlias) + tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -803,7 +803,7 @@ func commandRefreshState(ctx context.Context, wr *wrangler.Wrangler, subFlags *f if err != nil { return err } - tabletInfo, err := wr.TopoServer().GetTablet(tabletAlias) + tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -825,7 +825,7 @@ func commandRunHealthCheck(ctx context.Context, wr *wrangler.Wrangler, subFlags if err != nil { return err } - tabletInfo, err := wr.TopoServer().GetTablet(tabletAlias) + tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -843,7 
+843,7 @@ func commandHealthStream(ctx context.Context, wr *wrangler.Wrangler, subFlags *f if err != nil { return err } - tabletInfo, err := wr.TopoServer().GetTablet(tabletAlias) + tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -878,7 +878,7 @@ func commandSleep(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fla if err != nil { return err } - ti, err := wr.TopoServer().GetTablet(tabletAlias) + ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -902,7 +902,7 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fl if err != nil { return err } - tabletInfo, err := wr.TopoServer().GetTablet(tabletAlias) + tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -976,7 +976,7 @@ func commandCreateShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return err } if *parent { - if err := wr.TopoServer().CreateKeyspace(keyspace, &topo.Keyspace{}); err != nil && err != topo.ErrNodeExists { + if err := wr.TopoServer().CreateKeyspace(ctx, keyspace, &topo.Keyspace{}); err != nil && err != topo.ErrNodeExists { return err } } @@ -1001,7 +1001,7 @@ func commandGetShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag. if err != nil { return err } - shardInfo, err := wr.TopoServer().GetShard(keyspace, shard) + shardInfo, err := wr.TopoServer().GetShard(ctx, keyspace, shard) if err == nil { wr.Logger().Printf("%v\n", jscfg.ToJSON(shardInfo)) } @@ -1022,7 +1022,7 @@ func commandRebuildShardGraph(ctx context.Context, wr *wrangler.Wrangler, subFla cellArray = strings.Split(*cells, ",") } - keyspaceShards, err := shardParamsToKeyspaceShards(wr, subFlags.Args()) + keyspaceShards, err := shardParamsToKeyspaceShards(ctx, wr, subFlags.Args()) if err != nil { return err } @@ -1046,7 +1046,7 @@ func commandTabletExternallyReparented(ctx context.Context, wr *wrangler.Wrangle if err != nil { return err } - ti, err := wr.TopoServer().GetTablet(tabletAlias) + ti, err := wr.TopoServer().GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -1259,7 +1259,7 @@ func commandShardReplicationRemove(ctx context.Context, wr *wrangler.Wrangler, s if err != nil { return err } - return topo.RemoveShardReplicationRecord(wr.TopoServer(), tabletAlias.Cell, keyspace, shard, tabletAlias) + return topo.RemoveShardReplicationRecord(ctx, wr.TopoServer(), tabletAlias.Cell, keyspace, shard, tabletAlias) } func commandShardReplicationFix(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -1275,7 +1275,7 @@ func commandShardReplicationFix(ctx context.Context, wr *wrangler.Wrangler, subF if err != nil { return err } - return topo.FixShardReplication(wr.TopoServer(), wr.Logger(), cell, keyspace, shard) + return topo.FixShardReplication(ctx, wr.TopoServer(), wr.Logger(), cell, keyspace, shard) } func commandRemoveShardCell(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -1302,7 +1302,7 @@ func commandDeleteShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return fmt.Errorf("action DeleteShard requires [...]") } - keyspaceShards, err := shardParamsToKeyspaceShards(wr, subFlags.Args()) + keyspaceShards, err := shardParamsToKeyspaceShards(ctx, wr, subFlags.Args()) if err != nil { return err } @@ -1356,7 +1356,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags } } } - err := wr.TopoServer().CreateKeyspace(keyspace, ki) + err := 
wr.TopoServer().CreateKeyspace(ctx, keyspace, ki) if *force && err == topo.ErrNodeExists { log.Infof("keyspace %v already exists (ignoring error with -force)", keyspace) err = nil @@ -1373,7 +1373,7 @@ func commandGetKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl } keyspace := subFlags.Arg(0) - keyspaceInfo, err := wr.TopoServer().GetKeyspace(keyspace) + keyspaceInfo, err := wr.TopoServer().GetKeyspace(ctx, keyspace) if err == nil { wr.Logger().Printf("%v\n", jscfg.ToJSON(keyspaceInfo)) } @@ -1444,7 +1444,7 @@ func commandRebuildKeyspaceGraph(ctx context.Context, wr *wrangler.Wrangler, sub cellArray = strings.Split(*cells, ",") } - keyspaces, err := keyspaceParamsToKeyspaces(wr, subFlags.Args()) + keyspaces, err := keyspaceParamsToKeyspaces(ctx, wr, subFlags.Args()) if err != nil { return err } @@ -1534,7 +1534,7 @@ func commandFindAllShardsInKeyspace(ctx context.Context, wr *wrangler.Wrangler, } keyspace := subFlags.Arg(0) - result, err := topo.FindAllShardsInKeyspace(wr.TopoServer(), keyspace) + result, err := topo.FindAllShardsInKeyspace(ctx, wr.TopoServer(), keyspace) if err == nil { wr.Logger().Printf("%v\n", jscfg.ToJSON(result)) } @@ -1564,7 +1564,7 @@ func commandResolve(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.F if err != nil { return err } - addrs, err := topo.LookupVtName(wr.TopoServer(), "local", parts[0], parts[1], tabletType, namedPort) + addrs, err := topo.LookupVtName(ctx, wr.TopoServer(), "local", parts[0], parts[1], tabletType, namedPort) if err != nil { return err } @@ -1597,7 +1597,7 @@ func commandRebuildReplicationGraph(ctx context.Context, wr *wrangler.Wrangler, cells := strings.Split(subFlags.Arg(0), ",") keyspaceParams := strings.Split(subFlags.Arg(1), ",") - keyspaces, err := keyspaceParamsToKeyspaces(wr, keyspaceParams) + keyspaces, err := keyspaceParamsToKeyspaces(ctx, wr, keyspaceParams) if err != nil { return err } @@ -1866,7 +1866,7 @@ func commandGetVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla if !ok { return fmt.Errorf("%T does no support the vschema operations", ts) } - schema, err := schemafier.GetVSchema() + schema, err := schemafier.GetVSchema(ctx) if err != nil { return err } @@ -1896,7 +1896,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *f } s = string(schema) } - return schemafier.SaveVSchema(s) + return schemafier.SaveVSchema(ctx, s) } func commandGetSrvKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -1907,7 +1907,7 @@ func commandGetSrvKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags return fmt.Errorf("action GetSrvKeyspace requires ") } - srvKeyspace, err := wr.TopoServer().GetSrvKeyspace(subFlags.Arg(0), subFlags.Arg(1)) + srvKeyspace, err := wr.TopoServer().GetSrvKeyspace(ctx, subFlags.Arg(0), subFlags.Arg(1)) if err == nil { wr.Logger().Printf("%v\n", jscfg.ToJSON(srvKeyspace)) } @@ -1922,7 +1922,7 @@ func commandGetSrvKeyspaceNames(ctx context.Context, wr *wrangler.Wrangler, subF return fmt.Errorf("action GetSrvKeyspaceNames requires ") } - srvKeyspaceNames, err := wr.TopoServer().GetSrvKeyspaceNames(subFlags.Arg(0)) + srvKeyspaceNames, err := wr.TopoServer().GetSrvKeyspaceNames(ctx, subFlags.Arg(0)) if err != nil { return err } @@ -1944,7 +1944,7 @@ func commandGetSrvShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl if err != nil { return err } - srvShard, err := wr.TopoServer().GetSrvShard(subFlags.Arg(0), keyspace, shard) + srvShard, err := 
wr.TopoServer().GetSrvShard(ctx, subFlags.Arg(0), keyspace, shard) if err == nil { wr.Logger().Printf("%v\n", jscfg.ToJSON(srvShard)) } @@ -1964,7 +1964,7 @@ func commandGetEndPoints(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return err } tabletType := topo.TabletType(subFlags.Arg(2)) - endPoints, err := wr.TopoServer().GetEndPoints(subFlags.Arg(0), keyspace, shard, tabletType) + endPoints, err := wr.TopoServer().GetEndPoints(ctx, subFlags.Arg(0), keyspace, shard, tabletType) if err == nil { wr.Logger().Printf("%v\n", jscfg.ToJSON(endPoints)) } @@ -1983,7 +1983,7 @@ func commandGetShardReplication(ctx context.Context, wr *wrangler.Wrangler, subF if err != nil { return err } - shardReplication, err := wr.TopoServer().GetShardReplication(subFlags.Arg(0), keyspace, shard) + shardReplication, err := wr.TopoServer().GetShardReplication(ctx, subFlags.Arg(0), keyspace, shard) if err == nil { wr.Logger().Printf("%v\n", jscfg.ToJSON(shardReplication)) } diff --git a/go/vt/vtctl/vtctlclienttest/client.go b/go/vt/vtctl/vtctlclienttest/client.go index 4c41254949..2808407b2e 100644 --- a/go/vt/vtctl/vtctlclienttest/client.go +++ b/go/vt/vtctl/vtctlclienttest/client.go @@ -26,6 +26,8 @@ func CreateTopoServer(t *testing.T) topo.Server { // TestSuite runs the test suite on the given topo server and client func TestSuite(t *testing.T, ts topo.Server, client vtctlclient.VtctlClient) { + ctx := context.Background() + // Create a fake tablet tablet := &topo.Tablet{ Alias: topo.TabletAlias{Cell: "cell1", Uid: 1}, @@ -40,12 +42,11 @@ func TestSuite(t *testing.T, ts topo.Server, client vtctlclient.VtctlClient) { Keyspace: "test_keyspace", Type: topo.TYPE_MASTER, } - if err := ts.CreateTablet(tablet); err != nil { + if err := ts.CreateTablet(ctx, tablet); err != nil { t.Errorf("CreateTablet: %v", err) } // run a command that's gonna return something on the log channel - ctx := context.Background() logs, errFunc := client.ExecuteVtctlCommand(ctx, []string{"ListAllTablets", "cell1"}, 30*time.Second, 10*time.Second) if err := errFunc(); err != nil { t.Fatalf("Cannot execute remote command: %v", err) diff --git a/go/vt/vtgate/sandbox_test.go b/go/vt/vtgate/sandbox_test.go index 31a5732905..aa40069840 100644 --- a/go/vt/vtgate/sandbox_test.go +++ b/go/vt/vtgate/sandbox_test.go @@ -227,7 +227,7 @@ type sandboxTopo struct { callbackGetEndPoints func(st *sandboxTopo) } -func (sct *sandboxTopo) GetSrvKeyspaceNames(context context.Context, cell string) ([]string, error) { +func (sct *sandboxTopo) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) { sandboxMu.Lock() defer sandboxMu.Unlock() keyspaces := make([]string, 0, 1) @@ -237,7 +237,7 @@ func (sct *sandboxTopo) GetSrvKeyspaceNames(context context.Context, cell string return keyspaces, nil } -func (sct *sandboxTopo) GetSrvKeyspace(context context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { +func (sct *sandboxTopo) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { sand := getSandbox(keyspace) if sand.SrvKeyspaceCallback != nil { sand.SrvKeyspaceCallback() @@ -264,11 +264,11 @@ func (sct *sandboxTopo) GetSrvKeyspace(context context.Context, cell, keyspace s return createShardedSrvKeyspace(sand.ShardSpec, sand.KeyspaceServedFrom) } -func (sct *sandboxTopo) GetSrvShard(context context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { +func (sct *sandboxTopo) GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { return nil, 
fmt.Errorf("Unsupported") } -func (sct *sandboxTopo) GetEndPoints(context context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { +func (sct *sandboxTopo) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { sand := getSandbox(keyspace) sand.EndPointCounter++ if sct.callbackGetEndPoints != nil { @@ -286,7 +286,7 @@ func (sct *sandboxTopo) GetEndPoints(context context.Context, cell, keyspace, sh return ep, nil } -func sandboxDialer(context context.Context, endPoint topo.EndPoint, keyspace, shard string, timeout time.Duration) (tabletconn.TabletConn, error) { +func sandboxDialer(ctx context.Context, endPoint topo.EndPoint, keyspace, shard string, timeout time.Duration) (tabletconn.TabletConn, error) { sand := getSandbox(keyspace) sand.sandmu.Lock() defer sand.sandmu.Unlock() @@ -377,7 +377,7 @@ func (sbc *sandboxConn) setResults(r []*mproto.QueryResult) { sbc.results = r } -func (sbc *sandboxConn) Execute(context context.Context, query string, bindVars map[string]interface{}, transactionID int64) (*mproto.QueryResult, error) { +func (sbc *sandboxConn) Execute(ctx context.Context, query string, bindVars map[string]interface{}, transactionID int64) (*mproto.QueryResult, error) { sbc.ExecCount.Add(1) bv := make(map[string]interface{}) for k, v := range bindVars { @@ -396,7 +396,7 @@ func (sbc *sandboxConn) Execute(context context.Context, query string, bindVars return sbc.getNextResult(), nil } -func (sbc *sandboxConn) ExecuteBatch(context context.Context, queries []tproto.BoundQuery, transactionID int64) (*tproto.QueryResultList, error) { +func (sbc *sandboxConn) ExecuteBatch(ctx context.Context, queries []tproto.BoundQuery, transactionID int64) (*tproto.QueryResultList, error) { sbc.ExecCount.Add(1) if sbc.mustDelay != 0 { time.Sleep(sbc.mustDelay) @@ -412,7 +412,7 @@ func (sbc *sandboxConn) ExecuteBatch(context context.Context, queries []tproto.B return qrl, nil } -func (sbc *sandboxConn) StreamExecute(context context.Context, query string, bindVars map[string]interface{}, transactionID int64) (<-chan *mproto.QueryResult, tabletconn.ErrFunc, error) { +func (sbc *sandboxConn) StreamExecute(ctx context.Context, query string, bindVars map[string]interface{}, transactionID int64) (<-chan *mproto.QueryResult, tabletconn.ErrFunc, error) { sbc.ExecCount.Add(1) bv := make(map[string]interface{}) for k, v := range bindVars { @@ -432,7 +432,7 @@ func (sbc *sandboxConn) StreamExecute(context context.Context, query string, bin return ch, func() error { return err }, err } -func (sbc *sandboxConn) Begin(context context.Context) (int64, error) { +func (sbc *sandboxConn) Begin(ctx context.Context) (int64, error) { sbc.ExecCount.Add(1) sbc.BeginCount.Add(1) if sbc.mustDelay != 0 { @@ -445,7 +445,7 @@ func (sbc *sandboxConn) Begin(context context.Context) (int64, error) { return sbc.TransactionID.Add(1), nil } -func (sbc *sandboxConn) Commit(context context.Context, transactionID int64) error { +func (sbc *sandboxConn) Commit(ctx context.Context, transactionID int64) error { sbc.ExecCount.Add(1) sbc.CommitCount.Add(1) if sbc.mustDelay != 0 { @@ -454,7 +454,7 @@ func (sbc *sandboxConn) Commit(context context.Context, transactionID int64) err return sbc.getError() } -func (sbc *sandboxConn) Rollback(context context.Context, transactionID int64) error { +func (sbc *sandboxConn) Rollback(ctx context.Context, transactionID int64) error { sbc.ExecCount.Add(1) sbc.RollbackCount.Add(1) if sbc.mustDelay != 
0 { @@ -467,7 +467,7 @@ var sandboxSQRowCount = int64(10) // Fake SplitQuery creates splits from the original query by appending the // split index as a comment to the SQL. RowCount is always sandboxSQRowCount -func (sbc *sandboxConn) SplitQuery(context context.Context, query tproto.BoundQuery, splitCount int) ([]tproto.QuerySplit, error) { +func (sbc *sandboxConn) SplitQuery(ctx context.Context, query tproto.BoundQuery, splitCount int) ([]tproto.QuerySplit, error) { splits := []tproto.QuerySplit{} for i := 0; i < splitCount; i++ { split := tproto.QuerySplit{ diff --git a/go/vt/vtgate/srv_topo_server.go b/go/vt/vtgate/srv_topo_server.go index 512255ec99..4d6b3b042e 100644 --- a/go/vt/vtgate/srv_topo_server.go +++ b/go/vt/vtgate/srv_topo_server.go @@ -36,13 +36,13 @@ const ( // SrvTopoServer is a subset of topo.Server that only contains the serving // graph read-only calls used by clients to resolve serving addresses. type SrvTopoServer interface { - GetSrvKeyspaceNames(context context.Context, cell string) ([]string, error) + GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) - GetSrvKeyspace(context context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) + GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) - GetSrvShard(context context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) + GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) - GetEndPoints(context context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) + GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) } // ResilientSrvTopoServer is an implementation of SrvTopoServer based @@ -105,10 +105,10 @@ type srvKeyspaceNamesEntry struct { // the mutex protects any access to this structure (read or write) mutex sync.Mutex - insertionTime time.Time - value []string - lastError error - lastErrorContext context.Context + insertionTime time.Time + value []string + lastError error + lastErrorCtx context.Context } type srvKeyspaceEntry struct { @@ -119,10 +119,10 @@ type srvKeyspaceEntry struct { // the mutex protects any access to this structure (read or write) mutex sync.Mutex - insertionTime time.Time - value *topo.SrvKeyspace - lastError error - lastErrorContext context.Context + insertionTime time.Time + value *topo.SrvKeyspace + lastError error + lastErrorCtx context.Context } type srvShardEntry struct { @@ -134,10 +134,10 @@ type srvShardEntry struct { // the mutex protects any access to this structure (read or write) mutex sync.Mutex - insertionTime time.Time - value *topo.SrvShard - lastError error - lastErrorContext context.Context + insertionTime time.Time + value *topo.SrvShard + lastError error + lastErrorCtx context.Context } type endPointsEntry struct { @@ -157,9 +157,9 @@ type endPointsEntry struct { value *topo.EndPoints // originalValue is the end points that were returned from // the topology server. - originalValue *topo.EndPoints - lastError error - lastErrorContext context.Context + originalValue *topo.EndPoints + lastError error + lastErrorCtx context.Context } func endPointIsHealthy(ep topo.EndPoint) bool { @@ -214,7 +214,7 @@ func NewResilientSrvTopoServer(base topo.Server, counterPrefix string) *Resilien } // GetSrvKeyspaceNames returns all keyspace names for the given cell. 
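// Editor's note: the context -> ctx renames in the hunks above are not
// purely cosmetic. Besides matching Go convention, a parameter named
// "context" shadows the context package inside the function body, so the
// body can no longer refer to context.Context, context.Background(), and
// so on. A minimal self-contained sketch of the problem; it uses the
// standard library's context package for brevity, whereas the tree at
// the time of this patch imported golang.org/x/net/context:

package main

import "context"

func bad(context context.Context) {
	// Does not compile if uncommented: "context" now names the
	// parameter, not the package.
	// child, cancel := context.WithCancel(context)
	_ = context
}

func good(ctx context.Context) {
	// Fine: the package name is still in scope.
	child, cancel := context.WithCancel(ctx)
	defer cancel()
	_ = child
}

func main() {
	good(context.Background())
	bad(context.Background())
}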
-func (server *ResilientSrvTopoServer) GetSrvKeyspaceNames(context context.Context, cell string) ([]string, error) { +func (server *ResilientSrvTopoServer) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) { server.counts.Add(queryCategory, 1) // find the entry in the cache, add it if not there @@ -241,14 +241,14 @@ func (server *ResilientSrvTopoServer) GetSrvKeyspaceNames(context context.Contex } // not in cache or too old, get the real value - result, err := server.topoServer.GetSrvKeyspaceNames(cell) + result, err := server.topoServer.GetSrvKeyspaceNames(ctx, cell) if err != nil { if entry.insertionTime.IsZero() { server.counts.Add(errorCategory, 1) - log.Errorf("GetSrvKeyspaceNames(%v, %v) failed: %v (no cached value, caching and returning error)", context, cell, err) + log.Errorf("GetSrvKeyspaceNames(%v, %v) failed: %v (no cached value, caching and returning error)", ctx, cell, err) } else { server.counts.Add(cachedCategory, 1) - log.Warningf("GetSrvKeyspaceNames(%v, %v) failed: %v (returning cached value: %v %v)", context, cell, err, entry.value, entry.lastError) + log.Warningf("GetSrvKeyspaceNames(%v, %v) failed: %v (returning cached value: %v %v)", ctx, cell, err, entry.value, entry.lastError) return entry.value, entry.lastError } } @@ -257,12 +257,12 @@ func (server *ResilientSrvTopoServer) GetSrvKeyspaceNames(context context.Contex entry.insertionTime = time.Now() entry.value = result entry.lastError = err - entry.lastErrorContext = context + entry.lastErrorCtx = ctx return result, err } // GetSrvKeyspace returns SrvKeyspace object for the given cell and keyspace. -func (server *ResilientSrvTopoServer) GetSrvKeyspace(context context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { +func (server *ResilientSrvTopoServer) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { server.counts.Add(queryCategory, 1) // find the entry in the cache, add it if not there @@ -290,14 +290,14 @@ func (server *ResilientSrvTopoServer) GetSrvKeyspace(context context.Context, ce } // not in cache or too old, get the real value - result, err := server.topoServer.GetSrvKeyspace(cell, keyspace) + result, err := server.topoServer.GetSrvKeyspace(ctx, cell, keyspace) if err != nil { if entry.insertionTime.IsZero() { server.counts.Add(errorCategory, 1) - log.Errorf("GetSrvKeyspace(%v, %v, %v) failed: %v (no cached value, caching and returning error)", context, cell, keyspace, err) + log.Errorf("GetSrvKeyspace(%v, %v, %v) failed: %v (no cached value, caching and returning error)", ctx, cell, keyspace, err) } else { server.counts.Add(cachedCategory, 1) - log.Warningf("GetSrvKeyspace(%v, %v, %v) failed: %v (returning cached value: %v %v)", context, cell, keyspace, err, entry.value, entry.lastError) + log.Warningf("GetSrvKeyspace(%v, %v, %v) failed: %v (returning cached value: %v %v)", ctx, cell, keyspace, err, entry.value, entry.lastError) return entry.value, entry.lastError } } @@ -306,12 +306,12 @@ func (server *ResilientSrvTopoServer) GetSrvKeyspace(context context.Context, ce entry.insertionTime = time.Now() entry.value = result entry.lastError = err - entry.lastErrorContext = context + entry.lastErrorCtx = ctx return result, err } // GetSrvShard returns SrvShard object for the given cell, keyspace, and shard. 
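// Editor's note: GetSrvKeyspaceNames and GetSrvKeyspace above share one
// read-through-cache shape: find or create the per-key entry, serve it
// while fresh, otherwise refetch, and if the refetch fails fall back to
// the last known-good value while recording the error and the ctx that
// hit it (lastErrorCtx, for the status page). A condensed standalone
// sketch of that shape; all names are illustrative, not the Vitess types:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type cacheEntry struct {
	mutex         sync.Mutex // protects everything below
	insertionTime time.Time
	value         []string
	lastError     error
	lastErrorCtx  context.Context
}

type resilientCache struct {
	ttl     time.Duration
	fetch   func(ctx context.Context, cell string) ([]string, error)
	mu      sync.Mutex
	entries map[string]*cacheEntry
}

func (c *resilientCache) get(ctx context.Context, cell string) ([]string, error) {
	c.mu.Lock()
	entry, ok := c.entries[cell]
	if !ok {
		entry = &cacheEntry{}
		c.entries[cell] = entry
	}
	c.mu.Unlock()

	entry.mutex.Lock()
	defer entry.mutex.Unlock()

	// fresh enough: serve straight from the cache
	if time.Since(entry.insertionTime) < c.ttl {
		return entry.value, entry.lastError
	}
	result, err := c.fetch(ctx, cell)
	if err != nil && !entry.insertionTime.IsZero() {
		// refetch failed but an old value exists: serve it stale
		return entry.value, entry.lastError
	}
	// cache the result -- or, on a first-ever failure, the error
	entry.insertionTime = time.Now()
	entry.value = result
	entry.lastError = err
	entry.lastErrorCtx = ctx
	return result, err
}

func main() {
	c := &resilientCache{
		ttl: 5 * time.Second,
		fetch: func(ctx context.Context, cell string) ([]string, error) {
			return []string{"ks1", "ks2"}, nil
		},
		entries: make(map[string]*cacheEntry),
	}
	fmt.Println(c.get(context.Background(), "cell1")) // [ks1 ks2] <nil>
}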
-func (server *ResilientSrvTopoServer) GetSrvShard(context context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { +func (server *ResilientSrvTopoServer) GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { server.counts.Add(queryCategory, 1) // find the entry in the cache, add it if not there @@ -340,14 +340,14 @@ func (server *ResilientSrvTopoServer) GetSrvShard(context context.Context, cell, } // not in cache or too old, get the real value - result, err := server.topoServer.GetSrvShard(cell, keyspace, shard) + result, err := server.topoServer.GetSrvShard(ctx, cell, keyspace, shard) if err != nil { if entry.insertionTime.IsZero() { server.counts.Add(errorCategory, 1) - log.Errorf("GetSrvShard(%v, %v, %v, %v) failed: %v (no cached value, caching and returning error)", context, cell, keyspace, shard, err) + log.Errorf("GetSrvShard(%v, %v, %v, %v) failed: %v (no cached value, caching and returning error)", ctx, cell, keyspace, shard, err) } else { server.counts.Add(cachedCategory, 1) - log.Warningf("GetSrvShard(%v, %v, %v, %v) failed: %v (returning cached value: %v %v)", context, cell, keyspace, shard, err, entry.value, entry.lastError) + log.Warningf("GetSrvShard(%v, %v, %v, %v) failed: %v (returning cached value: %v %v)", ctx, cell, keyspace, shard, err, entry.value, entry.lastError) return entry.value, entry.lastError } } @@ -356,12 +356,12 @@ func (server *ResilientSrvTopoServer) GetSrvShard(context context.Context, cell, entry.insertionTime = time.Now() entry.value = result entry.lastError = err - entry.lastErrorContext = context + entry.lastErrorCtx = ctx return result, err } // GetEndPoints return all endpoints for the given cell, keyspace, shard, and tablet type. -func (server *ResilientSrvTopoServer) GetEndPoints(context context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (result *topo.EndPoints, err error) { +func (server *ResilientSrvTopoServer) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (result *topo.EndPoints, err error) { shard = strings.ToLower(shard) key := []string{cell, keyspace, shard, string(tabletType)} @@ -421,22 +421,22 @@ func (server *ResilientSrvTopoServer) GetEndPoints(context context.Context, cell } // not in cache or too old, get the real value - result, err = server.topoServer.GetEndPoints(cell, keyspace, shard, tabletType) + result, err = server.topoServer.GetEndPoints(ctx, cell, keyspace, shard, tabletType) // get remote endpoints for master if enabled if err != nil && server.enableRemoteMaster && tabletType == topo.TYPE_MASTER { remote = true server.counts.Add(remoteQueryCategory, 1) server.endPointCounters.remoteLookups.Add(key, 1) var ss *topo.SrvShard - ss, err = server.topoServer.GetSrvShard(cell, keyspace, shard) + ss, err = server.topoServer.GetSrvShard(ctx, cell, keyspace, shard) if err != nil { server.counts.Add(remoteErrorCategory, 1) server.endPointCounters.remoteLookupErrors.Add(key, 1) log.Errorf("GetEndPoints(%v, %v, %v, %v, %v) failed to get SrvShard for remote master: %v", - context, cell, keyspace, shard, tabletType, err) + ctx, cell, keyspace, shard, tabletType, err) } else { if ss.MasterCell != "" && ss.MasterCell != cell { - result, err = server.topoServer.GetEndPoints(ss.MasterCell, keyspace, shard, tabletType) + result, err = server.topoServer.GetEndPoints(ctx, ss.MasterCell, keyspace, shard, tabletType) } } } @@ -444,11 +444,11 @@ func (server *ResilientSrvTopoServer) GetEndPoints(context context.Context, cell 
server.endPointCounters.lookupErrors.Add(key, 1) if entry.insertionTime.IsZero() { server.counts.Add(errorCategory, 1) - log.Errorf("GetEndPoints(%v, %v, %v, %v, %v) failed: %v (no cached value, caching and returning error)", context, cell, keyspace, shard, tabletType, err) + log.Errorf("GetEndPoints(%v, %v, %v, %v, %v) failed: %v (no cached value, caching and returning error)", ctx, cell, keyspace, shard, tabletType, err) } else { server.counts.Add(cachedCategory, 1) server.endPointCounters.staleCacheFallbacks.Add(key, 1) - log.Warningf("GetEndPoints(%v, %v, %v, %v, %v) failed: %v (returning cached value: %v %v)", context, cell, keyspace, shard, tabletType, err, entry.value, entry.lastError) + log.Warningf("GetEndPoints(%v, %v, %v, %v, %v) failed: %v (returning cached value: %v %v)", ctx, cell, keyspace, shard, tabletType, err, entry.value, entry.lastError) return entry.value, entry.lastError } } @@ -458,7 +458,7 @@ func (server *ResilientSrvTopoServer) GetEndPoints(context context.Context, cell entry.originalValue = result entry.value = filterUnhealthyServers(result) entry.lastError = err - entry.lastErrorContext = context + entry.lastErrorCtx = ctx entry.remote = remote return entry.value, err } @@ -468,10 +468,10 @@ func (server *ResilientSrvTopoServer) GetEndPoints(context context.Context, cell // SrvKeyspaceNamesCacheStatus is the current value for SrvKeyspaceNames type SrvKeyspaceNamesCacheStatus struct { - Cell string - Value []string - LastError error - LastErrorContext context.Context + Cell string + Value []string + LastError error + LastErrorCtx context.Context } // SrvKeyspaceNamesCacheStatusList is used for sorting @@ -494,11 +494,11 @@ func (skncsl SrvKeyspaceNamesCacheStatusList) Swap(i, j int) { // SrvKeyspaceCacheStatus is the current value for a SrvKeyspace object type SrvKeyspaceCacheStatus struct { - Cell string - Keyspace string - Value *topo.SrvKeyspace - LastError error - LastErrorContext context.Context + Cell string + Keyspace string + Value *topo.SrvKeyspace + LastError error + LastErrorCtx context.Context } // StatusAsHTML returns an HTML version of our status. @@ -553,12 +553,12 @@ func (skcsl SrvKeyspaceCacheStatusList) Swap(i, j int) { // SrvShardCacheStatus is the current value for a SrvShard object type SrvShardCacheStatus struct { - Cell string - Keyspace string - Shard string - Value *topo.SrvShard - LastError error - LastErrorContext context.Context + Cell string + Keyspace string + Shard string + Value *topo.SrvShard + LastError error + LastErrorCtx context.Context } // StatusAsHTML returns an HTML version of our status. @@ -596,14 +596,14 @@ func (sscsl SrvShardCacheStatusList) Swap(i, j int) { // EndPointsCacheStatus is the current value for an EndPoints object type EndPointsCacheStatus struct { - Cell string - Keyspace string - Shard string - TabletType topo.TabletType - Value *topo.EndPoints - OriginalValue *topo.EndPoints - LastError error - LastErrorContext context.Context + Cell string + Keyspace string + Shard string + TabletType topo.TabletType + Value *topo.EndPoints + OriginalValue *topo.EndPoints + LastError error + LastErrorCtx context.Context } // StatusAsHTML returns an HTML version of our status. 
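// Editor's note: GetEndPoints, in the hunks above, layers one more
// fallback on top of the cache: if the local lookup fails,
// enableRemoteMaster is set, and the request is for the master tablet
// type, it reads the SrvShard record to find MasterCell and retries the
// lookup in that remote cell. A standalone sketch of just that
// decision; getEndPoints and masterCellOf are hypothetical stand-ins,
// not the topo.Server API:

package main

import (
	"context"
	"errors"
	"fmt"
)

type endPoints struct{ cell string }

func getEndPoints(ctx context.Context, cell string) (*endPoints, error) {
	if cell == "remotecell" {
		return &endPoints{cell: cell}, nil
	}
	return nil, errors.New("no endpoints")
}

func masterCellOf(ctx context.Context, cell string) (string, error) {
	return "remotecell", nil // pretend SrvShard says the master is remote
}

// getMasterEndPoints: try the local cell first, then follow
// SrvShard.MasterCell when it names a different cell.
func getMasterEndPoints(ctx context.Context, cell string, enableRemoteMaster bool) (*endPoints, error) {
	result, err := getEndPoints(ctx, cell)
	if err != nil && enableRemoteMaster {
		mc, serr := masterCellOf(ctx, cell)
		if serr != nil {
			return nil, serr
		}
		if mc != "" && mc != cell {
			result, err = getEndPoints(ctx, mc)
		}
	}
	return result, err
}

func main() {
	eps, err := getMasterEndPoints(context.Background(), "cell1", true)
	fmt.Println(eps.cell, err) // remotecell <nil>
}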
@@ -691,10 +691,10 @@ func (server *ResilientSrvTopoServer) CacheStatus() *ResilientSrvTopoServerCache for _, entry := range server.srvKeyspaceNamesCache { entry.mutex.Lock() result.SrvKeyspaceNames = append(result.SrvKeyspaceNames, &SrvKeyspaceNamesCacheStatus{ - Cell: entry.cell, - Value: entry.value, - LastError: entry.lastError, - LastErrorContext: entry.lastErrorContext, + Cell: entry.cell, + Value: entry.value, + LastError: entry.lastError, + LastErrorCtx: entry.lastErrorCtx, }) entry.mutex.Unlock() } @@ -702,11 +702,11 @@ func (server *ResilientSrvTopoServer) CacheStatus() *ResilientSrvTopoServerCache for _, entry := range server.srvKeyspaceCache { entry.mutex.Lock() result.SrvKeyspaces = append(result.SrvKeyspaces, &SrvKeyspaceCacheStatus{ - Cell: entry.cell, - Keyspace: entry.keyspace, - Value: entry.value, - LastError: entry.lastError, - LastErrorContext: entry.lastErrorContext, + Cell: entry.cell, + Keyspace: entry.keyspace, + Value: entry.value, + LastError: entry.lastError, + LastErrorCtx: entry.lastErrorCtx, }) entry.mutex.Unlock() } @@ -714,12 +714,12 @@ func (server *ResilientSrvTopoServer) CacheStatus() *ResilientSrvTopoServerCache for _, entry := range server.srvShardCache { entry.mutex.Lock() result.SrvShards = append(result.SrvShards, &SrvShardCacheStatus{ - Cell: entry.cell, - Keyspace: entry.keyspace, - Shard: entry.shard, - Value: entry.value, - LastError: entry.lastError, - LastErrorContext: entry.lastErrorContext, + Cell: entry.cell, + Keyspace: entry.keyspace, + Shard: entry.shard, + Value: entry.value, + LastError: entry.lastError, + LastErrorCtx: entry.lastErrorCtx, }) entry.mutex.Unlock() } @@ -727,14 +727,14 @@ func (server *ResilientSrvTopoServer) CacheStatus() *ResilientSrvTopoServerCache for _, entry := range server.endPointsCache { entry.mutex.Lock() result.EndPoints = append(result.EndPoints, &EndPointsCacheStatus{ - Cell: entry.cell, - Keyspace: entry.keyspace, - Shard: entry.shard, - TabletType: entry.tabletType, - Value: entry.value, - OriginalValue: entry.originalValue, - LastError: entry.lastError, - LastErrorContext: entry.lastErrorContext, + Cell: entry.cell, + Keyspace: entry.keyspace, + Shard: entry.shard, + TabletType: entry.tabletType, + Value: entry.value, + OriginalValue: entry.originalValue, + LastError: entry.lastError, + LastErrorCtx: entry.lastErrorCtx, }) entry.mutex.Unlock() } diff --git a/go/vt/vtgate/srv_topo_server_test.go b/go/vt/vtgate/srv_topo_server_test.go index 31191b495e..61b41710a0 100644 --- a/go/vt/vtgate/srv_topo_server_test.go +++ b/go/vt/vtgate/srv_topo_server_test.go @@ -195,11 +195,11 @@ type fakeTopo struct { callCount int } -func (ft *fakeTopo) GetSrvKeyspaceNames(cell string) ([]string, error) { +func (ft *fakeTopo) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) { return []string{ft.keyspace}, nil } -func (ft *fakeTopo) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error) { +func (ft *fakeTopo) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { ft.callCount++ if keyspace == ft.keyspace { return &topo.SrvKeyspace{}, nil @@ -207,7 +207,7 @@ func (ft *fakeTopo) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, er return nil, fmt.Errorf("Unknown keyspace") } -func (ft *fakeTopo) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { +func (ft *fakeTopo) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { return nil, fmt.Errorf("No 
endpoints") } @@ -217,14 +217,14 @@ type fakeTopoRemoteMaster struct { remoteCell string } -func (ft *fakeTopoRemoteMaster) GetSrvShard(cell, keyspace, shard string) (*topo.SrvShard, error) { +func (ft *fakeTopoRemoteMaster) GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { return &topo.SrvShard{ Name: shard, MasterCell: ft.remoteCell, }, nil } -func (ft *fakeTopoRemoteMaster) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { +func (ft *fakeTopoRemoteMaster) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { if cell != ft.cell && cell != ft.remoteCell { return nil, fmt.Errorf("GetEndPoints: invalid cell: %v", cell) } diff --git a/go/vt/worker/diff_utils.go b/go/vt/worker/diff_utils.go index 9194dc2fa5..31aafe74b7 100644 --- a/go/vt/worker/diff_utils.go +++ b/go/vt/worker/diff_utils.go @@ -32,7 +32,7 @@ type QueryResultReader struct { // NewQueryResultReaderForTablet creates a new QueryResultReader for // the provided tablet / sql query func NewQueryResultReaderForTablet(ctx context.Context, ts topo.Server, tabletAlias topo.TabletAlias, sql string) (*QueryResultReader, error) { - tablet, err := ts.GetTablet(tabletAlias) + tablet, err := ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } diff --git a/go/vt/worker/split_clone.go b/go/vt/worker/split_clone.go index f4386636bf..650eed7be5 100644 --- a/go/vt/worker/split_clone.go +++ b/go/vt/worker/split_clone.go @@ -186,7 +186,7 @@ func (scw *SplitCloneWorker) Run(ctx context.Context) error { func (scw *SplitCloneWorker) run(ctx context.Context) error { // first state: read what we need to do - if err := scw.init(); err != nil { + if err := scw.init(ctx); err != nil { return fmt.Errorf("init() failed: %v", err) } if err := checkDone(ctx); err != nil { @@ -211,18 +211,18 @@ func (scw *SplitCloneWorker) run(ctx context.Context) error { // init phase: // - read the destination keyspace, make sure it has 'servedFrom' values -func (scw *SplitCloneWorker) init() error { +func (scw *SplitCloneWorker) init(ctx context.Context) error { scw.setState(WorkerStateInit) var err error // read the keyspace and validate it - scw.keyspaceInfo, err = scw.wr.TopoServer().GetKeyspace(scw.keyspace) + scw.keyspaceInfo, err = scw.wr.TopoServer().GetKeyspace(ctx, scw.keyspace) if err != nil { return fmt.Errorf("cannot read keyspace %v: %v", scw.keyspace, err) } // find the OverlappingShards in the keyspace - osList, err := topotools.FindOverlappingShards(scw.wr.TopoServer(), scw.keyspace) + osList, err := topotools.FindOverlappingShards(ctx, scw.wr.TopoServer(), scw.keyspace) if err != nil { return fmt.Errorf("cannot FindOverlappingShards in %v: %v", scw.keyspace, err) } @@ -283,7 +283,7 @@ func (scw *SplitCloneWorker) findTargets(ctx context.Context) error { // get the tablet info for them, and stop their replication scw.sourceTablets = make([]*topo.TabletInfo, len(scw.sourceAliases)) for i, alias := range scw.sourceAliases { - scw.sourceTablets[i], err = scw.wr.TopoServer().GetTablet(alias) + scw.sourceTablets[i], err = scw.wr.TopoServer().GetTablet(ctx, alias) if err != nil { return fmt.Errorf("cannot read tablet %v: %v", alias, err) } diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 5f9107988a..1b66122b14 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -271,7 +271,7 @@ func testSplitClone(t *testing.T, strategy string) { 
// add the topo and schema data we'll need ctx := context.Background() - if err := topo.CreateShard(ts, "ks", "80-"); err != nil { + if err := topo.CreateShard(ctx, ts, "ks", "80-"); err != nil { t.Fatalf("CreateShard(\"-80\") failed: %v", err) } if err := wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", key.KIT_UINT64, 4, false); err != nil { diff --git a/go/vt/worker/split_diff.go b/go/vt/worker/split_diff.go index c21552548f..732858df8e 100644 --- a/go/vt/worker/split_diff.go +++ b/go/vt/worker/split_diff.go @@ -115,7 +115,7 @@ func (sdw *SplitDiffWorker) Run(ctx context.Context) error { func (sdw *SplitDiffWorker) run(ctx context.Context) error { // first state: read what we need to do - if err := sdw.init(); err != nil { + if err := sdw.init(ctx); err != nil { return fmt.Errorf("init() failed: %v", err) } if err := checkDone(ctx); err != nil { @@ -148,15 +148,15 @@ func (sdw *SplitDiffWorker) run(ctx context.Context) error { // init phase: // - read the shard info, make sure it has sources -func (sdw *SplitDiffWorker) init() error { +func (sdw *SplitDiffWorker) init(ctx context.Context) error { sdw.SetState(WorkerStateInit) var err error - sdw.keyspaceInfo, err = sdw.wr.TopoServer().GetKeyspace(sdw.keyspace) + sdw.keyspaceInfo, err = sdw.wr.TopoServer().GetKeyspace(ctx, sdw.keyspace) if err != nil { return fmt.Errorf("cannot read keyspace %v: %v", sdw.keyspace, err) } - sdw.shardInfo, err = sdw.wr.TopoServer().GetShard(sdw.keyspace, sdw.shard) + sdw.shardInfo, err = sdw.wr.TopoServer().GetShard(ctx, sdw.keyspace, sdw.shard) if err != nil { return fmt.Errorf("cannot read shard %v/%v: %v", sdw.keyspace, sdw.shard, err) } @@ -218,7 +218,7 @@ func (sdw *SplitDiffWorker) findTargets(ctx context.Context) error { func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { sdw.SetState(WorkerStateSyncReplication) - masterInfo, err := sdw.wr.TopoServer().GetTablet(sdw.shardInfo.MasterAlias) + masterInfo, err := sdw.wr.TopoServer().GetTablet(ctx, sdw.shardInfo.MasterAlias) if err != nil { return fmt.Errorf("synchronizeReplication: cannot get Tablet record for master %v: %v", sdw.shardInfo.MasterAlias, err) } @@ -246,7 +246,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { } // read the tablet - sourceTablet, err := sdw.wr.TopoServer().GetTablet(sdw.sourceAliases[i]) + sourceTablet, err := sdw.wr.TopoServer().GetTablet(ctx, sdw.sourceAliases[i]) if err != nil { return err } @@ -285,7 +285,7 @@ func (sdw *SplitDiffWorker) synchronizeReplication(ctx context.Context) error { // 4 - wait until the destination checker is equal or passed // that master binlog position, and stop its replication. 
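// Editor's note: steps 1-4 above are the generic "diff against a
// consistent snapshot" dance: pin the source at a replication position,
// let the destination catch up, and only diff once it is at or past
// that position. A standalone sketch of the wait step; real positions
// are GTID sets compared via tablet manager RPCs, so the int64 position
// and both helpers here are stand-ins:

package main

import (
	"context"
	"fmt"
	"time"
)

var fakePos int64 // pretend the destination keeps applying events

func currentPosition(ctx context.Context) (int64, error) {
	fakePos += 10
	return fakePos, nil
}

// waitForPosition polls until the destination reports a position at or
// past target, honoring cancellation of ctx.
func waitForPosition(ctx context.Context, target int64) error {
	for {
		pos, err := currentPosition(ctx)
		if err != nil {
			return err
		}
		if pos >= target {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Millisecond):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitForPosition(ctx, 50)) // <nil>
}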
sdw.wr.Logger().Infof("Waiting for destination checker %v to catch up to %v", sdw.destinationAlias, masterPos) - destinationTablet, err := sdw.wr.TopoServer().GetTablet(sdw.destinationAlias) + destinationTablet, err := sdw.wr.TopoServer().GetTablet(ctx, sdw.destinationAlias) if err != nil { return err } diff --git a/go/vt/worker/split_diff_test.go b/go/vt/worker/split_diff_test.go index 72a2b154b5..5ddc4851ed 100644 --- a/go/vt/worker/split_diff_test.go +++ b/go/vt/worker/split_diff_test.go @@ -173,7 +173,7 @@ func TestSplitDiff(t *testing.T) { } // add the topo and schema data we'll need - if err := topo.CreateShard(ts, "ks", "80-"); err != nil { + if err := topo.CreateShard(ctx, ts, "ks", "80-"); err != nil { t.Fatalf("CreateShard(\"-80\") failed: %v", err) } wr.SetSourceShards(ctx, "ks", "-40", []topo.TabletAlias{sourceRdonly1.Tablet.Alias}, nil) diff --git a/go/vt/worker/sqldiffer.go b/go/vt/worker/sqldiffer.go index 1aa9141bcb..7c21a0eeca 100644 --- a/go/vt/worker/sqldiffer.go +++ b/go/vt/worker/sqldiffer.go @@ -170,7 +170,7 @@ func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { // stop replication on subset slave worker.wr.Logger().Infof("Stopping replication on subset slave %v", worker.subset.alias) - subsetTablet, err := worker.wr.TopoServer().GetTablet(worker.subset.alias) + subsetTablet, err := worker.wr.TopoServer().GetTablet(ctx, worker.subset.alias) if err != nil { return err } @@ -201,7 +201,7 @@ func (worker *SQLDiffWorker) synchronizeReplication(ctx context.Context) error { // stop replication on superset slave worker.wr.Logger().Infof("Stopping replication on superset slave %v", worker.superset.alias) - supersetTablet, err := worker.wr.TopoServer().GetTablet(worker.superset.alias) + supersetTablet, err := worker.wr.TopoServer().GetTablet(ctx, worker.superset.alias) if err != nil { return err } diff --git a/go/vt/worker/topo_utils.go b/go/vt/worker/topo_utils.go index 81d14b9ea5..9bedba26d9 100644 --- a/go/vt/worker/topo_utils.go +++ b/go/vt/worker/topo_utils.go @@ -23,8 +23,8 @@ var ( // FindHealthyRdonlyEndPoint returns a random healthy endpoint. // Since we don't want to use them all, we require at least // minHealthyEndPoints servers to be healthy. -func FindHealthyRdonlyEndPoint(wr *wrangler.Wrangler, cell, keyspace, shard string) (topo.TabletAlias, error) { - endPoints, err := wr.TopoServer().GetEndPoints(cell, keyspace, shard, topo.TYPE_RDONLY) +func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell, keyspace, shard string) (topo.TabletAlias, error) { + endPoints, err := wr.TopoServer().GetEndPoints(ctx, cell, keyspace, shard, topo.TYPE_RDONLY) if err != nil { return topo.TabletAlias{}, fmt.Errorf("GetEndPoints(%v,%v,%v,rdonly) failed: %v", cell, keyspace, shard, err) } @@ -51,7 +51,7 @@ func FindHealthyRdonlyEndPoint(wr *wrangler.Wrangler, cell, keyspace, shard stri // - mark it as worker // - tag it with our worker process func FindWorkerTablet(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrangler.Cleaner, cell, keyspace, shard string) (topo.TabletAlias, error) { - tabletAlias, err := FindHealthyRdonlyEndPoint(wr, cell, keyspace, shard) + tabletAlias, err := FindHealthyRdonlyEndPoint(ctx, wr, cell, keyspace, shard) if err != nil { return topo.TabletAlias{}, err } @@ -60,7 +60,7 @@ func FindWorkerTablet(ctx context.Context, wr *wrangler.Wrangler, cleaner *wrang // vttablet reloads the worker URL when it reloads the tablet. 
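// Editor's note: the tag write just below is the update-through-closure
// pattern: UpdateTabletFields re-reads the tablet record, applies the
// caller's mutation function, and writes the record back (versioned in
// real implementations, so concurrent writers don't clobber each
// other). A standalone sketch of the shape; the map-backed store is
// illustrative, not topo.Server:

package main

import "fmt"

type tablet struct {
	Alias string
	Tags  map[string]string
}

type store map[string]*tablet

// updateTabletFields re-reads the record and hands the fresh copy to
// the mutation function, mirroring the call in the hunk below.
func (s store) updateTabletFields(alias string, update func(*tablet) error) error {
	t, ok := s[alias]
	if !ok {
		return fmt.Errorf("no such tablet: %v", alias)
	}
	return update(t)
}

func main() {
	s := store{"cell1-0000000100": {Alias: "cell1-0000000100"}}
	err := s.updateTabletFields("cell1-0000000100", func(t *tablet) error {
		if t.Tags == nil {
			t.Tags = make(map[string]string)
		}
		t.Tags["worker"] = "http://worker.example:15000/"
		return nil
	})
	fmt.Println(err, s["cell1-0000000100"].Tags["worker"])
}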
ourURL := servenv.ListeningURL.String() wr.Logger().Infof("Adding tag[worker]=%v to tablet %v", ourURL, tabletAlias) - if err := wr.TopoServer().UpdateTabletFields(tabletAlias, func(tablet *topo.Tablet) error { + if err := wr.TopoServer().UpdateTabletFields(ctx, tabletAlias, func(tablet *topo.Tablet) error { if tablet.Tags == nil { tablet.Tags = make(map[string]string) } diff --git a/go/vt/worker/vertical_split_clone.go b/go/vt/worker/vertical_split_clone.go index 25cfe7680a..3405cc7498 100644 --- a/go/vt/worker/vertical_split_clone.go +++ b/go/vt/worker/vertical_split_clone.go @@ -173,7 +173,7 @@ func (vscw *VerticalSplitCloneWorker) Run(ctx context.Context) error { func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { // first state: read what we need to do - if err := vscw.init(); err != nil { + if err := vscw.init(ctx); err != nil { return fmt.Errorf("init() failed: %v", err) } if err := checkDone(ctx); err != nil { @@ -201,11 +201,11 @@ func (vscw *VerticalSplitCloneWorker) run(ctx context.Context) error { // init phase: // - read the destination keyspace, make sure it has 'servedFrom' values -func (vscw *VerticalSplitCloneWorker) init() error { +func (vscw *VerticalSplitCloneWorker) init(ctx context.Context) error { vscw.setState(WorkerStateInit) // read the keyspace and validate it - destinationKeyspaceInfo, err := vscw.wr.TopoServer().GetKeyspace(vscw.destinationKeyspace) + destinationKeyspaceInfo, err := vscw.wr.TopoServer().GetKeyspace(ctx, vscw.destinationKeyspace) if err != nil { return fmt.Errorf("cannot read destination keyspace %v: %v", vscw.destinationKeyspace, err) } @@ -250,7 +250,7 @@ func (vscw *VerticalSplitCloneWorker) findTargets(ctx context.Context) error { vscw.wr.Logger().Infof("Using tablet %v as the source", vscw.sourceAlias) // get the tablet info for it - vscw.sourceTablet, err = vscw.wr.TopoServer().GetTablet(vscw.sourceAlias) + vscw.sourceTablet, err = vscw.wr.TopoServer().GetTablet(ctx, vscw.sourceAlias) if err != nil { return fmt.Errorf("cannot read tablet %v: %v", vscw.sourceTablet, err) } diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index 22d0761d44..f768628e3b 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -245,7 +245,8 @@ func testVerticalSplitClone(t *testing.T, strategy string) { topo.TYPE_REPLICA: &topo.KeyspaceServedFrom{Keyspace: "source_ks"}, topo.TYPE_RDONLY: &topo.KeyspaceServedFrom{Keyspace: "source_ks"}, } - wr.TopoServer().CreateKeyspace("destination_ks", ki) + ctx := context.Background() + wr.TopoServer().CreateKeyspace(ctx, "destination_ks", ki) destMaster := testlib.NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) @@ -258,7 +259,6 @@ func testVerticalSplitClone(t *testing.T, strategy string) { } // add the topo and schema data we'll need - ctx := context.Background() if err := wr.RebuildKeyspaceGraph(ctx, "source_ks", nil, true); err != nil { t.Fatalf("RebuildKeyspaceGraph failed: %v", err) } diff --git a/go/vt/worker/vertical_split_diff.go b/go/vt/worker/vertical_split_diff.go index 3d29bfeda1..f56ebb74d5 100644 --- a/go/vt/worker/vertical_split_diff.go +++ b/go/vt/worker/vertical_split_diff.go @@ -115,7 +115,7 @@ func (vsdw *VerticalSplitDiffWorker) Run(ctx context.Context) error { func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { // first state: read what we need to do - if err := vsdw.init(); err != nil { + if err := 
vsdw.init(ctx); err != nil { return fmt.Errorf("init() failed: %v", err) } if err := checkDone(ctx); err != nil { @@ -148,13 +148,13 @@ func (vsdw *VerticalSplitDiffWorker) run(ctx context.Context) error { // init phase: // - read the shard info, make sure it has sources -func (vsdw *VerticalSplitDiffWorker) init() error { +func (vsdw *VerticalSplitDiffWorker) init(ctx context.Context) error { vsdw.SetState(WorkerStateInit) var err error // read the keyspace and validate it - vsdw.keyspaceInfo, err = vsdw.wr.TopoServer().GetKeyspace(vsdw.keyspace) + vsdw.keyspaceInfo, err = vsdw.wr.TopoServer().GetKeyspace(ctx, vsdw.keyspace) if err != nil { return fmt.Errorf("cannot read keyspace %v: %v", vsdw.keyspace, err) } @@ -163,7 +163,7 @@ func (vsdw *VerticalSplitDiffWorker) init() error { } // read the shardinfo and validate it - vsdw.shardInfo, err = vsdw.wr.TopoServer().GetShard(vsdw.keyspace, vsdw.shard) + vsdw.shardInfo, err = vsdw.wr.TopoServer().GetShard(ctx, vsdw.keyspace, vsdw.shard) if err != nil { return fmt.Errorf("cannot read shard %v/%v: %v", vsdw.keyspace, vsdw.shard, err) } @@ -224,7 +224,7 @@ func (vsdw *VerticalSplitDiffWorker) findTargets(ctx context.Context) error { func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) error { vsdw.SetState(WorkerStateSyncReplication) - masterInfo, err := vsdw.wr.TopoServer().GetTablet(vsdw.shardInfo.MasterAlias) + masterInfo, err := vsdw.wr.TopoServer().GetTablet(ctx, vsdw.shardInfo.MasterAlias) if err != nil { return fmt.Errorf("synchronizeReplication: cannot get Tablet record for master %v: %v", vsdw.shardInfo.MasterAlias, err) } @@ -253,7 +253,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // stop replication vsdw.wr.Logger().Infof("Stopping slave %v at a minimum of %v", vsdw.sourceAlias, pos.Position) - sourceTablet, err := vsdw.wr.TopoServer().GetTablet(vsdw.sourceAlias) + sourceTablet, err := vsdw.wr.TopoServer().GetTablet(ctx, vsdw.sourceAlias) if err != nil { return err } @@ -288,7 +288,7 @@ func (vsdw *VerticalSplitDiffWorker) synchronizeReplication(ctx context.Context) // 4 - wait until the destination checker is equal or passed // that master binlog position, and stop its replication. vsdw.wr.Logger().Infof("Waiting for destination checker %v to catch up to %v", vsdw.destinationAlias, masterPos) - destinationTablet, err := vsdw.wr.TopoServer().GetTablet(vsdw.destinationAlias) + destinationTablet, err := vsdw.wr.TopoServer().GetTablet(ctx, vsdw.destinationAlias) if err != nil { return err } diff --git a/go/vt/worker/vertical_split_diff_test.go b/go/vt/worker/vertical_split_diff_test.go index 180c49dd3c..18737bcd82 100644 --- a/go/vt/worker/vertical_split_diff_test.go +++ b/go/vt/worker/vertical_split_diff_test.go @@ -101,7 +101,7 @@ func TestVerticalSplitDiff(t *testing.T) { topo.TYPE_REPLICA: &topo.KeyspaceServedFrom{Keyspace: "source_ks"}, topo.TYPE_RDONLY: &topo.KeyspaceServedFrom{Keyspace: "source_ks"}, } - wr.TopoServer().CreateKeyspace("destination_ks", ki) + wr.TopoServer().CreateKeyspace(ctx, "destination_ks", ki) destMaster := testlib.NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER, testlib.TabletKeyspaceShard(t, "destination_ks", "0")) diff --git a/go/vt/wrangler/cleaner.go b/go/vt/wrangler/cleaner.go index 32ee43579c..120cc8a1b1 100644 --- a/go/vt/wrangler/cleaner.go +++ b/go/vt/wrangler/cleaner.go @@ -191,7 +191,7 @@ func RecordTabletTagAction(cleaner *Cleaner, tabletAlias topo.TabletAlias, name, // CleanUp is part of CleanerAction interface. 
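// Editor's note: TabletTagAction below is one of several recorded
// cleanup actions; the wrangler's Cleaner collects them while a job
// runs and replays them afterwards, which is why CleanUp now needs the
// caller's ctx threaded in. A standalone sketch of the shape; the
// interface and run-in-reverse policy here are illustrative, not the
// exact wrangler API:

package main

import (
	"context"
	"fmt"
)

type cleanerAction interface {
	CleanUp(ctx context.Context) error
}

type tagAction struct{ alias, name, value string }

func (a tagAction) CleanUp(ctx context.Context) error {
	fmt.Printf("restoring tag %v=%q on tablet %v\n", a.name, a.value, a.alias)
	return nil
}

type cleaner struct{ actions []cleanerAction }

func (c *cleaner) record(a cleanerAction) { c.actions = append(c.actions, a) }

// cleanUp undoes the most recent change first and reports the first
// error without stopping early.
func (c *cleaner) cleanUp(ctx context.Context) error {
	var firstErr error
	for i := len(c.actions) - 1; i >= 0; i-- {
		if err := c.actions[i].CleanUp(ctx); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func main() {
	c := &cleaner{}
	c.record(tagAction{alias: "cell1-0000000100", name: "worker", value: ""})
	fmt.Println(c.cleanUp(context.Background()))
}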
func (tta TabletTagAction) CleanUp(ctx context.Context, wr *Wrangler) error { - return wr.TopoServer().UpdateTabletFields(tta.TabletAlias, func(tablet *topo.Tablet) error { + return wr.TopoServer().UpdateTabletFields(ctx, tta.TabletAlias, func(tablet *topo.Tablet) error { if tablet.Tags == nil { tablet.Tags = make(map[string]string) } diff --git a/go/vt/wrangler/hook.go b/go/vt/wrangler/hook.go index 4eb19fe50f..8852600426 100644 --- a/go/vt/wrangler/hook.go +++ b/go/vt/wrangler/hook.go @@ -19,7 +19,7 @@ func (wr *Wrangler) ExecuteHook(ctx context.Context, tabletAlias topo.TabletAlia if strings.Contains(hook.Name, "/") { return nil, fmt.Errorf("hook name cannot have a '/' in it") } - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } diff --git a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index e1514acf31..ee1643248b 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -42,13 +42,13 @@ func (wr *Wrangler) SetKeyspaceShardingInfo(ctx context.Context, keyspace, shard return err } - err = wr.setKeyspaceShardingInfo(keyspace, shardingColumnName, shardingColumnType, splitShardCount, force) + err = wr.setKeyspaceShardingInfo(ctx, keyspace, shardingColumnName, shardingColumnType, splitShardCount, force) return wr.unlockKeyspace(ctx, keyspace, actionNode, lockPath, err) } -func (wr *Wrangler) setKeyspaceShardingInfo(keyspace, shardingColumnName string, shardingColumnType key.KeyspaceIdType, splitShardCount int32, force bool) error { - ki, err := wr.ts.GetKeyspace(keyspace) +func (wr *Wrangler) setKeyspaceShardingInfo(ctx context.Context, keyspace, shardingColumnName string, shardingColumnType key.KeyspaceIdType, splitShardCount int32, force bool) error { + ki, err := wr.ts.GetKeyspace(ctx, keyspace) if err != nil { return err } @@ -72,7 +72,7 @@ func (wr *Wrangler) setKeyspaceShardingInfo(keyspace, shardingColumnName string, ki.ShardingColumnName = shardingColumnName ki.ShardingColumnType = shardingColumnType ki.SplitShardCount = splitShardCount - return topo.UpdateKeyspace(wr.ts, ki) + return topo.UpdateKeyspace(ctx, wr.ts, ki) } // MigrateServedTypes is used during horizontal splits to migrate a @@ -92,7 +92,7 @@ func (wr *Wrangler) MigrateServedTypes(ctx context.Context, keyspace, shard stri // find overlapping shards in this keyspace wr.Logger().Infof("Finding the overlapping shards in keyspace %v", keyspace) - osList, err := topotools.FindOverlappingShards(wr.ts, keyspace) + osList, err := topotools.FindOverlappingShards(ctx, wr.ts, keyspace) if err != nil { return fmt.Errorf("FindOverlappingShards failed: %v", err) } @@ -216,7 +216,7 @@ func (wr *Wrangler) getMastersPosition(ctx context.Context, shards []*topo.Shard go func(si *topo.ShardInfo) { defer wg.Done() wr.Logger().Infof("Gathering master position for %v", si.MasterAlias) - ti, err := wr.ts.GetTablet(si.MasterAlias) + ti, err := wr.ts.GetTablet(ctx, si.MasterAlias) if err != nil { rec.RecordError(err) return @@ -260,7 +260,7 @@ func (wr *Wrangler) waitForFilteredReplication(ctx context.Context, sourcePositi // and wait for it wr.Logger().Infof("Waiting for %v to catch up", si.MasterAlias) - tablet, err := wr.ts.GetTablet(si.MasterAlias) + tablet, err := wr.ts.GetTablet(ctx, si.MasterAlias) if err != nil { rec.RecordError(err) return @@ -287,7 +287,7 @@ func (wr *Wrangler) refreshMasters(ctx context.Context, shards []*topo.ShardInfo go func(si *topo.ShardInfo) { defer wg.Done() wr.Logger().Infof("RefreshState master %v", 
si.MasterAlias) - ti, err := wr.ts.GetTablet(si.MasterAlias) + ti, err := wr.ts.GetTablet(ctx, si.MasterAlias) if err != nil { rec.RecordError(err) return @@ -310,12 +310,12 @@ func (wr *Wrangler) migrateServedTypes(ctx context.Context, keyspace string, sou // re-read all the shards so we are up to date wr.Logger().Infof("Re-reading all shards") for i, si := range sourceShards { - if sourceShards[i], err = wr.ts.GetShard(si.Keyspace(), si.ShardName()); err != nil { + if sourceShards[i], err = wr.ts.GetShard(ctx, si.Keyspace(), si.ShardName()); err != nil { return err } } for i, si := range destinationShards { - if destinationShards[i], err = wr.ts.GetShard(si.Keyspace(), si.ShardName()); err != nil { + if destinationShards[i], err = wr.ts.GetShard(ctx, si.Keyspace(), si.ShardName()); err != nil { return err } } @@ -465,7 +465,7 @@ func (wr *Wrangler) migrateServedTypes(ctx context.Context, keyspace string, sou // served type from a keyspace to another. func (wr *Wrangler) MigrateServedFrom(ctx context.Context, keyspace, shard string, servedType topo.TabletType, cells []string, reverse bool, filteredReplicationWaitTime time.Duration) error { // read the destination keyspace, check it - ki, err := wr.ts.GetKeyspace(keyspace) + ki, err := wr.ts.GetKeyspace(ctx, keyspace) if err != nil { return err } @@ -474,7 +474,7 @@ func (wr *Wrangler) MigrateServedFrom(ctx context.Context, keyspace, shard strin } // read the destination shard, check it - si, err := wr.ts.GetShard(keyspace, shard) + si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -532,7 +532,7 @@ func (wr *Wrangler) MigrateServedFrom(ctx context.Context, keyspace, shard strin func (wr *Wrangler) migrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo, destinationShard *topo.ShardInfo, servedType topo.TabletType, cells []string, reverse bool, filteredReplicationWaitTime time.Duration) (err error) { // re-read and update keyspace info record - ki, err = wr.ts.GetKeyspace(ki.KeyspaceName()) + ki, err = wr.ts.GetKeyspace(ctx, ki.KeyspaceName()) if err != nil { return err } @@ -543,7 +543,7 @@ func (wr *Wrangler) migrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo } // re-read and check the destination shard - destinationShard, err = wr.ts.GetShard(destinationShard.Keyspace(), destinationShard.ShardName()) + destinationShard, err = wr.ts.GetShard(ctx, destinationShard.Keyspace(), destinationShard.ShardName()) if err != nil { return err } @@ -555,7 +555,7 @@ func (wr *Wrangler) migrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo // read the source shard, we'll need its master, and we'll need to // update the blacklisted tables. 
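// Editor's note: migrateServedFrom re-reads the keyspace and the
// destination shard after the keyspace lock is held (the GetKeyspace /
// GetShard calls above, now carrying ctx): anything read before the
// lock was acquired may be stale. A standalone sketch of that
// lock / re-read / mutate / write shape; the process-local mutex stands
// in for the topo service's keyspace lock:

package main

import (
	"context"
	"fmt"
	"sync"
)

type record struct {
	mu    sync.Mutex
	value string
}

func (r *record) mutateUnderLock(ctx context.Context, mutate func(string) string) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if err := ctx.Err(); err != nil {
		return err // caller already gave up; don't start mutating
	}
	fresh := r.value // re-read inside the critical section
	r.value = mutate(fresh)
	return nil
}

func main() {
	r := &record{value: "servedFrom[master]: source_ks"}
	err := r.mutateUnderLock(context.Background(), func(string) string {
		return "servedFrom[master]: (removed)"
	})
	fmt.Println(err, r.value)
}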
var sourceShard *topo.ShardInfo - sourceShard, err = wr.ts.GetShard(destinationShard.SourceShards[0].Keyspace, destinationShard.SourceShards[0].Shard) + sourceShard, err = wr.ts.GetShard(ctx, destinationShard.SourceShards[0].Keyspace, destinationShard.SourceShards[0].Shard) if err != nil { return err } @@ -587,7 +587,7 @@ func (wr *Wrangler) migrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo func (wr *Wrangler) replicaMigrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo, sourceShard *topo.ShardInfo, destinationShard *topo.ShardInfo, servedType topo.TabletType, cells []string, reverse bool, tables []string, ev *events.MigrateServedFrom) error { // Save the destination keyspace (its ServedFrom has been changed) event.DispatchUpdate(ev, "updating keyspace") - if err := topo.UpdateKeyspace(wr.ts, ki); err != nil { + if err := topo.UpdateKeyspace(ctx, wr.ts, ki); err != nil { return err } @@ -622,11 +622,11 @@ func (wr *Wrangler) replicaMigrateServedFrom(ctx context.Context, ki *topo.Keysp // replication and starts accepting writes func (wr *Wrangler) masterMigrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo, sourceShard *topo.ShardInfo, destinationShard *topo.ShardInfo, tables []string, ev *events.MigrateServedFrom, filteredReplicationWaitTime time.Duration) error { // Read the data we need - sourceMasterTabletInfo, err := wr.ts.GetTablet(sourceShard.MasterAlias) + sourceMasterTabletInfo, err := wr.ts.GetTablet(ctx, sourceShard.MasterAlias) if err != nil { return err } - destinationMasterTabletInfo, err := wr.ts.GetTablet(destinationShard.MasterAlias) + destinationMasterTabletInfo, err := wr.ts.GetTablet(ctx, destinationShard.MasterAlias) if err != nil { return err } @@ -664,7 +664,7 @@ func (wr *Wrangler) masterMigrateServedFrom(ctx context.Context, ki *topo.Keyspa // Update the destination keyspace (its ServedFrom has changed) event.DispatchUpdate(ev, "updating keyspace") - if err = topo.UpdateKeyspace(wr.ts, ki); err != nil { + if err = topo.UpdateKeyspace(ctx, wr.ts, ki); err != nil { return err } @@ -694,19 +694,19 @@ func (wr *Wrangler) SetKeyspaceServedFrom(ctx context.Context, keyspace string, return err } - err = wr.setKeyspaceServedFrom(keyspace, servedType, cells, sourceKeyspace, remove) + err = wr.setKeyspaceServedFrom(ctx, keyspace, servedType, cells, sourceKeyspace, remove) return wr.unlockKeyspace(ctx, keyspace, actionNode, lockPath, err) } -func (wr *Wrangler) setKeyspaceServedFrom(keyspace string, servedType topo.TabletType, cells []string, sourceKeyspace string, remove bool) error { - ki, err := wr.ts.GetKeyspace(keyspace) +func (wr *Wrangler) setKeyspaceServedFrom(ctx context.Context, keyspace string, servedType topo.TabletType, cells []string, sourceKeyspace string, remove bool) error { + ki, err := wr.ts.GetKeyspace(ctx, keyspace) if err != nil { return err } if err := ki.UpdateServedFromMap(servedType, cells, sourceKeyspace, remove, nil); err != nil { return err } - return topo.UpdateKeyspace(wr.ts, ki) + return topo.UpdateKeyspace(ctx, wr.ts, ki) } // RefreshTablesByShard calls RefreshState on all the tables of a diff --git a/go/vt/wrangler/permissions.go b/go/vt/wrangler/permissions.go index e68412079d..3240236a9d 100644 --- a/go/vt/wrangler/permissions.go +++ b/go/vt/wrangler/permissions.go @@ -18,7 +18,7 @@ import ( // GetPermissions returns the permissions set on a remote tablet func (wr *Wrangler) GetPermissions(ctx context.Context, tabletAlias topo.TabletAlias) (*myproto.Permissions, error) { - tablet, err := wr.ts.GetTablet(tabletAlias) 
+ tablet, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } @@ -43,7 +43,7 @@ func (wr *Wrangler) diffPermissions(ctx context.Context, masterPermissions *mypr // ValidatePermissionsShard validates all the permissions are the same // in a shard func (wr *Wrangler) ValidatePermissionsShard(ctx context.Context, keyspace, shard string) error { - si, err := wr.ts.GetShard(keyspace, shard) + si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -86,7 +86,7 @@ func (wr *Wrangler) ValidatePermissionsShard(ctx context.Context, keyspace, shar // in a keyspace func (wr *Wrangler) ValidatePermissionsKeyspace(ctx context.Context, keyspace string) error { // find all the shards - shards, err := wr.ts.GetShardNames(keyspace) + shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { return err } @@ -101,7 +101,7 @@ func (wr *Wrangler) ValidatePermissionsKeyspace(ctx context.Context, keyspace st } // find the reference permissions using the first shard's master - si, err := wr.ts.GetShard(keyspace, shards[0]) + si, err := wr.ts.GetShard(ctx, keyspace, shards[0]) if err != nil { return err } diff --git a/go/vt/wrangler/rebuild.go b/go/vt/wrangler/rebuild.go index 4f8ef0d787..be9f95b987 100644 --- a/go/vt/wrangler/rebuild.go +++ b/go/vt/wrangler/rebuild.go @@ -65,14 +65,14 @@ func (wr *Wrangler) findCellsForRebuild(ki *topo.KeyspaceInfo, shardMap map[stri func (wr *Wrangler) rebuildKeyspace(ctx context.Context, keyspace string, cells []string, rebuildSrvShards bool) error { wr.logger.Infof("rebuildKeyspace %v", keyspace) - ki, err := wr.ts.GetKeyspace(keyspace) + ki, err := wr.ts.GetKeyspace(ctx, keyspace) if err != nil { return err } var shardCache map[string]*topo.ShardInfo if rebuildSrvShards { - shards, err := wr.ts.GetShardNames(keyspace) + shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { return nil } @@ -101,7 +101,7 @@ func (wr *Wrangler) rebuildKeyspace(ctx context.Context, keyspace string, cells } } else { - shardCache, err = topo.FindAllShardsInKeyspace(wr.ts, keyspace) + shardCache, err = topo.FindAllShardsInKeyspace(ctx, wr.ts, keyspace) if err != nil { return err } @@ -118,7 +118,7 @@ func (wr *Wrangler) rebuildKeyspace(ctx context.Context, keyspace string, cells // Then we add the cells from the keyspaces we might be 'ServedFrom'. 
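// Editor's note: the rebuild gathers the set of cells to rewrite as the
// union of cells across the keyspace's own shards plus, in the loop
// below, the shards of every keyspace it is 'ServedFrom'. A standalone
// sketch of that union; the shard type is reduced to its cell list:

package main

import "fmt"

type shard struct{ cells []string }

// cellsForRebuild returns every cell that appears in any shard of any
// of the given shard sets, preserving first-seen order.
func cellsForRebuild(shardSets ...map[string]shard) []string {
	seen := map[string]bool{}
	var cells []string
	for _, shards := range shardSets {
		for _, s := range shards {
			for _, c := range s.cells {
				if !seen[c] {
					seen[c] = true
					cells = append(cells, c)
				}
			}
		}
	}
	return cells
}

func main() {
	own := map[string]shard{
		"-80": {cells: []string{"cell1"}},
		"80-": {cells: []string{"cell1", "cell2"}},
	}
	servedFrom := map[string]shard{"0": {cells: []string{"cell3"}}}
	fmt.Println(cellsForRebuild(own, servedFrom)) // [cell1 cell2 cell3]
}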
for _, ksf := range ki.ServedFromMap { - servedFromShards, err := topo.FindAllShardsInKeyspace(wr.ts, ksf.Keyspace) + servedFromShards, err := topo.FindAllShardsInKeyspace(ctx, wr.ts, ksf.Keyspace) if err != nil { return err } @@ -159,7 +159,7 @@ func (wr *Wrangler) rebuildKeyspace(ctx context.Context, keyspace string, cells // and then finally save the keyspace objects for cell, srvKeyspace := range srvKeyspaceMap { wr.logger.Infof("updating keyspace serving graph in cell %v for %v", cell, keyspace) - if err := wr.ts.UpdateSrvKeyspace(cell, keyspace, srvKeyspace); err != nil { + if err := wr.ts.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace); err != nil { return fmt.Errorf("writing serving data failed: %v", err) } } @@ -225,7 +225,7 @@ func (wr *Wrangler) RebuildReplicationGraph(ctx context.Context, cells []string, for _, keyspace := range keyspaces { wr.logger.Infof("delete keyspace shards: %v", keyspace) - if err := wr.ts.DeleteKeyspaceShards(keyspace); err != nil { + if err := wr.ts.DeleteKeyspaceShards(ctx, keyspace); err != nil { return err } } @@ -249,7 +249,7 @@ func (wr *Wrangler) RebuildReplicationGraph(ctx context.Context, cells []string, keyspacesToRebuild[ti.Keyspace] = true shardPath := ti.Keyspace + "/" + ti.Shard if !shardsCreated[shardPath] { - if err := topo.CreateShard(wr.ts, ti.Keyspace, ti.Shard); err != nil && err != topo.ErrNodeExists { + if err := topo.CreateShard(ctx, wr.ts, ti.Keyspace, ti.Shard); err != nil && err != topo.ErrNodeExists { wr.logger.Warningf("failed re-creating shard %v: %v", shardPath, err) hasErr = true } else { diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 6b0096a928..875eefe410 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -41,7 +41,7 @@ type rpcContext struct { // ShardReplicationStatuses returns the ReplicationStatus for each tablet in a shard. func (wr *Wrangler) ShardReplicationStatuses(ctx context.Context, keyspace, shard string) ([]*topo.TabletInfo, []*myproto.ReplicationStatus, error) { - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return nil, nil, err } @@ -115,12 +115,12 @@ func (wr *Wrangler) ReparentTablet(ctx context.Context, tabletAlias topo.TabletA // Get current shard master tablet. // Sanity check they are in the same keyspace/shard. // Issue a SetMaster to the tablet. 
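// Editor's note: the ReparentTablet body below is three topo reads and
// a sanity check before any replication change is made. A standalone
// sketch of that flow; both lookup helpers and the self-reparent check
// are illustrative stand-ins, not the wrangler API:

package main

import (
	"context"
	"fmt"
)

type alias string

type tabletInfo struct {
	alias           alias
	keyspace, shard string
}

func getTablet(ctx context.Context, a alias) (*tabletInfo, error) {
	return &tabletInfo{alias: a, keyspace: "ks", shard: "-80"}, nil
}

func getShardMaster(ctx context.Context, keyspace, shard string) (alias, error) {
	return "cell1-0000000100", nil
}

func reparentTablet(ctx context.Context, a alias) error {
	ti, err := getTablet(ctx, a)
	if err != nil {
		return err
	}
	master, err := getShardMaster(ctx, ti.keyspace, ti.shard)
	if err != nil {
		return err
	}
	if master == "" {
		return fmt.Errorf("no master tablet for shard %v/%v", ti.keyspace, ti.shard)
	}
	if master == a {
		return fmt.Errorf("tablet %v is already the master", a)
	}
	// issue the SetMaster RPC to the tablet here (elided)
	return nil
}

func main() {
	fmt.Println(reparentTablet(context.Background(), "cell1-0000000101"))
}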
- ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return err } - shardInfo, err := wr.ts.GetShard(ti.Keyspace, ti.Shard) + shardInfo, err := wr.ts.GetShard(ctx, ti.Keyspace, ti.Shard) if err != nil { return err } @@ -128,7 +128,7 @@ func (wr *Wrangler) ReparentTablet(ctx context.Context, tabletAlias topo.TabletA return fmt.Errorf("no master tablet for shard %v/%v", ti.Keyspace, ti.Shard) } - masterTi, err := wr.ts.GetTablet(shardInfo.MasterAlias) + masterTi, err := wr.ts.GetTablet(ctx, shardInfo.MasterAlias) if err != nil { return err } @@ -170,7 +170,7 @@ func (wr *Wrangler) InitShardMaster(ctx context.Context, keyspace, shard string, } func (wr *Wrangler) initShardMasterLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, masterElectTabletAlias topo.TabletAlias, force bool, waitSlaveTimeout time.Duration) error { - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -335,7 +335,7 @@ func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard st } func (wr *Wrangler) plannedReparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, masterElectTabletAlias topo.TabletAlias, waitSlaveTimeout time.Duration) error { - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -467,7 +467,7 @@ func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard } func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, masterElectTabletAlias topo.TabletAlias, waitSlaveTimeout time.Duration) error { - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -497,7 +497,7 @@ func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events if ok { delete(tabletMap, shardInfo.MasterAlias) } else { - oldMasterTabletInfo, err = wr.ts.GetTablet(shardInfo.MasterAlias) + oldMasterTabletInfo, err = wr.ts.GetTablet(ctx, shardInfo.MasterAlias) if err != nil { wr.logger.Warningf("cannot read old master tablet %v, won't touch it: %v", shardInfo.MasterAlias, err) scrapOldMaster = false diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index 0f96b380c9..e5438b2d14 100644 --- a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -26,7 +26,7 @@ import ( // GetSchema uses an RPC to get the schema from a remote tablet func (wr *Wrangler) GetSchema(ctx context.Context, tabletAlias topo.TabletAlias, tables, excludeTables []string, includeViews bool) (*myproto.SchemaDefinition, error) { - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func (wr *Wrangler) GetSchema(ctx context.Context, tabletAlias topo.TabletAlias, // ReloadSchema forces the remote tablet to reload its schema. func (wr *Wrangler) ReloadSchema(ctx context.Context, tabletAlias topo.TabletAlias) error { - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -60,7 +60,7 @@ func (wr *Wrangler) diffSchema(ctx context.Context, masterSchema *myproto.Schema // ValidateSchemaShard will diff the schema from all the tablets in the shard. 
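// Editor's note: the validation below takes the shard master's schema
// as the reference and diffs every other tablet against it, fanning
// the fetches out concurrently in the real code. A standalone sketch
// of the comparison; schemas are plain strings here, whereas the real
// code diffs structured SchemaDefinition values table by table:

package main

import "fmt"

// validateShardSchema reports every tablet whose schema differs from
// the master reference.
func validateShardSchema(masterSchema string, replicas map[string]string) []error {
	var errs []error
	for name, schema := range replicas {
		if schema != masterSchema {
			errs = append(errs, fmt.Errorf("tablet %v schema differs from master", name))
		}
	}
	return errs
}

func main() {
	master := "CREATE TABLE t (id BIGINT)"
	replicas := map[string]string{
		"cell1-0000000101": "CREATE TABLE t (id BIGINT)",
		"cell1-0000000102": "CREATE TABLE t (id INT)", // drifted
	}
	for _, err := range validateShardSchema(master, replicas) {
		fmt.Println(err)
	}
}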
func (wr *Wrangler) ValidateSchemaShard(ctx context.Context, keyspace, shard string, excludeTables []string, includeViews bool) error { - si, err := wr.ts.GetShard(keyspace, shard) + si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -104,7 +104,7 @@ func (wr *Wrangler) ValidateSchemaShard(ctx context.Context, keyspace, shard str // the keyspace. func (wr *Wrangler) ValidateSchemaKeyspace(ctx context.Context, keyspace string, excludeTables []string, includeViews bool) error { // find all the shards - shards, err := wr.ts.GetShardNames(keyspace) + shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { return err } @@ -119,7 +119,7 @@ func (wr *Wrangler) ValidateSchemaKeyspace(ctx context.Context, keyspace string, } // find the reference schema using the first shard's master - si, err := wr.ts.GetShard(keyspace, shards[0]) + si, err := wr.ts.GetShard(ctx, keyspace, shards[0]) if err != nil { return err } @@ -154,7 +154,7 @@ func (wr *Wrangler) ValidateSchemaKeyspace(ctx context.Context, keyspace string, // then diffs all tablets in the other shards for _, shard := range shards[1:] { - si, err := wr.ts.GetShard(keyspace, shard) + si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { er.RecordError(err) continue @@ -185,7 +185,7 @@ func (wr *Wrangler) ValidateSchemaKeyspace(ctx context.Context, keyspace string, // PreflightSchema will try a schema change on the remote tablet. func (wr *Wrangler) PreflightSchema(ctx context.Context, tabletAlias topo.TabletAlias, change string) (*myproto.SchemaChangeResult, error) { - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } @@ -194,7 +194,7 @@ func (wr *Wrangler) PreflightSchema(ctx context.Context, tabletAlias topo.Tablet // ApplySchema will apply a schema change on the remote tablet. func (wr *Wrangler) ApplySchema(ctx context.Context, tabletAlias topo.TabletAlias, sc *myproto.SchemaChange) (*myproto.SchemaChangeResult, error) { - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } @@ -209,7 +209,7 @@ func (wr *Wrangler) ApplySchema(ctx context.Context, tabletAlias topo.TabletAlia // very quickly. 
func (wr *Wrangler) ApplySchemaShard(ctx context.Context, keyspace, shard, change string, newParentTabletAlias topo.TabletAlias, simple, force bool, waitSlaveTimeout time.Duration) (*myproto.SchemaChangeResult, error) { // read the shard - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return nil, err } @@ -266,7 +266,7 @@ func (wr *Wrangler) applySchemaShard(ctx context.Context, shardInfo *topo.ShardI continue } - ti, err := wr.ts.GetTablet(alias) + ti, err := wr.ts.GetTablet(ctx, alias) if err != nil { return nil, err } @@ -342,7 +342,7 @@ func (wr *Wrangler) applySchemaShardComplex(ctx context.Context, statusArray []* } // take this guy out of the serving graph if necessary - ti, err := wr.ts.GetTablet(status.ti.Alias) + ti, err := wr.ts.GetTablet(ctx, status.ti.Alias) if err != nil { return nil, err } @@ -425,11 +425,11 @@ func (wr *Wrangler) CopySchemaShard(ctx context.Context, srcTabletAlias topo.Tab if err != nil { return err } - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } - tabletInfo, err := wr.ts.GetTablet(shardInfo.MasterAlias) + tabletInfo, err := wr.ts.GetTablet(ctx, shardInfo.MasterAlias) if err != nil { return err } diff --git a/go/vt/wrangler/shard.go b/go/vt/wrangler/shard.go index 3bcfb04fa3..12202c52fd 100644 --- a/go/vt/wrangler/shard.go +++ b/go/vt/wrangler/shard.go @@ -51,7 +51,7 @@ func (wr *Wrangler) updateShardCellsAndMaster(ctx context.Context, si *topo.Shar } // re-read the shard with the lock - si, err = wr.ts.GetShard(keyspace, shard) + si, err = wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return wr.unlockShard(ctx, keyspace, shard, actionNode, lockPath, err) } @@ -96,7 +96,7 @@ func (wr *Wrangler) SetShardServedTypes(ctx context.Context, keyspace, shard str } func (wr *Wrangler) setShardServedTypes(ctx context.Context, keyspace, shard string, cells []string, servedType topo.TabletType, remove bool) error { - si, err := wr.ts.GetShard(keyspace, shard) + si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -130,7 +130,7 @@ func (wr *Wrangler) SetShardTabletControl(ctx context.Context, keyspace, shard s } func (wr *Wrangler) setShardTabletControl(ctx context.Context, keyspace, shard string, tabletType topo.TabletType, cells []string, remove, disableQueryService bool, tables []string) error { - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -153,7 +153,7 @@ func (wr *Wrangler) setShardTabletControl(ctx context.Context, keyspace, shard s // to entirely remove a shard. It can only work if there are no tablets // in that shard. 
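// Editor's note: DeleteShard below removes the per-cell data first
// (ShardReplication, EndPoints, SrvShard, each tolerating an
// already-missing node) and only then the global shard record; doing it
// in that order keeps the Cells list available for a retry if a
// per-cell delete fails half way. A standalone sketch of the ordering;
// both delete callbacks are illustrative:

package main

import (
	"errors"
	"fmt"
)

var errNoNode = errors.New("node doesn't exist") // stands in for topo.ErrNoNode

func deleteShard(cells []string, deleteServingData func(cell string) error, deleteShardRecord func() error) error {
	for _, cell := range cells {
		// best effort per cell: log and keep going, as above
		if err := deleteServingData(cell); err != nil && err != errNoNode {
			fmt.Printf("cannot delete serving data in cell %v: %v\n", cell, err)
		}
	}
	// the global record goes last
	return deleteShardRecord()
}

func main() {
	err := deleteShard([]string{"cell1", "cell2"},
		func(cell string) error { return errNoNode }, // already gone: fine
		func() error { return nil },
	)
	fmt.Println(err)
}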
func (wr *Wrangler) DeleteShard(ctx context.Context, keyspace, shard string) error { - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -168,7 +168,7 @@ func (wr *Wrangler) DeleteShard(ctx context.Context, keyspace, shard string) err // remove the replication graph and serving graph in each cell for _, cell := range shardInfo.Cells { - if err := wr.ts.DeleteShardReplication(cell, keyspace, shard); err != nil { + if err := wr.ts.DeleteShardReplication(ctx, cell, keyspace, shard); err != nil { wr.Logger().Warningf("Cannot delete ShardReplication in cell %v for %v/%v: %v", cell, keyspace, shard, err) } @@ -177,17 +177,17 @@ func (wr *Wrangler) DeleteShard(ctx context.Context, keyspace, shard string) err continue } - if err := wr.ts.DeleteEndPoints(cell, keyspace, shard, t); err != nil && err != topo.ErrNoNode { + if err := wr.ts.DeleteEndPoints(ctx, cell, keyspace, shard, t); err != nil && err != topo.ErrNoNode { wr.Logger().Warningf("Cannot delete EndPoints in cell %v for %v/%v/%v: %v", cell, keyspace, shard, t, err) } } - if err := wr.ts.DeleteSrvShard(cell, keyspace, shard); err != nil && err != topo.ErrNoNode { + if err := wr.ts.DeleteSrvShard(ctx, cell, keyspace, shard); err != nil && err != topo.ErrNoNode { wr.Logger().Warningf("Cannot delete SrvShard in cell %v for %v/%v: %v", cell, keyspace, shard, err) } } - return wr.ts.DeleteShard(keyspace, shard) + return wr.ts.DeleteShard(ctx, keyspace, shard) } // RemoveShardCell will remove a cell from the Cells list in a shard. @@ -207,7 +207,7 @@ func (wr *Wrangler) RemoveShardCell(ctx context.Context, keyspace, shard, cell s } func (wr *Wrangler) removeShardCell(ctx context.Context, keyspace, shard, cell string, force bool) error { - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -223,7 +223,7 @@ func (wr *Wrangler) removeShardCell(ctx context.Context, keyspace, shard, cell s } // get the ShardReplication object in the cell - sri, err := wr.ts.GetShardReplication(cell, keyspace, shard) + sri, err := wr.ts.GetShardReplication(ctx, cell, keyspace, shard) switch err { case nil: if len(sri.ReplicationLinks) > 0 { @@ -231,7 +231,7 @@ func (wr *Wrangler) removeShardCell(ctx context.Context, keyspace, shard, cell s } // ShardReplication object is now useless, remove it - if err := wr.ts.DeleteShardReplication(cell, keyspace, shard); err != nil { + if err := wr.ts.DeleteShardReplication(ctx, cell, keyspace, shard); err != nil { return fmt.Errorf("error deleting ShardReplication object in cell %v: %v", cell, err) } @@ -273,7 +273,7 @@ func (wr *Wrangler) SourceShardDelete(ctx context.Context, keyspace, shard strin } func (wr *Wrangler) sourceShardDelete(ctx context.Context, keyspace, shard string, uid uint32) error { - si, err := wr.ts.GetShard(keyspace, shard) + si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -306,7 +306,7 @@ func (wr *Wrangler) SourceShardAdd(ctx context.Context, keyspace, shard string, } func (wr *Wrangler) sourceShardAdd(ctx context.Context, keyspace, shard string, uid uint32, skeyspace, sshard string, keyRange key.KeyRange, tables []string) error { - si, err := wr.ts.GetShard(keyspace, shard) + si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } diff --git a/go/vt/wrangler/split.go b/go/vt/wrangler/split.go index 8775407e02..0034930d70 100644 --- a/go/vt/wrangler/split.go +++ 
b/go/vt/wrangler/split.go @@ -15,7 +15,7 @@ import ( // on a Shard. func (wr *Wrangler) SetSourceShards(ctx context.Context, keyspace, shard string, sources []topo.TabletAlias, tables []string) error { // read the shard - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } diff --git a/go/vt/wrangler/tablet.go b/go/vt/wrangler/tablet.go index bfa62afc43..cf364cbb35 100644 --- a/go/vt/wrangler/tablet.go +++ b/go/vt/wrangler/tablet.go @@ -37,7 +37,7 @@ func (wr *Wrangler) InitTablet(ctx context.Context, tablet *topo.Tablet, force, // create the parent keyspace and shard if needed si, err = topotools.GetOrCreateShard(ctx, wr.ts, tablet.Keyspace, tablet.Shard) } else { - si, err = wr.ts.GetShard(tablet.Keyspace, tablet.Shard) + si, err = wr.ts.GetShard(ctx, tablet.Keyspace, tablet.Shard) if err == topo.ErrNoNode { return fmt.Errorf("missing parent shard, use -parent option to create it, or CreateKeyspace / CreateShard") } @@ -64,7 +64,7 @@ func (wr *Wrangler) InitTablet(ctx context.Context, tablet *topo.Tablet, force, if err != nil && err == topo.ErrNodeExists { // Try to update nicely, but if it fails fall back to force behavior. if update || force { - oldTablet, err := wr.ts.GetTablet(tablet.Alias) + oldTablet, err := wr.ts.GetTablet(ctx, tablet.Alias) if err != nil { wr.Logger().Warningf("failed reading tablet %v: %v", tablet.Alias, err) } else { @@ -93,7 +93,7 @@ func (wr *Wrangler) InitTablet(ctx context.Context, tablet *topo.Tablet, force, wr.Logger().Errorf("failed scrapping tablet %v: %v", tablet.Alias, err) return err } - if err := wr.ts.DeleteTablet(tablet.Alias); err != nil { + if err := wr.ts.DeleteTablet(ctx, tablet.Alias); err != nil { // we ignore this wr.Logger().Errorf("failed deleting tablet %v: %v", tablet.Alias, err) } @@ -110,7 +110,7 @@ func (wr *Wrangler) InitTablet(ctx context.Context, tablet *topo.Tablet, force, // from the Shard object (only if that was the right master) func (wr *Wrangler) Scrap(ctx context.Context, tabletAlias topo.TabletAlias, force, skipRebuild bool) error { // load the tablet, see if we'll need to rebuild - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -144,7 +144,7 @@ func (wr *Wrangler) Scrap(ctx context.Context, tabletAlias topo.TabletAlias, for } // read the shard with the lock - si, err := wr.ts.GetShard(ti.Keyspace, ti.Shard) + si, err := wr.ts.GetShard(ctx, ti.Keyspace, ti.Shard) if err != nil { return wr.unlockShard(ctx, ti.Keyspace, ti.Shard, actionNode, lockPath, err) } @@ -203,7 +203,7 @@ func (wr *Wrangler) ChangeTypeNoRebuild(ctx context.Context, tabletAlias topo.Ta // Load tablet to find keyspace and shard assignment. // Don't load after the ChangeType which might have unassigned // the tablet. - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return false, "", "", "", err } @@ -220,7 +220,7 @@ func (wr *Wrangler) ChangeTypeNoRebuild(ctx context.Context, tabletAlias topo.Ta if !ti.Tablet.IsInServingGraph() { // re-read the tablet, see if we become serving - ti, err = wr.ts.GetTablet(tabletAlias) + ti, err = wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return false, "", "", "", err } @@ -235,7 +235,7 @@ func (wr *Wrangler) ChangeTypeNoRebuild(ctx context.Context, tabletAlias topo.Ta // same as ChangeType, but assume we already have the shard lock, // and do not have the option to force anything. 
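Several wrangler paths above follow the same locking discipline: take the shard lock, re-read the shard under the lock (the copy read before locking may be stale), mutate, then hand the pending error to the unlock helper so the lock is always released and the original failure is preserved. A sketch with hypothetical lockShard/unlockShard signatures, modeled loosely on the calls visible in these hunks:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

func lockShard(ctx context.Context, keyspace, shard string) (lockPath string, err error) {
	return "/global/keyspaces/" + keyspace + "/shards/" + shard + "/action/0001", nil
}

// unlockShard always runs and receives the operation's error, so the lock is
// released on both success and failure paths and the original error wins.
func unlockShard(ctx context.Context, keyspace, shard, lockPath string, opErr error) error {
	fmt.Println("unlocked", lockPath)
	return opErr
}

func mutateShard(ctx context.Context, keyspace, shard string) error {
	// Re-read the shard *after* acquiring the lock before changing it.
	fmt.Println("re-read and updated", keyspace+"/"+shard)
	return nil
}

func main() {
	ctx := context.Background()
	lockPath, err := lockShard(ctx, "test_keyspace", "0")
	if err != nil {
		fmt.Println(err)
		return
	}
	err = mutateShard(ctx, "test_keyspace", "0")
	err = unlockShard(ctx, "test_keyspace", "0", lockPath, err)
	fmt.Println("final error:", err)
}
```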
func (wr *Wrangler) changeTypeInternal(ctx context.Context, tabletAlias topo.TabletAlias, dbType topo.TabletType) error { - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -258,8 +258,8 @@ func (wr *Wrangler) changeTypeInternal(ctx context.Context, tabletAlias topo.Tab // DeleteTablet will get the tablet record, and if it's scrapped, will // delete the record from the topology. -func (wr *Wrangler) DeleteTablet(tabletAlias topo.TabletAlias) error { - ti, err := wr.ts.GetTablet(tabletAlias) +func (wr *Wrangler) DeleteTablet(ctx context.Context, tabletAlias topo.TabletAlias) error { + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return err } @@ -267,12 +267,12 @@ func (wr *Wrangler) DeleteTablet(tabletAlias topo.TabletAlias) error { if ti.Type != topo.TYPE_SCRAP { return fmt.Errorf("Can only delete scrapped tablets") } - return wr.TopoServer().DeleteTablet(tabletAlias) + return wr.TopoServer().DeleteTablet(ctx, tabletAlias) } // ExecuteFetchAsDba executes a query remotely using the DBA pool func (wr *Wrangler) ExecuteFetchAsDba(ctx context.Context, tabletAlias topo.TabletAlias, query string, maxRows int, wantFields, disableBinlogs bool, reloadSchema bool) (*mproto.QueryResult, error) { - ti, err := wr.ts.GetTablet(tabletAlias) + ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index b4c6e3f02d..8ac84d35d2 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -88,7 +88,7 @@ func TestBackupRestore(t *testing.T) { sourceTablet.StartActionLoop(t, wr) defer sourceTablet.StopActionLoop(t) - ti, err := ts.GetTablet(sourceTablet.Tablet.Alias) + ti, err := ts.GetTablet(ctx, sourceTablet.Tablet.Alias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -149,7 +149,7 @@ func TestBackupRestore(t *testing.T) { destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) - if err := destTablet.Agent.RestoreFromBackup(); err != nil { + if err := destTablet.Agent.RestoreFromBackup(ctx); err != nil { t.Fatalf("RestoreFromBackup failed: %v", err) } diff --git a/go/vt/wrangler/testlib/init_shard_master_test.go b/go/vt/wrangler/testlib/init_shard_master_test.go index f0c04cb2b9..ce9df831a7 100644 --- a/go/vt/wrangler/testlib/init_shard_master_test.go +++ b/go/vt/wrangler/testlib/init_shard_master_test.go @@ -98,7 +98,7 @@ func TestInitMasterShard(t *testing.T) { if master.FakeMysqlDaemon.ReadOnly { t.Errorf("master was not turned read-write") } - si, err := ts.GetShard(master.Tablet.Keyspace, master.Tablet.Shard) + si, err := ts.GetShard(ctx, master.Tablet.Keyspace, master.Tablet.Shard) if err != nil { t.Fatalf("GetShard failed: %v", err) } @@ -211,7 +211,7 @@ func TestInitMasterShardOneSlaveFails(t *testing.T) { // also change the master alias in the Shard object, to make sure it // is set back. 
- si, err := ts.GetShard(master.Tablet.Keyspace, master.Tablet.Shard) + si, err := ts.GetShard(ctx, master.Tablet.Keyspace, master.Tablet.Shard) if err != nil { t.Fatalf("GetShard failed: %v", err) } @@ -235,7 +235,7 @@ func TestInitMasterShardOneSlaveFails(t *testing.T) { if master.FakeMysqlDaemon.ReadOnly { t.Errorf("master was not turned read-write") } - si, err = ts.GetShard(master.Tablet.Keyspace, master.Tablet.Shard) + si, err = ts.GetShard(ctx, master.Tablet.Keyspace, master.Tablet.Shard) if err != nil { t.Fatalf("GetShard failed: %v", err) } diff --git a/go/vt/wrangler/testlib/reparent_external_test.go b/go/vt/wrangler/testlib/reparent_external_test.go index 8015e3bc6b..e406364686 100644 --- a/go/vt/wrangler/testlib/reparent_external_test.go +++ b/go/vt/wrangler/testlib/reparent_external_test.go @@ -47,7 +47,7 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { // Add a new Cell to the Shard, that doesn't map to any read topo cell, // to simulate a data center being unreachable. - si, err := ts.GetShard("test_keyspace", "0") + si, err := ts.GetShard(ctx, "test_keyspace", "0") if err != nil { t.Fatalf("GetShard failed: %v", err) } @@ -117,7 +117,7 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { // First test: reparent to the same master, make sure it works // as expected. tmc := tmclient.NewTabletManagerClient() - ti, err := ts.GetTablet(oldMaster.Tablet.Alias) + ti, err := ts.GetTablet(ctx, oldMaster.Tablet.Alias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -130,7 +130,7 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { // This tests a bad case; the new designated master is a slave, // but we should do what we're told anyway - ti, err = ts.GetTablet(goodSlave1.Tablet.Alias) + ti, err = ts.GetTablet(ctx, goodSlave1.Tablet.Alias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -140,7 +140,7 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { // This tests the good case, where everything works as planned t.Logf("TabletExternallyReparented(new master) expecting success") - ti, err = ts.GetTablet(newMaster.Tablet.Alias) + ti, err = ts.GetTablet(ctx, newMaster.Tablet.Alias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -154,7 +154,7 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { // Now double-check the serving graph is good. // Should only have one good replica left. 
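The reparent tests close by asserting on the serving graph. A tiny sketch of that assertion; the endPoints/Entries shape mirrors how topo.EndPoints is used in these tests and is an assumption, not a quote of the real struct:

```go
package main

import "fmt"

type endPoint struct{ Uid uint32 }
type endPoints struct{ Entries []endPoint }

// checkReplicaCount mimics the test's "should only have one good replica left" check.
func checkReplicaCount(addrs *endPoints, want int) error {
	if got := len(addrs.Entries); got != want {
		return fmt.Errorf("GetEndPoints: got %v entries, want %v", got, want)
	}
	return nil
}

func main() {
	fmt.Println(checkReplicaCount(&endPoints{Entries: []endPoint{{Uid: 2}}}, 1)) // <nil>
}
```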
- addrs, err := ts.GetEndPoints("cell1", "test_keyspace", "0", topo.TYPE_REPLICA) + addrs, err := ts.GetEndPoints(ctx, "cell1", "test_keyspace", "0", topo.TYPE_REPLICA) if err != nil { t.Fatalf("GetEndPoints failed at the end: %v", err) } @@ -177,6 +177,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPortFast(t *testing.T) { func testTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T, fast bool) { tabletmanager.SetReparentFlags(fast, time.Minute /* finalizeTimeout */) + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1"}) wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) @@ -208,7 +209,7 @@ func testTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T, fast boo // This tests the good case, where everything works as planned t.Logf("TabletExternallyReparented(new master) expecting success") tmc := tmclient.NewTabletManagerClient() - ti, err := ts.GetTablet(newMaster.Tablet.Alias) + ti, err := ts.GetTablet(ctx, newMaster.Tablet.Alias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -230,6 +231,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedMasterFast(t *testing.T) func testTabletExternallyReparentedContinueOnUnexpectedMaster(t *testing.T, fast bool) { tabletmanager.SetReparentFlags(fast, time.Minute /* finalizeTimeout */) + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1"}) wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) @@ -257,7 +259,7 @@ func testTabletExternallyReparentedContinueOnUnexpectedMaster(t *testing.T, fast // This tests the good case, where everything works as planned t.Logf("TabletExternallyReparented(new master) expecting success") tmc := tmclient.NewTabletManagerClient() - ti, err := ts.GetTablet(newMaster.Tablet.Alias) + ti, err := ts.GetTablet(ctx, newMaster.Tablet.Alias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -277,6 +279,7 @@ func TestTabletExternallyReparentedFailedOldMasterFast(t *testing.T) { func testTabletExternallyReparentedFailedOldMaster(t *testing.T, fast bool) { tabletmanager.SetReparentFlags(fast, time.Minute /* finalizeTimeout */) + ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) @@ -304,7 +307,7 @@ func testTabletExternallyReparentedFailedOldMaster(t *testing.T, fast bool) { // The reparent should work as expected here t.Logf("TabletExternallyReparented(new master) expecting success") tmc := tmclient.NewTabletManagerClient() - ti, err := ts.GetTablet(newMaster.Tablet.Alias) + ti, err := ts.GetTablet(ctx, newMaster.Tablet.Alias) if err != nil { t.Fatalf("GetTablet failed: %v", err) } @@ -318,7 +321,7 @@ func testTabletExternallyReparentedFailedOldMaster(t *testing.T, fast bool) { // Now double-check the serving graph is good. // Should only have one good replica left. 
- addrs, err := ts.GetEndPoints("cell1", "test_keyspace", "0", topo.TYPE_REPLICA) + addrs, err := ts.GetEndPoints(ctx, "cell1", "test_keyspace", "0", topo.TYPE_REPLICA) if err != nil { t.Fatalf("GetEndPoints failed at the end: %v", err) } @@ -327,7 +330,7 @@ func testTabletExternallyReparentedFailedOldMaster(t *testing.T, fast bool) { } // check the old master was converted to spare - tablet, err := ts.GetTablet(oldMaster.Tablet.Alias) + tablet, err := ts.GetTablet(ctx, oldMaster.Tablet.Alias) if err != nil { t.Fatalf("GetTablet(%v) failed: %v", oldMaster.Tablet.Alias, err) } diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index 55faffa1db..5fdf36a37a 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -24,14 +24,14 @@ func TestShardReplicationStatuses(t *testing.T) { wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) // create shard and tablets - if err := topo.CreateShard(ts, "test_keyspace", "0"); err != nil { + if err := topo.CreateShard(ctx, ts, "test_keyspace", "0"); err != nil { t.Fatalf("CreateShard failed: %v", err) } master := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_MASTER) slave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA) // mark the master inside the shard - si, err := ts.GetShard("test_keyspace", "0") + si, err := ts.GetShard(ctx, "test_keyspace", "0") if err != nil { t.Fatalf("GetShard failed: %v", err) } @@ -92,14 +92,14 @@ func TestReparentTablet(t *testing.T) { wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) // create shard and tablets - if err := topo.CreateShard(ts, "test_keyspace", "0"); err != nil { + if err := topo.CreateShard(ctx, ts, "test_keyspace", "0"); err != nil { t.Fatalf("CreateShard failed: %v", err) } master := NewFakeTablet(t, wr, "cell1", 1, topo.TYPE_MASTER) slave := NewFakeTablet(t, wr, "cell1", 2, topo.TYPE_REPLICA) // mark the master inside the shard - si, err := ts.GetShard("test_keyspace", "0") + si, err := ts.GetShard(ctx, "test_keyspace", "0") if err != nil { t.Fatalf("GetShard failed: %v", err) } diff --git a/go/vt/wrangler/validator.go b/go/vt/wrangler/validator.go index f7f366e79d..1378da4cc0 100644 --- a/go/vt/wrangler/validator.go +++ b/go/vt/wrangler/validator.go @@ -44,13 +44,13 @@ func (wr *Wrangler) waitForResults(wg *sync.WaitGroup, results chan error) error func (wr *Wrangler) validateAllTablets(ctx context.Context, wg *sync.WaitGroup, results chan<- error) { cellSet := make(map[string]bool, 16) - keyspaces, err := wr.ts.GetKeyspaces() + keyspaces, err := wr.ts.GetKeyspaces(ctx) if err != nil { results <- fmt.Errorf("TopologyServer.GetKeyspaces failed: %v", err) return } for _, keyspace := range keyspaces { - shards, err := wr.ts.GetShardNames(keyspace) + shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { results <- fmt.Errorf("TopologyServer.GetShardNames(%v) failed: %v", keyspace, err) return @@ -69,7 +69,7 @@ func (wr *Wrangler) validateAllTablets(ctx context.Context, wg *sync.WaitGroup, } for cell := range cellSet { - aliases, err := wr.ts.GetTabletsByCell(cell) + aliases, err := wr.ts.GetTabletsByCell(ctx, cell) if err != nil { results <- fmt.Errorf("TopologyServer.GetTabletsByCell(%v) failed: %v", cell, err) continue @@ -79,7 +79,7 @@ func (wr *Wrangler) validateAllTablets(ctx context.Context, wg *sync.WaitGroup, wg.Add(1) go func(alias topo.TabletAlias) { defer wg.Done() - if err := 
topo.Validate(wr.ts, alias); err != nil { + if err := topo.Validate(ctx, wr.ts, alias); err != nil { results <- fmt.Errorf("Validate(%v) failed: %v", alias, err) } else { wr.Logger().Infof("tablet %v is valid", alias) @@ -91,7 +91,7 @@ func (wr *Wrangler) validateAllTablets(ctx context.Context, wg *sync.WaitGroup, func (wr *Wrangler) validateKeyspace(ctx context.Context, keyspace string, pingTablets bool, wg *sync.WaitGroup, results chan<- error) { // Validate replication graph by traversing each shard. - shards, err := wr.ts.GetShardNames(keyspace) + shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { results <- fmt.Errorf("TopologyServer.GetShardNames(%v) failed: %v", keyspace, err) return @@ -108,7 +108,7 @@ func (wr *Wrangler) validateKeyspace(ctx context.Context, keyspace string, pingT // FIXME(msolomon) This validate presumes the master is up and running. // Even when that isn't true, there are validation processes that might be valuable. func (wr *Wrangler) validateShard(ctx context.Context, keyspace, shard string, pingTablets bool, wg *sync.WaitGroup, results chan<- error) { - shardInfo, err := wr.ts.GetShard(keyspace, shard) + shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { results <- fmt.Errorf("TopologyServer.GetShard(%v, %v) failed: %v", keyspace, shard, err) return @@ -148,7 +148,7 @@ func (wr *Wrangler) validateShard(ctx context.Context, keyspace, shard string, p wg.Add(1) go func(alias topo.TabletAlias) { defer wg.Done() - if err := topo.Validate(wr.ts, alias); err != nil { + if err := topo.Validate(ctx, wr.ts, alias); err != nil { results <- fmt.Errorf("Validate(%v) failed: %v", alias, err) } else { wr.Logger().Infof("tablet %v is valid", alias) @@ -243,7 +243,7 @@ func (wr *Wrangler) Validate(ctx context.Context, pingTablets bool) error { }() // Validate replication graph by traversing each keyspace and then each shard. 
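validateAllTablets fans out one goroutine per tablet alias, with a sync.WaitGroup for completion and a channel collecting errors; note how the alias is passed as a goroutine argument rather than captured from the loop variable. A self-contained sketch of the same pattern (validate is a stand-in for topo.Validate):

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/net/context"
)

func validate(ctx context.Context, alias string) error {
	if alias == "cell1-2" {
		return fmt.Errorf("Validate(%v) failed: broken replication link", alias)
	}
	return nil
}

func main() {
	ctx := context.Background()
	aliases := []string{"cell1-1", "cell1-2", "cell1-3"}
	results := make(chan error, len(aliases)) // buffered so workers never block
	wg := sync.WaitGroup{}
	for _, alias := range aliases {
		wg.Add(1)
		go func(alias string) { // pass alias as an argument: the loop variable is reused
			defer wg.Done()
			if err := validate(ctx, alias); err != nil {
				results <- err
			}
		}(alias)
	}
	wg.Wait()
	close(results)
	for err := range results {
		fmt.Println(err)
	}
}
```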
- keyspaces, err := wr.ts.GetKeyspaces() + keyspaces, err := wr.ts.GetKeyspaces(ctx) if err != nil { results <- fmt.Errorf("GetKeyspaces failed: %v", err) } else { diff --git a/go/vt/wrangler/version.go b/go/vt/wrangler/version.go index c0c7c05c5b..68c129f8aa 100644 --- a/go/vt/wrangler/version.go +++ b/go/vt/wrangler/version.go @@ -45,8 +45,8 @@ var getVersionFromTablet = func(tabletAddr string) (string, error) { } // GetVersion returns the version string from a tablet -func (wr *Wrangler) GetVersion(tabletAlias topo.TabletAlias) (string, error) { - tablet, err := wr.ts.GetTablet(tabletAlias) +func (wr *Wrangler) GetVersion(ctx context.Context, tabletAlias topo.TabletAlias) (string, error) { + tablet, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return "", err } @@ -60,10 +60,10 @@ func (wr *Wrangler) GetVersion(tabletAlias topo.TabletAlias) (string, error) { } // helper method to asynchronously get and diff a version -func (wr *Wrangler) diffVersion(masterVersion string, masterAlias topo.TabletAlias, alias topo.TabletAlias, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { +func (wr *Wrangler) diffVersion(ctx context.Context, masterVersion string, masterAlias topo.TabletAlias, alias topo.TabletAlias, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { defer wg.Done() log.Infof("Gathering version for %v", alias) - slaveVersion, err := wr.GetVersion(alias) + slaveVersion, err := wr.GetVersion(ctx, alias) if err != nil { er.RecordError(err) return @@ -77,7 +77,7 @@ func (wr *Wrangler) diffVersion(masterVersion string, masterAlias topo.TabletAli // ValidateVersionShard validates all versions are the same in all // tablets in a shard func (wr *Wrangler) ValidateVersionShard(ctx context.Context, keyspace, shard string) error { - si, err := wr.ts.GetShard(keyspace, shard) + si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } @@ -87,7 +87,7 @@ func (wr *Wrangler) ValidateVersionShard(ctx context.Context, keyspace, shard st return fmt.Errorf("No master in shard %v/%v", keyspace, shard) } log.Infof("Gathering version for master %v", si.MasterAlias) - masterVersion, err := wr.GetVersion(si.MasterAlias) + masterVersion, err := wr.GetVersion(ctx, si.MasterAlias) if err != nil { return err } @@ -108,7 +108,7 @@ func (wr *Wrangler) ValidateVersionShard(ctx context.Context, keyspace, shard st } wg.Add(1) - go wr.diffVersion(masterVersion, si.MasterAlias, alias, &wg, &er) + go wr.diffVersion(ctx, masterVersion, si.MasterAlias, alias, &wg, &er) } wg.Wait() if er.HasErrors() { @@ -121,7 +121,7 @@ func (wr *Wrangler) ValidateVersionShard(ctx context.Context, keyspace, shard st // tablets in a keyspace func (wr *Wrangler) ValidateVersionKeyspace(ctx context.Context, keyspace string) error { // find all the shards - shards, err := wr.ts.GetShardNames(keyspace) + shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { return err } @@ -136,7 +136,7 @@ func (wr *Wrangler) ValidateVersionKeyspace(ctx context.Context, keyspace string } // find the reference version using the first shard's master - si, err := wr.ts.GetShard(keyspace, shards[0]) + si, err := wr.ts.GetShard(ctx, keyspace, shards[0]) if err != nil { return err } @@ -145,7 +145,7 @@ func (wr *Wrangler) ValidateVersionKeyspace(ctx context.Context, keyspace string } referenceAlias := si.MasterAlias log.Infof("Gathering version for reference master %v", referenceAlias) - referenceVersion, err := wr.GetVersion(referenceAlias) + referenceVersion, err := wr.GetVersion(ctx, referenceAlias) if err != nil { 
return err } @@ -166,7 +166,7 @@ func (wr *Wrangler) ValidateVersionKeyspace(ctx context.Context, keyspace string } wg.Add(1) - go wr.diffVersion(referenceVersion, referenceAlias, alias, &wg, &er) + go wr.diffVersion(ctx, referenceVersion, referenceAlias, alias, &wg, &er) } } wg.Wait() diff --git a/go/vt/wrangler/zkns.go b/go/vt/wrangler/zkns.go index babe5742ad..2ec1694f99 100644 --- a/go/vt/wrangler/zkns.go +++ b/go/vt/wrangler/zkns.go @@ -18,7 +18,7 @@ import ( // ExportZkns exports addresses from the VT serving graph to a legacy zkns server. // Note these functions only work with a zktopo. -func (wr *Wrangler) ExportZkns(cell string) error { +func (wr *Wrangler) ExportZkns(ctx context.Context, cell string) error { zkTopo, ok := wr.ts.(*zktopo.Server) if !ok { return fmt.Errorf("ExportZkns only works with zktopo") @@ -45,7 +45,7 @@ func (wr *Wrangler) ExportZkns(cell string) error { continue } - if _, err = wr.exportVtnsToZkns(zconn, addrPath, zknsAddrPath); err != nil { + if _, err = wr.exportVtnsToZkns(ctx, zconn, addrPath, zknsAddrPath); err != nil { return err } } @@ -60,7 +60,7 @@ func (wr *Wrangler) ExportZknsForKeyspace(ctx context.Context, keyspace string) } zconn := zkTopo.GetZConn() - shardNames, err := wr.ts.GetShardNames(keyspace) + shardNames, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { return err } @@ -115,7 +115,7 @@ func (wr *Wrangler) ExportZknsForKeyspace(ctx context.Context, keyspace string) if stat.NumChildren() > 0 { continue } - zknsPathsWritten, err := wr.exportVtnsToZkns(zconn, vtnsAddrPath, zknsAddrPath) + zknsPathsWritten, err := wr.exportVtnsToZkns(ctx, zconn, vtnsAddrPath, zknsAddrPath) if err != nil { return err } @@ -141,7 +141,7 @@ func (wr *Wrangler) ExportZknsForKeyspace(ctx context.Context, keyspace string) return nil } -func (wr *Wrangler) exportVtnsToZkns(zconn zk.Conn, vtnsAddrPath, zknsAddrPath string) ([]string, error) { +func (wr *Wrangler) exportVtnsToZkns(ctx context.Context, zconn zk.Conn, vtnsAddrPath, zknsAddrPath string) ([]string, error) { zknsPaths := make([]string, 0, 32) parts := strings.Split(vtnsAddrPath, "/") if len(parts) != 8 && len(parts) != 9 { @@ -154,7 +154,7 @@ func (wr *Wrangler) exportVtnsToZkns(zconn zk.Conn, vtnsAddrPath, zknsAddrPath s if tabletType == "action" || tabletType == "actionlog" { return nil, nil } - addrs, err := wr.ts.GetEndPoints(cell, keyspace, shard, tabletType) + addrs, err := wr.ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType) if err != nil { return nil, err } diff --git a/go/vt/zktopo/cell.go b/go/vt/zktopo/cell.go index 321050fac8..2e8b37060e 100644 --- a/go/vt/zktopo/cell.go +++ b/go/vt/zktopo/cell.go @@ -8,13 +8,15 @@ import ( "sort" "github.com/youtube/vitess/go/zk" + "golang.org/x/net/context" ) /* This file contains the cell management methods of zktopo.Server */ -func (zkts *Server) GetKnownCells() ([]string, error) { +// GetKnownCells is part of the topo.Server interface +func (zkts *Server) GetKnownCells(ctx context.Context) ([]string, error) { cellsWithGlobal, err := zk.ZkKnownCells() if err != nil { return cellsWithGlobal, err diff --git a/go/vt/zktopo/keyspace.go b/go/vt/zktopo/keyspace.go index 5573018ca3..fdb784ffc2 100644 --- a/go/vt/zktopo/keyspace.go +++ b/go/vt/zktopo/keyspace.go @@ -15,6 +15,7 @@ import ( "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/events" "github.com/youtube/vitess/go/zk" + "golang.org/x/net/context" "launchpad.net/gozk/zookeeper" ) @@ -27,7 +28,7 @@ const ( ) // CreateKeyspace is part of the topo.Server interface 
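diffVersion above now receives the caller's ctx even though it runs as a goroutine; a Context is safe for concurrent use, and canceling the parent stops every in-flight diff at once. A sketch with a fake diffVersion that only sleeps:

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/net/context"
)

func diffVersion(ctx context.Context, alias string, wg *sync.WaitGroup) {
	defer wg.Done()
	select {
	case <-time.After(time.Duration(len(alias)) * 10 * time.Millisecond):
		fmt.Println("version gathered for", alias)
	case <-ctx.Done():
		fmt.Println("aborted for", alias+":", ctx.Err())
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	wg := sync.WaitGroup{}
	for _, alias := range []string{"cell1-1", "cell1-22", "cell1-333"} {
		wg.Add(1)
		go diffVersion(ctx, alias, &wg)
	}
	time.Sleep(15 * time.Millisecond)
	cancel() // one shared cancel stops all remaining workers early
	wg.Wait()
}
```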
-func (zkts *Server) CreateKeyspace(keyspace string, value *topo.Keyspace) error { +func (zkts *Server) CreateKeyspace(ctx context.Context, keyspace string, value *topo.Keyspace) error { keyspacePath := path.Join(globalKeyspacesPath, keyspace) pathList := []string{ keyspacePath, @@ -63,7 +64,7 @@ func (zkts *Server) CreateKeyspace(keyspace string, value *topo.Keyspace) error } // UpdateKeyspace is part of the topo.Server interface -func (zkts *Server) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) { +func (zkts *Server) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) { keyspacePath := path.Join(globalKeyspacesPath, ki.KeyspaceName()) data := jscfg.ToJSON(ki.Keyspace) stat, err := zkts.zconn.Set(keyspacePath, data, int(existingVersion)) @@ -82,7 +83,7 @@ func (zkts *Server) UpdateKeyspace(ki *topo.KeyspaceInfo, existingVersion int64) } // GetKeyspace is part of the topo.Server interface -func (zkts *Server) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) { +func (zkts *Server) GetKeyspace(ctx context.Context, keyspace string) (*topo.KeyspaceInfo, error) { keyspacePath := path.Join(globalKeyspacesPath, keyspace) data, stat, err := zkts.zconn.Get(keyspacePath) if err != nil { @@ -101,7 +102,7 @@ func (zkts *Server) GetKeyspace(keyspace string) (*topo.KeyspaceInfo, error) { } // GetKeyspaces is part of the topo.Server interface -func (zkts *Server) GetKeyspaces() ([]string, error) { +func (zkts *Server) GetKeyspaces(ctx context.Context) ([]string, error) { children, _, err := zkts.zconn.Children(globalKeyspacesPath) if err != nil { if zookeeper.IsError(err, zookeeper.ZNONODE) { @@ -115,7 +116,7 @@ func (zkts *Server) GetKeyspaces() ([]string, error) { } // DeleteKeyspaceShards is part of the topo.Server interface -func (zkts *Server) DeleteKeyspaceShards(keyspace string) error { +func (zkts *Server) DeleteKeyspaceShards(ctx context.Context, keyspace string) error { shardsPath := path.Join(globalKeyspacesPath, keyspace, "shards") if err := zk.DeleteRecursive(zkts.zconn, shardsPath, -1); err != nil && !zookeeper.IsError(err, zookeeper.ZNONODE) { return err diff --git a/go/vt/zktopo/lock.go b/go/vt/zktopo/lock.go index 3f14269296..4002e9a48a 100644 --- a/go/vt/zktopo/lock.go +++ b/go/vt/zktopo/lock.go @@ -106,6 +106,7 @@ func (zkts *Server) unlockForAction(lockPath, results string) error { return zk.DeleteRecursive(zkts.zconn, lockPath, -1) } +// LockKeyspaceForAction is part of topo.Server interface func (zkts *Server) LockKeyspaceForAction(ctx context.Context, keyspace, contents string) (string, error) { // Action paths end in a trailing slash to that when we create // sequential nodes, they are created as children, not siblings. @@ -113,10 +114,12 @@ func (zkts *Server) LockKeyspaceForAction(ctx context.Context, keyspace, content return zkts.lockForAction(ctx, actionDir, contents) } -func (zkts *Server) UnlockKeyspaceForAction(keyspace, lockPath, results string) error { +// UnlockKeyspaceForAction is part of topo.Server interface +func (zkts *Server) UnlockKeyspaceForAction(ctx context.Context, keyspace, lockPath, results string) error { return zkts.unlockForAction(lockPath, results) } +// LockShardForAction is part of topo.Server interface func (zkts *Server) LockShardForAction(ctx context.Context, keyspace, shard, contents string) (string, error) { // Action paths end in a trailing slash to that when we create // sequential nodes, they are created as children, not siblings. 
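The trailing-slash comment in the lock code deserves unpacking: ZooKeeper sequence nodes are created by appending a numeric suffix to the supplied path, so the slash decides whether the new node becomes a child of the action directory or a stray sibling next to it. A string-only illustration (made-up path and sequence number, no real ZooKeeper calls):

```go
package main

import "fmt"

func main() {
	const seq = "0000000042" // suffix ZooKeeper appends when the SEQUENCE flag is set
	fmt.Println("/vt/keyspaces/ks/action/" + seq) // child of action/: lock waiters queue here
	fmt.Println("/vt/keyspaces/ks/action" + seq)  // no slash: a sibling named action0000000042
}
```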
@@ -124,10 +127,12 @@ func (zkts *Server) LockShardForAction(ctx context.Context, keyspace, shard, con return zkts.lockForAction(ctx, actionDir, contents) } -func (zkts *Server) UnlockShardForAction(keyspace, shard, lockPath, results string) error { +// UnlockShardForAction is part of topo.Server interface +func (zkts *Server) UnlockShardForAction(ctx context.Context, keyspace, shard, lockPath, results string) error { return zkts.unlockForAction(lockPath, results) } +// LockSrvShardForAction is part of topo.Server interface func (zkts *Server) LockSrvShardForAction(ctx context.Context, cell, keyspace, shard, contents string) (string, error) { // Action paths end in a trailing slash to that when we create // sequential nodes, they are created as children, not siblings. @@ -146,6 +151,7 @@ func (zkts *Server) LockSrvShardForAction(ctx context.Context, cell, keyspace, s return p, err } -func (zkts *Server) UnlockSrvShardForAction(cell, keyspace, shard, lockPath, results string) error { +// UnlockSrvShardForAction is part of topo.Server interface +func (zkts *Server) UnlockSrvShardForAction(ctx context.Context, cell, keyspace, shard, lockPath, results string) error { return zkts.unlockForAction(lockPath, results) } diff --git a/go/vt/zktopo/replication_graph.go b/go/vt/zktopo/replication_graph.go index 1dbd712409..8c8dce2ee9 100644 --- a/go/vt/zktopo/replication_graph.go +++ b/go/vt/zktopo/replication_graph.go @@ -12,6 +12,7 @@ import ( "github.com/youtube/vitess/go/jscfg" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/zk" + "golang.org/x/net/context" "launchpad.net/gozk/zookeeper" ) @@ -24,7 +25,7 @@ func shardReplicationPath(cell, keyspace, shard string) string { } // UpdateShardReplicationFields is part of the topo.Server interface -func (zkts *Server) UpdateShardReplicationFields(cell, keyspace, shard string, update func(*topo.ShardReplication) error) error { +func (zkts *Server) UpdateShardReplicationFields(ctx context.Context, cell, keyspace, shard string, update func(*topo.ShardReplication) error) error { // create the parent directory to be sure it's here zkDir := path.Join("/zk", cell, "vt", "replication", keyspace) if _, err := zk.CreateRecursive(zkts.zconn, zkDir, "", 0, zookeeper.WorldACL(zookeeper.PERM_ALL)); err != nil && !zookeeper.IsError(err, zookeeper.ZNODEEXISTS) { @@ -57,7 +58,7 @@ func (zkts *Server) UpdateShardReplicationFields(cell, keyspace, shard string, u } // GetShardReplication is part of the topo.Server interface -func (zkts *Server) GetShardReplication(cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) { +func (zkts *Server) GetShardReplication(ctx context.Context, cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) { zkPath := shardReplicationPath(cell, keyspace, shard) data, _, err := zkts.zconn.Get(zkPath) if err != nil { @@ -76,7 +77,7 @@ func (zkts *Server) GetShardReplication(cell, keyspace, shard string) (*topo.Sha } // DeleteShardReplication is part of the topo.Server interface -func (zkts *Server) DeleteShardReplication(cell, keyspace, shard string) error { +func (zkts *Server) DeleteShardReplication(ctx context.Context, cell, keyspace, shard string) error { zkPath := shardReplicationPath(cell, keyspace, shard) err := zkts.zconn.Delete(zkPath, -1) if err != nil { diff --git a/go/vt/zktopo/serving_graph.go b/go/vt/zktopo/serving_graph.go index ed036e090a..ec3d99bd3f 100644 --- a/go/vt/zktopo/serving_graph.go +++ b/go/vt/zktopo/serving_graph.go @@ -15,6 +15,7 @@ import ( "github.com/youtube/vitess/go/jscfg" 
"github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/zk" + "golang.org/x/net/context" "launchpad.net/gozk/zookeeper" ) @@ -44,7 +45,7 @@ func zkPathForVtName(cell, keyspace, shard string, tabletType topo.TabletType) s } // GetSrvTabletTypesPerShard is part of the topo.Server interface -func (zkts *Server) GetSrvTabletTypesPerShard(cell, keyspace, shard string) ([]topo.TabletType, error) { +func (zkts *Server) GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]topo.TabletType, error) { zkSgShardPath := zkPathForVtShard(cell, keyspace, shard) children, _, err := zkts.zconn.Children(zkSgShardPath) if err != nil { @@ -65,7 +66,7 @@ func (zkts *Server) GetSrvTabletTypesPerShard(cell, keyspace, shard string) ([]t } // UpdateEndPoints is part of the topo.Server interface -func (zkts *Server) UpdateEndPoints(cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error { +func (zkts *Server) UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error { path := zkPathForVtName(cell, keyspace, shard, tabletType) data := jscfg.ToJSON(addrs) _, err := zk.CreateRecursive(zkts.zconn, path, data, 0, zookeeper.WorldACL(zookeeper.PERM_ALL)) @@ -83,7 +84,7 @@ func (zkts *Server) UpdateEndPoints(cell, keyspace, shard string, tabletType top } // GetEndPoints is part of the topo.Server interface -func (zkts *Server) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { +func (zkts *Server) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { path := zkPathForVtName(cell, keyspace, shard, tabletType) data, _, err := zkts.zconn.Get(path) if err != nil { @@ -102,7 +103,7 @@ func (zkts *Server) GetEndPoints(cell, keyspace, shard string, tabletType topo.T } // DeleteEndPoints is part of the topo.Server interface -func (zkts *Server) DeleteEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) error { +func (zkts *Server) DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) error { path := zkPathForVtName(cell, keyspace, shard, tabletType) err := zkts.zconn.Delete(path, -1) if err != nil { @@ -115,7 +116,7 @@ func (zkts *Server) DeleteEndPoints(cell, keyspace, shard string, tabletType top } // UpdateSrvShard is part of the topo.Server interface -func (zkts *Server) UpdateSrvShard(cell, keyspace, shard string, srvShard *topo.SrvShard) error { +func (zkts *Server) UpdateSrvShard(ctx context.Context, cell, keyspace, shard string, srvShard *topo.SrvShard) error { path := zkPathForVtShard(cell, keyspace, shard) data := jscfg.ToJSON(srvShard) _, err := zkts.zconn.Set(path, data, -1) @@ -123,7 +124,7 @@ func (zkts *Server) UpdateSrvShard(cell, keyspace, shard string, srvShard *topo. 
} // GetSrvShard is part of the topo.Server interface -func (zkts *Server) GetSrvShard(cell, keyspace, shard string) (*topo.SrvShard, error) { +func (zkts *Server) GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { path := zkPathForVtShard(cell, keyspace, shard) data, stat, err := zkts.zconn.Get(path) if err != nil { @@ -142,7 +143,7 @@ func (zkts *Server) GetSrvShard(cell, keyspace, shard string) (*topo.SrvShard, e } // DeleteSrvShard is part of the topo.Server interface -func (zkts *Server) DeleteSrvShard(cell, keyspace, shard string) error { +func (zkts *Server) DeleteSrvShard(ctx context.Context, cell, keyspace, shard string) error { path := zkPathForVtShard(cell, keyspace, shard) err := zkts.zconn.Delete(path, -1) if err != nil { @@ -155,7 +156,7 @@ func (zkts *Server) DeleteSrvShard(cell, keyspace, shard string) error { } // UpdateSrvKeyspace is part of the topo.Server interface -func (zkts *Server) UpdateSrvKeyspace(cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error { +func (zkts *Server) UpdateSrvKeyspace(ctx context.Context, cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error { path := zkPathForVtKeyspace(cell, keyspace) data := jscfg.ToJSON(srvKeyspace) _, err := zkts.zconn.Set(path, data, -1) @@ -166,7 +167,7 @@ func (zkts *Server) UpdateSrvKeyspace(cell, keyspace string, srvKeyspace *topo.S } // GetSrvKeyspace is part of the topo.Server interface -func (zkts *Server) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error) { +func (zkts *Server) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { path := zkPathForVtKeyspace(cell, keyspace) data, stat, err := zkts.zconn.Get(path) if err != nil { @@ -185,7 +186,7 @@ func (zkts *Server) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, er } // GetSrvKeyspaceNames is part of the topo.Server interface -func (zkts *Server) GetSrvKeyspaceNames(cell string) ([]string, error) { +func (zkts *Server) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) { children, _, err := zkts.zconn.Children(zkPathForCell(cell)) if err != nil { if zookeeper.IsError(err, zookeeper.ZNONODE) { @@ -239,7 +240,7 @@ func (zkts *Server) updateTabletEndpoint(oldValue string, oldStat zk.Stat, addr } // UpdateTabletEndpoint is part of the topo.Server interface -func (zkts *Server) UpdateTabletEndpoint(cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error { +func (zkts *Server) UpdateTabletEndpoint(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error { path := zkPathForVtName(cell, keyspace, shard, tabletType) f := func(oldValue string, oldStat zk.Stat) (string, error) { return zkts.updateTabletEndpoint(oldValue, oldStat, addr) @@ -252,7 +253,7 @@ func (zkts *Server) UpdateTabletEndpoint(cell, keyspace, shard string, tabletTyp } // WatchEndPoints is part of the topo.Server interface -func (zkts *Server) WatchEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) { +func (zkts *Server) WatchEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) { filePath := zkPathForVtName(cell, keyspace, shard, tabletType) notifications := make(chan *topo.EndPoints, 10) diff --git a/go/vt/zktopo/shard.go b/go/vt/zktopo/shard.go index 87849224b3..f1275e5f16 100644 --- a/go/vt/zktopo/shard.go +++ b/go/vt/zktopo/shard.go @@ -15,6 
+15,7 @@ import ( "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/events" "github.com/youtube/vitess/go/zk" + "golang.org/x/net/context" "launchpad.net/gozk/zookeeper" ) @@ -23,7 +24,7 @@ This file contains the shard management code for zktopo.Server */ // CreateShard is part of the topo.Server interface -func (zkts *Server) CreateShard(keyspace, shard string, value *topo.Shard) error { +func (zkts *Server) CreateShard(ctx context.Context, keyspace, shard string, value *topo.Shard) error { shardPath := path.Join(globalKeyspacesPath, keyspace, "shards", shard) pathList := []string{ shardPath, @@ -58,7 +59,7 @@ func (zkts *Server) CreateShard(keyspace, shard string, value *topo.Shard) error } // UpdateShard is part of the topo.Server interface -func (zkts *Server) UpdateShard(si *topo.ShardInfo, existingVersion int64) (int64, error) { +func (zkts *Server) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVersion int64) (int64, error) { shardPath := path.Join(globalKeyspacesPath, si.Keyspace(), "shards", si.ShardName()) stat, err := zkts.zconn.Set(shardPath, jscfg.ToJSON(si.Shard), int(existingVersion)) if err != nil { @@ -76,7 +77,7 @@ func (zkts *Server) UpdateShard(si *topo.ShardInfo, existingVersion int64) (int6 } // ValidateShard is part of the topo.Server interface -func (zkts *Server) ValidateShard(keyspace, shard string) error { +func (zkts *Server) ValidateShard(ctx context.Context, keyspace, shard string) error { shardPath := path.Join(globalKeyspacesPath, keyspace, "shards", shard) zkPaths := []string{ path.Join(shardPath, "action"), @@ -92,7 +93,7 @@ func (zkts *Server) ValidateShard(keyspace, shard string) error { } // GetShard is part of the topo.Server interface -func (zkts *Server) GetShard(keyspace, shard string) (*topo.ShardInfo, error) { +func (zkts *Server) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) { shardPath := path.Join(globalKeyspacesPath, keyspace, "shards", shard) data, stat, err := zkts.zconn.Get(shardPath) if err != nil { @@ -111,7 +112,7 @@ func (zkts *Server) GetShard(keyspace, shard string) (*topo.ShardInfo, error) { } // GetShardNames is part of the topo.Server interface -func (zkts *Server) GetShardNames(keyspace string) ([]string, error) { +func (zkts *Server) GetShardNames(ctx context.Context, keyspace string) ([]string, error) { shardsPath := path.Join(globalKeyspacesPath, keyspace, "shards") children, _, err := zkts.zconn.Children(shardsPath) if err != nil { @@ -126,7 +127,7 @@ func (zkts *Server) GetShardNames(keyspace string) ([]string, error) { } // DeleteShard is part of the topo.Server interface -func (zkts *Server) DeleteShard(keyspace, shard string) error { +func (zkts *Server) DeleteShard(ctx context.Context, keyspace, shard string) error { shardPath := path.Join(globalKeyspacesPath, keyspace, "shards", shard) err := zk.DeleteRecursive(zkts.zconn, shardPath, -1) if err != nil { diff --git a/go/vt/zktopo/tablet.go b/go/vt/zktopo/tablet.go index 3f9eeb9862..ba2bb02ee2 100644 --- a/go/vt/zktopo/tablet.go +++ b/go/vt/zktopo/tablet.go @@ -14,6 +14,7 @@ import ( "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/events" "github.com/youtube/vitess/go/zk" + "golang.org/x/net/context" "launchpad.net/gozk/zookeeper" ) @@ -48,7 +49,7 @@ func tabletInfoFromJSON(data string, version int64) (*topo.TabletInfo, error) { } // CreateTablet is part of the topo.Server interface -func (zkts *Server) CreateTablet(tablet *topo.Tablet) error { +func (zkts *Server) 
CreateTablet(ctx context.Context, tablet *topo.Tablet) error { zkTabletPath := TabletPathForAlias(tablet.Alias) // Create /zk//vt/tablets/ @@ -68,7 +69,7 @@ func (zkts *Server) CreateTablet(tablet *topo.Tablet) error { } // UpdateTablet is part of the topo.Server interface -func (zkts *Server) UpdateTablet(tablet *topo.TabletInfo, existingVersion int64) (int64, error) { +func (zkts *Server) UpdateTablet(ctx context.Context, tablet *topo.TabletInfo, existingVersion int64) (int64, error) { zkTabletPath := TabletPathForAlias(tablet.Alias) stat, err := zkts.zconn.Set(zkTabletPath, tablet.JSON(), int(existingVersion)) if err != nil { @@ -89,7 +90,7 @@ func (zkts *Server) UpdateTablet(tablet *topo.TabletInfo, existingVersion int64) } // UpdateTabletFields is part of the topo.Server interface -func (zkts *Server) UpdateTabletFields(tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error { +func (zkts *Server) UpdateTabletFields(ctx context.Context, tabletAlias topo.TabletAlias, update func(*topo.Tablet) error) error { // Store the last tablet value so we can log it if the change succeeds. var lastTablet *topo.Tablet @@ -127,10 +128,10 @@ func (zkts *Server) UpdateTabletFields(tabletAlias topo.TabletAlias, update func } // DeleteTablet is part of the topo.Server interface -func (zkts *Server) DeleteTablet(alias topo.TabletAlias) error { - // We need to find out the keyspace and shard names because those are required - // in the TabletChange event. - ti, tiErr := zkts.GetTablet(alias) +func (zkts *Server) DeleteTablet(ctx context.Context, alias topo.TabletAlias) error { + // We need to find out the keyspace and shard names because + // those are required in the TabletChange event. + ti, tiErr := zkts.GetTablet(ctx, alias) zkTabletPath := TabletPathForAlias(alias) err := zk.DeleteRecursive(zkts.zconn, zkTabletPath, -1) @@ -158,7 +159,7 @@ func (zkts *Server) DeleteTablet(alias topo.TabletAlias) error { } // GetTablet is part of the topo.Server interface -func (zkts *Server) GetTablet(alias topo.TabletAlias) (*topo.TabletInfo, error) { +func (zkts *Server) GetTablet(ctx context.Context, alias topo.TabletAlias) (*topo.TabletInfo, error) { zkTabletPath := TabletPathForAlias(alias) data, stat, err := zkts.zconn.Get(zkTabletPath) if err != nil { @@ -171,7 +172,7 @@ func (zkts *Server) GetTablet(alias topo.TabletAlias) (*topo.TabletInfo, error) } // GetTabletsByCell is part of the topo.Server interface -func (zkts *Server) GetTabletsByCell(cell string) ([]topo.TabletAlias, error) { +func (zkts *Server) GetTabletsByCell(ctx context.Context, cell string) ([]topo.TabletAlias, error) { zkTabletsPath := tabletDirectoryForCell(cell) children, _, err := zkts.zconn.Children(zkTabletsPath) if err != nil { diff --git a/go/vt/zktopo/testserver.go b/go/vt/zktopo/testserver.go index 14b3391b4b..9e837aab44 100644 --- a/go/vt/zktopo/testserver.go +++ b/go/vt/zktopo/testserver.go @@ -11,6 +11,8 @@ import ( "launchpad.net/gozk/zookeeper" ) +// TestServer is a proxy for a real implementation of topo.Server that +// provides hooks for testing. 
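TestServer wraps the real zktopo server by embedding it, so every topo.Server method is forwarded by default and tests shadow only the few they need (plus optional hooks such as HookLockSrvShardForAction). A simplified sketch of the embedding pattern; the interface and types here are cut down, not the real ones:

```go
package main

import "fmt"

type server interface{ GetKnownCells() ([]string, error) }

type realServer struct{}

func (realServer) GetKnownCells() ([]string, error) { return []string{"global", "cell1"}, nil }

type testServer struct {
	server              // embedded: all methods forwarded by default
	localCells []string
	Hook       func() // tests can inject behavior (cf. HookLockSrvShardForAction)
}

// Shadows the embedded method: tests see only their declared cells.
func (s *testServer) GetKnownCells() ([]string, error) { return s.localCells, nil }

func main() {
	ts := &testServer{server: realServer{}, localCells: []string{"cell1"}}
	fmt.Println(ts.GetKnownCells()) // [cell1] <nil>
}
```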
type TestServer struct { topo.Server localCells []string @@ -18,6 +20,7 @@ type TestServer struct { HookLockSrvShardForAction func() } +// NewTestServer returns a new TestServer (with the required paths created) func NewTestServer(t *testing.T, cells []string) *TestServer { zconn := fakezk.NewConn() @@ -33,7 +36,8 @@ func NewTestServer(t *testing.T, cells []string) *TestServer { return &TestServer{Server: NewServer(zconn), localCells: cells} } -func (s *TestServer) GetKnownCells() ([]string, error) { +// GetKnownCells is part of topo.Server interface +func (s *TestServer) GetKnownCells(ctx context.Context) ([]string, error) { return s.localCells, nil } @@ -46,16 +50,16 @@ func (s *TestServer) LockSrvShardForAction(ctx context.Context, cell, keyspace, return s.Server.LockSrvShardForAction(ctx, cell, keyspace, shard, contents) } -// TODO(sougou): Remove these two functions after they're -// migrated into topo.Server. // SaveVSchema has to be redefined here. // Otherwise the test type assertion fails. -func (s *TestServer) SaveVSchema(vschema string) error { - return s.Server.(topo.Schemafier).SaveVSchema(vschema) +// TODO(sougou): Remove these two functions after they're +// migrated into topo.Server. +func (s *TestServer) SaveVSchema(ctx context.Context, vschema string) error { + return s.Server.(topo.Schemafier).SaveVSchema(ctx, vschema) } // GetVSchema has to be redefined here. // Otherwise the test type assertion fails. -func (s *TestServer) GetVSchema() (string, error) { - return s.Server.(topo.Schemafier).GetVSchema() +func (s *TestServer) GetVSchema(ctx context.Context) (string, error) { + return s.Server.(topo.Schemafier).GetVSchema(ctx) } diff --git a/go/vt/zktopo/vschema.go b/go/vt/zktopo/vschema.go index 33cbe60446..037120bd85 100644 --- a/go/vt/zktopo/vschema.go +++ b/go/vt/zktopo/vschema.go @@ -6,6 +6,7 @@ package zktopo import ( "github.com/youtube/vitess/go/vt/vtgate/planbuilder" + "golang.org/x/net/context" // vindexes needs to be imported so that they register // themselves against vtgate/planbuilder. This will allow // us to sanity check the schema being uploaded. @@ -23,7 +24,7 @@ const ( ) // SaveVSchema saves the JSON vschema into the topo. -func (zkts *Server) SaveVSchema(vschema string) error { +func (zkts *Server) SaveVSchema(ctx context.Context, vschema string) error { _, err := planbuilder.NewSchema([]byte(vschema)) if err != nil { return err @@ -33,7 +34,7 @@ func (zkts *Server) SaveVSchema(vschema string) error { } // GetVSchema fetches the JSON vschema from the topo. 
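The SaveVSchema/GetVSchema redefinitions above exist because the vschema methods live on the optional topo.Schemafier interface rather than on topo.Server itself, so TestServer must re-expose them via a type assertion on the embedded value. A compilable sketch of that delegation with simplified stand-in interfaces:

```go
package main

import "fmt"

type server interface{ GetKnownCells() ([]string, error) }

type schemafier interface {
	SaveVSchema(vschema string) error
	GetVSchema() (string, error)
}

type zkServer struct{ vschema string }

func (z *zkServer) GetKnownCells() ([]string, error) { return []string{"cell1"}, nil }
func (z *zkServer) SaveVSchema(v string) error       { z.vschema = v; return nil }
func (z *zkServer) GetVSchema() (string, error)      { return z.vschema, nil }

type testServer struct {
	server // embedded; its static type does not include the vschema methods
}

// Re-exposed so the wrapper also satisfies schemafier; the embedded value is
// asserted to the optional capability at call time.
func (s *testServer) SaveVSchema(v string) error  { return s.server.(schemafier).SaveVSchema(v) }
func (s *testServer) GetVSchema() (string, error) { return s.server.(schemafier).GetVSchema() }

func main() {
	ts := &testServer{server: &zkServer{}}
	if err := ts.SaveVSchema(`{"Keyspaces": {}}`); err != nil {
		fmt.Println(err)
	}
	fmt.Println(ts.GetVSchema())
}
```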
-func (zkts *Server) GetVSchema() (string, error) { +func (zkts *Server) GetVSchema(ctx context.Context) (string, error) { data, _, err := zkts.zconn.Get(globalVSchemaPath) if err != nil { if zookeeper.IsError(err, zookeeper.ZNONODE) { diff --git a/go/vt/zktopo/zktopo_test.go b/go/vt/zktopo/zktopo_test.go index de4cb09d16..cf5d782c89 100644 --- a/go/vt/zktopo/zktopo_test.go +++ b/go/vt/zktopo/zktopo_test.go @@ -14,27 +14,31 @@ import ( ) func TestKeyspace(t *testing.T) { + ctx := context.Background() ts := NewTestServer(t, []string{"test"}) defer ts.Close() - test.CheckKeyspace(t, ts) + test.CheckKeyspace(ctx, t, ts) } func TestShard(t *testing.T) { + ctx := context.Background() ts := NewTestServer(t, []string{"test"}) defer ts.Close() - test.CheckShard(context.Background(), t, ts) + test.CheckShard(ctx, t, ts) } func TestTablet(t *testing.T) { + ctx := context.Background() ts := NewTestServer(t, []string{"test"}) defer ts.Close() - test.CheckTablet(context.Background(), t, ts) + test.CheckTablet(ctx, t, ts) } func TestShardReplication(t *testing.T) { + ctx := context.Background() ts := NewTestServer(t, []string{"test"}) defer ts.Close() - test.CheckShardReplication(t, ts) + test.CheckShardReplication(ctx, t, ts) } func TestServingGraph(t *testing.T) { @@ -51,43 +55,48 @@ func TestWatchEndPoints(t *testing.T) { } func TestKeyspaceLock(t *testing.T) { + ctx := context.Background() ts := NewTestServer(t, []string{"test"}) defer ts.Close() - test.CheckKeyspaceLock(t, ts) + test.CheckKeyspaceLock(ctx, t, ts) } func TestShardLock(t *testing.T) { + ctx := context.Background() if testing.Short() { t.Skip("skipping wait-based test in short mode.") } ts := NewTestServer(t, []string{"test"}) defer ts.Close() - test.CheckShardLock(t, ts) + test.CheckShardLock(ctx, t, ts) } func TestSrvShardLock(t *testing.T) { + ctx := context.Background() if testing.Short() { t.Skip("skipping wait-based test in short mode.") } ts := NewTestServer(t, []string{"test"}) defer ts.Close() - test.CheckSrvShardLock(t, ts) + test.CheckSrvShardLock(ctx, t, ts) } func TestVSchema(t *testing.T) { + ctx := context.Background() ts := NewTestServer(t, []string{"test"}) defer ts.Close() - test.CheckVSchema(t, ts) + test.CheckVSchema(ctx, t, ts) } // TestPurgeActions is a ZK specific unit test func TestPurgeActions(t *testing.T) { + ctx := context.Background() ts := NewTestServer(t, []string{"test"}) defer ts.Close() - if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil { + if err := ts.CreateKeyspace(ctx, "test_keyspace", &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } @@ -115,10 +124,11 @@ func TestPurgeActions(t *testing.T) { // TestPruneActionLogs is a ZK specific unit test func TestPruneActionLogs(t *testing.T) { + ctx := context.Background() ts := NewTestServer(t, []string{"test"}) defer ts.Close() - if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil { + if err := ts.CreateKeyspace(ctx, "test_keyspace", &topo.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } From 4c1630fcf510c44a5065bb28ab14cff42dfcfd82 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Tue, 26 May 2015 15:15:11 -0700 Subject: [PATCH 090/128] java/vtgate-client: Change flags type from int to long. While the wire protocol uses a long/int64 to store the MySQL flags value, the code for processing it did use an "int" (see Bsonify.java). This required downcasting from long to int. To avoid this, we change the processing code. 
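For reference, the wire side this change mirrors: the BSON result encodes Flags as an int64, which is why the client now keeps the full width instead of narrowing it with Ints.checkedCast. A Go sketch of the decoded shape; the field names follow the Bsonify code in this patch, while the surrounding document structure is assumed rather than quoted:

```go
package main

import "fmt"

func main() {
	// What a decoded field document looks like on the wire (assumed shape):
	fieldBson := map[string]interface{}{
		"Name":  []byte("id"),
		"Type":  int64(8),  // VT_LONGLONG
		"Flags": int64(32), // VT_UNSIGNED_FLAG
	}
	flags := fieldBson["Flags"].(int64) // keep the full width; no checked downcast needed
	unsigned := flags&32 != 0
	fmt.Printf("flags=%d unsigned=%v\n", flags, unsigned)
}
```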
--- .../main/java/com/youtube/vitess/vtgate/Field.java | 11 +++++------ .../vitess/vtgate/rpcclient/gorpc/Bsonify.java | 4 ++-- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Field.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Field.java index 3276fcc6ed..d7dd8f6ecb 100644 --- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Field.java +++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/Field.java @@ -3,7 +3,6 @@ package com.youtube.vitess.vtgate; import com.google.common.annotations.VisibleForTesting; import com.google.common.primitives.UnsignedLong; -import com.youtube.vitess.vtgate.Field.Flag; import com.youtube.vitess.vtgate.Row.Cell; import org.apache.commons.lang.CharEncoding; @@ -40,9 +39,9 @@ public class Field { VT_ON_UPDATE_NOW_FLAG(8192), /* Field is set to NOW on UPDATE */ VT_NUM_FLAG(32768); /* Field is num (for clients) */ - public int mysqlFlag; + public long mysqlFlag; - Flag(int mysqlFlag) { + Flag(long mysqlFlag) { this.mysqlFlag = mysqlFlag; } } @@ -100,9 +99,9 @@ public class Field { private String name; private FieldType type; - private int mysqlFlags; + private long mysqlFlags; - public static Field newFieldFromMysql(String name, int mysqlTypeId, int mysqlFlags) { + public static Field newFieldFromMysql(String name, int mysqlTypeId, long mysqlFlags) { for (FieldType ft : FieldType.values()) { if (ft.mysqlType == mysqlTypeId) { return new Field(name, ft, mysqlFlags); @@ -117,7 +116,7 @@ public class Field { return new Field("dummyField", fieldType, flag.mysqlFlag); } - private Field(String name, FieldType type, int mysqlFlags) { + private Field(String name, FieldType type, long mysqlFlags) { this.name = name; this.type = type; this.mysqlFlags = mysqlFlags; diff --git a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java index 966756480f..ecb36899e6 100644 --- a/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java +++ b/java/vtgate-client/src/main/java/com/youtube/vitess/vtgate/rpcclient/gorpc/Bsonify.java @@ -156,10 +156,10 @@ public class Bsonify { BSONObject fieldBson = (BSONObject) field; String fieldName = new String((byte[]) fieldBson.get("Name")); int mysqlType = Ints.checkedCast((Long) fieldBson.get("Type")); - int mysqlFlags = Flag.VT_ZEROVALUE_FLAG.mysqlFlag; + long mysqlFlags = Flag.VT_ZEROVALUE_FLAG.mysqlFlag; Object flags = fieldBson.get("Flags"); if (flags != null) { - mysqlFlags = Ints.checkedCast((Long) flags); + mysqlFlags = (Long) flags; } fieldList.add(Field.newFieldFromMysql(fieldName, mysqlType, mysqlFlags)); } From 1e87496ad44afee67c946c1ce1c7e7a4df5c8d5d Mon Sep 17 00:00:00 2001 From: Joshua Thompson Date: Tue, 26 May 2015 15:50:21 -0700 Subject: [PATCH 091/128] replace gcloud alpha container kubectl with kubectl --- examples/kubernetes/README.md | 2 +- examples/kubernetes/cluster-up.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/kubernetes/README.md b/examples/kubernetes/README.md index 125b017738..afe4e45e85 100644 --- a/examples/kubernetes/README.md +++ b/examples/kubernetes/README.md @@ -30,7 +30,7 @@ If you're running in Container Engine, set the `KUBECTL` environment variable to point to the `gcloud` command: ``` -$ export KUBECTL='gcloud alpha container kubectl' +$ export KUBECTL='kubectl' ``` If you're running Kubernetes manually, 
set the `KUBECTL` environment variable diff --git a/examples/kubernetes/cluster-up.sh b/examples/kubernetes/cluster-up.sh index db79832a89..725ff00fef 100755 --- a/examples/kubernetes/cluster-up.sh +++ b/examples/kubernetes/cluster-up.sh @@ -89,7 +89,7 @@ if [ -z "$GOPATH" ]; then exit -1 fi -export KUBECTL='gcloud alpha container kubectl' +export KUBECTL='kubectl' go get github.com/youtube/vitess/go/cmd/vtctlclient gcloud config set compute/zone $GKE_ZONE project_id=`gcloud config list project | sed -n 2p | cut -d " " -f 3` From 9f02107679c6e5dbecf11ccbd84bc0891c8e0b30 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Tue, 26 May 2015 16:23:13 -0700 Subject: [PATCH 092/128] Check error on Close() and Flush() in Backup. We are probably doing a bad job of checking these errors in a lot of places, but it's particularly important to get it right for backups. We don't worry about Close() on the Reader side. When using an AllErrorRecorder, we don't have to worry about hiding the Write error with a Close error. But we do need to make sure not to call Close as part of saving the arguments of a defer statement. --- go/vt/mysqlctl/backup.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 649b560c22..5db9a31934 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -297,8 +297,7 @@ func backup(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHa return nil } -func backupFiles(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, fes []FileEntry, replicationPosition proto.ReplicationPosition, backupConcurrency int) error { - +func backupFiles(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.BackupHandle, fes []FileEntry, replicationPosition proto.ReplicationPosition, backupConcurrency int) (err error) { sema := sync2.NewSemaphore(backupConcurrency, 0) rec := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} @@ -330,7 +329,7 @@ func backupFiles(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.Bac rec.RecordError(fmt.Errorf("cannot add file: %v", err)) return } - defer wc.Close() + defer func() { rec.RecordError(wc.Close()) }() dst := bufio.NewWriterSize(wc, 2*1024*1024) // create the hasher and the tee on top @@ -358,7 +357,7 @@ func backupFiles(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.Bac } // flush the buffer to finish writing, save the hash - dst.Flush() + rec.RecordError(dst.Flush()) fes[i].Hash = hasher.HashString() }(i, fe) } @@ -373,7 +372,11 @@ func backupFiles(mysqld MysqlDaemon, logger logutil.Logger, bh backupstorage.Bac if err != nil { return fmt.Errorf("cannot add %v to backup: %v", backupManifest, err) } - defer wc.Close() + defer func() { + if closeErr := wc.Close(); err == nil { + err = closeErr + } + }() // JSON-encode and write the MANIFEST bm := &BackupManifest{ @@ -450,7 +453,7 @@ func restoreFiles(cnf *Mycnf, bh backupstorage.BackupHandle, fes []FileEntry, re rec.RecordError(err) return } - defer dstFile.Close() + defer func() { rec.RecordError(dstFile.Close()) }() // create a buffering output dst := bufio.NewWriterSize(dstFile, 2*1024*1024) @@ -468,7 +471,7 @@ func restoreFiles(cnf *Mycnf, bh backupstorage.BackupHandle, fes []FileEntry, re rec.RecordError(err) return } - defer gz.Close() + defer func() { rec.RecordError(gz.Close()) }() // copy the data. 
Will also write to the hasher if _, err = io.Copy(dst, gz); err != nil { @@ -484,7 +487,7 @@ func restoreFiles(cnf *Mycnf, bh backupstorage.BackupHandle, fes []FileEntry, re } // flush the buffer - dst.Flush() + rec.RecordError(dst.Flush()) }(i, fe) } wg.Wait() From c13bf8dd209da0729af2c54bcbac72d4a51ce2ee Mon Sep 17 00:00:00 2001 From: Joshua Thompson Date: Tue, 26 May 2015 16:50:59 -0700 Subject: [PATCH 093/128] Tweak README.md wording --- examples/kubernetes/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/kubernetes/README.md b/examples/kubernetes/README.md index afe4e45e85..4c79fc6125 100644 --- a/examples/kubernetes/README.md +++ b/examples/kubernetes/README.md @@ -27,7 +27,8 @@ $ go get github.com/youtube/vitess/go/cmd/vtctlclient ### Set the path to kubectl If you're running in Container Engine, set the `KUBECTL` environment variable -to point to the `gcloud` command: +to point to the `kubectl` command provided by the Google Cloud SDK (if you've +already added gcloud to your PATH, you likely have kubectl): ``` $ export KUBECTL='kubectl' From 742db23f23a001cda91737f37b0f6a59c82d2f36 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Thu, 21 May 2015 22:31:52 -0700 Subject: [PATCH 094/128] add LocalController to apply schema changes in the local file system. 1. A LocalController monitors a given directory; it finds a schema change sql file there and applies it to its keyspace. 2. If a schema change is applied successfully, it moves the original sql file to a "complete" dir and the corresponding execution log to a "log" dir. If it fails, the original sql file is moved to an "error" dir. 3. Add a test case to test/schema.py that creates a sql file in the schema change dir and verifies the change is picked up by vtctld. --- go/cmd/vtctld/vtctld.go | 18 +- go/vt/schemamanager/local_controller.go | 220 +++++++++++++++++++ go/vt/schemamanager/local_controller_test.go | 204 +++++++++++++++++ go/vt/schemamanager/schemamanager.go | 24 +- go/vt/schemamanager/tablet_executor.go | 3 + test/schema.py | 25 +++ test/utils.py | 4 + 7 files changed, 485 insertions(+), 13 deletions(-) create mode 100644 go/vt/schemamanager/local_controller.go create mode 100644 go/vt/schemamanager/local_controller_test.go diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go index b57d96ff31..196d7aff66 100644 --- a/go/cmd/vtctld/vtctld.go +++ b/go/cmd/vtctld/vtctld.go @@ -21,10 +21,11 @@ import ( ) var ( - templateDir = flag.String("templates", "", "directory containing templates") - debug = flag.Bool("debug", false, "recompile templates for every request") - schemaChangeDir = flag.String("schema-change-dir", "", "directory contains schema changes for all keyspaces. Each keyspace has its own directory and schema changes are expected to live in '$KEYSPACE/input' dir. e.g.
test_keyspace/input/*sql, each sql file represents a schema change") + schemaChangeController = flag.String("schema-change-controller", "", "schema change controller is responsible for finding schema changes and responsing schema change events") + schemaChangeCheckInterval = flag.Int("schema-change-check-interval", 60, "this value decides how often we check schema change dir, in seconds") ) func init() { @@ -504,7 +505,11 @@ func main() { ) }) if *schemaChangeDir != "" { - timer := timer.NewTimer(1 * time.Minute) + interval := 60 + if *schemaChangeCheckInterval > 0 { + interval = *schemaChangeCheckInterval + } + timer := timer.NewTimer(time.Duration(interval) * time.Second) controllerFactory, err := schemamanager.GetControllerFactory(*schemaChangeController) if err != nil { @@ -520,11 +525,12 @@ func main() { return } - schemamanager.Run( + err = schemamanager.Run( controller, schemamanager.NewTabletExecutor( tmclient.NewTabletManagerClient(), ts), ) + log.Errorf("Schema change failed, error: %v", err) }) servenv.OnClose(func() { timer.Stop() }) } diff --git a/go/vt/schemamanager/local_controller.go b/go/vt/schemamanager/local_controller.go new file mode 100644 index 0000000000..b7183a6089 --- /dev/null +++ b/go/vt/schemamanager/local_controller.go @@ -0,0 +1,220 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "time" + + log "github.com/golang/glog" +) + +// LocalController listens to the specified schema change dir and applies schema changes. +// schema change dir lay out +// | +// |----keyspace_01 +// |----input +// |---- create_test_table.sql +// |---- alter_test_table_02.sql +// |---- ... +// |----complete // contains completed schema changes in yyyy/MM/dd +// |----2015 +// |----01 +// |----01 +// |--- create_table_table_02.sql +// |----log // contains detailed execution information about schema changes +// |----2015 +// |----01 +// |----01 +// |--- create_table_table_02.sql +// |----error // contains failed schema changes +// |----2015 +// |----01 +// |----01 +// |--- create_table_table_03.sql +// Schema Change Files: ${keyspace}/input/*.sql +// Error Files: ${keysapce}/error/${YYYY}/${MM}/${DD}/*.sql +// Log Files: ${keysapce}/log/${YYYY}/${MM}/${DD}/*.sql +// Complete Files: ${keysapce}/compelte/${YYYY}/${MM}/${DD}/*.sql +type LocalController struct { + schemaChangeDir string + keyspace string + sqlPath string + sqlFilename string + errorDir string + logDir string + completeDir string +} + +// NewLocalController creates a new LocalController instance. +func NewLocalController(schemaChangeDir string) *LocalController { + return &LocalController{ + schemaChangeDir: schemaChangeDir, + } +} + +// Open goes through the schema change dir and find a keyspace with a pending +// schema change. +func (controller *LocalController) Open() error { + // find all keyspace directories. 
+ fileInfos, err := ioutil.ReadDir(controller.schemaChangeDir) + if err != nil { + return err + } + for _, fileinfo := range fileInfos { + if !fileinfo.IsDir() { + continue + } + dirpath := path.Join(controller.schemaChangeDir, fileinfo.Name()) + schemaChanges, err := ioutil.ReadDir(path.Join(dirpath, "input")) + if err != nil { + log.Warningf("there is no input dir in %s", dirpath) + continue + } + // found a schema change + if len(schemaChanges) > 0 { + controller.keyspace = fileinfo.Name() + controller.sqlFilename = schemaChanges[0].Name() + controller.sqlPath = path.Join(dirpath, "input", schemaChanges[0].Name()) + + currentTime := time.Now() + datePart := fmt.Sprintf( + "%d/%d/%d", + currentTime.Year(), + currentTime.Month(), + currentTime.Day()) + + controller.errorDir = path.Join(dirpath, "error", datePart) + controller.completeDir = path.Join(dirpath, "complete", datePart) + controller.logDir = path.Join(dirpath, "log", datePart) + // the remaining schema changes will be picked up by subsequent runs + break + } + } + return nil +} + +// Read reads schema changes. +func (controller *LocalController) Read() ([]string, error) { + if controller.keyspace == "" || controller.sqlPath == "" { + return []string{}, nil + } + data, err := ioutil.ReadFile(controller.sqlPath) + if err != nil { + return nil, err + } + return strings.Split(string(data), ";"), nil +} + +// GetKeyspace returns current keyspace that is ready for applying schema change. +func (controller *LocalController) GetKeyspace() string { + return controller.keyspace +} + +// Close resets keyspace, sqlPath, errorDir, logDir and completeDir. +func (controller *LocalController) Close() { + controller.keyspace = "" + controller.sqlPath = "" + controller.sqlFilename = "" + controller.errorDir = "" + controller.logDir = "" + controller.completeDir = "" +} + +// OnReadSuccess is a no-op. +func (controller *LocalController) OnReadSuccess() error { + return nil +} + +// OnReadFail logs the read error and is otherwise a no-op. +func (controller *LocalController) OnReadFail(err error) error { + log.Errorf("failed to read file: %s, error: %v", controller.sqlPath, err) + return nil +} + +// OnValidationSuccess is a no-op. +func (controller *LocalController) OnValidationSuccess() error { + return nil +} + +// OnValidationFail moves the sql file to the error dir. +func (controller *LocalController) OnValidationFail(err error) error { + return controller.moveToErrorDir() +} + +// OnExecutorComplete writes the execution log and moves the sql file to the +// complete dir on success, or to the error dir if any shard failed. +func (controller *LocalController) OnExecutorComplete(result *ExecuteResult) error { + if len(result.FailedShards) > 0 || result.ExecutorErr != "" { + return controller.moveToErrorDir() + } + if err := os.MkdirAll(controller.completeDir, os.ModePerm); err != nil { + return err + } + if err := os.MkdirAll(controller.logDir, os.ModePerm); err != nil { + return err + } + + if err := controller.writeToLogDir(result); err != nil { + return err + } + + return os.Rename( + controller.sqlPath, + path.Join(controller.completeDir, controller.sqlFilename)) +} + +func (controller *LocalController) moveToErrorDir() error { + if err := os.MkdirAll(controller.errorDir, os.ModePerm); err != nil { + return err + } + return os.Rename( + controller.sqlPath, + path.Join(controller.errorDir, controller.sqlFilename)) +} + +func (controller *LocalController) writeToLogDir(result *ExecuteResult) error { + logFile, err := os.Create(path.Join(controller.logDir, controller.sqlFilename)) + if err != nil { + return err + } + defer logFile.Close() + + logFile.WriteString(fmt.Sprintf("-- new file: %s\n", controller.sqlPath)) + for _, sql :=
range result.Sqls { + logFile.WriteString(sql) + logFile.WriteString(";\n") + } + rowsReturned := uint64(0) + rowsAffected := uint64(0) + for _, queryResult := range result.SuccessShards { + rowsReturned += uint64(len(queryResult.Result.Rows)) + rowsAffected += queryResult.Result.RowsAffected + } + logFile.WriteString(fmt.Sprintf("-- Rows returned: %d\n", rowsReturned)) + logFile.WriteString(fmt.Sprintf("-- Rows affected: %d\n", rowsAffected)) + logFile.WriteString("-- \n") + logFile.WriteString(fmt.Sprintf("-- ran in %fs\n", result.TotalTimeSpent.Seconds())) + logFile.WriteString("-- Execution succeeded\n") + return nil +} + +var _ Controller = (*LocalController)(nil) + +func init() { + RegisterControllerFactory( + "local", + func(params map[string]string) (Controller, error) { + schemaChangeDir, ok := params[SchemaChangeDirName] + if !ok { + return nil, fmt.Errorf("unable to construct a LocalController instance because param: %s is missing in params: %v", SchemaChangeDirName, params) + } + return NewLocalController(schemaChangeDir), nil + }, + ) +} diff --git a/go/vt/schemamanager/local_controller_test.go b/go/vt/schemamanager/local_controller_test.go new file mode 100644 index 0000000000..285204841c --- /dev/null +++ b/go/vt/schemamanager/local_controller_test.go @@ -0,0 +1,204 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemamanager + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "reflect" + "strings" + "testing" + + mproto "github.com/youtube/vitess/go/mysql/proto" +) + +func TestLocalControllerNoSchemaChanges(t *testing.T) { + schemaChangeDir, err := ioutil.TempDir("", "localcontroller-test") + defer os.RemoveAll(schemaChangeDir) + if err != nil { + t.Fatalf("failed to create temp schema change dir, error: %v", err) + } + controller := NewLocalController(schemaChangeDir) + if err := controller.Open(); err != nil { + t.Fatalf("Open should succeed, but got error: %v", err) + } + defer controller.Close() + data, err := controller.Read() + if err != nil { + t.Fatalf("Read should succeed, but got error: %v", err) + } + if len(data) != 0 { + t.Fatalf("there is no schema change, Read should return empty data") + } +} + +func TestLocalControllerOpen(t *testing.T) { + controller := NewLocalController("") + + if err := controller.Open(); err == nil { + t.Fatalf("Open should fail, no such dir") + } + + schemaChangeDir, err := ioutil.TempDir("", "localcontroller-test") + defer os.RemoveAll(schemaChangeDir) + + // create a file under schema change dir + _, err = os.Create(path.Join(schemaChangeDir, "create_test_table.sql")) + if err != nil { + t.Fatalf("failed to create sql file, error: %v", err) + } + + controller = NewLocalController(schemaChangeDir) + if err := controller.Open(); err != nil { + t.Fatalf("Open should succeed") + } + data, err := controller.Read() + if err != nil { + t.Fatalf("Read should succeed, but got error: %v", err) + } + if len(data) != 0 { + t.Fatalf("there is no schema change, Read should return empty data") + } + controller.Close() + + testKeyspaceDir := path.Join(schemaChangeDir, "test_keyspace") + if err := os.MkdirAll(testKeyspaceDir, os.ModePerm); err != nil { + t.Fatalf("failed to create test_keyspace dir, error: %v", err) + } + + controller = NewLocalController(schemaChangeDir) + if err := controller.Open(); err != nil { + t.Fatalf("Open should succeed") + } + data, err = controller.Read() + if err != nil { + t.Fatalf("Read should 
succeed, but got error: %v", err) + } + if len(data) != 0 { + t.Fatalf("there is no schema change, Read should return empty data") + } + controller.Close() +} + +func TestLocalControllerSchemaChange(t *testing.T) { + schemaChangeDir, err := ioutil.TempDir("", "localcontroller-test") + if err != nil { + t.Fatalf("failed to create temp schema change dir, error: %v", err) + } + defer os.RemoveAll(schemaChangeDir) + + testKeyspaceInputDir := path.Join(schemaChangeDir, "test_keyspace/input") + if err := os.MkdirAll(testKeyspaceInputDir, os.ModePerm); err != nil { + t.Fatalf("failed to create test_keyspace dir, error: %v", err) + } + + file, err := os.Create(path.Join(testKeyspaceInputDir, "create_test_table.sql")) + if err != nil { + t.Fatalf("failed to create sql file, error: %v", err) + } + + sqls := []string{ + "create table test_table_01 (id int)", + "create table test_table_02 (id string)", + } + + file.WriteString(strings.Join(sqls, ";")) + file.Close() + + controller := NewLocalController(schemaChangeDir) + if err := controller.Open(); err != nil { + t.Fatalf("Open should succeed, but got error: %v", err) + } + + defer controller.Close() + + data, err := controller.Read() + if err != nil { + t.Fatalf("Read should succeed, but got error: %v", err) + } + + if !reflect.DeepEqual(sqls, data) { + t.Fatalf("expect to get sqls: %v, but got: %v", sqls, data) + } + + if controller.GetKeyspace() != "test_keyspace" { + t.Fatalf("expect to get keyspace: 'test_keyspace', but got: '%s'", + controller.GetKeyspace()) + } + + // test various callbacks + if err := controller.OnReadSuccess(); err != nil { + t.Fatalf("OnReadSuccess should succeed, but got error: %v", err) + } + + if err := controller.OnReadFail(fmt.Errorf("read fail")); err != nil { + t.Fatalf("OnReadFail should succeed, but got error: %v", err) + } + + errorPath := path.Join(controller.errorDir, controller.sqlFilename) + + if err := controller.OnValidationSuccess(); err != nil { + t.Fatalf("OnValidationSuccess should succeed, but got error: %v", err) + } + + // move sql file from error dir to input dir for OnValidationFail test + os.Rename(errorPath, controller.sqlPath) + + if err := controller.OnValidationFail(fmt.Errorf("validation fail")); err != nil { + t.Fatalf("OnValidationFail should succeed, but got error: %v", err) + } + + if _, err := os.Stat(errorPath); os.IsNotExist(err) { + t.Fatalf("sql file should be moved to error dir, error: %v", err) + } + + // move sql file from error dir to input dir for OnExecutorComplete test + os.Rename(errorPath, controller.sqlPath) + + result := &ExecuteResult{ + Sqls: []string{"create table test_table (id int)"}, + SuccessShards: []ShardResult{ + ShardResult{ + Shard: "0", + Result: &mproto.QueryResult{}, + }, + }, + } + logPath := path.Join(controller.logDir, controller.sqlFilename) + completePath := path.Join(controller.completeDir, controller.sqlFilename) + if err := controller.OnExecutorComplete(result); err != nil { + t.Fatalf("OnExecutorComplete should succeed, but got error: %v", err) + } + if _, err := os.Stat(completePath); os.IsNotExist(err) { + t.Fatalf("sql file should be moved to complete dir, error: %v", err) + } + + if _, err := os.Stat(logPath); os.IsNotExist(err) { + t.Fatalf("execution log should be written to log dir, error: %v", err) + } + + // move sql file from complete dir back to input dir for the failed OnExecutorComplete test + os.Rename(completePath, controller.sqlPath) + + result = &ExecuteResult{ + Sqls: []string{"create table test_table (id int)"}, + FailedShards: []ShardWithError{ + ShardWithError{
+ Shard: "0", + Err: "execute error", + }, + }, + } + + if err := controller.OnExecutorComplete(result); err != nil { + t.Fatalf("OnExecutorComplete should succeed, but got error: %v", err) + } + + if _, err := os.Stat(errorPath); os.IsNotExist(err) { + t.Fatalf("sql file should be moved to error dir, error: %v", err) + } +} diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index d039ccf48b..cd54f55aa1 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -7,12 +7,14 @@ package schemamanager import ( "encoding/json" "fmt" + "time" log "github.com/golang/glog" mproto "github.com/youtube/vitess/go/mysql/proto" ) const ( + // SchemaChangeDirName is the key name in the ControllerFactory params. SchemaChangeDirName = "schema_change_dir" ) @@ -48,11 +50,12 @@ type Executor interface { // ExecuteResult contains information about schema management state type ExecuteResult struct { - FailedShards []ShardWithError - SuccessShards []ShardResult - CurSqlIndex int - Sqls []string - ExecutorErr string + FailedShards []ShardWithError + SuccessShards []ShardResult + CurSqlIndex int + Sqls []string + ExecutorErr string + TotalTimeSpent time.Duration } // ShardWithError contains information why a shard failed to execute given sql @@ -93,9 +96,16 @@ func Run(controller Controller, executor Executor) error { controller.OnValidationFail(err) return err } - controller.OnValidationSuccess() + + if err := controller.OnValidationSuccess(); err != nil { + return err + } + result := executor.Execute(sqls) - controller.OnExecutorComplete(result) + + if err := controller.OnExecutorComplete(result); err != nil { + return err + } if result.ExecutorErr != "" || len(result.FailedShards) > 0 { out, _ := json.MarshalIndent(result, "", " ") return fmt.Errorf("Schema change failed, ExecuteResult: %v\n", string(out)) diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 6369c14c50..6ce3271eb3 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -7,6 +7,7 @@ package schemamanager import ( "fmt" "sync" + "time" log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/mysqlctl/proto" @@ -155,6 +156,8 @@ func (exec *TabletExecutor) Execute(sqls []string) *ExecuteResult { execResult.ExecutorErr = "executor is closed" return &execResult } + startTime := time.Now() + defer func() { execResult.TotalTimeSpent = time.Since(startTime) }() // make sure every schema change introduces a table definition change if err := exec.preflightSchemaChanges(sqls); err != nil { diff --git a/test/schema.py b/test/schema.py index daa37c5970..0554992232 100755 --- a/test/schema.py +++ b/test/schema.py @@ -2,6 +2,8 @@ import logging import unittest +import os +import time import environment import utils @@ -89,6 +91,9 @@ class TestSchema(unittest.TestCase): # run checks now before we start the tablets utils.validate_topology() + + utils.Vtctld().start() + # create databases, start the tablets for t in [shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1, @@ -212,5 +217,25 @@ class TestSchema(unittest.TestCase): self.assertEqual(shard_0_schema, shard_1_schema) self.assertEqual(shard_0_schema, shard_2_schema) + # test schema changes + os.makedirs(os.path.join(utils.vtctld.schema_change_dir, test_keyspace)) + input_path = os.path.join(utils.vtctld.schema_change_dir, test_keyspace, "input") + os.makedirs(input_path) + 
sql_path = os.path.join(input_path, "create_test_table_x.sql") + with open(sql_path, 'w') as handler: + handler.write("create table test_table_x (id int)") + + timeout = 10 + # wait until this sql file being consumed by autoschema + while os.path.isfile(sql_path): + timeout = utils.wait_step('waiting for vtctld to pick up schema changes', + timeout, + sleep_time=0.2) + + # check number of tables + self._check_tables(shard_0_master, 5) + self._check_tables(shard_1_master, 5) + self._check_tables(shard_2_master, 5) + if __name__ == '__main__': utils.main() diff --git a/test/utils.py b/test/utils.py index dc8b5b82f9..bf30ef9fcb 100644 --- a/test/utils.py +++ b/test/utils.py @@ -764,6 +764,7 @@ class Vtctld(object): def __init__(self): self.port = environment.reserve_ports(1) + self.schema_change_dir = os.path.join(environment.tmproot, 'schema_change_test') if protocols_flavor().vtctl_client_protocol() == "grpc": self.grpc_port = environment.reserve_ports(1) @@ -786,6 +787,9 @@ class Vtctld(object): '-templates', environment.vttop + '/go/cmd/vtctld/templates', '-log_dir', environment.vtlogroot, '-port', str(self.port), + '-schema-change-dir', self.schema_change_dir, + '-schema-change-controller', 'local', + '-schema-change-check-interval', '1', ] + \ environment.topo_server().flags() + \ protocols_flavor().tablet_manager_protocol_flags() From 29707d37230a05b3c79aa17f59cc95af0f7486ac Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 26 May 2015 15:36:42 -0700 Subject: [PATCH 095/128] add schema change user flag in vtctld --- go/cmd/vtctld/vtctld.go | 2 ++ go/vt/schemamanager/schemamanager.go | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go index 196d7aff66..3447c0f9b4 100644 --- a/go/cmd/vtctld/vtctld.go +++ b/go/cmd/vtctld/vtctld.go @@ -26,6 +26,7 @@ var ( schemaChangeDir = flag.String("schema-change-dir", "", "directory contains schema changes for all keyspaces. Each keyspace has its own directory and schema changes are expected to live in '$KEYSPACE/input' dir. e.g. test_keyspace/input/*sql, each sql file represents a schema change") schemaChangeController = flag.String("schema-change-controller", "", "schema change controller is responsible for finding schema changes and responsing schema change events") schemaChangeCheckInterval = flag.Int("schema-change-check-interval", 60, "this value decides how often we check schema change dir, in seconds") + schemaChangeUser = flag.String("schema-change-user", "", "The user who submits this schema change.") ) func init() { @@ -519,6 +520,7 @@ func main() { timer.Start(func() { controller, err := controllerFactory(map[string]string{ schemamanager.SchemaChangeDirName: *schemaChangeDir, + schemamanager.SchemaChangeUser: *schemaChangeUser, }) if err != nil { log.Errorf("failed to get controller, error: %v", err) diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index cd54f55aa1..ff93ee423c 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -15,7 +15,11 @@ import ( const ( // SchemaChangeDirName is the key name in the ControllerFactory params. + // It specifies the schema change directory. SchemaChangeDirName = "schema_change_dir" + // SchemaChangeUser is the key name in the ControllerFactory params. + // It specifies the user who submits this schema change. + SchemaChangeUser = "schema_change_user" ) // ControllerFactory takes a set params and construct a Controller instance. 
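[Editor's note] The two halves of this patch meet in the params map: vtctld fills in the dir and user keys from its flags, and a registered factory reads them back. A minimal sketch of a factory that consumes the new user key, assuming only the RegisterControllerFactory API and the param constants shown in these patches; the factory name "local_with_user" and the logging are invented for illustration:

```go
package schemamanager

import (
	"fmt"

	log "github.com/golang/glog"
)

// Sketch only: a variant of the "local" factory that also reads the
// optional schema_change_user param added in this patch.
func init() {
	RegisterControllerFactory(
		"local_with_user", // hypothetical name, not part of the patch
		func(params map[string]string) (Controller, error) {
			schemaChangeDir, ok := params[SchemaChangeDirName]
			if !ok {
				return nil, fmt.Errorf("missing param: %s", SchemaChangeDirName)
			}
			// SchemaChangeUser is empty when -schema-change-user is unset.
			if user := params[SchemaChangeUser]; user != "" {
				log.Infof("schema change submitted by %s", user)
			}
			return NewLocalController(schemaChangeDir), nil
		},
	)
}
```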
From 4234d8dc5031d9e087d8d317d0e6e8ad5d4bf109 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Tue, 26 May 2015 16:00:23 -0700 Subject: [PATCH 096/128] rename schemamanager.Controller.GetKeyspace to Keyspace --- go/vt/schemamanager/local_controller.go | 4 ++-- go/vt/schemamanager/local_controller_test.go | 4 ++-- go/vt/schemamanager/plain_controller.go | 4 ++-- go/vt/schemamanager/plain_controller_test.go | 2 +- go/vt/schemamanager/schemamanager.go | 4 ++-- go/vt/schemamanager/schemamanager_test.go | 2 +- go/vt/schemamanager/ui_controller.go | 4 ++-- go/vt/schemamanager/ui_controller_test.go | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/go/vt/schemamanager/local_controller.go b/go/vt/schemamanager/local_controller.go index b7183a6089..1306e4d8f2 100644 --- a/go/vt/schemamanager/local_controller.go +++ b/go/vt/schemamanager/local_controller.go @@ -112,8 +112,8 @@ func (controller *LocalController) Read() ([]string, error) { return strings.Split(string(data), ";"), nil } -// GetKeyspace returns current keyspace that is ready for applying schema change. -func (controller *LocalController) GetKeyspace() string { +// Keyspace returns current keyspace that is ready for applying schema change. +func (controller *LocalController) Keyspace() string { return controller.keyspace } diff --git a/go/vt/schemamanager/local_controller_test.go b/go/vt/schemamanager/local_controller_test.go index 285204841c..9df601ec41 100644 --- a/go/vt/schemamanager/local_controller_test.go +++ b/go/vt/schemamanager/local_controller_test.go @@ -125,9 +125,9 @@ func TestLocalControllerSchemaChange(t *testing.T) { t.Fatalf("expect to get sqls: %v, but got: %v", sqls, data) } - if controller.GetKeyspace() != "test_keyspace" { + if controller.Keyspace() != "test_keyspace" { t.Fatalf("expect to get keyspace: 'test_keyspace', but got: '%s'", - controller.GetKeyspace()) + controller.Keyspace()) } // test various callbacks diff --git a/go/vt/schemamanager/plain_controller.go b/go/vt/schemamanager/plain_controller.go index 5b97b132ed..40c7185487 100644 --- a/go/vt/schemamanager/plain_controller.go +++ b/go/vt/schemamanager/plain_controller.go @@ -45,8 +45,8 @@ func (controller *PlainController) Read() ([]string, error) { func (controller *PlainController) Close() { } -// GetKeyspace returns keyspace to apply schema. -func (controller *PlainController) GetKeyspace() string { +// Keyspace returns keyspace to apply schema. 
+func (controller *PlainController) Keyspace() string { return controller.keyspace } diff --git a/go/vt/schemamanager/plain_controller_test.go b/go/vt/schemamanager/plain_controller_test.go index 7816d23ad1..9a3d9554d1 100644 --- a/go/vt/schemamanager/plain_controller_test.go +++ b/go/vt/schemamanager/plain_controller_test.go @@ -17,7 +17,7 @@ func TestPlainController(t *testing.T) { t.Fatalf("controller.Open should succeed, but got error: %v", err) } - keyspace := controller.GetKeyspace() + keyspace := controller.Keyspace() if keyspace != "test_keyspace" { t.Fatalf("expect to get keyspace: 'test_keyspace', but got keyspace: '%s'", keyspace) } diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index ff93ee423c..30ac7a14e6 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -36,7 +36,7 @@ type Controller interface { Open() error Read() (sqls []string, err error) Close() - GetKeyspace() string + Keyspace() string OnReadSuccess() error OnReadFail(err error) error OnValidationSuccess() error @@ -89,7 +89,7 @@ func Run(controller Controller, executor Executor) error { } controller.OnReadSuccess() - keyspace := controller.GetKeyspace() + keyspace := controller.Keyspace() if err := executor.Open(keyspace); err != nil { log.Errorf("failed to open executor: %v", err) return err diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index aeb593b9ed..3bd74ef510 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -467,7 +467,7 @@ func (controller *fakeController) Read() ([]string, error) { func (controller *fakeController) Close() { } -func (controller *fakeController) GetKeyspace() string { +func (controller *fakeController) Keyspace() string { return controller.keyspace } diff --git a/go/vt/schemamanager/ui_controller.go b/go/vt/schemamanager/ui_controller.go index 651190afc3..d728039160 100644 --- a/go/vt/schemamanager/ui_controller.go +++ b/go/vt/schemamanager/ui_controller.go @@ -52,8 +52,8 @@ func (controller *UIController) Read() ([]string, error) { func (controller *UIController) Close() { } -// GetKeyspace returns keyspace to apply schema. -func (controller *UIController) GetKeyspace() string { +// Keyspace returns keyspace to apply schema. +func (controller *UIController) Keyspace() string { return controller.keyspace } diff --git a/go/vt/schemamanager/ui_controller_test.go b/go/vt/schemamanager/ui_controller_test.go index 9be646ed7d..475a8645d2 100644 --- a/go/vt/schemamanager/ui_controller_test.go +++ b/go/vt/schemamanager/ui_controller_test.go @@ -20,7 +20,7 @@ func TestUIController(t *testing.T) { t.Fatalf("controller.Open should succeed, but got error: %v", err) } - keyspace := controller.GetKeyspace() + keyspace := controller.Keyspace() if keyspace != "test_keyspace" { t.Fatalf("expect to get keyspace: 'test_keyspace', but got keyspace: '%s'", keyspace) } From 4e0db81d7daadc286355d0d4ebb86936e15f21d9 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 07:38:45 -0700 Subject: [PATCH 097/128] Removing topo/faketopo/fixture.go, has a nasty dependency on wrangler. It was only used in one place, copying the relevant code there. For higher level unit tests, wrangler/testlib is preferred. 
Signed-off-by: Alain Jobart --- go/vt/topo/test/faketopo/fixture.go | 110 ---------------------------- 1 file changed, 110 deletions(-) delete mode 100644 go/vt/topo/test/faketopo/fixture.go diff --git a/go/vt/topo/test/faketopo/fixture.go b/go/vt/topo/test/faketopo/fixture.go deleted file mode 100644 index 88c8b6211b..0000000000 --- a/go/vt/topo/test/faketopo/fixture.go +++ /dev/null @@ -1,110 +0,0 @@ -package faketopo - -import ( - "fmt" - "testing" - "time" - - "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/mysqlctl" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/wrangler" - "golang.org/x/net/context" -) - -const ( - // TestShard is the shard we use in tests - TestShard = "0" - - // TestKeyspace is the keyspace we use in tests - TestKeyspace = "test_keyspace" -) - -func newKeyRange(value string) key.KeyRange { - _, result, err := topo.ValidateShardName(value) - if err != nil { - panic(err) - } - return result -} - -type tabletPack struct { - *topo.Tablet - mysql *mysqlctl.FakeMysqlDaemon -} - -// Fixture is a fixture that provides a fresh topology, to which you -// can add tablets that react to events and have fake MySQL -// daemons. It uses an in memory fake ZooKeeper to store its -// data. When you are done with the fixture you have to call its -// TearDown method. -type Fixture struct { - *testing.T - tablets map[int]*tabletPack - done chan struct{} - Topo topo.Server - Wrangler *wrangler.Wrangler -} - -// New creates a topology fixture. -func New(t *testing.T, logger logutil.Logger, ts topo.Server, cells []string) *Fixture { - wr := wrangler.New(logger, ts, tmclient.NewTabletManagerClient(), 1*time.Second) - - return &Fixture{ - T: t, - Topo: ts, - Wrangler: wr, - done: make(chan struct{}, 1), - tablets: make(map[int]*tabletPack), - } -} - -// TearDown releases any resources used by the fixture. -func (fix *Fixture) TearDown() { - close(fix.done) -} - -// AddTablet adds a new tablet to the topology and starts its event -// loop. -func (fix *Fixture) AddTablet(uid int, cell string, tabletType topo.TabletType) *topo.Tablet { - tablet := &topo.Tablet{ - Alias: topo.TabletAlias{Cell: cell, Uid: uint32(uid)}, - Hostname: fmt.Sprintf("%vbsr%v", cell, uid), - IPAddr: fmt.Sprintf("212.244.218.%v", uid), - Portmap: map[string]int{ - "vt": 3333 + 10*uid, - "mysql": 3334 + 10*uid, - }, - Keyspace: TestKeyspace, - Type: tabletType, - Shard: TestShard, - KeyRange: newKeyRange(TestShard), - } - - if err := fix.Wrangler.InitTablet(context.Background(), tablet, true, true, false); err != nil { - fix.Fatalf("CreateTablet: %v", err) - } - mysqlDaemon := &mysqlctl.FakeMysqlDaemon{} - mysqlDaemon.MysqlPort = 3334 + 10*uid - - pack := &tabletPack{Tablet: tablet, mysql: mysqlDaemon} - fix.tablets[uid] = pack - - return tablet -} - -// GetTablet returns a fresh copy of the tablet identified by uid. -func (fix *Fixture) GetTablet(uid int) *topo.TabletInfo { - tablet, ok := fix.tablets[uid] - if !ok { - panic("bad tablet uid") - } - ti, err := fix.Topo.GetTablet(context.Background(), tablet.Alias) - if err != nil { - fix.Fatalf("GetTablet %v: %v", tablet.Alias, err) - } - return ti - -} From 8e3d48386d9cad3d3e135b62dece6b5762df9f00 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 07:39:56 -0700 Subject: [PATCH 098/128] Missed in previous commit. 
--- go/vt/topotools/rebuild_test.go | 67 ++++++++++++++++++++++++--------- go/vt/topotools/shard_test.go | 8 ---- 2 files changed, 49 insertions(+), 26 deletions(-) diff --git a/go/vt/topotools/rebuild_test.go b/go/vt/topotools/rebuild_test.go index fd2b5b433c..8d5f0791de 100644 --- a/go/vt/topotools/rebuild_test.go +++ b/go/vt/topotools/rebuild_test.go @@ -5,6 +5,7 @@ package topotools_test import ( + "fmt" "strings" "testing" "time" @@ -13,13 +14,41 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/test/faketopo" "github.com/youtube/vitess/go/vt/zktopo" _ "github.com/youtube/vitess/go/vt/tabletmanager/gorpctmclient" . "github.com/youtube/vitess/go/vt/topotools" ) +const ( + testShard = "0" + testKeyspace = "test_keyspace" +) + +func addTablet(ctx context.Context, t *testing.T, ts topo.Server, uid int, cell string, tabletType topo.TabletType) *topo.TabletInfo { + tablet := &topo.Tablet{ + Alias: topo.TabletAlias{Cell: cell, Uid: uint32(uid)}, + Hostname: fmt.Sprintf("%vbsr%v", cell, uid), + IPAddr: fmt.Sprintf("212.244.218.%v", uid), + Portmap: map[string]int{ + "vt": 3333 + 10*uid, + "mysql": 3334 + 10*uid, + }, + Keyspace: testKeyspace, + Type: tabletType, + Shard: testShard, + } + if err := topo.CreateTablet(ctx, ts, tablet); err != nil { + t.Fatalf("CreateTablet: %v", err) + } + + ti, err := ts.GetTablet(ctx, tablet.Alias) + if err != nil { + t.Fatalf("GetTablet: %v", err) + } + return ti +} + func TestRebuildShardRace(t *testing.T) { ctx := context.Background() cells := []string{"test_cell"} @@ -27,28 +56,32 @@ func TestRebuildShardRace(t *testing.T) { // Set up topology. ts := zktopo.NewTestServer(t, cells) - f := faketopo.New(t, logger, ts, cells) - defer f.TearDown() + si, err := GetOrCreateShard(ctx, ts, testKeyspace, testShard) + if err != nil { + t.Fatalf("GetOrCreateShard: %v", err) + } + si.Cells = append(si.Cells, cells[0]) + if err := topo.UpdateShard(ctx, ts, si); err != nil { + t.Fatalf("UpdateShard: %v", err) + } - keyspace := faketopo.TestKeyspace - shard := faketopo.TestShard - f.AddTablet(1, "test_cell", topo.TYPE_MASTER) - f.AddTablet(2, "test_cell", topo.TYPE_REPLICA) + masterInfo := addTablet(ctx, t, ts, 1, cells[0], topo.TYPE_MASTER) + replicaInfo := addTablet(ctx, t, ts, 2, cells[0], topo.TYPE_REPLICA) // Do an initial rebuild. - if _, err := RebuildShard(ctx, logger, f.Topo, keyspace, shard, cells, time.Minute); err != nil { + if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil { t.Fatalf("RebuildShard: %v", err) } // Check initial state. - ep, err := ts.GetEndPoints(ctx, cells[0], keyspace, shard, topo.TYPE_MASTER) + ep, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_MASTER) if err != nil { t.Fatalf("GetEndPoints: %v", err) } if got, want := len(ep.Entries), 1; got != want { t.Fatalf("len(Entries) = %v, want %v", got, want) } - ep, err = ts.GetEndPoints(ctx, cells[0], keyspace, shard, topo.TYPE_REPLICA) + ep, err = ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_REPLICA) if err != nil { t.Fatalf("GetEndPoints: %v", err) } @@ -70,15 +103,14 @@ func TestRebuildShardRace(t *testing.T) { } } - // Make a change and start a rebuild that will stall when it tries to get - // the SrvShard lock. - masterInfo := f.GetTablet(1) + // Make a change and start a rebuild that will stall when it + // tries to get the SrvShard lock. 
masterInfo.Type = topo.TYPE_SPARE if err := topo.UpdateTablet(ctx, ts, masterInfo); err != nil { t.Fatalf("UpdateTablet: %v", err) } go func() { - if _, err := RebuildShard(ctx, logger, f.Topo, keyspace, shard, cells, time.Minute); err != nil { + if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil { t.Fatalf("RebuildShard: %v", err) } close(done) @@ -89,12 +121,11 @@ func TestRebuildShardRace(t *testing.T) { // While the first rebuild is stalled, make another change and start a rebuild // that doesn't stall. - replicaInfo := f.GetTablet(2) replicaInfo.Type = topo.TYPE_SPARE if err := topo.UpdateTablet(ctx, ts, replicaInfo); err != nil { t.Fatalf("UpdateTablet: %v", err) } - if _, err := RebuildShard(ctx, logger, f.Topo, keyspace, shard, cells, time.Minute); err != nil { + if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil { t.Fatalf("RebuildShard: %v", err) } @@ -104,10 +135,10 @@ func TestRebuildShardRace(t *testing.T) { <-done // Check that the rebuild picked up both changes. - if _, err := ts.GetEndPoints(ctx, cells[0], keyspace, shard, topo.TYPE_MASTER); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { + if _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_MASTER); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { t.Errorf("first change wasn't picked up by second rebuild") } - if _, err := ts.GetEndPoints(ctx, cells[0], keyspace, shard, topo.TYPE_REPLICA); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { + if _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_REPLICA); err == nil || !strings.Contains(err.Error(), "node doesn't exist") { t.Errorf("second change was overwritten by first rebuild finishing late") } } diff --git a/go/vt/topotools/shard_test.go b/go/vt/topotools/shard_test.go index 445f4cd80f..ceb045df6d 100644 --- a/go/vt/topotools/shard_test.go +++ b/go/vt/topotools/shard_test.go @@ -11,9 +11,7 @@ import ( "testing" "time" - "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/test/faketopo" "github.com/youtube/vitess/go/vt/zktopo" "golang.org/x/net/context" @@ -24,12 +22,9 @@ import ( func TestCreateShard(t *testing.T) { ctx := context.Background() cells := []string{"test_cell"} - logger := logutil.NewMemoryLogger() // Set up topology. ts := zktopo.NewTestServer(t, cells) - f := faketopo.New(t, logger, ts, cells) - defer f.TearDown() keyspace := "test_keyspace" shard := "0" @@ -56,12 +51,9 @@ func TestCreateShard(t *testing.T) { func TestGetOrCreateShard(t *testing.T) { ctx := context.Background() cells := []string{"test_cell"} - logger := logutil.NewMemoryLogger() // Set up topology. ts := zktopo.NewTestServer(t, cells) - f := faketopo.New(t, logger, ts, cells) - defer f.TearDown() // and do massive parallel GetOrCreateShard keyspace := "test_keyspace" From 77fc9db5d5cec1579d61bbee5a83edb61d4df321 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 07:45:42 -0700 Subject: [PATCH 099/128] Using faketopo in this test, now that wrangler dependency is gone. 
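[Editor's note] What makes the cleanup below possible is Go struct embedding: faketopo.FakeTopo is assumed to stub out the whole topo.Server interface with "not implemented" errors, so a test fake keeps only the overrides it actually exercises. A sketch of the pattern; the GetShardNames override is illustrative, mirroring the GetTablet override kept in the diff:

```go
// fakeTopo gets a few dozen "not implemented" methods for free by
// embedding the shared fake; only the calls the test makes are spelled out.
type fakeTopo struct {
	faketopo.FakeTopo
}

// Illustrative override: return a fixed shard list to the code under test.
func (ts *fakeTopo) GetShardNames(ctx context.Context, keyspace string) ([]string, error) {
	return []string{"0", "1", "2"}, nil
}
```

The design choice is the usual one for large interfaces: the fake compiles against the full interface once, and each test overrides behavior piecemeal instead of re-stubbing everything.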
--- go/vt/schemamanager/schemamanager_test.go | 151 +--------------------- 1 file changed, 4 insertions(+), 147 deletions(-) diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 3bd74ef510..f85909bd27 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -15,6 +15,7 @@ import ( _ "github.com/youtube/vitess/go/vt/tabletmanager/gorpctmclient" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/topo/test/faketopo" "golang.org/x/net/context" ) @@ -243,7 +244,9 @@ func (client *fakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, ta return client.TabletManagerClient.ExecuteFetchAsDba(ctx, tablet, query, maxRows, wantFields, disableBinlogs, reloadSchema) } -type fakeTopo struct{} +type fakeTopo struct { + faketopo.FakeTopo +} func newFakeTopo() *fakeTopo { return &fakeTopo{} @@ -276,152 +279,6 @@ func (topoServer *fakeTopo) GetTablet(ctx context.Context, tabletAlias topo.Tabl }, nil } -func (topoServer *fakeTopo) GetSrvKeyspaceNames(ctx context.Context, cell string) ([]string, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topo.SrvKeyspace, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) GetEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) Close() {} - -func (topoServer *fakeTopo) GetKnownCells(ctx context.Context) ([]string, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) CreateKeyspace(ctx context.Context, keyspace string, value *topo.Keyspace) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateKeyspace(ctx context.Context, ki *topo.KeyspaceInfo, existingVersion int64) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) GetKeyspace(ctx context.Context, keyspace string) (*topo.KeyspaceInfo, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) GetKeyspaces(ctx context.Context) ([]string, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) DeleteKeyspaceShards(ctx context.Context, keyspace string) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) CreateShard(ctx context.Context, keyspace, shard string, value *topo.Shard) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateShard(ctx context.Context, si *topo.ShardInfo, existingVersion int64) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) ValidateShard(ctx context.Context, keyspace, shard string) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) DeleteShard(ctx context.Context, keyspace, shard string) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) CreateTablet(ctx context.Context, tablet *topo.Tablet) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateTablet(ctx context.Context, tablet *topo.TabletInfo, existingVersion int64) (newVersion int64, err error) { - return 0, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateTabletFields(ctx context.Context, tabletAlias 
topo.TabletAlias, update func(*topo.Tablet) error) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) DeleteTablet(ctx context.Context, alias topo.TabletAlias) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) GetTabletsByCell(ctx context.Context, cell string) ([]topo.TabletAlias, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateShardReplicationFields(ctx context.Context, cell, keyspace, shard string, update func(*topo.ShardReplication) error) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) GetShardReplication(ctx context.Context, cell, keyspace, shard string) (*topo.ShardReplicationInfo, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) DeleteShardReplication(ctx context.Context, cell, keyspace, shard string) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) LockSrvShardForAction(ctx context.Context, cell, keyspace, shard, contents string) (string, error) { - return "", fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UnlockSrvShardForAction(ctx context.Context, cell, keyspace, shard, lockPath, results string) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) GetSrvTabletTypesPerShard(ctx context.Context, cell, keyspace, shard string) ([]topo.TabletType, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addrs *topo.EndPoints) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) DeleteEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) WatchEndPoints(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType) (<-chan *topo.EndPoints, chan<- struct{}, error) { - return nil, nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateSrvShard(ctx context.Context, cell, keyspace, shard string, srvShard *topo.SrvShard) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) GetSrvShard(ctx context.Context, cell, keyspace, shard string) (*topo.SrvShard, error) { - return nil, fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) DeleteSrvShard(ctx context.Context, cell, keyspace, shard string) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateSrvKeyspace(ctx context.Context, cell, keyspace string, srvKeyspace *topo.SrvKeyspace) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UpdateTabletEndpoint(ctx context.Context, cell, keyspace, shard string, tabletType topo.TabletType, addr *topo.EndPoint) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) LockKeyspaceForAction(ctx context.Context, keyspace, contents string) (string, error) { - return "", fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UnlockKeyspaceForAction(ctx context.Context, keyspace, lockPath, results string) error { - return fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) LockShardForAction(ctx context.Context, keyspace, shard, contents string) (string, error) { - return "", fmt.Errorf("not implemented") -} - -func (topoServer *fakeTopo) UnlockShardForAction(ctx context.Context, keyspace, shard, lockPath, results 
string) error { - return fmt.Errorf("not implemented") -} - type fakeController struct { sqls []string keyspace string From cf0dd7b193859fe121d627c0b19d1626e17a3f8c Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 08:11:29 -0700 Subject: [PATCH 100/128] Fixing use of the wrong Context. --- go/vt/tabletmanager/after_action.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/tabletmanager/after_action.go b/go/vt/tabletmanager/after_action.go index 8c9fd87bf5..5cb98512ec 100644 --- a/go/vt/tabletmanager/after_action.go +++ b/go/vt/tabletmanager/after_action.go @@ -227,7 +227,7 @@ func (agent *ActionAgent) changeCallback(ctx context.Context, oldTablet, newTabl // See if we need to start or stop any binlog player if agent.BinlogPlayerMap != nil { if newTablet.Type == topo.TYPE_MASTER { - agent.BinlogPlayerMap.RefreshMap(ctx, newTablet, keyspaceInfo, shardInfo) + agent.BinlogPlayerMap.RefreshMap(agent.batchCtx, newTablet, keyspaceInfo, shardInfo) } else { agent.BinlogPlayerMap.StopAllPlayersAndReset() } From 332eb024986ba47908214566baae703d9424a3f9 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 09:46:23 -0700 Subject: [PATCH 101/128] Adding context to a method, so we can use topo server. --- go/cmd/vtctld/template.go | 4 ++-- go/cmd/vtctld/vtctld.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go/cmd/vtctld/template.go b/go/cmd/vtctld/template.go index 58dc7641c3..1529a60988 100644 --- a/go/cmd/vtctld/template.go +++ b/go/cmd/vtctld/template.go @@ -235,7 +235,7 @@ func (loader *TemplateLoader) ServeTemplate(templateName string, data interface{ var ( modifyDbTopology func(context.Context, topo.Server, *topotools.Topology) error - modifyDbServingGraph func(topo.Server, *topotools.ServingGraph) + modifyDbServingGraph func(context.Context, topo.Server, *topotools.ServingGraph) ) // SetDbTopologyPostprocessor installs a hook that can modify @@ -249,7 +249,7 @@ func SetDbTopologyPostprocessor(f func(context.Context, topo.Server, *topotools. // SetDbServingGraphPostprocessor installs a hook that can modify // topotools.ServingGraph struct before it's displayed. 
-func SetDbServingGraphPostprocessor(f func(topo.Server, *topotools.ServingGraph)) { +func SetDbServingGraphPostprocessor(f func(context.Context, topo.Server, *topotools.ServingGraph)) { if modifyDbServingGraph != nil { panic("Cannot set multiple DbServingGraph postprocessors") } diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go index 3447c0f9b4..9e884d8b83 100644 --- a/go/cmd/vtctld/vtctld.go +++ b/go/cmd/vtctld/vtctld.go @@ -269,7 +269,7 @@ func main() { ctx := context.Background() servingGraph := topotools.DbServingGraph(ctx, ts, cell) if modifyDbServingGraph != nil { - modifyDbServingGraph(ts, servingGraph) + modifyDbServingGraph(ctx, ts, servingGraph) } templateLoader.ServeTemplate("serving_graph.html", servingGraph, w, r) }) From 7e13662142bd7147d23af1d7fa1ff0fdbe10c61f Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 10:02:18 -0700 Subject: [PATCH 102/128] Fixing a couple instances of context.TODO() --- go/cmd/zkclient2/zkclient2.go | 25 +++++++++++++------------ go/vt/tabletmanager/agent.go | 2 +- go/vt/wrangler/testlib/fake_tablet.go | 2 +- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/go/cmd/zkclient2/zkclient2.go b/go/cmd/zkclient2/zkclient2.go index e306ea52b7..f2dd0d0fee 100644 --- a/go/cmd/zkclient2/zkclient2.go +++ b/go/cmd/zkclient2/zkclient2.go @@ -50,12 +50,12 @@ func connect() *rpcplus.Client { return rpcClient } -func getSrvKeyspaceNames(rpcClient *rpcplus.Client, cell string, verbose bool) { +func getSrvKeyspaceNames(ctx context.Context, rpcClient *rpcplus.Client, cell string, verbose bool) { req := &topo.GetSrvKeyspaceNamesArgs{ Cell: cell, } reply := &topo.SrvKeyspaceNames{} - if err := rpcClient.Call(context.TODO(), "TopoReader.GetSrvKeyspaceNames", req, reply); err != nil { + if err := rpcClient.Call(ctx, "TopoReader.GetSrvKeyspaceNames", req, reply); err != nil { log.Fatalf("TopoReader.GetSrvKeyspaceNames error: %v", err) } if verbose { @@ -65,13 +65,13 @@ func getSrvKeyspaceNames(rpcClient *rpcplus.Client, cell string, verbose bool) { } } -func getSrvKeyspace(rpcClient *rpcplus.Client, cell, keyspace string, verbose bool) { +func getSrvKeyspace(ctx context.Context, rpcClient *rpcplus.Client, cell, keyspace string, verbose bool) { req := &topo.GetSrvKeyspaceArgs{ Cell: cell, Keyspace: keyspace, } reply := &topo.SrvKeyspace{} - if err := rpcClient.Call(context.TODO(), "TopoReader.GetSrvKeyspace", req, reply); err != nil { + if err := rpcClient.Call(ctx, "TopoReader.GetSrvKeyspace", req, reply); err != nil { log.Fatalf("TopoReader.GetSrvKeyspace error: %v", err) } if verbose { @@ -89,7 +89,7 @@ func getSrvKeyspace(rpcClient *rpcplus.Client, cell, keyspace string, verbose bo } } -func getEndPoints(rpcClient *rpcplus.Client, cell, keyspace, shard, tabletType string, verbose bool) { +func getEndPoints(ctx context.Context, rpcClient *rpcplus.Client, cell, keyspace, shard, tabletType string, verbose bool) { req := &topo.GetEndPointsArgs{ Cell: cell, Keyspace: keyspace, @@ -97,7 +97,7 @@ func getEndPoints(rpcClient *rpcplus.Client, cell, keyspace, shard, tabletType s TabletType: topo.TabletType(tabletType), } reply := &topo.EndPoints{} - if err := rpcClient.Call(context.TODO(), "TopoReader.GetEndPoints", req, reply); err != nil { + if err := rpcClient.Call(ctx, "TopoReader.GetEndPoints", req, reply); err != nil { log.Fatalf("TopoReader.GetEndPoints error: %v", err) } if verbose { @@ -109,14 +109,14 @@ func getEndPoints(rpcClient *rpcplus.Client, cell, keyspace, shard, tabletType s // qps is a function used by tests to run a 
vtgate load check. // It will get the same srvKeyspaces as fast as possible and display the QPS. -func qps(cell string, keyspaces []string) { +func qps(ctx context.Context, cell string, keyspaces []string) { var count sync2.AtomicInt32 for _, keyspace := range keyspaces { for i := 0; i < 10; i++ { go func() { rpcClient := connect() for true { - getSrvKeyspace(rpcClient, cell, keyspace, false) + getSrvKeyspace(ctx, rpcClient, cell, keyspace, false) count.Add(1) } }() @@ -157,10 +157,11 @@ func main() { defer pprof.StopCPUProfile() } + ctx := context.Background() if *mode == "getSrvKeyspaceNames" { rpcClient := connect() if len(args) == 1 { - getSrvKeyspaceNames(rpcClient, args[0], true) + getSrvKeyspaceNames(ctx, rpcClient, args[0], true) } else { log.Errorf("getSrvKeyspaceNames only takes one argument") exit.Return(1) @@ -169,7 +170,7 @@ func main() { } else if *mode == "getSrvKeyspace" { rpcClient := connect() if len(args) == 2 { - getSrvKeyspace(rpcClient, args[0], args[1], true) + getSrvKeyspace(ctx, rpcClient, args[0], args[1], true) } else { log.Errorf("getSrvKeyspace only takes two arguments") exit.Return(1) @@ -178,14 +179,14 @@ func main() { } else if *mode == "getEndPoints" { rpcClient := connect() if len(args) == 4 { - getEndPoints(rpcClient, args[0], args[1], args[2], args[3], true) + getEndPoints(ctx, rpcClient, args[0], args[1], args[2], args[3], true) } else { log.Errorf("getEndPoints only takes four arguments") exit.Return(1) } } else if *mode == "qps" { - qps(args[0], args[1:]) + qps(ctx, args[0], args[1:]) } else { flag.Usage() diff --git a/go/vt/tabletmanager/agent.go b/go/vt/tabletmanager/agent.go index a1c4d39430..0675b5cea8 100644 --- a/go/vt/tabletmanager/agent.go +++ b/go/vt/tabletmanager/agent.go @@ -390,7 +390,7 @@ func (agent *ActionAgent) verifyServingAddrs(ctx context.Context) error { // the initial state change callback to start tablet services. func (agent *ActionAgent) Start(ctx context.Context, mysqlPort, vtPort, vtsPort int) error { var err error - if _, err = agent.readTablet(context.TODO()); err != nil { + if _, err = agent.readTablet(ctx); err != nil { return err } diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index 92009fa587..f47a6fef90 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -127,7 +127,7 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) { // create a test agent on that port, and re-read the record // (it has new ports and IP) - ft.Agent = tabletmanager.NewTestActionAgent(context.TODO(), wr.TopoServer(), ft.Tablet.Alias, port, ft.FakeMysqlDaemon) + ft.Agent = tabletmanager.NewTestActionAgent(context.Background(), wr.TopoServer(), ft.Tablet.Alias, port, ft.FakeMysqlDaemon) ft.Tablet = ft.Agent.Tablet().Tablet // create the RPC server From 9e4b91aef0cb5cc79b5ce09ec19c7dca60fc4540 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 10:07:21 -0700 Subject: [PATCH 103/128] Removing TryGet from conn pool, seems unused. 
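[Editor's note] Callers that still want TryGet's non-blocking behavior can approximate it with a deadline on Get, which is the shape the updated dbconnpool code takes in the diff below. A sketch, assuming the ResourcePool API shown there; note it maps every error to TryGet's old "nil resource, nil error" result, which conflates timeouts with real failures:

```go
import (
	"time"

	"github.com/youtube/vitess/go/pools"
	"golang.org/x/net/context"
)

// tryGet emulates the removed TryGet by bounding Get with a short
// deadline instead of waiting indefinitely for a free resource.
func tryGet(p *pools.ResourcePool) (pools.Resource, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	r, err := p.Get(ctx)
	if err != nil {
		return nil, nil // nothing available in time (simplification)
	}
	return r, nil
}
```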
--- go/pools/resource_pool.go | 9 +----- go/pools/resource_pool_test.go | 44 ++--------------------------- go/vt/dbconnpool/connection_pool.go | 16 +---------- go/vt/tabletserver/connpool.go | 14 --------- go/vt/tabletserver/connpool_test.go | 43 ---------------------------- 5 files changed, 4 insertions(+), 122 deletions(-) diff --git a/go/pools/resource_pool.go b/go/pools/resource_pool.go index 6b5771baec..5eac3eeb85 100644 --- a/go/pools/resource_pool.go +++ b/go/pools/resource_pool.go @@ -77,7 +77,7 @@ func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Dur // Close empties the pool calling Close on all its resources. // You can call Close while there are outstanding resources. // It waits for all resources to be returned (Put). -// After a Close, Get and TryGet are not allowed. +// After a Close, Get is not allowed. func (rp *ResourcePool) Close() { _ = rp.SetCapacity(0) } @@ -95,13 +95,6 @@ func (rp *ResourcePool) Get(ctx context.Context) (resource Resource, err error) return rp.get(ctx, true) } -// TryGet will return the next available resource. If none is available, and capacity -// has not been reached, it will create a new one using the factory. Otherwise, -// it will return nil with no error. -func (rp *ResourcePool) TryGet() (resource Resource, err error) { - return rp.get(context.TODO(), false) -} - func (rp *ResourcePool) get(ctx context.Context, wait bool) (resource Resource, err error) { // If ctx has already expired, avoid racing with rp's resource channel. select { diff --git a/go/pools/resource_pool_test.go b/go/pools/resource_pool_test.go index eefab2f26c..e5af98a8dd 100644 --- a/go/pools/resource_pool_test.go +++ b/go/pools/resource_pool_test.go @@ -74,38 +74,6 @@ func TestOpen(t *testing.T) { } } - // Test TryGet - r, err := p.TryGet() - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if r != nil { - t.Errorf("Expecting nil") - } - for i := 0; i < 5; i++ { - p.Put(resources[i]) - _, available, _, _, _, _ := p.Stats() - if available != int64(i+1) { - t.Errorf("expecting %d, received %d", 5-i-1, available) - } - } - for i := 0; i < 5; i++ { - r, err := p.TryGet() - resources[i] = r - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if r == nil { - t.Errorf("Expecting non-nil") - } - if lastID.Get() != 5 { - t.Errorf("Expecting 5, received %d", lastID.Get()) - } - if count.Get() != 5 { - t.Errorf("Expecting 5, received %d", count.Get()) - } - } - // Test that Get waits ch := make(chan bool) go func() { @@ -139,7 +107,7 @@ func TestOpen(t *testing.T) { } // Test Close resource - r, err = p.Get(ctx) + r, err := p.Get(ctx) if err != nil { t.Errorf("Unexpected error %v", err) } @@ -241,15 +209,6 @@ func TestShrinking(t *testing.T) { t.Errorf(`expecting '%s', received '%s'`, expected, stats) } - // TryGet is allowed when shrinking - r, err := p.TryGet() - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if r != nil { - t.Errorf("Expecting nil") - } - // Get is allowed when shrinking, but it will wait getdone := make(chan bool) go func() { @@ -278,6 +237,7 @@ func TestShrinking(t *testing.T) { // Ensure no deadlock if SetCapacity is called after we start // waiting for a resource + var err error for i := 0; i < 3; i++ { resources[i], err = p.Get(ctx) if err != nil { diff --git a/go/vt/dbconnpool/connection_pool.go b/go/vt/dbconnpool/connection_pool.go index 5ed1b85b1a..a3054f28a2 100644 --- a/go/vt/dbconnpool/connection_pool.go +++ b/go/vt/dbconnpool/connection_pool.go @@ -107,7 +107,7 @@ func (cp *ConnectionPool) 
Get(timeout time.Duration) (PoolConnection, error) { ctx := context.Background() if timeout != 0 { var cancel func() - ctx, cancel = context.WithTimeout(context.Background(), timeout) + ctx, cancel = context.WithTimeout(ctx, timeout) defer cancel() } r, err := p.Get(ctx) @@ -117,20 +117,6 @@ func (cp *ConnectionPool) Get(timeout time.Duration) (PoolConnection, error) { return r.(PoolConnection), nil } -// TryGet returns a connection, or nil. -// You must call Recycle on the PoolConnection once done. -func (cp *ConnectionPool) TryGet() (PoolConnection, error) { - p := cp.pool() - if p == nil { - return nil, ErrConnPoolClosed - } - r, err := p.TryGet() - if err != nil || r == nil { - return nil, err - } - return r.(PoolConnection), nil -} - // Put puts a connection into the pool. func (cp *ConnectionPool) Put(conn PoolConnection) { p := cp.pool() diff --git a/go/vt/tabletserver/connpool.go b/go/vt/tabletserver/connpool.go index ea471e2a6b..8acfdbc712 100644 --- a/go/vt/tabletserver/connpool.go +++ b/go/vt/tabletserver/connpool.go @@ -107,20 +107,6 @@ func (cp *ConnPool) Get(ctx context.Context) (*DBConn, error) { return r.(*DBConn), nil } -// TryGet returns a connection, or nil. -// You must call Recycle on the DBConn once done. -func (cp *ConnPool) TryGet() (*DBConn, error) { - p := cp.pool() - if p == nil { - return nil, ErrConnPoolClosed - } - r, err := p.TryGet() - if err != nil || r == nil { - return nil, err - } - return r.(*DBConn), nil -} - // Put puts a connection into the pool. func (cp *ConnPool) Put(conn *DBConn) { p := cp.pool() diff --git a/go/vt/tabletserver/connpool_test.go b/go/vt/tabletserver/connpool_test.go index 4f432c4a3c..489be5d661 100644 --- a/go/vt/tabletserver/connpool_test.go +++ b/go/vt/tabletserver/connpool_test.go @@ -13,49 +13,6 @@ import ( "golang.org/x/net/context" ) -func TestConnPoolTryGetWhilePoolIsClosed(t *testing.T) { - fakesqldb.Register() - testUtils := newTestUtils() - connPool := testUtils.newConnPool() - _, err := connPool.TryGet() - if err != ErrConnPoolClosed { - t.Fatalf("pool is closed, should get ErrConnPoolClosed") - } -} - -func TestConnPoolTryGetWhenFailedToConnectToDB(t *testing.T) { - db := fakesqldb.Register() - testUtils := newTestUtils() - db.EnableConnFail() - appParams := &sqldb.ConnParams{} - dbaParams := &sqldb.ConnParams{} - connPool := testUtils.newConnPool() - connPool.Open(appParams, dbaParams) - defer connPool.Close() - _, err := connPool.TryGet() - if err == nil { - t.Fatalf("should get a connection error") - } -} - -func TestConnPoolTryGet(t *testing.T) { - fakesqldb.Register() - testUtils := newTestUtils() - appParams := &sqldb.ConnParams{} - dbaParams := &sqldb.ConnParams{} - connPool := testUtils.newConnPool() - connPool.Open(appParams, dbaParams) - defer connPool.Close() - dbConn, err := connPool.TryGet() - if err != nil { - t.Fatalf("should get an error, but got: %v", err) - } - if dbConn == nil { - t.Fatalf("db conn should not be nil") - } - dbConn.Recycle() -} - func TestConnPoolGet(t *testing.T) { fakesqldb.Register() testUtils := newTestUtils() From e2dc9d5b22dbb8764e04e6d632385940bd589b69 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 13:47:41 -0700 Subject: [PATCH 104/128] Splitting up the go client into a user API (using structs), and a connection layer API (using an interface). That way the RPC implementations are smaller. 
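
For reference, application code now holds the concrete *VTGateConn and
*VTGateTx structs and never sees the Impl interface. A minimal sketch of
the new calling convention; the protocol name, address, timeout and query
are illustrative, not part of this change:

	import (
		"time"

		"github.com/youtube/vitess/go/vt/topo"
		"github.com/youtube/vitess/go/vt/vtgate/vtgateconn"
		"golang.org/x/net/context"
	)

	func updateName(ctx context.Context) error {
		// DialProtocol returns the struct wrapper; a protocol-specific
		// dialer only has to provide the much smaller Impl interface.
		conn, err := vtgateconn.DialProtocol(ctx, "gorpc", "localhost:15991", 30*time.Second)
		if err != nil {
			return err
		}
		defer conn.Close()

		tx, err := conn.Begin(ctx)
		if err != nil {
			return err
		}
		if _, err := tx.Execute(ctx, "update a set name = 'x' where id = 1", nil, topo.TYPE_MASTER); err != nil {
			_ = tx.Rollback(ctx)
			return err
		}
		return tx.Commit(ctx)
	}
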
--- go/vt/client/client.go | 10 +- go/vt/client/client_test.go | 2 +- go/vt/vtgate/fakerpcvtgateconn/conn.go | 198 +++++++++------------ go/vt/vtgate/gorpcvtgateconn/conn.go | 85 +++------ go/vt/vtgate/vtgateconn/vtgateconn.go | 174 ++++++++++++++---- go/vt/vtgate/vtgateconn/vtgateconn_test.go | 18 +- go/vt/vtgate/vtgateconntest/client.go | 30 ++-- 7 files changed, 276 insertions(+), 241 deletions(-) diff --git a/go/vt/client/client.go b/go/vt/client/client.go index c4e8f3071a..d11ef942c8 100644 --- a/go/vt/client/client.go +++ b/go/vt/client/client.go @@ -52,17 +52,13 @@ type conn struct { TabletType topo.TabletType `json:"tablet_type"` Streaming bool Timeout time.Duration - vtgateConn vtgateconn.VTGateConn - tx vtgateconn.VTGateTx + vtgateConn *vtgateconn.VTGateConn + tx *vtgateconn.VTGateTx } func (c *conn) dial() error { - dialer := vtgateconn.GetDialerWithProtocol(c.Protocol) - if dialer == nil { - return fmt.Errorf("could not find dialer for protocol %s", c.Protocol) - } var err error - c.vtgateConn, err = dialer(context.Background(), c.Address, c.Timeout) + c.vtgateConn, err = vtgateconn.DialProtocol(context.Background(), c.Protocol, c.Address, c.Timeout) return err } diff --git a/go/vt/client/client_test.go b/go/vt/client/client_test.go index 3914974b24..ef338828c5 100644 --- a/go/vt/client/client_test.go +++ b/go/vt/client/client_test.go @@ -94,7 +94,7 @@ func TestDial(t *testing.T) { _ = c.Close() _, err = drv{}.Open(`{"protocol": "none"}`) - want := "could not find dialer for protocol none" + want := "no dialer registered for VTGate protocol none" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("err: %v, want %s", err, want) } diff --git a/go/vt/vtgate/fakerpcvtgateconn/conn.go b/go/vt/vtgate/fakerpcvtgateconn/conn.go index dc352bf9c1..febde09bb3 100644 --- a/go/vt/vtgate/fakerpcvtgateconn/conn.go +++ b/go/vt/vtgate/fakerpcvtgateconn/conn.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Package fakerpcvtgateconn provides a fake implementation of +// vtgateconn.Impl that doesn't do any RPC, but uses a local +// map to return results. package fakerpcvtgateconn import ( @@ -35,15 +38,25 @@ type splitQueryResponse struct { err error } -// FakeVTGateConn provides a fake implementation of vtgateconn.VTGateConn +// FakeVTGateConn provides a fake implementation of vtgateconn.Impl type FakeVTGateConn struct { execMap map[string]*queryResponse splitQueryMap map[string]*splitQueryResponse } -// NewFakeVTGateConn creates a new FakeVTConn instance -func NewFakeVTGateConn(ctx context.Context, address string, timeout time.Duration) *FakeVTGateConn { - return &FakeVTGateConn{execMap: make(map[string]*queryResponse)} +// RegisterFakeVTGateConnDialer registers the proper dialer for this fake, +// and returns the underlying instance that will be returned by the dialer, +// and the protocol to use to get this fake. +func RegisterFakeVTGateConnDialer() (*FakeVTGateConn, string) { + protocol := "fake" + impl := &FakeVTGateConn{ + execMap: make(map[string]*queryResponse), + splitQueryMap: make(map[string]*splitQueryResponse), + } + vtgateconn.RegisterDialer(protocol, func(ctx context.Context, address string, timeout time.Duration) (vtgateconn.Impl, error) { + return impl, nil + }) + return impl, protocol } // AddQuery adds a query and expected result. 
@@ -78,65 +91,65 @@ func (conn *FakeVTGateConn) AddSplitQuery( } } -// Execute please see vtgateconn.VTGateConn.Execute -func (conn *FakeVTGateConn) Execute( - ctx context.Context, - query string, - bindVars map[string]interface{}, - tabletType topo.TabletType) (*mproto.QueryResult, error) { - return conn.execute( - ctx, - &proto.Query{ - Sql: query, - BindVariables: bindVars, - TabletType: tabletType, - Session: nil, - }) -} - -func (conn *FakeVTGateConn) execute(ctx context.Context, query *proto.Query) (*mproto.QueryResult, error) { +// Execute please see vtgateconn.Impl.Execute +func (conn *FakeVTGateConn) Execute(ctx context.Context, sql string, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } + query := &proto.Query{ + Sql: sql, + BindVariables: bindVars, + TabletType: tabletType, + Session: s, + } response, ok := conn.execMap[query.Sql] if !ok { - return nil, fmt.Errorf("no match for: %s", query.Sql) + return nil, nil, fmt.Errorf("no match for: %s", query.Sql) } if !reflect.DeepEqual(query, response.execQuery) { - return nil, fmt.Errorf( + return nil, nil, fmt.Errorf( "Execute: %+v, want %+v", query, response.execQuery) } var reply mproto.QueryResult reply = *response.reply - return &reply, nil + if s != nil { + s = newSession(true, "test_keyspace", []string{}, topo.TYPE_MASTER) + } + return &reply, s, nil } -// ExecuteShard please see vtgateconn.VTGateConn.ExecuteShard -func (conn *FakeVTGateConn) ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { - return conn.executeShard( - ctx, - &proto.QueryShard{ - Sql: query, - BindVariables: bindVars, - TabletType: tabletType, - Keyspace: keyspace, - Shards: shards, - Session: nil, - }) -} - -func (conn *FakeVTGateConn) executeShard(ctx context.Context, query *proto.QueryShard) (*mproto.QueryResult, error) { +// ExecuteShard please see vtgateconn.Impl.ExecuteShard +func (conn *FakeVTGateConn) ExecuteShard(ctx context.Context, sql string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } + query := &proto.QueryShard{ + Sql: sql, + BindVariables: bindVars, + TabletType: tabletType, + Keyspace: keyspace, + Shards: shards, + Session: s, + } response, ok := conn.execMap[getShardQueryKey(query)] if !ok { - return nil, fmt.Errorf("no match for: %s", query.Sql) + return nil, nil, fmt.Errorf("no match for: %s", query.Sql) } if !reflect.DeepEqual(query, response.shardQuery) { - return nil, fmt.Errorf( - "Execute: %+v, want %+v", query, response.shardQuery) + return nil, nil, fmt.Errorf( + "ExecuteShard: %+v, want %+v", query, response.shardQuery) } var reply mproto.QueryResult reply = *response.reply - return &reply, nil + if s != nil { + s = newSession(true, keyspace, shards, tabletType) + } + return &reply, s, nil } -// StreamExecute please see vtgateconn.VTGateConn.StreamExecute +// StreamExecute please see vtgateconn.Impl.StreamExecute func (conn *FakeVTGateConn) StreamExecute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { resultChan := make(chan *mproto.QueryResult) @@ -152,7 +165,7 @@ 
func (conn *FakeVTGateConn) StreamExecute(ctx context.Context, query string, bin Session: nil, } if !reflect.DeepEqual(queryProto, response.execQuery) { - err := fmt.Errorf("Execute: %+v, want %+v", query, response.execQuery) + err := fmt.Errorf("StreamExecute: %+v, want %+v", query, response.execQuery) return resultChan, func() error { return err } } if response.err != nil { @@ -171,17 +184,27 @@ func (conn *FakeVTGateConn) StreamExecute(ctx context.Context, query string, bin return resultChan, nil } -// Begin please see vtgateconn.VTGateConn.Begin -func (conn *FakeVTGateConn) Begin(ctx context.Context) (vtgateconn.VTGateTx, error) { - tx := &fakeVTGateTx{ - conn: conn, - session: &proto.Session{ - InTransaction: true, - }} - return tx, nil +// Begin please see vtgateconn.Impl.Begin +func (conn *FakeVTGateConn) Begin(ctx context.Context) (interface{}, error) { + return &proto.Session{ + InTransaction: true, + }, nil } -// SplitQuery please see vtgateconn.VTGateConn.SplitQuery +// Commit please see vtgateconn.Impl.Commit +func (conn *FakeVTGateConn) Commit(ctx context.Context, session interface{}) error { + if session == nil { + return errors.New("commit: not in transaction") + } + return nil +} + +// Rollback please see vtgateconn.Impl.Rollback +func (conn *FakeVTGateConn) Rollback(ctx context.Context, session interface{}) error { + return nil +} + +// SplitQuery please see vtgateconn.Impl.SplitQuery func (conn *FakeVTGateConn) SplitQuery(ctx context.Context, keyspace string, query tproto.BoundQuery, splitCount int) ([]proto.SplitQueryPart, error) { response, ok := conn.splitQueryMap[getSplitQueryKey(keyspace, &query, splitCount)] if !ok { @@ -194,68 +217,10 @@ func (conn *FakeVTGateConn) SplitQuery(ctx context.Context, keyspace string, que return reply, nil } -// Close please see vtgateconn.VTGateConn.Close +// Close please see vtgateconn.Impl.Close func (conn *FakeVTGateConn) Close() { } -type fakeVTGateTx struct { - conn *FakeVTGateConn - session *proto.Session -} - -// fakeVtgateTx has to implement vtgateconn.VTGateTx interface -var _ vtgateconn.VTGateTx = (*fakeVTGateTx)(nil) - -func (tx *fakeVTGateTx) Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { - if tx.session == nil { - return nil, errors.New("execute: not in transaction") - } - r, err := tx.conn.execute( - ctx, - &proto.Query{ - Sql: query, - BindVariables: bindVars, - TabletType: tabletType, - Session: tx.session, - }) - tx.session = newSession(true, "test_keyspace", []string{}, topo.TYPE_MASTER) - return r, err -} - -func (tx *fakeVTGateTx) ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { - if tx.session == nil { - return nil, errors.New("executeShard: not in transaction") - } - r, err := tx.conn.executeShard( - ctx, - &proto.QueryShard{ - Sql: query, - BindVariables: bindVars, - TabletType: tabletType, - Keyspace: keyspace, - Shards: shards, - Session: tx.session, - }) - tx.session = newSession(true, keyspace, shards, tabletType) - return r, err -} - -func (tx *fakeVTGateTx) Commit(ctx context.Context) error { - if tx.session == nil { - return errors.New("commit: not in transaction") - } - defer func() { tx.session = nil }() - return nil -} - -func (tx *fakeVTGateTx) Rollback(ctx context.Context) error { - if tx.session == nil { - return nil - } - defer func() { tx.session = nil }() - return nil -} - func 
getShardQueryKey(request *proto.QueryShard) string { sort.Strings(request.Shards) return fmt.Sprintf("%s-%s", request.Sql, strings.Join(request.Shards, ":")) @@ -285,8 +250,5 @@ func newSession( } } -// Make sure FakeVTGateConn implements vtgateconn.VTGateConn -var _ (vtgateconn.VTGateConn) = (*FakeVTGateConn)(nil) - -// Make sure fakeVTGateTx implements vtgateconn.VtGateTx -var _ (vtgateconn.VTGateTx) = (*fakeVTGateTx)(nil) +// Make sure FakeVTGateConn implements vtgateconn.Impl +var _ (vtgateconn.Impl) = (*FakeVTGateConn)(nil) diff --git a/go/vt/vtgate/gorpcvtgateconn/conn.go b/go/vt/vtgate/gorpcvtgateconn/conn.go index a7b93a1796..95030c85d7 100644 --- a/go/vt/vtgate/gorpcvtgateconn/conn.go +++ b/go/vt/vtgate/gorpcvtgateconn/conn.go @@ -29,7 +29,7 @@ type vtgateConn struct { rpcConn *rpcplus.Client } -func dial(ctx context.Context, address string, timeout time.Duration) (vtgateconn.VTGateConn, error) { +func dial(ctx context.Context, address string, timeout time.Duration) (vtgateconn.Impl, error) { network := "tcp" if strings.Contains(address, "/") { network = "unix" @@ -41,17 +41,16 @@ func dial(ctx context.Context, address string, timeout time.Duration) (vtgatecon return &vtgateConn{rpcConn: rpcConn}, nil } -func (conn *vtgateConn) Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { - r, _, err := conn.execute(ctx, query, bindVars, tabletType, nil) - return r, err -} - -func (conn *vtgateConn) execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType, session *proto.Session) (*mproto.QueryResult, *proto.Session, error) { +func (conn *vtgateConn) Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } request := proto.Query{ Sql: query, BindVariables: bindVars, TabletType: tabletType, - Session: session, + Session: s, } var result proto.QueryResult if err := conn.rpcConn.Call(ctx, "VTGate.Execute", request, &result); err != nil { @@ -63,19 +62,18 @@ func (conn *vtgateConn) execute(ctx context.Context, query string, bindVars map[ return result.Result, result.Session, nil } -func (conn *vtgateConn) ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { - r, _, err := conn.executeShard(ctx, query, keyspace, shards, bindVars, tabletType, nil) - return r, err -} - -func (conn *vtgateConn) executeShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType, session *proto.Session) (*mproto.QueryResult, *proto.Session, error) { +func (conn *vtgateConn) ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } request := proto.QueryShard{ Sql: query, BindVariables: bindVars, Keyspace: keyspace, Shards: shards, TabletType: tabletType, - Session: session, + Session: s, } var result proto.QueryResult if err := conn.rpcConn.Call(ctx, "VTGate.ExecuteShard", request, &result); err != nil { @@ -106,12 +104,22 @@ func (conn *vtgateConn) StreamExecute(ctx context.Context, 
query string, bindVar return srout, func() error { return c.Error } } -func (conn *vtgateConn) Begin(ctx context.Context) (vtgateconn.VTGateTx, error) { - tx := &vtgateTx{conn: conn, session: &proto.Session{}} - if err := conn.rpcConn.Call(ctx, "VTGate.Begin", &rpc.Unused{}, tx.session); err != nil { +func (conn *vtgateConn) Begin(ctx context.Context) (interface{}, error) { + session := &proto.Session{} + if err := conn.rpcConn.Call(ctx, "VTGate.Begin", &rpc.Unused{}, session); err != nil { return nil, err } - return tx, nil + return session, nil +} + +func (conn *vtgateConn) Commit(ctx context.Context, session interface{}) error { + s := session.(*proto.Session) + return conn.rpcConn.Call(ctx, "VTGate.Commit", s, &rpc.Unused{}) +} + +func (conn *vtgateConn) Rollback(ctx context.Context, session interface{}) error { + s := session.(*proto.Session) + return conn.rpcConn.Call(ctx, "VTGate.Rollback", s, &rpc.Unused{}) } func (conn *vtgateConn) SplitQuery(ctx context.Context, keyspace string, query tproto.BoundQuery, splitCount int) ([]proto.SplitQueryPart, error) { @@ -130,42 +138,3 @@ func (conn *vtgateConn) SplitQuery(ctx context.Context, keyspace string, query t func (conn *vtgateConn) Close() { conn.rpcConn.Close() } - -type vtgateTx struct { - conn *vtgateConn - session *proto.Session -} - -func (tx *vtgateTx) Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { - if tx.session == nil { - return nil, errors.New("execute: not in transaction") - } - r, session, err := tx.conn.execute(ctx, query, bindVars, tabletType, tx.session) - tx.session = session - return r, err -} - -func (tx *vtgateTx) ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { - if tx.session == nil { - return nil, errors.New("executeShard: not in transaction") - } - r, session, err := tx.conn.executeShard(ctx, query, keyspace, shards, bindVars, tabletType, tx.session) - tx.session = session - return r, err -} - -func (tx *vtgateTx) Commit(ctx context.Context) error { - if tx.session == nil { - return errors.New("commit: not in transaction") - } - defer func() { tx.session = nil }() - return tx.conn.rpcConn.Call(ctx, "VTGate.Commit", tx.session, &rpc.Unused{}) -} - -func (tx *vtgateTx) Rollback(ctx context.Context) error { - if tx.session == nil { - return nil - } - defer func() { tx.session = nil }() - return tx.conn.rpcConn.Call(ctx, "VTGate.Rollback", tx.session, &rpc.Unused{}) -} diff --git a/go/vt/vtgate/vtgateconn/vtgateconn.go b/go/vt/vtgate/vtgateconn/vtgateconn.go index 79a21f8ef7..0dd3832d81 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn.go @@ -6,6 +6,7 @@ package vtgateconn import ( "flag" + "fmt" "time" log "github.com/golang/glog" @@ -40,16 +41,119 @@ type OperationalError string func (e OperationalError) Error() string { return string(e) } -// DialerFunc represents a function that will return a VTGateConn object that can communicate with a VTGate. -type DialerFunc func(ctx context.Context, address string, timeout time.Duration) (VTGateConn, error) - // VTGateConn defines the interface for a vtgate client. // It can be used concurrently across goroutines. -type VTGateConn interface { +type VTGateConn struct { + impl Impl +} + +// Execute executes a non-streaming query on vtgate. 
+func (conn *VTGateConn) Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + res, _, err := conn.impl.Execute(ctx, query, bindVars, tabletType, nil) + return res, err +} + +// ExecuteShard executes a non-streaming query for multiple shards on vtgate. +func (conn *VTGateConn) ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + res, _, err := conn.impl.ExecuteShard(ctx, query, keyspace, shards, bindVars, tabletType, nil) + return res, err +} + +// StreamExecute executes a streaming query on vtgate. It returns a channel, ErrFunc and error. +// If error is non-nil, it means that the StreamExecute failed to send the request. Otherwise, +// you can pull values from the channel till it's closed. Following this, you can call ErrFunc +// to see if the stream ended normally or due to a failure. +func (conn *VTGateConn) StreamExecute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) { + return conn.impl.StreamExecute(ctx, query, bindVars, tabletType) +} + +// Begin starts a transaction and returns a VTGateTX. +func (conn *VTGateConn) Begin(ctx context.Context) (*VTGateTx, error) { + session, err := conn.impl.Begin(ctx) + if err != nil { + return nil, err + } + + return &VTGateTx{ + impl: conn.impl, + session: session, + }, nil +} + +// Close must be called for releasing resources. +func (conn *VTGateConn) Close() { + conn.impl.Close() + conn.impl = nil +} + +// SplitQuery splits a query into equally sized smaller queries by +// appending primary key range clauses to the original query +func (conn *VTGateConn) SplitQuery(ctx context.Context, keyspace string, query tproto.BoundQuery, splitCount int) ([]proto.SplitQueryPart, error) { + return conn.impl.SplitQuery(ctx, keyspace, query, splitCount) +} + +// VTGateTx defines an ongoing transaction. +// It should not be concurrently used across goroutines. +type VTGateTx struct { + impl Impl + session interface{} +} + +// Execute executes a query on vtgate within the current transaction. +func (tx *VTGateTx) Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + if tx.session == nil { + return nil, fmt.Errorf("execute: not in transaction") + } + res, session, err := tx.impl.Execute(ctx, query, bindVars, tabletType, tx.session) + tx.session = session + return res, err +} + +// ExecuteShard executes a query for multiple shards on vtgate within the current transaction. +func (tx *VTGateTx) ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + if tx.session == nil { + return nil, fmt.Errorf("executeShard: not in transaction") + } + res, session, err := tx.impl.ExecuteShard(ctx, query, keyspace, shards, bindVars, tabletType, tx.session) + tx.session = session + return res, err +} + +// Commit commits the current transaction. +func (tx *VTGateTx) Commit(ctx context.Context) error { + if tx.session == nil { + return fmt.Errorf("commit: not in transaction") + } + err := tx.impl.Commit(ctx, tx.session) + tx.session = nil + return err +} + +// Rollback rolls back the current transaction. 
+func (tx *VTGateTx) Rollback(ctx context.Context) error { + if tx.session == nil { + return nil + } + err := tx.impl.Rollback(ctx, tx.session) + tx.session = nil + return err +} + +// ErrFunc is used to check for streaming errors. +type ErrFunc func() error + +// +// The rest of this file is for the protocol implementations. +// + +// Impl defines the interface for a vtgate client protocol +// implementation. It can be used concurrently across goroutines. +type Impl interface { // Execute executes a non-streaming query on vtgate. - Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) + Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) + // ExecuteShard executes a non-streaming query for multiple shards on vtgate. - ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) + ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) // StreamExecute executes a streaming query on vtgate. It returns a channel, ErrFunc and error. // If error is non-nil, it means that the StreamExecute failed to send the request. Otherwise, @@ -58,32 +162,24 @@ type VTGateConn interface { StreamExecute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) // Begin starts a transaction and returns a VTGateTX. - Begin(ctx context.Context) (VTGateTx, error) + Begin(ctx context.Context) (interface{}, error) - // Close must be called for releasing resources. - Close() + // Commit commits the current transaction. + Commit(ctx context.Context, session interface{}) error + + // Rollback rolls back the current transaction. + Rollback(ctx context.Context, session interface{}) error // SplitQuery splits a query into equally sized smaller queries by // appending primary key range clauses to the original query SplitQuery(ctx context.Context, keyspace string, query tproto.BoundQuery, splitCount int) ([]proto.SplitQueryPart, error) + + // Close must be called for releasing resources. + Close() } -// VTGateTx defines the interface for the transaction object created by Begin. -// It should not be concurrently used across goroutines. -type VTGateTx interface { - // Execute executes a query on vtgate within the current transaction. - Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) - // ExecuteShard executes a query for multiple shards on vtgate within the current transaction. - ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) - - // Commit commits the current transaction. - Commit(ctx context.Context) error - // Rollback rolls back the current transaction. - Rollback(ctx context.Context) error -} - -// ErrFunc is used to check for streaming errors. -type ErrFunc func() error +// DialerFunc represents a function that will return a VTGateConn object that can communicate with a VTGate. 
+type DialerFunc func(ctx context.Context, address string, timeout time.Duration) (Impl, error) var dialers = make(map[string]DialerFunc) @@ -97,17 +193,23 @@ func RegisterDialer(name string, dialer DialerFunc) { dialers[name] = dialer } -// GetDialer returns the dialer to use, described by the command line flag -func GetDialer() DialerFunc { - return GetDialerWithProtocol(*vtgateProtocol) +// DialProtocol dials a specific protocol, and returns the *VTGateConn +func DialProtocol(ctx context.Context, protocol string, address string, timeout time.Duration) (*VTGateConn, error) { + dialer, ok := dialers[protocol] + if !ok { + return nil, fmt.Errorf("no dialer registered for VTGate protocol %s", protocol) + } + impl, err := dialer(ctx, address, timeout) + if err != nil { + return nil, err + } + return &VTGateConn{ + impl: impl, + }, nil } -// GetDialerWithProtocol returns the dialer to use, described by the given protocol -func GetDialerWithProtocol(protocol string) DialerFunc { - td, ok := dialers[protocol] - if !ok { - log.Warningf("No dialer registered for VTGate protocol %s", protocol) - return nil - } - return td +// Dial dials using the command-line specified protocol, and returns +// the *VTGateConn. +func Dial(ctx context.Context, address string, timeout time.Duration) (*VTGateConn, error) { + return DialProtocol(ctx, *vtgateProtocol, address, timeout) } diff --git a/go/vt/vtgate/vtgateconn/vtgateconn_test.go b/go/vt/vtgate/vtgateconn/vtgateconn_test.go index e442b8982e..09857068e5 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn_test.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn_test.go @@ -12,7 +12,7 @@ import ( ) func TestRegisterDialer(t *testing.T) { - dialerFunc := func(context.Context, string, time.Duration) (VTGateConn, error) { + dialerFunc := func(context.Context, string, time.Duration) (Impl, error) { return nil, nil } RegisterDialer("test1", dialerFunc) @@ -20,17 +20,17 @@ func TestRegisterDialer(t *testing.T) { } func TestGetDialerWithProtocol(t *testing.T) { - var protocol = "test2" - var dialerFunc = GetDialerWithProtocol(protocol) - if dialerFunc != nil { - t.Fatalf("protocol: %s is not registered, should return nil", protocol) + protocol := "test2" + c, err := DialProtocol(context.Background(), protocol, "", 0) + if err == nil || err.Error() != "no dialer registered for VTGate protocol "+protocol { + t.Fatalf("protocol: %s is not registered, should return error: %v", protocol, err) } - RegisterDialer(protocol, func(context.Context, string, time.Duration) (VTGateConn, error) { + RegisterDialer(protocol, func(context.Context, string, time.Duration) (Impl, error) { return nil, nil }) - dialerFunc = GetDialerWithProtocol(protocol) - if dialerFunc == nil { - t.Fatalf("dialerFunc has been registered, should not get nil") + c, err = DialProtocol(context.Background(), protocol, "", 0) + if err != nil || c == nil { + t.Fatalf("dialerFunc has been registered, should not get nil: %v %v", err, c) } } diff --git a/go/vt/vtgate/vtgateconntest/client.go b/go/vt/vtgate/vtgateconntest/client.go index 6250d0b750..eab2200acf 100644 --- a/go/vt/vtgate/vtgateconntest/client.go +++ b/go/vt/vtgate/vtgateconntest/client.go @@ -12,6 +12,7 @@ import ( "reflect" "strings" "testing" + "time" mproto "github.com/youtube/vitess/go/mysql/proto" "github.com/youtube/vitess/go/sqltypes" @@ -220,7 +221,12 @@ func (f *fakeVTGateService) HandlePanic(err *error) { } // TestSuite runs all the tests -func TestSuite(t *testing.T, conn vtgateconn.VTGateConn, fakeServer vtgateservice.VTGateService) { +func 
TestSuite(t *testing.T, impl vtgateconn.Impl, fakeServer vtgateservice.VTGateService) { + vtgateconn.RegisterDialer("test", func(ctx context.Context, address string, timeout time.Duration) (vtgateconn.Impl, error) { + return impl, nil + }) + conn, _ := vtgateconn.DialProtocol(context.Background(), "test", "", 0) + testExecute(t, conn) testExecuteShard(t, conn) testStreamExecute(t, conn) @@ -245,7 +251,7 @@ func expectPanic(t *testing.T, err error) { } } -func testExecute(t *testing.T, conn vtgateconn.VTGateConn) { +func testExecute(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() execCase := execMap["request1"] qr, err := conn.Execute(ctx, execCase.execQuery.Sql, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) @@ -269,14 +275,14 @@ func testExecute(t *testing.T, conn vtgateconn.VTGateConn) { } } -func testExecutePanic(t *testing.T, conn vtgateconn.VTGateConn) { +func testExecutePanic(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() execCase := execMap["request1"] _, err := conn.Execute(ctx, execCase.execQuery.Sql, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) expectPanic(t, err) } -func testExecuteShard(t *testing.T, conn vtgateconn.VTGateConn) { +func testExecuteShard(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() execCase := execMap["request1"] qr, err := conn.ExecuteShard(ctx, execCase.execQuery.Sql, "ks", []string{"1", "2"}, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) @@ -300,14 +306,14 @@ func testExecuteShard(t *testing.T, conn vtgateconn.VTGateConn) { } } -func testExecuteShardPanic(t *testing.T, conn vtgateconn.VTGateConn) { +func testExecuteShardPanic(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() execCase := execMap["request1"] _, err := conn.ExecuteShard(ctx, execCase.execQuery.Sql, "ks", []string{"1", "2"}, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) expectPanic(t, err) } -func testStreamExecute(t *testing.T, conn vtgateconn.VTGateConn) { +func testStreamExecute(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() execCase := execMap["request1"] packets, errFunc := conn.StreamExecute(ctx, execCase.execQuery.Sql, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) @@ -352,7 +358,7 @@ func testStreamExecute(t *testing.T, conn vtgateconn.VTGateConn) { } } -func testStreamExecutePanic(t *testing.T, conn vtgateconn.VTGateConn) { +func testStreamExecutePanic(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() execCase := execMap["request1"] packets, errFunc := conn.StreamExecute(ctx, execCase.execQuery.Sql, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) @@ -363,7 +369,7 @@ func testStreamExecutePanic(t *testing.T, conn vtgateconn.VTGateConn) { expectPanic(t, err) } -func testTxPass(t *testing.T, conn vtgateconn.VTGateConn) { +func testTxPass(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() tx, err := conn.Begin(ctx) if err != nil { @@ -394,13 +400,13 @@ func testTxPass(t *testing.T, conn vtgateconn.VTGateConn) { } } -func testBeginPanic(t *testing.T, conn vtgateconn.VTGateConn) { +func testBeginPanic(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() _, err := conn.Begin(ctx) expectPanic(t, err) } -func testTxFail(t *testing.T, conn vtgateconn.VTGateConn) { +func testTxFail(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() tx, err := conn.Begin(ctx) if err != nil { @@ 
-446,7 +452,7 @@ func testTxFail(t *testing.T, conn vtgateconn.VTGateConn) {
 	}
 }
 
-func testSplitQuery(t *testing.T, conn vtgateconn.VTGateConn) {
+func testSplitQuery(t *testing.T, conn *vtgateconn.VTGateConn) {
 	ctx := context.Background()
 	qsl, err := conn.SplitQuery(ctx, splitQueryRequest.Keyspace, splitQueryRequest.Query, splitQueryRequest.SplitCount)
 	if err != nil {
@@ -457,7 +463,7 @@ func testSplitQuery(t *testing.T, conn vtgateconn.VTGateConn) {
 	}
 }
 
-func testSplitQueryPanic(t *testing.T, conn vtgateconn.VTGateConn) {
+func testSplitQueryPanic(t *testing.T, conn *vtgateconn.VTGateConn) {
 	ctx := context.Background()
 	_, err := conn.SplitQuery(ctx, splitQueryRequest.Keyspace, splitQueryRequest.Query, splitQueryRequest.SplitCount)
 	expectPanic(t, err)

From 54421665c4a0af13ca9771c12faaad8b5be329e9 Mon Sep 17 00:00:00 2001
From: Michael Berlin
Date: Wed, 27 May 2015 13:38:49 -0700
Subject: [PATCH 105/128] Process new "Flags" MySQL field when converting a result set to Go types.

This change breaks the previous behavior: Before, unsigned BIGINT fields
were always mapped to an int64 unless they were out of range for it. Now
BIGINT values are mapped to int64 or uint64, depending on whether the
UNSIGNED flag is set in the SQL schema.

Smaller MySQL integer types are mapped to int64 as before, regardless of
whether they are signed or unsigned.
---
 go/mysql/proto/structs.go | 29 ++---
 go/mysql/proto/structs_test.go | 102 +++++++++---------
 go/vt/client2/tablet/tclient.go | 4 +-
 go/vt/vtgate/router.go | 2 +-
 go/vt/vtgate/router_dml_test.go | 2 +-
 go/vt/vtgate/vindexes/lookup_hash.go | 4 +-
 .../vtgate/vindexes/lookup_hash_auto_test.go | 2 +-
 .../vindexes/lookup_hash_unique_auto_test.go | 2 +-
 go/vt/vtgate/vtgateconn/rows.go | 6 +-
 go/vt/vtgate/vtgateconn/rows_test.go | 44 +++++++-
 go/vt/worker/diff_utils.go | 5 +-
 go/vt/worker/diff_utils_test.go | 20 ++--
 12 files changed, 130 insertions(+), 92 deletions(-)

diff --git a/go/mysql/proto/structs.go b/go/mysql/proto/structs.go
index 293821d28a..4a68c53dd1 100644
--- a/go/mysql/proto/structs.go
+++ b/go/mysql/proto/structs.go
@@ -99,27 +99,28 @@ type Charset struct {
 // Convert takes a type and a value, and returns the type:
 // - nil for NULL value
-// - int64 if possible, otherwise, uint64
+// - uint64 for unsigned BIGINT values
+// - int64 for all other integer values (signed and unsigned)
 // - float64 for floating point values that fit in a float
 // - []byte for everything else
-// TODO(mberlin): Make this a method of "Field" and consider VT_UNSIGNED_FLAG in "Flags" as well.
-func Convert(mysqlType int64, val sqltypes.Value) (interface{}, error) {
+func Convert(field Field, val sqltypes.Value) (interface{}, error) {
 	if val.IsNull() {
 		return nil, nil
 	}
-	switch mysqlType {
-	case VT_TINY, VT_SHORT, VT_LONG, VT_LONGLONG, VT_INT24:
-		val := val.String()
-		signed, err := strconv.ParseInt(val, 0, 64)
-		if err == nil {
-			return signed, nil
+	switch field.Type {
+	case VT_LONGLONG:
+		if field.Flags&VT_UNSIGNED_FLAG == VT_UNSIGNED_FLAG {
+			return strconv.ParseUint(val.String(), 0, 64)
 		}
-		unsigned, err := strconv.ParseUint(val, 0, 64)
-		if err == nil {
-			return unsigned, nil
-		}
-		return nil, err
+		return strconv.ParseInt(val.String(), 0, 64)
+	case VT_TINY, VT_SHORT, VT_LONG, VT_INT24:
+		// Regardless of whether UNSIGNED_FLAG is set in field.Flags, we map all
+		// signed and unsigned values to a signed Go type because
+		// - Go doesn't officially support uint64 in its SQL interface
+		// - there is no loss of the value
+		// The only exception we make is for unsigned BIGINTs, see VT_LONGLONG above.
+		return strconv.ParseInt(val.String(), 0, 64)
 	case VT_FLOAT, VT_DOUBLE:
 		return strconv.ParseFloat(val.String(), 64)
 	}
diff --git a/go/mysql/proto/structs_test.go b/go/mysql/proto/structs_test.go
index 568e6b67f9..0639e5f820 100644
--- a/go/mysql/proto/structs_test.go
+++ b/go/mysql/proto/structs_test.go
@@ -12,81 +12,79 @@ import (
 func TestConvert(t *testing.T) {
 	cases := []struct {
-		Desc string
-		Typ  int64
-		Val  sqltypes.Value
-		Want interface{}
+		Field Field
+		Val   sqltypes.Value
+		Want  interface{}
 	}{{
-		Desc: "null",
-		Typ:  VT_LONG,
-		Val:  sqltypes.Value{},
-		Want: nil,
+		Field: Field{"null", VT_LONG, VT_ZEROVALUE_FLAG},
+		Val:   sqltypes.Value{},
+		Want:  nil,
 	}, {
-		Desc: "decimal",
-		Typ:  VT_DECIMAL,
-		Val:  sqltypes.MakeString([]byte("aa")),
-		Want: "aa",
+		Field: Field{"decimal", VT_DECIMAL, VT_ZEROVALUE_FLAG},
+		Val:   sqltypes.MakeString([]byte("aa")),
+		Want:  "aa",
 	}, {
-		Desc: "tiny",
-		Typ:  VT_TINY,
-		Val:  sqltypes.MakeString([]byte("1")),
+		Field: Field{"tiny", VT_TINY, VT_ZEROVALUE_FLAG},
+		Val:   sqltypes.MakeString([]byte("1")),
+		Want:  int64(1),
+	}, {
+		Field: Field{"short", VT_SHORT, VT_ZEROVALUE_FLAG},
+		Val:   sqltypes.MakeString([]byte("1")),
+		Want:  int64(1),
+	}, {
+		Field: Field{"long", VT_LONG, VT_ZEROVALUE_FLAG},
+		Val:   sqltypes.MakeString([]byte("1")),
+		Want:  int64(1),
+	}, {
+		Field: Field{"unsigned long", VT_LONG, VT_UNSIGNED_FLAG},
+		Val:   sqltypes.MakeString([]byte("1")),
+		// Unsigned types which aren't VT_LONGLONG are mapped to int64.
Want: int64(1), }, { - Desc: "short", - Typ: VT_SHORT, - Val: sqltypes.MakeString([]byte("1")), - Want: int64(1), + Field: Field{"longlong", VT_LONGLONG, VT_ZEROVALUE_FLAG}, + Val: sqltypes.MakeString([]byte("1")), + Want: int64(1), }, { - Desc: "long", - Typ: VT_LONG, - Val: sqltypes.MakeString([]byte("1")), - Want: int64(1), + Field: Field{"int24", VT_INT24, VT_ZEROVALUE_FLAG}, + Val: sqltypes.MakeString([]byte("1")), + Want: int64(1), }, { - Desc: "longlong", - Typ: VT_LONGLONG, - Val: sqltypes.MakeString([]byte("1")), - Want: int64(1), + Field: Field{"float", VT_FLOAT, VT_ZEROVALUE_FLAG}, + Val: sqltypes.MakeString([]byte("1")), + Want: float64(1), }, { - Desc: "int24", - Typ: VT_INT24, - Val: sqltypes.MakeString([]byte("1")), - Want: int64(1), + Field: Field{"double", VT_DOUBLE, VT_ZEROVALUE_FLAG}, + Val: sqltypes.MakeString([]byte("1")), + Want: float64(1), }, { - Desc: "float", - Typ: VT_FLOAT, - Val: sqltypes.MakeString([]byte("1")), - Want: float64(1), + Field: Field{"large int out of range for int64", VT_LONGLONG, VT_ZEROVALUE_FLAG}, + // 2^63, out of range for int64 + Val: sqltypes.MakeString([]byte("9223372036854775808")), + Want: `strconv.ParseInt: parsing "9223372036854775808": value out of range`, }, { - Desc: "double", - Typ: VT_DOUBLE, - Val: sqltypes.MakeString([]byte("1")), - Want: float64(1), - }, { - Desc: "large int", - Typ: VT_LONGLONG, + Field: Field{"large int", VT_LONGLONG, VT_UNSIGNED_FLAG}, + // 2^63, not out of range for uint64 Val: sqltypes.MakeString([]byte("9223372036854775808")), Want: uint64(9223372036854775808), }, { - Desc: "float for int", - Typ: VT_LONGLONG, - Val: sqltypes.MakeString([]byte("1.1")), - Want: `strconv.ParseUint: parsing "1.1": invalid syntax`, + Field: Field{"float for int", VT_LONGLONG, VT_ZEROVALUE_FLAG}, + Val: sqltypes.MakeString([]byte("1.1")), + Want: `strconv.ParseInt: parsing "1.1": invalid syntax`, }, { - Desc: "string for float", - Typ: VT_FLOAT, - Val: sqltypes.MakeString([]byte("aa")), - Want: `strconv.ParseFloat: parsing "aa": invalid syntax`, + Field: Field{"string for float", VT_FLOAT, VT_ZEROVALUE_FLAG}, + Val: sqltypes.MakeString([]byte("aa")), + Want: `strconv.ParseFloat: parsing "aa": invalid syntax`, }} for _, c := range cases { - r, err := Convert(c.Typ, c.Val) + r, err := Convert(c.Field, c.Val) if err != nil { r = err.Error() } else if _, ok := r.([]byte); ok { r = string(r.([]byte)) } if r != c.Want { - t.Errorf("%s: %+v, want %+v", c.Desc, r, c.Want) + t.Errorf("%s: %+v, want %+v", c.Field.Name, r, c.Want) } } } diff --git a/go/vt/client2/tablet/tclient.go b/go/vt/client2/tablet/tclient.go index 394edff5f0..4cd82e0901 100644 --- a/go/vt/client2/tablet/tclient.go +++ b/go/vt/client2/tablet/tclient.go @@ -257,7 +257,7 @@ func (result *Result) Next() (row []interface{}) { row = make([]interface{}, len(result.qr.Rows[result.index])) for i, v := range result.qr.Rows[result.index] { var err error - row[i], err = mproto.Convert(result.qr.Fields[i].Type, v) + row[i], err = mproto.Convert(result.qr.Fields[i], v) if err != nil { panic(err) // unexpected } @@ -311,7 +311,7 @@ func (sr *StreamResult) Next() (row []interface{}) { row = make([]interface{}, len(sr.qr.Rows[sr.index])) for i, v := range sr.qr.Rows[sr.index] { var err error - row[i], err = mproto.Convert(sr.columns.Fields[i].Type, v) + row[i], err = mproto.Convert(sr.columns.Fields[i], v) if err != nil { panic(err) // unexpected } diff --git a/go/vt/vtgate/router.go b/go/vt/vtgate/router.go index 5e371d078a..2c83ebae70 100644 --- a/go/vt/vtgate/router.go +++ 
b/go/vt/vtgate/router.go @@ -437,7 +437,7 @@ func (rtr *Router) deleteVindexEntries(vcursor *requestContext, plan *planbuilde for i, colVindex := range plan.Table.Owned { keys := make(map[interface{}]bool) for _, row := range result.Rows { - k, err := mproto.Convert(result.Fields[i].Type, row[i]) + k, err := mproto.Convert(result.Fields[i], row[i]) if err != nil { return err } diff --git a/go/vt/vtgate/router_dml_test.go b/go/vt/vtgate/router_dml_test.go index 6678983140..ecc01d7ce1 100644 --- a/go/vt/vtgate/router_dml_test.go +++ b/go/vt/vtgate/router_dml_test.go @@ -268,7 +268,7 @@ func TestDeleteVindexFail(t *testing.T) { }}, }}) _, err = routerExec(router, "delete from user where id = 1", nil) - want = `execDeleteEqual: strconv.ParseUint: parsing "foo": invalid syntax` + want = `execDeleteEqual: strconv.ParseInt: parsing "foo": invalid syntax` if err == nil || err.Error() != want { t.Errorf("routerExec: %v, want %v", err, want) } diff --git a/go/vt/vtgate/vindexes/lookup_hash.go b/go/vt/vtgate/vindexes/lookup_hash.go index 0cfc238476..c636995d72 100644 --- a/go/vt/vtgate/vindexes/lookup_hash.go +++ b/go/vt/vtgate/vindexes/lookup_hash.go @@ -243,7 +243,7 @@ func (lkp *lookup) Map1(vcursor planbuilder.VCursor, ids []interface{}) ([]key.K if len(result.Rows) != 1 { return nil, fmt.Errorf("lookup.Map: unexpected multiple results from vindex %s: %v", lkp.Table, id) } - inum, err := mproto.Convert(result.Fields[0].Type, result.Rows[0][0]) + inum, err := mproto.Convert(result.Fields[0], result.Rows[0][0]) if err != nil { return nil, fmt.Errorf("lookup.Map: %v", err) } @@ -272,7 +272,7 @@ func (lkp *lookup) Map2(vcursor planbuilder.VCursor, ids []interface{}) ([][]key } var ksids []key.KeyspaceId for _, row := range result.Rows { - inum, err := mproto.Convert(result.Fields[0].Type, row[0]) + inum, err := mproto.Convert(result.Fields[0], row[0]) if err != nil { return nil, fmt.Errorf("lookup.Map: %v", err) } diff --git a/go/vt/vtgate/vindexes/lookup_hash_auto_test.go b/go/vt/vtgate/vindexes/lookup_hash_auto_test.go index c10c19df5c..cff95bfdd6 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_auto_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_auto_test.go @@ -72,7 +72,7 @@ func TestLookupHashAutoMapBadData(t *testing.T) { } vc := &vcursor{result: result} _, err := lha.(planbuilder.NonUnique).Map(vc, []interface{}{1, int32(2)}) - want := `lookup.Map: strconv.ParseUint: parsing "1.1": invalid syntax` + want := `lookup.Map: strconv.ParseInt: parsing "1.1": invalid syntax` if err == nil || err.Error() != want { t.Errorf("lha.Map: %v, want %v", err, want) } diff --git a/go/vt/vtgate/vindexes/lookup_hash_unique_auto_test.go b/go/vt/vtgate/vindexes/lookup_hash_unique_auto_test.go index dd2e409664..f9cafcb2a7 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_unique_auto_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_unique_auto_test.go @@ -81,7 +81,7 @@ func TestLookupHashUniqueAutoMapBadData(t *testing.T) { } vc := &vcursor{result: result} _, err := lhua.(planbuilder.Unique).Map(vc, []interface{}{1, int32(2)}) - want := `lookup.Map: strconv.ParseUint: parsing "1.1": invalid syntax` + want := `lookup.Map: strconv.ParseInt: parsing "1.1": invalid syntax` if err == nil || err.Error() != want { t.Errorf("lhua.Map: %v, want %v", err, want) } diff --git a/go/vt/vtgate/vtgateconn/rows.go b/go/vt/vtgate/vtgateconn/rows.go index 681683e1b7..38aaba4684 100644 --- a/go/vt/vtgate/vtgateconn/rows.go +++ b/go/vt/vtgate/vtgateconn/rows.go @@ -46,6 +46,10 @@ func (ri *rows) Next(dest []driver.Value) error { return err } 
+// populateRow populates a row of data using the table's field descriptions.
+// The returned types for "dest" include the list from the interface
+// specification at https://golang.org/pkg/database/sql/driver/#Value
+// and in addition the type "uint64" for unsigned BIGINT MySQL records.
 func populateRow(dest []driver.Value, fields []mproto.Field, row []sqltypes.Value) error {
 	if len(dest) != len(fields) {
 		return fmt.Errorf("length mismatch: dest is %d, fields are %d", len(dest), len(fields))
@@ -55,7 +59,7 @@ func populateRow(dest []driver.Value, fields []mproto.Field, row []sqltypes.Valu
 	}
 	var err error
 	for i := range dest {
-		dest[i], err = mproto.Convert(fields[i].Type, row[i])
+		dest[i], err = mproto.Convert(fields[i], row[i])
 		if err != nil {
 			return fmt.Errorf("conversion error: field: %v, val: %v: %v", fields[i], row[i], err)
 		}
diff --git a/go/vt/vtgate/vtgateconn/rows_test.go b/go/vt/vtgate/vtgateconn/rows_test.go
index 88906fc700..695afc1b16 100644
--- a/go/vt/vtgate/vtgateconn/rows_test.go
+++ b/go/vt/vtgate/vtgateconn/rows_test.go
@@ -24,29 +24,59 @@ var result1 = mproto.QueryResult{
 			Name: "field3",
 			Type: mproto.VT_VAR_STRING,
 		},
+		// Unsigned types which are smaller than BIGINT will become an int64.
+		mproto.Field{
+			Name:  "field4",
+			Type:  mproto.VT_LONG,
+			Flags: mproto.VT_UNSIGNED_FLAG,
+		},
+		// Unsigned BIGINT (uint64) values must be mapped to uint64.
+		mproto.Field{
+			Name:  "field5",
+			Type:  mproto.VT_LONGLONG,
+			Flags: mproto.VT_UNSIGNED_FLAG,
+		},
 	},
-	RowsAffected: 3,
+	RowsAffected: 2,
 	InsertId:     0,
 	Rows: [][]sqltypes.Value{
 		[]sqltypes.Value{
 			sqltypes.MakeString([]byte("1")),
 			sqltypes.MakeString([]byte("1.1")),
 			sqltypes.MakeString([]byte("value1")),
+			sqltypes.MakeString([]byte("2147483647")), // 2^31-1, NOT out of range for int32 => should become int64
+			sqltypes.MakeString([]byte("9223372036854775807")), // 2^63-1, NOT out of range for int64
 		},
 		[]sqltypes.Value{
 			sqltypes.MakeString([]byte("2")),
 			sqltypes.MakeString([]byte("2.2")),
 			sqltypes.MakeString([]byte("value2")),
+			sqltypes.MakeString([]byte("4294967295")), // 2^32-1, out of range for int32 => should become int64
+			sqltypes.MakeString([]byte("18446744073709551615")), // 2^64-1, out of range for int64
 		},
 	},
 }
+
+func logMismatchedTypes(t *testing.T, gotRow, wantRow []driver.Value) {
+	for i := 1; i < len(wantRow); i++ {
+		got := gotRow[i]
+		want := wantRow[i]
+		v1 := reflect.ValueOf(got)
+		v2 := reflect.ValueOf(want)
+		if v1.Type() != v2.Type() {
+			t.Errorf("Wrong type: field: %d got: %T want: %T", i+1, got, want)
+		}
+	}
+}
+
 func TestRows(t *testing.T) {
 	ri := NewRows(&result1)
 	wantCols := []string{
 		"field1",
 		"field2",
 		"field3",
+		"field4",
+		"field5",
 	}
 	gotCols := ri.Columns()
 	if !reflect.DeepEqual(gotCols, wantCols) {
@@ -57,20 +87,25 @@ func TestRows(t *testing.T) {
 		int64(1),
 		float64(1.1),
 		[]byte("value1"),
+		int64(2147483647),
+		uint64(9223372036854775807),
 	}
-	gotRow := make([]driver.Value, 3)
+	gotRow := make([]driver.Value, len(wantRow))
 	err := ri.Next(gotRow)
 	if err != nil {
 		t.Error(err)
 	}
 	if !reflect.DeepEqual(gotRow, wantRow) {
-		t.Errorf("row1: %v, want %v", gotRow, wantRow)
+		t.Errorf("row1: %v, want %v type: %T", gotRow, wantRow, wantRow[3])
+		logMismatchedTypes(t, gotRow, wantRow)
 	}
 
 	wantRow = []driver.Value{
 		int64(2),
 		float64(2.2),
 		[]byte("value2"),
+		int64(4294967295),
+		uint64(18446744073709551615),
 	}
 	err = ri.Next(gotRow)
 	if err != nil {
@@ -78,6 +113,7 @@ func TestRows(t *testing.T) {
 	}
 	if !reflect.DeepEqual(gotRow, wantRow) {
 		t.Errorf("row1: %v, want %v", gotRow, wantRow)
+		logMismatchedTypes(t, gotRow, 
wantRow) } err = ri.Next(gotRow) @@ -131,7 +167,7 @@ func TestRowsFail(t *testing.T) { ri = NewRows(&badResult2) dest = make([]driver.Value, 1) err = ri.Next(dest) - want = `conversion error: field: {field1 3 0}, val: value: strconv.ParseUint: parsing "value": invalid syntax` + want = `conversion error: field: {field1 3 0}, val: value: strconv.ParseInt: parsing "value": invalid syntax` if err == nil || err.Error() != want { t.Errorf("Next: %v, want %s", err, want) } diff --git a/go/vt/worker/diff_utils.go b/go/vt/worker/diff_utils.go index 31aafe74b7..617ec4e017 100644 --- a/go/vt/worker/diff_utils.go +++ b/go/vt/worker/diff_utils.go @@ -263,12 +263,11 @@ func RowsEqual(left, right []sqltypes.Value) int { // TODO: This can panic if types for left and right don't match. func CompareRows(fields []mproto.Field, compareCount int, left, right []sqltypes.Value) (int, error) { for i := 0; i < compareCount; i++ { - fieldType := fields[i].Type - lv, err := mproto.Convert(fieldType, left[i]) + lv, err := mproto.Convert(fields[i], left[i]) if err != nil { return 0, err } - rv, err := mproto.Convert(fieldType, right[i]) + rv, err := mproto.Convert(fields[i], right[i]) if err != nil { return 0, err } diff --git a/go/vt/worker/diff_utils_test.go b/go/vt/worker/diff_utils_test.go index ea3da3d39a..8bf206c57f 100644 --- a/go/vt/worker/diff_utils_test.go +++ b/go/vt/worker/diff_utils_test.go @@ -50,15 +50,15 @@ func TestCompareRows(t *testing.T) { want int }{ { - fields: []mproto.Field{{Name: "a", Type: mproto.VT_LONG}}, + fields: []mproto.Field{{"a", mproto.VT_LONG, mproto.VT_ZEROVALUE_FLAG}}, left: []sqltypes.Value{{sqltypes.Numeric("123")}}, right: []sqltypes.Value{{sqltypes.Numeric("14")}}, want: 1, }, { fields: []mproto.Field{ - {Name: "a", Type: mproto.VT_LONG}, - {Name: "b", Type: mproto.VT_LONG}, + {"a", mproto.VT_LONG, mproto.VT_ZEROVALUE_FLAG}, + {"b", mproto.VT_LONG, mproto.VT_ZEROVALUE_FLAG}, }, left: []sqltypes.Value{ {sqltypes.Numeric("555")}, @@ -71,43 +71,43 @@ func TestCompareRows(t *testing.T) { want: -1, }, { - fields: []mproto.Field{{Name: "a", Type: mproto.VT_LONG}}, + fields: []mproto.Field{{"a", mproto.VT_LONG, mproto.VT_ZEROVALUE_FLAG}}, left: []sqltypes.Value{{sqltypes.Numeric("144")}}, right: []sqltypes.Value{{sqltypes.Numeric("144")}}, want: 0, }, { - fields: []mproto.Field{{Name: "a", Type: mproto.VT_LONGLONG}}, + fields: []mproto.Field{{"a", mproto.VT_LONGLONG, mproto.VT_UNSIGNED_FLAG}}, left: []sqltypes.Value{{sqltypes.Numeric("9223372036854775809")}}, right: []sqltypes.Value{{sqltypes.Numeric("9223372036854775810")}}, want: -1, }, { - fields: []mproto.Field{{Name: "a", Type: mproto.VT_LONGLONG}}, + fields: []mproto.Field{{"a", mproto.VT_LONGLONG, mproto.VT_UNSIGNED_FLAG}}, left: []sqltypes.Value{{sqltypes.Numeric("9223372036854775819")}}, right: []sqltypes.Value{{sqltypes.Numeric("9223372036854775810")}}, want: 1, }, { - fields: []mproto.Field{{Name: "a", Type: mproto.VT_DOUBLE}}, + fields: []mproto.Field{{"a", mproto.VT_DOUBLE, mproto.VT_ZEROVALUE_FLAG}}, left: []sqltypes.Value{{sqltypes.Fractional("3.14")}}, right: []sqltypes.Value{{sqltypes.Fractional("3.2")}}, want: -1, }, { - fields: []mproto.Field{{Name: "a", Type: mproto.VT_DOUBLE}}, + fields: []mproto.Field{{"a", mproto.VT_DOUBLE, mproto.VT_ZEROVALUE_FLAG}}, left: []sqltypes.Value{{sqltypes.Fractional("123.4")}}, right: []sqltypes.Value{{sqltypes.Fractional("123.2")}}, want: 1, }, { - fields: []mproto.Field{{Name: "a", Type: mproto.VT_STRING}}, + fields: []mproto.Field{{"a", mproto.VT_STRING, 
mproto.VT_ZEROVALUE_FLAG}}, left: []sqltypes.Value{{sqltypes.String("abc")}}, right: []sqltypes.Value{{sqltypes.String("abb")}}, want: 1, }, { - fields: []mproto.Field{{Name: "a", Type: mproto.VT_STRING}}, + fields: []mproto.Field{{"a", mproto.VT_STRING, mproto.VT_ZEROVALUE_FLAG}}, left: []sqltypes.Value{{sqltypes.String("abc")}}, right: []sqltypes.Value{{sqltypes.String("abd")}}, want: -1, From d028b15eb9c56b7368c2881fb77fb5eff344e470 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 16:12:22 -0700 Subject: [PATCH 106/128] Removing a couple uses of vquery, soon to be gone. --- test/tablet.py | 2 +- test/tabletmanager.py | 12 ++++++------ test/vertical_split.py | 15 ++++++++++----- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/test/tablet.py b/test/tablet.py index 4cb2621062..30b4fb43ae 100644 --- a/test/tablet.py +++ b/test/tablet.py @@ -329,7 +329,7 @@ class Tablet(object): expected_state = 'NOT_SERVING' self.start_vttablet(wait_for_state=expected_state, **kwargs) - def conn(self): + def conn(self, user=None, password=None): conn = tablet.TabletConnection( 'localhost:%d' % self.port, self.tablet_type, self.keyspace, self.shard, 30) diff --git a/test/tabletmanager.py b/test/tabletmanager.py index 23e15118d1..a577634a03 100755 --- a/test/tabletmanager.py +++ b/test/tabletmanager.py @@ -239,12 +239,12 @@ class TestTabletManager(unittest.TestCase): tablet_62344.start_vttablet(auth=True) utils.run_vtctl(['SetReadWrite', tablet_62344.tablet_alias]) - out, err = tablet_62344.vquery('select * from vt_select_test', - path='test_keyspace/0', verbose=True, - user='ala', password=r'ma kota') - logging.debug("Got rows: " + err) - if 'Row count: 4' not in err: - self.fail("query didn't go through: %s, %s" % (err, out)) + # make sure we can connect using secure connection + conn = tablet_62344.conn(user='ala', password=r'ma kota') + results, rowcount, lastrowid, fields = conn._execute('select * from vt_select_test', {}) + logging.debug("Got results: %s", str(results)) + self.assertEqual(len(results), 4, 'got wrong result length: %s' % str(results)) + conn.close() tablet_62344.kill_vttablet() # TODO(szopa): Test that non-authenticated queries do not pass diff --git a/test/vertical_split.py b/test/vertical_split.py index 36266382ea..83aaf8f41c 100755 --- a/test/vertical_split.py +++ b/test/vertical_split.py @@ -11,6 +11,7 @@ import time import unittest from zk import zkocc +from vtdb import dbexceptions from vtdb import topology from vtdb import vtclient @@ -206,16 +207,20 @@ index by_msg (msg) # check we can or cannot access the tables utils.run_vtctl(['ReloadSchema', tablet.tablet_alias]) + conn = tablet.conn() for t in ["moving1", "moving2"]: if expected and "moving.*" in expected: # table is blacklisted, should get the error - out, err = tablet.vquery("select count(1) from %s" % t, - path='source_keyspace/0', raise_on_error=False) - self.assertTrue(err.find("retry: Query disallowed due to rule: enforce blacklisted tables") != -1, "Cannot find the right error message in query for blacklisted table: out=\n%serr=\n%s" % (out, err)) + try: + results, rowcount, lastrowid, fields = conn._execute("select count(1) from %s" % t, {}) + self.fail("blacklisted query execution worked") + except dbexceptions.RetryError as e: + self.assertTrue(str(e).find("retry: Query disallowed due to rule: enforce blacklisted tables") != -1, "Cannot find the right error message in query for blacklisted table: %s" % e) else: # table is not blacklisted, should just work - tablet.vquery("select 
count(1) from %s" % t, path='source_keyspace/0') - + results, rowcount, lastrowid, fields = conn._execute("select count(1) from %s" % t, {}) + logging.debug("Got %d rows from table %s on tablet %s", results[0][0], t, tablet.tablet_alias) + conn.close() def _populate_topo_cache(self): topology.read_topology(self.vtgate_client) From 4946bf4288f0edc858a7c85ae24368ac086c4826 Mon Sep 17 00:00:00 2001 From: Shengzhe Yao Date: Wed, 27 May 2015 16:02:06 -0700 Subject: [PATCH 107/128] add context to schemamanager --- go/cmd/vtctld/vtctld.go | 2 + go/vt/schemamanager/local_controller.go | 25 ++++----- go/vt/schemamanager/local_controller_test.go | 55 ++++++++++---------- go/vt/schemamanager/plain_controller.go | 16 +++--- go/vt/schemamanager/plain_controller_test.go | 17 +++--- go/vt/schemamanager/schemamanager.go | 43 +++++++-------- go/vt/schemamanager/schemamanager_test.go | 41 ++++++++++----- go/vt/schemamanager/tablet_executor.go | 27 +++++----- go/vt/schemamanager/tablet_executor_test.go | 29 ++++++----- go/vt/schemamanager/ui_controller.go | 15 +++--- go/vt/schemamanager/ui_controller_test.go | 18 ++++--- go/vt/wrangler/schema.go | 1 + 12 files changed, 161 insertions(+), 128 deletions(-) diff --git a/go/cmd/vtctld/vtctld.go b/go/cmd/vtctld/vtctld.go index 9e884d8b83..191290c047 100644 --- a/go/cmd/vtctld/vtctld.go +++ b/go/cmd/vtctld/vtctld.go @@ -501,6 +501,7 @@ func main() { ts) schemamanager.Run( + context.Background(), schemamanager.NewUIController(sqlStr, keyspace, w), executor, ) @@ -528,6 +529,7 @@ func main() { } err = schemamanager.Run( + context.Background(), controller, schemamanager.NewTabletExecutor( tmclient.NewTabletManagerClient(), ts), diff --git a/go/vt/schemamanager/local_controller.go b/go/vt/schemamanager/local_controller.go index 1306e4d8f2..4e5c27a1e8 100644 --- a/go/vt/schemamanager/local_controller.go +++ b/go/vt/schemamanager/local_controller.go @@ -13,6 +13,7 @@ import ( "time" log "github.com/golang/glog" + "golang.org/x/net/context" ) // LocalController listens to the specified schema change dir and applies schema changes. @@ -61,7 +62,7 @@ func NewLocalController(schemaChangeDir string) *LocalController { // Open goes through the schema change dir and find a keyspace with a pending // schema change. -func (controller *LocalController) Open() error { +func (controller *LocalController) Open(ctx context.Context) error { // find all keyspace directories. fileInfos, err := ioutil.ReadDir(controller.schemaChangeDir) if err != nil { @@ -101,7 +102,7 @@ func (controller *LocalController) Open() error { } // Read reads schema changes. 
-func (controller *LocalController) Read() ([]string, error) { +func (controller *LocalController) Read(ctx context.Context) ([]string, error) { if controller.keyspace == "" || controller.sqlPath == "" { return []string{}, nil } @@ -128,30 +129,30 @@ func (controller *LocalController) Close() { } // OnReadSuccess is no-op -func (controller *LocalController) OnReadSuccess() error { +func (controller *LocalController) OnReadSuccess(ctx context.Context) error { return nil } // OnReadFail is no-op -func (controller *LocalController) OnReadFail(err error) error { +func (controller *LocalController) OnReadFail(ctx context.Context, err error) error { log.Errorf("failed to read file: %s, error: %v", controller.sqlPath, err) return nil } // OnValidationSuccess is no-op -func (controller *LocalController) OnValidationSuccess() error { +func (controller *LocalController) OnValidationSuccess(ctx context.Context) error { return nil } // OnValidationFail is no-op -func (controller *LocalController) OnValidationFail(err error) error { - return controller.moveToErrorDir() +func (controller *LocalController) OnValidationFail(ctx context.Context, err error) error { + return controller.moveToErrorDir(ctx) } // OnExecutorComplete is no-op -func (controller *LocalController) OnExecutorComplete(result *ExecuteResult) error { +func (controller *LocalController) OnExecutorComplete(ctx context.Context, result *ExecuteResult) error { if len(result.FailedShards) > 0 || result.ExecutorErr != "" { - return controller.moveToErrorDir() + return controller.moveToErrorDir(ctx) } if err := os.MkdirAll(controller.completeDir, os.ModePerm); err != nil { return err @@ -160,7 +161,7 @@ func (controller *LocalController) OnExecutorComplete(result *ExecuteResult) err return err } - if err := controller.writeToLogDir(result); err != nil { + if err := controller.writeToLogDir(ctx, result); err != nil { return err } @@ -169,7 +170,7 @@ func (controller *LocalController) OnExecutorComplete(result *ExecuteResult) err path.Join(controller.completeDir, controller.sqlFilename)) } -func (controller *LocalController) moveToErrorDir() error { +func (controller *LocalController) moveToErrorDir(ctx context.Context) error { if err := os.MkdirAll(controller.errorDir, os.ModePerm); err != nil { return err } @@ -178,7 +179,7 @@ func (controller *LocalController) moveToErrorDir() error { path.Join(controller.errorDir, controller.sqlFilename)) } -func (controller *LocalController) writeToLogDir(result *ExecuteResult) error { +func (controller *LocalController) writeToLogDir(ctx context.Context, result *ExecuteResult) error { logFile, err := os.Create(path.Join(controller.logDir, controller.sqlFilename)) if err != nil { return err diff --git a/go/vt/schemamanager/local_controller_test.go b/go/vt/schemamanager/local_controller_test.go index 9df601ec41..be43317bce 100644 --- a/go/vt/schemamanager/local_controller_test.go +++ b/go/vt/schemamanager/local_controller_test.go @@ -14,6 +14,7 @@ import ( "testing" mproto "github.com/youtube/vitess/go/mysql/proto" + "golang.org/x/net/context" ) func TestLocalControllerNoSchemaChanges(t *testing.T) { @@ -23,11 +24,12 @@ func TestLocalControllerNoSchemaChanges(t *testing.T) { t.Fatalf("failed to create temp schema change dir, error: %v", err) } controller := NewLocalController(schemaChangeDir) - if err := controller.Open(); err != nil { + ctx := context.Background() + if err := controller.Open(ctx); err != nil { t.Fatalf("Open should succeed, but got error: %v", err) } defer controller.Close() - data, err := 
controller.Read() + data, err := controller.Read(ctx) if err != nil { t.Fatalf("Read should succeed, but got error: %v", err) } @@ -38,8 +40,9 @@ func TestLocalControllerNoSchemaChanges(t *testing.T) { func TestLocalControllerOpen(t *testing.T) { controller := NewLocalController("") + ctx := context.Background() - if err := controller.Open(); err == nil { + if err := controller.Open(ctx); err == nil { t.Fatalf("Open should fail, no such dir") } @@ -53,10 +56,10 @@ func TestLocalControllerOpen(t *testing.T) { } controller = NewLocalController(schemaChangeDir) - if err := controller.Open(); err != nil { + if err := controller.Open(ctx); err != nil { t.Fatalf("Open should succeed") } - data, err := controller.Read() + data, err := controller.Read(ctx) if err != nil { t.Fatalf("Read should succeed, but got error: %v", err) } @@ -71,10 +74,10 @@ func TestLocalControllerOpen(t *testing.T) { } controller = NewLocalController(schemaChangeDir) - if err := controller.Open(); err != nil { + if err := controller.Open(ctx); err != nil { t.Fatalf("Open should succeed") } - data, err = controller.Read() + data, err = controller.Read(ctx) if err != nil { t.Fatalf("Read should succeed, but got error: %v", err) } @@ -110,13 +113,15 @@ func TestLocalControllerSchemaChange(t *testing.T) { file.Close() controller := NewLocalController(schemaChangeDir) - if err := controller.Open(); err != nil { + ctx := context.Background() + + if err := controller.Open(ctx); err != nil { t.Fatalf("Open should succeed, but got error: %v", err) } defer controller.Close() - data, err := controller.Read() + data, err := controller.Read(ctx) if err != nil { t.Fatalf("Read should succeed, but got error: %v", err) } @@ -131,24 +136,24 @@ func TestLocalControllerSchemaChange(t *testing.T) { } // test various callbacks - if err := controller.OnReadSuccess(); err != nil { + if err := controller.OnReadSuccess(ctx); err != nil { t.Fatalf("OnReadSuccess should succeed, but got error: %v", err) } - if err := controller.OnReadFail(fmt.Errorf("read fail")); err != nil { + if err := controller.OnReadFail(ctx, fmt.Errorf("read fail")); err != nil { t.Fatalf("OnReadFail should succeed, but got error: %v", err) } errorPath := path.Join(controller.errorDir, controller.sqlFilename) - if err := controller.OnValidationSuccess(); err != nil { + if err := controller.OnValidationSuccess(ctx); err != nil { t.Fatalf("OnReadSuccess should succeed, but got error: %v", err) } // move sql file from error dir to input dir for OnValidationFail test os.Rename(errorPath, controller.sqlPath) - if err := controller.OnValidationFail(fmt.Errorf("validation fail")); err != nil { + if err := controller.OnValidationFail(ctx, fmt.Errorf("validation fail")); err != nil { t.Fatalf("OnValidationFail should succeed, but got error: %v", err) } @@ -161,16 +166,14 @@ func TestLocalControllerSchemaChange(t *testing.T) { result := &ExecuteResult{ Sqls: []string{"create table test_table (id int)"}, - SuccessShards: []ShardResult{ - ShardResult{ - Shard: "0", - Result: &mproto.QueryResult{}, - }, - }, + SuccessShards: []ShardResult{{ + Shard: "0", + Result: &mproto.QueryResult{}, + }}, } logPath := path.Join(controller.logDir, controller.sqlFilename) completePath := path.Join(controller.completeDir, controller.sqlFilename) - if err := controller.OnExecutorComplete(result); err != nil { + if err := controller.OnExecutorComplete(ctx, result); err != nil { t.Fatalf("OnExecutorComplete should succeed, but got error: %v", err) } if _, err := os.Stat(completePath); os.IsNotExist(err) { 
@@ -186,15 +189,13 @@ func TestLocalControllerSchemaChange(t *testing.T) { result = &ExecuteResult{ Sqls: []string{"create table test_table (id int)"}, - FailedShards: []ShardWithError{ - ShardWithError{ - Shard: "0", - Err: "execute error", - }, - }, + FailedShards: []ShardWithError{{ + Shard: "0", + Err: "execute error", + }}, } - if err := controller.OnExecutorComplete(result); err != nil { + if err := controller.OnExecutorComplete(ctx, result); err != nil { t.Fatalf("OnExecutorComplete should succeed, but got error: %v", err) } diff --git a/go/vt/schemamanager/plain_controller.go b/go/vt/schemamanager/plain_controller.go index 40c7185487..db1b0c7542 100644 --- a/go/vt/schemamanager/plain_controller.go +++ b/go/vt/schemamanager/plain_controller.go @@ -8,6 +8,8 @@ import ( "encoding/json" "fmt" "strings" + + "golang.org/x/net/context" ) // PlainController implements Controller interface. @@ -32,12 +34,12 @@ func NewPlainController(sqlStr string, keyspace string) *PlainController { } // Open is a no-op. -func (controller *PlainController) Open() error { +func (controller *PlainController) Open(ctx context.Context) error { return nil } // Read reads schema changes -func (controller *PlainController) Read() ([]string, error) { +func (controller *PlainController) Read(ctx context.Context) ([]string, error) { return controller.sqls, nil } @@ -52,31 +54,31 @@ func (controller *PlainController) Keyspace() string { // OnReadSuccess is called when schemamanager successfully // reads all sql statements. -func (controller *PlainController) OnReadSuccess() error { +func (controller *PlainController) OnReadSuccess(ctx context.Context) error { fmt.Println("Successfully read all schema changes.") return nil } // OnReadFail is called when schemamanager fails to read all sql statements. -func (controller *PlainController) OnReadFail(err error) error { +func (controller *PlainController) OnReadFail(ctx context.Context, err error) error { fmt.Printf("Failed to read schema changes, error: %v\n", err) return err } // OnValidationSuccess is called when schemamanager successfully validates all sql statements. -func (controller *PlainController) OnValidationSuccess() error { +func (controller *PlainController) OnValidationSuccess(ctx context.Context) error { fmt.Println("Successfully validate all sqls.") return nil } // OnValidationFail is called when schemamanager fails to validate sql statements. -func (controller *PlainController) OnValidationFail(err error) error { +func (controller *PlainController) OnValidationFail(ctx context.Context, err error) error { fmt.Printf("Failed to validate sqls, error: %v\n", err) return err } // OnExecutorComplete is called when schemamanager finishes applying schema changes. 
-func (controller *PlainController) OnExecutorComplete(result *ExecuteResult) error { +func (controller *PlainController) OnExecutorComplete(ctx context.Context, result *ExecuteResult) error { out, _ := json.MarshalIndent(result, "", " ") fmt.Printf("Executor finished, result: %s\n", string(out)) return nil diff --git a/go/vt/schemamanager/plain_controller_test.go b/go/vt/schemamanager/plain_controller_test.go index 9a3d9554d1..09f1a71d46 100644 --- a/go/vt/schemamanager/plain_controller_test.go +++ b/go/vt/schemamanager/plain_controller_test.go @@ -7,12 +7,15 @@ package schemamanager import ( "fmt" "testing" + + "golang.org/x/net/context" ) func TestPlainController(t *testing.T) { sql := "CREATE TABLE test_table (pk int)" controller := NewPlainController(sql, "test_keyspace") - err := controller.Open() + ctx := context.Background() + err := controller.Open(ctx) if err != nil { t.Fatalf("controller.Open should succeed, but got error: %v", err) } @@ -22,7 +25,7 @@ func TestPlainController(t *testing.T) { t.Fatalf("expect to get keyspace: 'test_keyspace', but got keyspace: '%s'", keyspace) } - sqls, err := controller.Read() + sqls, err := controller.Read(ctx) if err != nil { t.Fatalf("controller.Read should succeed, but got error: %v", err) } @@ -33,29 +36,29 @@ func TestPlainController(t *testing.T) { t.Fatalf("expect to get sql: '%s', but got: '%s'", sql, sqls[0]) } defer controller.Close() - err = controller.OnReadSuccess() + err = controller.OnReadSuccess(ctx) if err != nil { t.Fatalf("OnDataSourcerReadSuccess should succeed") } errReadFail := fmt.Errorf("read fail") - err = controller.OnReadFail(errReadFail) + err = controller.OnReadFail(ctx, errReadFail) if err != errReadFail { t.Fatalf("should get error:%v, but get: %v", errReadFail, err) } - err = controller.OnValidationSuccess() + err = controller.OnValidationSuccess(ctx) if err != nil { t.Fatalf("OnValidationSuccess should succeed") } errValidationFail := fmt.Errorf("validation fail") - err = controller.OnValidationFail(errValidationFail) + err = controller.OnValidationFail(ctx, errValidationFail) if err != errValidationFail { t.Fatalf("should get error:%v, but get: %v", errValidationFail, err) } - err = controller.OnExecutorComplete(&ExecuteResult{}) + err = controller.OnExecutorComplete(ctx, &ExecuteResult{}) if err != nil { t.Fatalf("OnExecutorComplete should succeed") } diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go index 30ac7a14e6..ead43529ed 100644 --- a/go/vt/schemamanager/schemamanager.go +++ b/go/vt/schemamanager/schemamanager.go @@ -11,6 +11,7 @@ import ( log "github.com/golang/glog" mproto "github.com/youtube/vitess/go/mysql/proto" + "golang.org/x/net/context" ) const ( @@ -33,22 +34,22 @@ var ( // certain keyspace and also handling various events happened during schema // change. 
type Controller interface {
-	Open() error
-	Read() (sqls []string, err error)
+	Open(ctx context.Context) error
+	Read(ctx context.Context) (sqls []string, err error)
 	Close()
 	Keyspace() string
-	OnReadSuccess() error
-	OnReadFail(err error) error
-	OnValidationSuccess() error
-	OnValidationFail(err error) error
-	OnExecutorComplete(*ExecuteResult) error
+	OnReadSuccess(ctx context.Context) error
+	OnReadFail(ctx context.Context, err error) error
+	OnValidationSuccess(ctx context.Context) error
+	OnValidationFail(ctx context.Context, err error) error
+	OnExecutorComplete(ctx context.Context, result *ExecuteResult) error
 }
 
 // Executor applies schema changes to underlying system
 type Executor interface {
-	Open(keyspace string) error
-	Validate(sqls []string) error
-	Execute(sqls []string) *ExecuteResult
+	Open(ctx context.Context, keyspace string) error
+	Validate(ctx context.Context, sqls []string) error
+	Execute(ctx context.Context, sqls []string) *ExecuteResult
 	Close()
 }
 
@@ -75,39 +76,39 @@ type ShardResult struct {
 }
 
 // Run schema changes on Vitess through VtGate
-func Run(controller Controller, executor Executor) error {
-	if err := controller.Open(); err != nil {
+func Run(ctx context.Context, controller Controller, executor Executor) error {
+	if err := controller.Open(ctx); err != nil {
 		log.Errorf("failed to open data sourcer: %v", err)
 		return err
 	}
 	defer controller.Close()
-	sqls, err := controller.Read()
+	sqls, err := controller.Read(ctx)
 	if err != nil {
 		log.Errorf("failed to read data from data sourcer: %v", err)
-		controller.OnReadFail(err)
+		controller.OnReadFail(ctx, err)
 		return err
 	}
-	controller.OnReadSuccess()
+	controller.OnReadSuccess(ctx)
 	keyspace := controller.Keyspace()
-	if err := executor.Open(keyspace); err != nil {
+	if err := executor.Open(ctx, keyspace); err != nil {
 		log.Errorf("failed to open executor: %v", err)
 		return err
 	}
 	defer executor.Close()
-	if err := executor.Validate(sqls); err != nil {
+	if err := executor.Validate(ctx, sqls); err != nil {
 		log.Errorf("validation fail: %v", err)
-		controller.OnValidationFail(err)
+		controller.OnValidationFail(ctx, err)
 		return err
 	}
-	if err := controller.OnValidationSuccess(); err != nil {
+	if err := controller.OnValidationSuccess(ctx); err != nil {
 		return err
 	}
-	result := executor.Execute(sqls)
+	result := executor.Execute(ctx, sqls)
 
-	if err := controller.OnExecutorComplete(result); err != nil {
+	if err := controller.OnExecutorComplete(ctx, result); err != nil {
 		return err
 	}
 	if result.ExecutorErr != "" || len(result.FailedShards) > 0 {
diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go
index f85909bd27..64f717c16f 100644
--- a/go/vt/schemamanager/schemamanager_test.go
+++ b/go/vt/schemamanager/schemamanager_test.go
@@ -27,7 +27,9 @@ var (
 func TestSchemaManagerControllerOpenFail(t *testing.T) {
 	controller := newFakeController(
 		[]string{"select * from test_db"}, true, false, false)
-	err := Run(controller, newFakeExecutor())
+	ctx := context.Background()
+
+	err := Run(ctx, controller, newFakeExecutor())
 	if err != errControllerOpen {
 		t.Fatalf("controller.Open fail, should get error: %v, but got error: %v",
 			errControllerOpen, err)
@@ -37,7 +39,8 @@ func TestSchemaManagerControllerOpenFail(t *testing.T) {
 func TestSchemaManagerControllerReadFail(t *testing.T) {
 	controller := newFakeController(
 		[]string{"select * from test_db"}, false, true, false)
-	err := Run(controller, newFakeExecutor())
+	ctx := context.Background()
+	err := Run(ctx, controller, newFakeExecutor())
if err != errControllerRead {
 		t.Fatalf("controller.Read fail, should get error: %v, but got error: %v",
 			errControllerRead, err)
@@ -50,7 +53,9 @@ func TestSchemaManagerControllerReadFail(t *testing.T) {
 func TestSchemaManagerValidationFail(t *testing.T) {
 	controller := newFakeController(
 		[]string{"invalid sql"}, false, false, false)
-	err := Run(controller, newFakeExecutor())
+	ctx := context.Background()
+
+	err := Run(ctx, controller, newFakeExecutor())
 	if err == nil {
 		t.Fatalf("run schema change should fail due to executor.Validate fail")
 	}
@@ -63,7 +68,9 @@ func TestSchemaManagerExecutorOpenFail(t *testing.T) {
 	executor := NewTabletExecutor(
 		newFakeTabletManagerClient(),
 		newFakeTopo())
-	err := Run(controller, executor)
+	ctx := context.Background()
+
+	err := Run(ctx, controller, executor)
 	if err == nil {
 		t.Fatalf("run schema change should fail due to executor.Open fail")
 	}
@@ -75,7 +82,9 @@ func TestSchemaManagerExecutorExecuteFail(t *testing.T) {
 	executor := NewTabletExecutor(
 		newFakeTabletManagerClient(),
 		newFakeTopo())
-	err := Run(controller, executor)
+	ctx := context.Background()
+
+	err := Run(ctx, controller, executor)
 	if err == nil {
 		t.Fatalf("run schema change should fail due to executor.Execute fail")
 	}
@@ -106,7 +115,9 @@ func TestSchemaManagerRun(t *testing.T) {
 		fakeTmc,
 		newFakeTopo())
 
-	err := Run(controller, executor)
+	ctx := context.Background()
+	err := Run(ctx, controller, executor)
+
 	if err != nil {
 		t.Fatalf("schema change should succeed but got error: %v", err)
 	}
@@ -149,7 +160,9 @@ func TestSchemaManagerExecutorFail(t *testing.T) {
 	fakeTmc.EnableExecuteFetchAsDbaError = true
 	executor := NewTabletExecutor(fakeTmc, newFakeTopo())
 
-	err := Run(controller, executor)
+	ctx := context.Background()
+	err := Run(ctx, controller, executor)
+
 	if err == nil {
 		t.Fatalf("schema change should fail")
 	}
@@ -307,14 +320,14 @@ func (controller *fakeController) SetKeyspace(keyspace string) {
 	controller.keyspace = keyspace
 }
 
-func (controller *fakeController) Open() error {
+func (controller *fakeController) Open(ctx context.Context) error {
 	if controller.openFail {
 		return errControllerOpen
 	}
 	return nil
 }
 
-func (controller *fakeController) Read() ([]string, error) {
+func (controller *fakeController) Read(ctx context.Context) ([]string, error) {
 	if controller.readFail {
 		return nil, errControllerRead
 	}
@@ -328,27 +341,27 @@ func (controller *fakeController) Keyspace() string {
 	return controller.keyspace
 }
 
-func (controller *fakeController) OnReadSuccess() error {
+func (controller *fakeController) OnReadSuccess(ctx context.Context) error {
 	controller.onReadSuccessTriggered = true
 	return nil
 }
 
-func (controller *fakeController) OnReadFail(err error) error {
+func (controller *fakeController) OnReadFail(ctx context.Context, err error) error {
 	controller.onReadFailTriggered = true
 	return err
 }
 
-func (controller *fakeController) OnValidationSuccess() error {
+func (controller *fakeController) OnValidationSuccess(ctx context.Context) error {
 	controller.onValidationSuccessTriggered = true
 	return nil
 }
 
-func (controller *fakeController) OnValidationFail(err error) error {
+func (controller *fakeController) OnValidationFail(ctx context.Context, err error) error {
 	controller.onValidationFailTriggered = true
 	return err
 }
 
-func (controller *fakeController) OnExecutorComplete(*ExecuteResult) error {
+func (controller *fakeController) OnExecutorComplete(ctx context.Context, result *ExecuteResult) error {
 	controller.onExecutorCompleteTriggered = true
 	return nil
 }
diff --git
a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 6ce3271eb3..7eb2412d99 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -38,8 +38,7 @@ func NewTabletExecutor( } // Open opens a connection to the master for every shard -func (exec *TabletExecutor) Open(keyspace string) error { - ctx := context.TODO() +func (exec *TabletExecutor) Open(ctx context.Context, keyspace string) error { if !exec.isClosed { return nil } @@ -71,7 +70,7 @@ func (exec *TabletExecutor) Open(keyspace string) error { } // Validate validates a list of sql statements -func (exec *TabletExecutor) Validate(sqls []string) error { +func (exec *TabletExecutor) Validate(ctx context.Context, sqls []string) error { if exec.isClosed { return fmt.Errorf("executor is closed") } @@ -87,20 +86,20 @@ func (exec *TabletExecutor) Validate(sqls []string) error { } parsedDDLs[i] = ddl } - return exec.detectBigSchemaChanges(parsedDDLs) + return exec.detectBigSchemaChanges(ctx, parsedDDLs) } // a schema change that satisfies any following condition is considered // to be a big schema change and will be rejected. // 1. Alter more than 100,000 rows. // 2. Change a table with more than 2,000,000 rows (Drops are fine). -func (exec *TabletExecutor) detectBigSchemaChanges(parsedDDLs []*sqlparser.DDL) error { +func (exec *TabletExecutor) detectBigSchemaChanges(ctx context.Context, parsedDDLs []*sqlparser.DDL) error { // exec.tabletInfos is guaranteed to have at least one element; // Otherwise, Open should fail and executor should fail. masterTabletInfo := exec.tabletInfos[0] // get database schema, excluding views. dbSchema, err := exec.tmClient.GetSchema( - context.Background(), masterTabletInfo, []string{}, []string{}, false) + ctx, masterTabletInfo, []string{}, []string{}, false) if err != nil { return fmt.Errorf("unable to get database schema, error: %v", err) } @@ -127,11 +126,11 @@ func (exec *TabletExecutor) detectBigSchemaChanges(parsedDDLs []*sqlparser.DDL) return nil } -func (exec *TabletExecutor) preflightSchemaChanges(sqls []string) error { +func (exec *TabletExecutor) preflightSchemaChanges(ctx context.Context, sqls []string) error { exec.schemaDiffs = make([]*proto.SchemaChangeResult, len(sqls)) for i := range sqls { schemaDiff, err := exec.tmClient.PreflightSchema( - context.Background(), exec.tabletInfos[0], sqls[i]) + ctx, exec.tabletInfos[0], sqls[i]) if err != nil { return err } @@ -149,7 +148,7 @@ func (exec *TabletExecutor) preflightSchemaChanges(sqls []string) error { } // Execute applies schema changes -func (exec *TabletExecutor) Execute(sqls []string) *ExecuteResult { +func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *ExecuteResult { execResult := ExecuteResult{} execResult.Sqls = sqls if exec.isClosed { @@ -160,14 +159,14 @@ func (exec *TabletExecutor) Execute(sqls []string) *ExecuteResult { defer func() { execResult.TotalTimeSpent = time.Since(startTime) }() // make sure every schema change introduces a table definition change - if err := exec.preflightSchemaChanges(sqls); err != nil { + if err := exec.preflightSchemaChanges(ctx, sqls); err != nil { execResult.ExecutorErr = err.Error() return &execResult } for index, sql := range sqls { execResult.CurSqlIndex = index - exec.executeOnAllTablets(&execResult, sql) + exec.executeOnAllTablets(ctx, &execResult, sql) if len(execResult.FailedShards) > 0 { break } @@ -175,14 +174,14 @@ func (exec *TabletExecutor) Execute(sqls []string) *ExecuteResult { return &execResult 
} -func (exec *TabletExecutor) executeOnAllTablets(execResult *ExecuteResult, sql string) { +func (exec *TabletExecutor) executeOnAllTablets(ctx context.Context, execResult *ExecuteResult, sql string) { var wg sync.WaitGroup numOfMasterTablets := len(exec.tabletInfos) wg.Add(numOfMasterTablets) errChan := make(chan ShardWithError, numOfMasterTablets) successChan := make(chan ShardResult, numOfMasterTablets) for i := range exec.tabletInfos { - go exec.executeOneTablet(&wg, exec.tabletInfos[i], sql, errChan, successChan) + go exec.executeOneTablet(ctx, &wg, exec.tabletInfos[i], sql, errChan, successChan) } wg.Wait() close(errChan) @@ -198,13 +197,13 @@ func (exec *TabletExecutor) executeOnAllTablets(execResult *ExecuteResult, sql s } func (exec *TabletExecutor) executeOneTablet( + ctx context.Context, wg *sync.WaitGroup, tabletInfo *topo.TabletInfo, sql string, errChan chan ShardWithError, successChan chan ShardResult) { defer wg.Done() - ctx := context.Background() result, err := exec.tmClient.ExecuteFetchAsDba(ctx, tabletInfo, sql, 10, false, false, true) if err != nil { errChan <- ShardWithError{Shard: tabletInfo.Shard, Err: err.Error()} diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index 481bbf6ccf..7f3b3903d2 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -8,17 +8,20 @@ import ( "testing" "github.com/youtube/vitess/go/vt/mysqlctl/proto" + "golang.org/x/net/context" ) func TestTabletExecutorOpen(t *testing.T) { executor := newFakeExecutor() - if err := executor.Open("test_keyspace"); err != nil { + ctx := context.Background() + + if err := executor.Open(ctx, "test_keyspace"); err != nil { t.Fatalf("executor.Open should succeed") } defer executor.Close() - if err := executor.Open("test_keyspace"); err != nil { + if err := executor.Open(ctx, "test_keyspace"); err != nil { t.Fatalf("open an opened executor should also succeed") } } @@ -52,45 +55,46 @@ func TestTabletExecutorValidate(t *testing.T) { executor := NewTabletExecutor( fakeTmc, newFakeTopo()) + ctx := context.Background() sqls := []string{ "ALTER TABLE test_table ADD COLUMN new_id bigint(20)", "CREATE TABLE test_table_02 (pk int)", } - if err := executor.Validate(sqls); err == nil { + if err := executor.Validate(ctx, sqls); err == nil { t.Fatalf("validate should fail because executor is closed") } - executor.Open("test_keyspace") + executor.Open(ctx, "test_keyspace") defer executor.Close() // schema changes with DMLs should fail - if err := executor.Validate([]string{ + if err := executor.Validate(ctx, []string{ "INSERT INTO test_table VALUES(1)"}); err == nil { t.Fatalf("schema changes are for DDLs") } // validates valid ddls - if err := executor.Validate(sqls); err != nil { + if err := executor.Validate(ctx, sqls); err != nil { t.Fatalf("executor.Validate should succeed, but got error: %v", err) } // alter a table with more than 100,000 rows - if err := executor.Validate([]string{ + if err := executor.Validate(ctx, []string{ "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", }); err == nil { t.Fatalf("executor.Validate should fail, alter a table more than 100,000 rows") } // change a table with more than 2,000,000 rows - if err := executor.Validate([]string{ + if err := executor.Validate(ctx, []string{ "RENAME TABLE test_table_04 TO test_table_05", }); err == nil { t.Fatalf("executor.Validate should fail, change a table more than 2,000,000 rows") } - if err := executor.Validate([]string{ + if err := 
executor.Validate(ctx, []string{ "DROP TABLE test_table_04", }); err != nil { t.Fatalf("executor.Validate should succeed, drop a table with more than 2,000,000 rows is allowed") @@ -99,18 +103,19 @@ func TestTabletExecutorValidate(t *testing.T) { func TestTabletExecutorExecute(t *testing.T) { executor := newFakeExecutor() + ctx := context.Background() sqls := []string{"DROP TABLE unknown_table"} - result := executor.Execute(sqls) + result := executor.Execute(ctx, sqls) if result.ExecutorErr == "" { t.Fatalf("execute should fail, call execute.Open first") } - executor.Open("test_keyspace") + executor.Open(ctx, "test_keyspace") defer executor.Close() - result = executor.Execute(sqls) + result = executor.Execute(ctx, sqls) if result.ExecutorErr == "" { t.Fatalf("execute should fail, ddl does not introduce any table schema change") } diff --git a/go/vt/schemamanager/ui_controller.go b/go/vt/schemamanager/ui_controller.go index d728039160..f87cba44d4 100644 --- a/go/vt/schemamanager/ui_controller.go +++ b/go/vt/schemamanager/ui_controller.go @@ -11,6 +11,7 @@ import ( "strings" log "github.com/golang/glog" + "golang.org/x/net/context" ) // UIController handles schema events. @@ -39,12 +40,12 @@ func NewUIController( } // Open is a no-op. -func (controller *UIController) Open() error { +func (controller *UIController) Open(ctx context.Context) error { return nil } // Read reads schema changes -func (controller *UIController) Read() ([]string, error) { +func (controller *UIController) Read(ctx context.Context) ([]string, error) { return controller.sqls, nil } @@ -58,35 +59,35 @@ func (controller *UIController) Keyspace() string { } // OnReadSuccess is no-op -func (controller *UIController) OnReadSuccess() error { +func (controller *UIController) OnReadSuccess(ctx context.Context) error { controller.writer.Write( []byte(fmt.Sprintf("OnReadSuccess, sqls: %v\n", controller.sqls))) return nil } // OnReadFail is no-op -func (controller *UIController) OnReadFail(err error) error { +func (controller *UIController) OnReadFail(ctx context.Context, err error) error { controller.writer.Write( []byte(fmt.Sprintf("OnReadFail, error: %v\n", err))) return err } // OnValidationSuccess is no-op -func (controller *UIController) OnValidationSuccess() error { +func (controller *UIController) OnValidationSuccess(ctx context.Context) error { controller.writer.Write( []byte(fmt.Sprintf("OnValidationSuccess, sqls: %v\n", controller.sqls))) return nil } // OnValidationFail is no-op -func (controller *UIController) OnValidationFail(err error) error { +func (controller *UIController) OnValidationFail(ctx context.Context, err error) error { controller.writer.Write( []byte(fmt.Sprintf("OnValidationFail, error: %v\n", err))) return err } // OnExecutorComplete is no-op -func (controller *UIController) OnExecutorComplete(result *ExecuteResult) error { +func (controller *UIController) OnExecutorComplete(ctx context.Context, result *ExecuteResult) error { data, err := json.Marshal(result) if err != nil { log.Errorf("Failed to serialize ExecuteResult: %v", err) diff --git a/go/vt/schemamanager/ui_controller_test.go b/go/vt/schemamanager/ui_controller_test.go index 475a8645d2..b5713d695b 100644 --- a/go/vt/schemamanager/ui_controller_test.go +++ b/go/vt/schemamanager/ui_controller_test.go @@ -9,13 +9,17 @@ import ( "net/http/httptest" "strings" "testing" + + "golang.org/x/net/context" ) func TestUIController(t *testing.T) { sql := "CREATE TABLE test_table (pk int)" response := httptest.NewRecorder() controller := 
NewUIController(sql, "test_keyspace", response) - err := controller.Open() + ctx := context.Background() + + err := controller.Open(ctx) if err != nil { t.Fatalf("controller.Open should succeed, but got error: %v", err) } @@ -25,7 +29,7 @@ func TestUIController(t *testing.T) { t.Fatalf("expect to get keyspace: 'test_keyspace', but got keyspace: '%s'", keyspace) } - sqls, err := controller.Read() + sqls, err := controller.Read(ctx) if err != nil { t.Fatalf("controller.Read should succeed, but got error: %v", err) } @@ -36,7 +40,7 @@ func TestUIController(t *testing.T) { t.Fatalf("expect to get sql: '%s', but got: '%s'", sql, sqls[0]) } defer controller.Close() - err = controller.OnReadSuccess() + err = controller.OnReadSuccess(ctx) if err != nil { t.Fatalf("OnDataSourcerReadSuccess should succeed") } @@ -44,7 +48,7 @@ func TestUIController(t *testing.T) { t.Fatalf("controller.OnReadSuccess should write to http response") } errReadFail := fmt.Errorf("read fail") - err = controller.OnReadFail(errReadFail) + err = controller.OnReadFail(ctx, errReadFail) if err != errReadFail { t.Fatalf("should get error:%v, but get: %v", errReadFail, err) } @@ -53,7 +57,7 @@ func TestUIController(t *testing.T) { t.Fatalf("controller.OnReadFail should write to http response") } - err = controller.OnValidationSuccess() + err = controller.OnValidationSuccess(ctx) if err != nil { t.Fatalf("OnValidationSuccess should succeed") } @@ -63,7 +67,7 @@ func TestUIController(t *testing.T) { } errValidationFail := fmt.Errorf("validation fail") - err = controller.OnValidationFail(errValidationFail) + err = controller.OnValidationFail(ctx, errValidationFail) if err != errValidationFail { t.Fatalf("should get error:%v, but get: %v", errValidationFail, err) } @@ -72,7 +76,7 @@ func TestUIController(t *testing.T) { t.Fatalf("controller.OnValidationFail should write to http response") } - err = controller.OnExecutorComplete(&ExecuteResult{}) + err = controller.OnExecutorComplete(ctx, &ExecuteResult{}) if err != nil { t.Fatalf("OnExecutorComplete should succeed") } diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index e5438b2d14..7e5827d934 100644 --- a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -409,6 +409,7 @@ func (wr *Wrangler) ApplySchemaKeyspace(ctx context.Context, keyspace string, ch } err = schemamanager.Run( + ctx, schemamanager.NewPlainController(change, keyspace), schemamanager.NewTabletExecutor(wr.tmc, wr.ts), ) From a12007a1233b4223ed9461b147e0752a337b741c Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 16:25:59 -0700 Subject: [PATCH 108/128] No more vquery, connect to tablet directly instead. 
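A note on the schemamanager change in the preceding patch: with the context
threaded through the whole API, a caller now drives a schema change roughly
as below. This is a minimal sketch assembled from the vtctld and wrangler
call sites above; the helper name is hypothetical, and "ts" is assumed to
be an already-open topo.Server.

    import (
        "golang.org/x/net/context"

        "github.com/youtube/vitess/go/vt/schemamanager"
        "github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
        "github.com/youtube/vitess/go/vt/topo"
    )

    // applySchemaChange is a hypothetical helper showing the new call shape:
    // the context is created once and flows through Run into every
    // controller and executor callback.
    func applySchemaChange(ts topo.Server, sql, keyspace string) error {
        return schemamanager.Run(
            context.Background(),
            schemamanager.NewPlainController(sql, keyspace),
            schemamanager.NewTabletExecutor(
                tmclient.NewTabletManagerClient(), ts),
        )
    }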
--- test/rowcache_invalidator.py | 16 ++++++++-------- test/tablet.py | 9 --------- 2 files changed, 8 insertions(+), 17 deletions(-) diff --git a/test/rowcache_invalidator.py b/test/rowcache_invalidator.py index 085c24c142..294c95bc35 100755 --- a/test/rowcache_invalidator.py +++ b/test/rowcache_invalidator.py @@ -141,17 +141,17 @@ class RowCacheInvalidator(unittest.TestCase): logging.debug("vt_insert_test stats %s" % stats_dict) misses = stats_dict['Misses'] hits = stats_dict["Hits"] - replica_tablet.vquery("select * from vt_insert_test where id=%d" % (id), - path='test_keyspace/0') + conn = replica_tablet.conn() + conn._execute("select * from vt_insert_test where id=%d" % (id), {}) stats_dict = self.replica_stats()['vt_insert_test'] self.assertEqual(stats_dict['Misses'] - misses, 1, "This shouldn't have hit the cache") - replica_tablet.vquery("select * from vt_insert_test where id=%d" % (id), - path='test_keyspace/0') + conn._execute("select * from vt_insert_test where id=%d" % (id), {}) stats_dict = self.replica_stats()['vt_insert_test'] self.assertEqual(stats_dict['Hits'] - hits, 1, "This should have hit the cache") + conn.close() def test_outofband_statements(self): start = self.replica_vars()['InternalErrors'].get('Invalidation', 0) @@ -267,16 +267,16 @@ class RowCacheInvalidator(unittest.TestCase): stats_dict = self.replica_stats()['vt_insert_test'] misses = stats_dict['Misses'] hits = stats_dict["Hits"] - replica_tablet.vquery("select * from vt_insert_test where id=%d" % (id), - path='test_keyspace/0') + conn = replica_tablet.conn() + conn._execute("select * from vt_insert_test where id=%d" % (id), {}) stats_dict = self.replica_stats()['vt_insert_test'] self.assertEqual(stats_dict['Misses'] - misses, 1, "This shouldn't have hit the cache") - replica_tablet.vquery("select * from vt_insert_test where id=%d" % (id), - path='test_keyspace/0') + conn._execute("select * from vt_insert_test where id=%d" % (id), {}) hits2 = self.replica_stats()['vt_insert_test']['Hits'] self.assertEqual(hits2 - hits, 1, "This should have hit the cache") + conn.close() def test_service_disabled(self): # perform some inserts, then change state to stop the invalidator diff --git a/test/tablet.py b/test/tablet.py index 30b4fb43ae..c06ec07612 100644 --- a/test/tablet.py +++ b/test/tablet.py @@ -193,15 +193,6 @@ class Tablet(object): finally: conn.close() - # path is either: - # - keyspace/shard for vttablet and vttablet-streaming - # - zk path for vtdb, vtdb-streaming - def vquery(self, query, path='', user=None, password=None, driver=None, - verbose=False, raise_on_error=True): - return utils.vtclient2(self.port, path, query, user=user, - password=password, driver=driver, - verbose=verbose, raise_on_error=raise_on_error) - def assert_table_count(self, dbname, table, n, where=''): result = self.mquery(dbname, 'select count(*) from ' + table + ' ' + where) if result[0][0] != n: From 1514b67dd1614ec9a041e39d9eec124d2e0af286 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Wed, 27 May 2015 16:28:47 -0700 Subject: [PATCH 109/128] java/vtgate-client: Fix minor formatting issues which I accidentally introduced. 
--- .../java/com/youtube/vitess/vtgate/integration/FailuresIT.java | 1 - .../java/com/youtube/vitess/vtgate/integration/VtGateIT.java | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/FailuresIT.java b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/FailuresIT.java index a0225c5505..4b390b6ad5 100644 --- a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/FailuresIT.java +++ b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/FailuresIT.java @@ -122,7 +122,6 @@ public class FailuresIT { } } - /** * Create env with two shards each having a master and replica */ diff --git a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java index 6facd87768..efae28bebd 100644 --- a/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java +++ b/java/vtgate-client/src/test/java/com/youtube/vitess/vtgate/integration/VtGateIT.java @@ -1,4 +1,4 @@ - package com.youtube.vitess.vtgate.integration; +package com.youtube.vitess.vtgate.integration; import com.google.common.collect.Lists; import com.google.common.primitives.UnsignedLong; From f5c971f384d3940b77e87fff4d0dae4a4f34e306 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 27 May 2015 16:55:11 -0700 Subject: [PATCH 110/128] Removing vtclient2 specific tests in integration tests. --- test/sharded.py | 58 +++---------------------------------------------- test/utils.py | 32 --------------------------- 2 files changed, 3 insertions(+), 87 deletions(-) diff --git a/test/sharded.py b/test/sharded.py index 1f33134b2c..39cb3c89bd 100755 --- a/test/sharded.py +++ b/test/sharded.py @@ -71,22 +71,6 @@ primary key (id) class TestSharded(unittest.TestCase): - def _check_rows(self, to_look_for, driver="vtdb"): - out, err = utils.vtclient2(0, "/test_nj/test_keyspace/master", "select id, msg from vt_select_test", driver=driver, verbose=True) - for pattern in to_look_for: - if pattern not in err: - logging.error("vtclient2 returned:\n%s\n%s", out, err) - self.fail('wrong vtclient2 output, missing: ' + pattern) - logging.debug("_check_rows:\n%s\n%s", out, err) - - def _check_rows_schema_diff(self, driver): - out, err = utils.vtclient2(0, "/test_nj/test_keyspace/master", "select * from vt_select_test", driver=driver, verbose=False, raise_on_error=False) - if "column[0] name mismatch: id != msg" not in err and \ - "column[0] name mismatch: msg != id" not in err: - logging.error("vtclient2 returned:\n%s\n%s", out, err) - self.fail('wrong vtclient2 output, missing "name mismatch" of some kind') - logging.debug("_check_rows_schema_diff:\n%s\n%s", out, err) - def test_sharding(self): shard_0_master.init_tablet( 'master', 'test_keyspace', '-80') @@ -143,49 +127,13 @@ class TestSharded(unittest.TestCase): utils.pause("Before the sql scatter query") - # note the order of the rows is not guaranteed, as the go routines - # doing the work can go out of order - self._check_rows(["Index\tid\tmsg", - "1\ttest 1", - "10\ttest 10"]) - - # write a value, re-read them all - utils.vtclient2(3803, "/test_nj/test_keyspace/master", "insert into vt_select_test (id, msg) values (:keyspace_id, 'test 2')", bindvars='{"keyspace_id": 2}', driver="vtdb", verbose=True) - self._check_rows(["Index\tid\tmsg", - "1\ttest 1", - "2\ttest 2", - "10\ttest 10"]) - - # make sure the '2' value was written on first 
shard + # make sure the '1' value was written on first shard rows = shard_0_master.mquery('vt_test_keyspace', "select id, msg from vt_select_test order by id") - self.assertEqual(rows, ((1, 'test 1'), (2, 'test 2'), ), + self.assertEqual(rows, ((1, 'test 1'), ), 'wrong mysql_query output: %s' % str(rows)) utils.pause("After db writes") - # now use various topo servers and streaming or both for the same query - self._check_rows(["Index\tid\tmsg", - "1\ttest 1", - "2\ttest 2", - "10\ttest 10"], - driver="vtdb-streaming") - if environment.topo_server().flavor() == 'zookeeper': - self._check_rows(["Index\tid\tmsg", - "1\ttest 1", - "2\ttest 2", - "10\ttest 10"], - driver="vtdb-zk") - self._check_rows(["Index\tid\tmsg", - "1\ttest 1", - "2\ttest 2", - "10\ttest 10"], - driver="vtdb-zk-streaming") - - # make sure the schema checking works - self._check_rows_schema_diff("vtdb") - if environment.topo_server().flavor() == 'zookeeper': - self._check_rows_schema_diff("vtdb-zk") - # throw in some schema validation step # we created the schema differently, so it should show utils.run_vtctl(['ValidateSchemaShard', 'test_keyspace/-80']) @@ -238,7 +186,7 @@ class TestSharded(unittest.TestCase): "", "test_keyspace", "-80", 10.0) conn.dial() (results, rowcount, lastrowid, fields) = conn._execute("select id, msg from vt_select_test order by id", {}) - self.assertEqual(results, [(1, 'test 1'), (2, 'test 2'), ], + self.assertEqual(results, [(1, 'test 1'), ], 'wrong conn._execute output: %s' % str(results)) # connect to shard 80- diff --git a/test/utils.py b/test/utils.py index bf30ef9fcb..3275e832c5 100644 --- a/test/utils.py +++ b/test/utils.py @@ -571,38 +571,6 @@ def _get_vtworker_cmd(clargs, log_level='', auto_log=False): cmd = args + clargs return cmd, port -# vtclient2 helpers -# driver is one of: -# - vttablet (default), vttablet-streaming -# - vtdb, vtdb-streaming (default topo server) -# - vtdb-zk, vtdb-zk-streaming (forced zk topo server) -# path is either: keyspace/shard for vttablet* or zk path for vtdb* -def vtclient2(uid, path, query, bindvars=None, user=None, password=None, driver=None, - verbose=False, raise_on_error=True): - if (user is None) != (password is None): - raise TypeError("you should provide either both or none of user and password") - - # for ZK paths to not have // in the path, that confuses things - if path.startswith('/'): - path = path[1:] - server = "localhost:%u/%s" % (uid, path) - - cmdline = environment.binary_args('vtclient2') + ['-server', server] - cmdline += environment.topo_server().flags() - cmdline += protocols_flavor().tabletconn_protocol_flags() - if user is not None: - cmdline.extend(['-tablet-bson-username', user, - '-tablet-bson-password', password]) - if bindvars: - cmdline.extend(['-bindvars', bindvars]) - if driver: - cmdline.extend(['-driver', driver]) - if verbose: - cmdline.extend(['-alsologtostderr', '-verbose']) - cmdline.append(query) - - return run(cmdline, raise_on_error=raise_on_error, trap_output=True) - # mysql helpers def mysql_query(uid, dbname, query): conn = MySQLdb.Connect(user='vt_dba', From e4c10bb7fd72fbebe04d5cbcda375d9a4d7d4fa6 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 28 May 2015 08:43:02 -0700 Subject: [PATCH 111/128] Fixing race conditions in this test. We were cancelling the wrong context and then using it. 
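The bug, as a minimal self-contained sketch (this is not the test code
itself): the test shadowed its context with a single cancellable child and
kept using that context after the cancel had fired, so every later call saw
a dead context. The fix derives a dedicated child context for the one call
that should be interrupted:

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/net/context"
    )

    // waitForLock stands in for a lock call that blocks until ctx is done.
    func waitForLock(ctx context.Context) error {
        <-ctx.Done()
        return ctx.Err()
    }

    func main() {
        ctx := context.Background()

        // Cancel only a dedicated child context; the parent stays usable
        // for the rest of the test.
        interruptCtx, cancel := context.WithCancel(ctx)
        go func() {
            time.Sleep(10 * time.Millisecond)
            cancel()
        }()
        fmt.Println(waitForLock(interruptCtx)) // context canceled
        fmt.Println(ctx.Err())                 // <nil>: parent not poisoned
    }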
--- go/vt/topo/test/lock.go | 50 ++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/go/vt/topo/test/lock.go b/go/vt/topo/test/lock.go index 6b16f37bf3..df208b5984 100644 --- a/go/vt/topo/test/lock.go +++ b/go/vt/topo/test/lock.go @@ -29,7 +29,6 @@ func CheckKeyspaceLock(ctx context.Context, t *testing.T, ts topo.Server) { } func checkKeyspaceLockTimeout(ctx context.Context, t *testing.T, ts topo.Server) { - ctx, ctxCancel := context.WithCancel(ctx) lockPath, err := ts.LockKeyspaceForAction(ctx, "test_keyspace", "fake-content") if err != nil { t.Fatalf("LockKeyspaceForAction: %v", err) @@ -38,33 +37,34 @@ func checkKeyspaceLockTimeout(ctx context.Context, t *testing.T, ts topo.Server) // test we can't take the lock again fastCtx, cancel := context.WithTimeout(ctx, timeUntilLockIsTaken) if _, err := ts.LockKeyspaceForAction(fastCtx, "test_keyspace", "unused-fake-content"); err != topo.ErrTimeout { - t.Errorf("LockKeyspaceForAction(again): %v", err) + t.Fatalf("LockKeyspaceForAction(again): %v", err) } cancel() // test we can interrupt taking the lock + interruptCtx, cancel := context.WithCancel(ctx) go func() { time.Sleep(timeUntilLockIsTaken) - ctxCancel() + cancel() }() - if _, err := ts.LockKeyspaceForAction(ctx, "test_keyspace", "unused-fake-content"); err != topo.ErrInterrupted { - t.Errorf("LockKeyspaceForAction(interrupted): %v", err) + if _, err := ts.LockKeyspaceForAction(interruptCtx, "test_keyspace", "unused-fake-content"); err != topo.ErrInterrupted { + t.Fatalf("LockKeyspaceForAction(interrupted): %v", err) } if err := ts.UnlockKeyspaceForAction(ctx, "test_keyspace", lockPath, "fake-results"); err != nil { - t.Errorf("UnlockKeyspaceForAction(): %v", err) + t.Fatalf("UnlockKeyspaceForAction(): %v", err) } // test we can't unlock again if err := ts.UnlockKeyspaceForAction(ctx, "test_keyspace", lockPath, "fake-results"); err == nil { - t.Error("UnlockKeyspaceForAction(again) worked") + t.Fatalf("UnlockKeyspaceForAction(again) worked") } } // checkKeyspaceLockMissing makes sure we can't lock a non-existing keyspace func checkKeyspaceLockMissing(ctx context.Context, t *testing.T, ts topo.Server) { if _, err := ts.LockKeyspaceForAction(ctx, "test_keyspace_666", "fake-content"); err == nil { - t.Errorf("LockKeyspaceForAction(test_keyspace_666) worked for non-existing keyspace") + t.Fatalf("LockKeyspaceForAction(test_keyspace_666) worked for non-existing keyspace") } } @@ -82,7 +82,7 @@ func checkKeyspaceLockUnblocks(ctx context.Context, t *testing.T, ts topo.Server t.Fatalf("LockKeyspaceForAction(test_keyspace) failed: %v", err) } if err = ts.UnlockKeyspaceForAction(ctx, "test_keyspace", lockPath, "fake-results"); err != nil { - t.Errorf("UnlockKeyspaceForAction(test_keyspace): %v", err) + t.Fatalf("UnlockKeyspaceForAction(test_keyspace): %v", err) } close(finished) }() @@ -126,7 +126,6 @@ func CheckShardLock(ctx context.Context, t *testing.T, ts topo.Server) { } func checkShardLockTimeout(ctx context.Context, t *testing.T, ts topo.Server) { - ctx, ctxCancel := context.WithCancel(ctx) lockPath, err := ts.LockShardForAction(ctx, "test_keyspace", "10-20", "fake-content") if err != nil { t.Fatalf("LockShardForAction: %v", err) @@ -135,21 +134,22 @@ func checkShardLockTimeout(ctx context.Context, t *testing.T, ts topo.Server) { // test we can't take the lock again fastCtx, cancel := context.WithTimeout(ctx, timeUntilLockIsTaken) if _, err := ts.LockShardForAction(fastCtx, "test_keyspace", "10-20", "unused-fake-content"); err != 
topo.ErrTimeout { - t.Errorf("LockShardForAction(again): %v", err) + t.Fatalf("LockShardForAction(again): %v", err) } cancel() // test we can interrupt taking the lock + interruptCtx, cancel := context.WithCancel(ctx) go func() { time.Sleep(timeUntilLockIsTaken) - ctxCancel() + cancel() }() - if _, err := ts.LockShardForAction(ctx, "test_keyspace", "10-20", "unused-fake-content"); err != topo.ErrInterrupted { - t.Errorf("LockShardForAction(interrupted): %v", err) + if _, err := ts.LockShardForAction(interruptCtx, "test_keyspace", "10-20", "unused-fake-content"); err != topo.ErrInterrupted { + t.Fatalf("LockShardForAction(interrupted): %v", err) } if err := ts.UnlockShardForAction(ctx, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { - t.Errorf("UnlockShardForAction(): %v", err) + t.Fatalf("UnlockShardForAction(): %v", err) } // test we can't unlock again @@ -161,7 +161,7 @@ func checkShardLockTimeout(ctx context.Context, t *testing.T, ts topo.Server) { func checkShardLockMissing(ctx context.Context, t *testing.T, ts topo.Server) { // test we can't lock a non-existing shard if _, err := ts.LockShardForAction(ctx, "test_keyspace", "20-30", "fake-content"); err == nil { - t.Errorf("LockShardForAction(test_keyspace/20-30) worked for non-existing shard") + t.Fatalf("LockShardForAction(test_keyspace/20-30) worked for non-existing shard") } } @@ -179,7 +179,7 @@ func checkShardLockUnblocks(ctx context.Context, t *testing.T, ts topo.Server) { t.Fatalf("LockShardForAction(test_keyspace, 10-20) failed: %v", err) } if err = ts.UnlockShardForAction(ctx, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { - t.Errorf("UnlockShardForAction(test_keyspace, 10-20): %v", err) + t.Fatalf("UnlockShardForAction(test_keyspace, 10-20): %v", err) } close(finished) }() @@ -218,14 +218,13 @@ func checkSrvShardLockGeneral(ctx context.Context, t *testing.T, ts topo.Server) cell := getLocalCell(ctx, t, ts) // make sure we can create the lock even if no directory exists - ctx, ctxCancel := context.WithCancel(ctx) lockPath, err := ts.LockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", "fake-content") if err != nil { t.Fatalf("LockSrvShardForAction: %v", err) } if err := ts.UnlockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil { - t.Errorf("UnlockShardForAction: %v", err) + t.Fatalf("UnlockShardForAction: %v", err) } // now take the lock again after the root exists @@ -237,22 +236,23 @@ func checkSrvShardLockGeneral(ctx context.Context, t *testing.T, ts topo.Server) // test we can't take the lock again fastCtx, cancel := context.WithTimeout(ctx, timeUntilLockIsTaken) if _, err := ts.LockSrvShardForAction(fastCtx, cell, "test_keyspace", "10-20", "unused-fake-content"); err != topo.ErrTimeout { - t.Errorf("LockSrvShardForAction(again): %v", err) + t.Fatalf("LockSrvShardForAction(again): %v", err) } cancel() // test we can interrupt taking the lock + interruptCtx, cancel := context.WithCancel(ctx) go func() { time.Sleep(timeUntilLockIsTaken) - ctxCancel() + cancel() }() - if _, err := ts.LockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", "unused-fake-content"); err != topo.ErrInterrupted { - t.Errorf("LockSrvShardForAction(interrupted): %v", err) + if _, err := ts.LockSrvShardForAction(interruptCtx, cell, "test_keyspace", "10-20", "unused-fake-content"); err != topo.ErrInterrupted { + t.Fatalf("LockSrvShardForAction(interrupted): %v", err) } // unlock now if err := ts.UnlockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", 
lockPath, "fake-results"); err != nil {
-		t.Errorf("UnlockSrvShardForAction(): %v", err)
+		t.Fatalf("UnlockSrvShardForAction(): %v", err)
 	}
 
 	// test we can't unlock again
@@ -277,7 +277,7 @@ func checkSrvShardLockUnblocks(ctx context.Context, t *testing.T, ts topo.Server
 			t.Fatalf("LockSrvShardForAction(test, test_keyspace, 10-20) failed: %v", err)
 		}
 		if err = ts.UnlockSrvShardForAction(ctx, cell, "test_keyspace", "10-20", lockPath, "fake-results"); err != nil {
-			t.Errorf("UnlockSrvShardForAction(test, test_keyspace, 10-20): %v", err)
+			t.Fatalf("UnlockSrvShardForAction(test, test_keyspace, 10-20): %v", err)
 		}
 		close(finished)
 	}()

From 83804753745dac60c05625ade9d1cbf3506579c5 Mon Sep 17 00:00:00 2001
From: Alain Jobart
Date: Thu, 28 May 2015 09:19:10 -0700
Subject: [PATCH 112/128] Adding a remote vtctl test library, using it for backup tests.

The purpose of this is to add unit test coverage to the vtctl library, and
also to make unit tests closer to what users would do when calling 'vtctl'
commands.
---
 go/vt/vtctl/gorpcvtctlclient/client_test.go |  6 +-
 go/vt/wrangler/testlib/backup_test.go       | 16 +---
 go/vt/wrangler/testlib/vtctl_pipe.go        | 84 +++++++++++++++++++++
 3 files changed, 89 insertions(+), 17 deletions(-)
 create mode 100644 go/vt/wrangler/testlib/vtctl_pipe.go

diff --git a/go/vt/vtctl/gorpcvtctlclient/client_test.go b/go/vt/vtctl/gorpcvtctlclient/client_test.go
index e31395c657..f5f52c2212 100644
--- a/go/vt/vtctl/gorpcvtctlclient/client_test.go
+++ b/go/vt/vtctl/gorpcvtctlclient/client_test.go
@@ -5,7 +5,6 @@
 package gorpcvtctlclient
 
 import (
-	"fmt"
 	"net"
 	"net/http"
 	"testing"
@@ -27,13 +26,12 @@ func TestVtctlServer(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Cannot listen: %v", err)
 	}
-	port := listener.Addr().(*net.TCPAddr).Port
 
 	// Create a Go Rpc server and listen on the port
 	server := rpcplus.NewServer()
 	server.Register(gorpcvtctlserver.NewVtctlServer(ts))
 
-	// create the HTTP server, serve the server from it
+	// Create the HTTP server, serve the server from it
 	handler := http.NewServeMux()
 	bsonrpc.ServeCustomRPC(handler, server, false)
 	httpServer := http.Server{
@@ -42,7 +40,7 @@ func TestVtctlServer(t *testing.T) {
 	go httpServer.Serve(listener)
 
 	// Create a VtctlClient Go Rpc client to talk to the fake server
-	client, err := goRPCVtctlClientFactory(fmt.Sprintf("localhost:%v", port), 30*time.Second)
+	client, err := goRPCVtctlClientFactory(listener.Addr().String(), 30*time.Second)
 	if err != nil {
 		t.Fatalf("Cannot create client: %v", err)
 	}
diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go
index 8ac84d35d2..ad54745bf5 100644
--- a/go/vt/wrangler/testlib/backup_test.go
+++ b/go/vt/wrangler/testlib/backup_test.go
@@ -29,6 +29,8 @@ func TestBackupRestore(t *testing.T) {
 	ctx := context.Background()
 	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
 	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second)
+	vp := NewVtctlPipe(t, ts)
+	defer vp.Close()
 
 	// Initialize our temp dirs
 	root, err := ioutil.TempDir("", "backuptest")
@@ -88,22 +90,10 @@ func TestBackupRestore(t *testing.T) {
 	sourceTablet.StartActionLoop(t, wr)
 	defer sourceTablet.StopActionLoop(t)
 
-	ti, err := ts.GetTablet(ctx, sourceTablet.Tablet.Alias)
-	if err != nil {
-		t.Fatalf("GetTablet failed: %v", err)
-	}
-
 	// run the backup
-	logStream, errFunc, err := wr.TabletManagerClient().Backup(ctx, ti, 4)
-	if err != nil {
+	if err := vp.Run([]string{"Backup", sourceTablet.Tablet.Alias.String()}); err != nil {
 		t.Fatalf("Backup failed: %v",
err)
 	}
-	for e := range logStream {
-		t.Logf("%v", e)
-	}
-	if err := errFunc(); err != nil {
-		t.Fatalf("Backup errFunc failed: %v", err)
-	}
 
 	// verify the full status
 	if err := sourceTablet.FakeMysqlDaemon.CheckSuperQueryList(); err != nil {
diff --git a/go/vt/wrangler/testlib/vtctl_pipe.go b/go/vt/wrangler/testlib/vtctl_pipe.go
new file mode 100644
index 0000000000..6be11a3956
--- /dev/null
+++ b/go/vt/wrangler/testlib/vtctl_pipe.go
@@ -0,0 +1,84 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testlib
+
+import (
+	"net"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/youtube/vitess/go/rpcplus"
+	"github.com/youtube/vitess/go/rpcwrap/bsonrpc"
+	"github.com/youtube/vitess/go/vt/topo"
+	"github.com/youtube/vitess/go/vt/vtctl/gorpcvtctlserver"
+	"github.com/youtube/vitess/go/vt/vtctl/vtctlclient"
+	"golang.org/x/net/context"
+
+	// we need to import the gorpcvtctlclient library so the go rpc
+	// vtctl client is registered and can be used.
+	_ "github.com/youtube/vitess/go/vt/vtctl/gorpcvtctlclient"
+)
+
+// VtctlPipe is a vtctl server based on a topo server, and a client that
+// is connected to it via bson rpc.
+type VtctlPipe struct {
+	listener net.Listener
+	client   vtctlclient.VtctlClient
+	t        *testing.T
+}
+
+// NewVtctlPipe creates a new VtctlPipe based on the given topo server.
+func NewVtctlPipe(t *testing.T, ts topo.Server) *VtctlPipe {
+	// Listen on a random port
+	listener, err := net.Listen("tcp", ":0")
+	if err != nil {
+		t.Fatalf("Cannot listen: %v", err)
+	}
+
+	// Create a Go Rpc server and listen on the port
+	server := rpcplus.NewServer()
+	server.Register(gorpcvtctlserver.NewVtctlServer(ts))
+
+	// Create the HTTP server, serve the server from it
+	handler := http.NewServeMux()
+	bsonrpc.ServeCustomRPC(handler, server, false)
+	httpServer := http.Server{
+		Handler: handler,
+	}
+	go httpServer.Serve(listener)
+
+	// Create a VtctlClient Go Rpc client to talk to the fake server
+	client, err := vtctlclient.New(listener.Addr().String(), 30*time.Second)
+	if err != nil {
+		t.Fatalf("Cannot create client: %v", err)
+	}
+
+	return &VtctlPipe{
+		listener: listener,
+		client:   client,
+		t:        t,
+	}
+}
+
+// Close will stop listening and free up all resources.
+func (vp *VtctlPipe) Close() {
+	vp.client.Close()
+	vp.listener.Close()
+}
+
+// Run executes the provided command remotely, logs the output in the
+// test logs, and returns the command error.
+func (vp *VtctlPipe) Run(args []string) error {
+	actionTimeout := 30 * time.Second
+	lockTimeout := 10 * time.Second
+	ctx := context.Background()
+
+	c, errFunc := vp.client.ExecuteVtctlCommand(ctx, args, actionTimeout, lockTimeout)
+	for le := range c {
+		vp.t.Logf("%v", le.String())
+	}
+	return errFunc()
+}

From fabda3743404e6fc6320350206201811878e25c3 Mon Sep 17 00:00:00 2001
From: Alain Jobart
Date: Thu, 28 May 2015 13:00:25 -0700
Subject: [PATCH 113/128] Transforming vtclient2 into vtclient, a pure Go SQL
 client.

It links in the vitess driver.

Adding an integration test in vtgatev3.
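For readers following the series, the driver-level flow that the new vtclient
binary wraps looks roughly like the sketch below. This is illustrative only:
the address, the vt_user_extra table (borrowed from the vtgatev3 test later in
this patch), and the 30s-in-nanoseconds timeout literal are placeholder
assumptions, not values mandated by the patch. The JSON connection string has
the same shape vtclient builds from its flags.

package main

import (
	"database/sql"
	"fmt"

	// Importing the client package registers the "vitess" driver
	// with database/sql.
	_ "github.com/youtube/vitess/go/vt/client"
)

func main() {
	// The DSN is the JSON config parsed by go/vt/client; the timeout
	// is a time.Duration encoded in nanoseconds (30s here). Address
	// is a placeholder for a running vtgate.
	dsn := `{"address": "localhost:15991", "tablet_type": "master", "streaming": false, "timeout": 30000000000}`
	db, err := sql.Open("vitess", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Place-holders use the :v1, :v2 form; arguments are passed
	// positionally through the standard API.
	rows, err := db.Query("select * from vt_user_extra where user_id = :v1", 10)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		// vtclient itself scans every column into a string, so we
		// do the same here.
		var userID, email string
		if err := rows.Scan(&userID, &email); err != nil {
			panic(err)
		}
		fmt.Println(userID, email)
	}
	if err := rows.Err(); err != nil {
		panic(err)
	}
}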
---
 .../plugin_etcdtopo.go                     |   0
 go/cmd/vtclient/plugin_gorpcvtgateconn.go  |  11 +
 .../{vtclient2 => vtclient}/plugin_zktopo.go |   0
 go/cmd/vtclient/vtclient.go                | 189 +++++++++++++++++
 go/cmd/vtclient2/plugin_gorpctabletconn.go |  11 -
 go/cmd/vtclient2/vtclient2.go              | 193 ------------------
 go/vt/client/client.go                     |   6 +-
 test/protocols_flavor.py                   |  10 +
 test/utils.py                              |  19 ++
 test/vtgatev3_test.py                      |  17 ++
 10 files changed, 251 insertions(+), 205 deletions(-)
 rename go/cmd/{vtclient2 => vtclient}/plugin_etcdtopo.go (100%)
 create mode 100644 go/cmd/vtclient/plugin_gorpcvtgateconn.go
 rename go/cmd/{vtclient2 => vtclient}/plugin_zktopo.go (100%)
 create mode 100644 go/cmd/vtclient/vtclient.go
 delete mode 100644 go/cmd/vtclient2/plugin_gorpctabletconn.go
 delete mode 100644 go/cmd/vtclient2/vtclient2.go

diff --git a/go/cmd/vtclient2/plugin_etcdtopo.go b/go/cmd/vtclient/plugin_etcdtopo.go
similarity index 100%
rename from go/cmd/vtclient2/plugin_etcdtopo.go
rename to go/cmd/vtclient/plugin_etcdtopo.go
diff --git a/go/cmd/vtclient/plugin_gorpcvtgateconn.go b/go/cmd/vtclient/plugin_gorpcvtgateconn.go
new file mode 100644
index 0000000000..70eb9d2b52
--- /dev/null
+++ b/go/cmd/vtclient/plugin_gorpcvtgateconn.go
@@ -0,0 +1,11 @@
+// Copyright 2015, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Imports and register the gorpc vtgateconn client
+
+import (
+	_ "github.com/youtube/vitess/go/vt/vtgate/gorpcvtgateconn"
+)
diff --git a/go/cmd/vtclient2/plugin_zktopo.go b/go/cmd/vtclient/plugin_zktopo.go
similarity index 100%
rename from go/cmd/vtclient2/plugin_zktopo.go
rename to go/cmd/vtclient/plugin_zktopo.go
diff --git a/go/cmd/vtclient/vtclient.go b/go/cmd/vtclient/vtclient.go
new file mode 100644
index 0000000000..32413ea7c7
--- /dev/null
+++ b/go/cmd/vtclient/vtclient.go
@@ -0,0 +1,189 @@
+// Copyright 2012, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"database/sql"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	log "github.com/golang/glog"
+	"github.com/youtube/vitess/go/exit"
+	"github.com/youtube/vitess/go/vt/logutil"
+
+	// import the 'vitess' sql driver
+	_ "github.com/youtube/vitess/go/vt/client"
+)
+
+var (
+	usage = `
+vtclient connects to a vtgate server using the standard Go driver API.
+Version 3 of the API is used; we do not send any hints to the server.
+
+For query bound variables, we assume place-holders in the query string
+in the form of :v1, :v2, etc.
+`
+	server        = flag.String("server", "", "vtgate server to connect to")
+	tabletType    = flag.String("tablet_type", "rdonly", "tablet type to direct queries to")
+	timeout       = flag.Duration("timeout", 30*time.Second, "timeout for queries")
+	streaming     = flag.Bool("streaming", false, "use a streaming query")
+	bindVariables = newBindvars("bind_variables", "bind variables as a json list")
+)
+
+func init() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+		flag.PrintDefaults()
+		fmt.Fprint(os.Stderr, usage)
+	}
+}
+
+type bindvars []interface{}
+
+func (bv *bindvars) String() string {
+	b, err := json.Marshal(bv)
+	if err != nil {
+		return err.Error()
+	}
+	return string(b)
+}
+
+func (bv *bindvars) Set(s string) (err error) {
+	err = json.Unmarshal([]byte(s), &bv)
+	if err != nil {
+		return err
+	}
+	// json reads all numbers as float64
+	// So, we just ditch floats for bindvars
+	for i, v := range *bv {
+		if f, ok := v.(float64); ok {
+			if f > 0 {
+				(*bv)[i] = uint64(f)
+			} else {
+				(*bv)[i] = int64(f)
+			}
+		}
+	}
+
+	return nil
+}
+
+// For internal flag compatibility
+func (bv *bindvars) Get() interface{} {
+	return bv
+}
+
+func newBindvars(name, usage string) *bindvars {
+	var bv bindvars
+	flag.Var(&bv, name, usage)
+	return &bv
+}
+
+// FIXME(alainjobart) this is a cheap trick. Should probably use the
+// query parser if we needed this to be 100% reliable.
+func isDml(sql string) bool {
+	lower := strings.TrimSpace(strings.ToLower(sql))
+	return strings.HasPrefix(lower, "insert") || strings.HasPrefix(lower, "update") || strings.HasPrefix(lower, "delete")
+}
+
+func main() {
+	defer exit.Recover()
+	defer logutil.Flush()
+
+	flag.Parse()
+	args := flag.Args()
+
+	if len(args) == 0 {
+		flag.Usage()
+		exit.Return(1)
+	}
+
+	connStr := fmt.Sprintf(`{"address": "%s", "tablet_type": "%s", "streaming": %v, "timeout": %d}`, *server, *tabletType, *streaming, int64(*timeout))
+	db, err := sql.Open("vitess", connStr)
+	if err != nil {
+		log.Errorf("client error: %v", err)
+		exit.Return(1)
+	}
+
+	log.Infof("Sending the query...")
+	now := time.Now()
+
+	// handle dml
+	if isDml(args[0]) {
+		tx, err := db.Begin()
+		if err != nil {
+			log.Errorf("begin failed: %v", err)
+			exit.Return(1)
+		}
+
+		result, err := tx.Exec(args[0], []interface{}(*bindVariables)...)
+		if err != nil {
+			log.Errorf("exec failed: %v", err)
+			exit.Return(1)
+		}
+
+		err = tx.Commit()
+		if err != nil {
+			log.Errorf("commit failed: %v", err)
+			exit.Return(1)
+		}
+
+		rowsAffected, err := result.RowsAffected()
+		lastInsertId, err := result.LastInsertId()
+		log.Infof("Total time: %v / Rows affected: %v / Last Insert Id: %v", time.Now().Sub(now), rowsAffected, lastInsertId)
+	} else {
+
+		// launch the query
+		rows, err := db.Query(args[0], []interface{}(*bindVariables)...)
+ if err != nil { + log.Errorf("client error: %v", err) + exit.Return(1) + } + defer rows.Close() + + // print the headers + cols, err := rows.Columns() + if err != nil { + log.Errorf("client error: %v", err) + exit.Return(1) + } + line := "Index" + for _, field := range cols { + line += "\t" + field + } + fmt.Printf("%s\n", line) + + // get the rows + rowIndex := 0 + for rows.Next() { + row := make([]interface{}, len(cols)) + for i := range row { + var col string + row[i] = &col + } + if err := rows.Scan(row...); err != nil { + log.Errorf("client error: %v", err) + exit.Return(1) + } + + // print the line + line := fmt.Sprintf("%d", rowIndex) + for _, value := range row { + line += fmt.Sprintf("\t%v", *(value.(*string))) + } + fmt.Printf("%s\n", line) + rowIndex++ + } + if err := rows.Err(); err != nil { + log.Errorf("Error %v\n", err) + exit.Return(1) + } + log.Infof("Total time: %v / Row count: %v", time.Now().Sub(now), rowIndex) + } +} diff --git a/go/cmd/vtclient2/plugin_gorpctabletconn.go b/go/cmd/vtclient2/plugin_gorpctabletconn.go deleted file mode 100644 index 4b4c06f6b6..0000000000 --- a/go/cmd/vtclient2/plugin_gorpctabletconn.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -// Imports and register the gorpc tabletconn client - -import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/gorpctabletconn" -) diff --git a/go/cmd/vtclient2/vtclient2.go b/go/cmd/vtclient2/vtclient2.go deleted file mode 100644 index 66f2654bc3..0000000000 --- a/go/cmd/vtclient2/vtclient2.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "encoding/json" - "flag" - "fmt" - "os" - "strings" - "time" - - log "github.com/golang/glog" - "github.com/youtube/vitess/go/db" - "github.com/youtube/vitess/go/exit" - "github.com/youtube/vitess/go/vt/client2" - _ "github.com/youtube/vitess/go/vt/client2/tablet" - "github.com/youtube/vitess/go/vt/logutil" -) - -var usage = ` -The parameters are first the SQL command, then the bound variables. -For query arguments, we assume place-holders in the query string -in the form of :v0, :v1, etc. 
-` - -var count = flag.Int("count", 1, "how many times to run the query") -var bindvars = FlagMap("bindvars", "bind vars as a json dictionary") -var server = flag.String("server", "localhost:6603/test_keyspace/0", "vtocc server as [user:password@]hostname:port/keyspace/shard[#keyrangestart-keyrangeend]") -var driver = flag.String("driver", "vttablet", "which driver to use (one of vttablet, vttablet-streaming, vtdb, vtdb-streaming)") -var verbose = flag.Bool("verbose", false, "show results") - -func init() { - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - flag.PrintDefaults() - fmt.Fprintf(os.Stderr, usage) - - } -} - -//---------------------------------- - -type Map map[string]interface{} - -func (m *Map) String() string { - b, err := json.Marshal(*m) - if err != nil { - return err.Error() - } - return string(b) -} - -func (m *Map) Set(s string) (err error) { - err = json.Unmarshal([]byte(s), m) - if err != nil { - return err - } - // json reads all numbers as float64 - // So, we just ditch floats for bindvars - for k, v := range *m { - f, ok := v.(float64) - if ok { - if f > 0 { - (*m)[k] = uint64(f) - } else { - (*m)[k] = int64(f) - } - } - } - - return nil -} - -// For internal flag compatibility -func (m *Map) Get() interface{} { - return m -} - -func FlagMap(name, usage string) (m map[string]interface{}) { - m = make(map[string]interface{}) - mm := Map(m) - flag.Var(&mm, name, usage) - return m -} - -// FIXME(alainjobart) this is a cheap trick. Should probably use the -// query parser if we needed this to be 100% reliable. -func isDml(sql string) bool { - lower := strings.TrimSpace(strings.ToLower(sql)) - return strings.HasPrefix(lower, "insert") || strings.HasPrefix(lower, "update") -} - -func main() { - defer exit.Recover() - defer logutil.Flush() - - flag.Parse() - args := flag.Args() - - if len(args) == 0 { - flag.Usage() - exit.Return(1) - } - - client2.RegisterShardedDrivers() - conn, err := db.Open(*driver, *server) - if err != nil { - log.Errorf("client error: %v", err) - exit.Return(1) - } - - log.Infof("Sending the query...") - now := time.Now() - - // handle dml - if isDml(args[0]) { - t, err := conn.Begin() - if err != nil { - log.Errorf("begin failed: %v", err) - exit.Return(1) - } - - r, err := conn.Exec(args[0], bindvars) - if err != nil { - log.Errorf("exec failed: %v", err) - exit.Return(1) - } - - err = t.Commit() - if err != nil { - log.Errorf("commit failed: %v", err) - exit.Return(1) - } - - n, err := r.RowsAffected() - log.Infof("Total time: %v / Row affected: %v", time.Now().Sub(now), n) - } else { - - // launch the query - r, err := conn.Exec(args[0], bindvars) - if err != nil { - log.Errorf("client error: %v", err) - exit.Return(1) - } - - // get the headers - cols := r.Columns() - if err != nil { - log.Errorf("client error: %v", err) - exit.Return(1) - } - - // print the header - if *verbose { - line := "Index" - for _, field := range cols { - line += "\t" + field - } - log.Infof(line) - } - - // get the rows - rowIndex := 0 - for row := r.Next(); row != nil; row = r.Next() { - // print the line if needed - if *verbose { - line := fmt.Sprintf("%d", rowIndex) - for _, value := range row { - if value != nil { - switch value.(type) { - case []byte: - line += fmt.Sprintf("\t%s", value) - default: - line += fmt.Sprintf("\t%v", value) - } - } else { - line += "\t" - } - } - log.Infof(line) - } - rowIndex++ - } - if err := r.Err(); err != nil { - log.Errorf("Error %v\n", err) - exit.Return(1) - } - log.Infof("Total time: %v / Row 
count: %v", time.Now().Sub(now), rowIndex) - } -} diff --git a/go/vt/client/client.go b/go/vt/client/client.go index d11ef942c8..fef6a8594a 100644 --- a/go/vt/client/client.go +++ b/go/vt/client/client.go @@ -58,7 +58,11 @@ type conn struct { func (c *conn) dial() error { var err error - c.vtgateConn, err = vtgateconn.DialProtocol(context.Background(), c.Protocol, c.Address, c.Timeout) + if c.Protocol == "" { + c.vtgateConn, err = vtgateconn.Dial(context.Background(), c.Address, c.Timeout) + } else { + c.vtgateConn, err = vtgateconn.DialProtocol(context.Background(), c.Protocol, c.Address, c.Timeout) + } return err } diff --git a/test/protocols_flavor.py b/test/protocols_flavor.py index 2c2e3b5c7f..b8d9389d25 100644 --- a/test/protocols_flavor.py +++ b/test/protocols_flavor.py @@ -22,6 +22,10 @@ class ProtocolsFlavor(object): """Returns the flags to use for specifying the query service protocol.""" return ['-tablet_protocol', 'gorpc'] + def vtgate_protocol_flags(self): + """Returns the flags to use for specifying the vtgate protocol.""" + return ['-vtgate_protocol', 'gorpc'] + def rpc_timeout_message(self): """Returns the error message used by the protocol to indicate a timeout.""" raise NotImplementedError('Implementations need to overwrite this') @@ -41,6 +45,9 @@ class GoRpcProtocolsFlavor(ProtocolsFlavor): def tabletconn_protocol_flags(self): return ['-tablet_protocol', 'gorpc'] + def vtgate_protocol_flags(self): + return ['-vtgate_protocol', 'gorpc'] + def rpc_timeout_message(self): return 'timeout waiting for' @@ -60,6 +67,9 @@ class GRpcProtocolsFlavor(ProtocolsFlavor): def tabletconn_protocol_flags(self): return ['-tablet_protocol', 'gorpc'] + def vtgate_protocol_flags(self): + return ['-vtgate_protocol', 'gorpc'] + def rpc_timeout_message(self): return 'timeout waiting for' diff --git a/test/utils.py b/test/utils.py index 3275e832c5..576be5cdc3 100644 --- a/test/utils.py +++ b/test/utils.py @@ -460,6 +460,25 @@ def vtgate_kill(sp): kill_sub_process(sp, soft=True) sp.wait() +def vtgate_vtclient(vtgate_port, sql, tablet_type='master', bindvars=None, + streaming=False, verbose=False, raise_on_error=False): + """vtgate_vtclient uses the vtclient binary to send a query to vtgate. + """ + args = environment.binary_args('vtclient') + [ + '-server', 'localhost:%u' % vtgate_port, + '-tablet_type', tablet_type] + protocols_flavor().vtgate_protocol_flags() + if bindvars: + args.extend(['-bind_variables', json.dumps(bindvars)]) + if streaming: + args.append('-streaming') + if verbose: + args.append('-alsologtostderr') + args.append(sql) + + out, err = run(args, raise_on_error=raise_on_error, trap_output=True) + out = out.splitlines() + return out, err + # vtctl helpers # The modes are not all equivalent, and we don't really thrive for it. # If a client needs to rely on vtctl's command line behavior, make diff --git a/test/vtgatev3_test.py b/test/vtgatev3_test.py index 102856e375..cb973a24a2 100755 --- a/test/vtgatev3_test.py +++ b/test/vtgatev3_test.py @@ -630,6 +630,23 @@ class TestVTGateFunctions(unittest.TestCase): finally: vtgate_conn.rollback() + def test_vtclient(self): + """This test uses vtclient to send and receive various queries. 
+ """ + utils.vtgate_vtclient(vtgate_port, 'insert into vt_user_extra(user_id, email) values (:v1, :v2)', bindvars=[10, "test 10"]) + + out, err = utils.vtgate_vtclient(vtgate_port, 'select * from vt_user_extra where user_id = :v1', bindvars=[10]) + self.assertEqual(out, ["Index\tuser_id\temail","0\t10\ttest 10"]) + + utils.vtgate_vtclient(vtgate_port, 'update vt_user_extra set email=:v2 where user_id = :v1', bindvars=[10, "test 1000"]) + + out, err = utils.vtgate_vtclient(vtgate_port, 'select * from vt_user_extra where user_id = :v1', bindvars=[10], streaming=True) + self.assertEqual(out, ["Index\tuser_id\temail","0\t10\ttest 1000"]) + + utils.vtgate_vtclient(vtgate_port, 'delete from vt_user_extra where user_id = :v1', bindvars=[10]) + + out, err = utils.vtgate_vtclient(vtgate_port, 'select * from vt_user_extra where user_id = :v1', bindvars=[10]) + self.assertEqual(out, ["Index\tuser_id\temail"]) if __name__ == '__main__': utils.main() From 6726dd31a9e2a44ddde9331c1b3a34f1b9921a83 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 28 May 2015 15:04:33 -0700 Subject: [PATCH 114/128] Removing loadgen, it's not used. --- test/goloadgen/goloadgen.go | 137 --------------------------- test/loadgen.py | 182 ------------------------------------ 2 files changed, 319 deletions(-) delete mode 100644 test/goloadgen/goloadgen.go delete mode 100755 test/loadgen.py diff --git a/test/goloadgen/goloadgen.go b/test/goloadgen/goloadgen.go deleted file mode 100644 index d365595a76..0000000000 --- a/test/goloadgen/goloadgen.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "flag" - "fmt" - "math/rand" - "os" - "sync" - "time" - - "github.com/youtube/vitess/go/vt/client2/tablet" -) - -func main() { - goroutines := flag.Int("goroutines", 100, "Number of client goroutines to run") - connections := flag.Int("connections", 20000, "Number of connections to create") - queries := flag.Int("queries", 1*1024*1024, "Numberof queries to run") - flag.Parse() - - perconnections := *connections / *goroutines - perqueries := *queries / *goroutines - for i := 0; i < *goroutines; i++ { - if i != *goroutines-1 { - go run(perconnections, perqueries) - *connections -= perconnections - *queries -= perqueries - } else { - go run(*connections, *queries) - } - } - registry.Wait() -} - -func run(connections, queries int) { - registry.Add(1) - defer registry.Done() - - conns := make([]*tablet.Conn, connections) - for i := 0; i < connections; i++ { - var err error - conns[i], err = tablet.DialTablet(server, false) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - return - } - } - time.Sleep(time.Second) - connid := 0 - count := 0 - bindVars := make(map[string]interface{}) - for { - for _, plan := range distribution { - bindVars["id"] = rand.Intn(10000) + 1 - for i := 0; i < plan.iterations; i++ { - _, err := conns[connid].Exec(baseQuery+plan.whereClause, bindVars) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - return - } - count++ - connid = (connid + 1) % connections - if count >= queries { - return - } - } - } - } -} - -var registry sync.WaitGroup - -var server = "localhost:9461/vt_test_keyspace" - -var baseQuery = ` -select - id, - num1, - char1, - char2, - num2, - char3, - char4, - char5, - num3, - char6, - char7, - date1, - char8, - char9, - num4, - char10, - num5, - num6, - num7, - num8, - char11, - num9, - num10, - num11, - num12, - 
num13, - num14, - char12, - num15, - num16, - num17, - num18, - num19 -from vt_load -` - -type RequestPlan struct { - rowsHint int - iterations, variations int - whereClause string -} - -var distribution = []RequestPlan{ - {1, 63000, 0, " where id = :id"}, - {0, 14000, 0, " where id = 0"}, - {10000, 1, 0, ""}, - {2, 3000, 0, " where id >= :id limit 2"}, - {1000, 1, 0, " where id >= :id limit 1000"}, - {5, 3000, 0, " where id >= :id limit 5"}, - {10, 3000, 0, " where id >= :id limit 10"}, - {20, 6000, 0, " where id >= :id limit 20"}, - {2000, 1, 0, " where id >= :id limit 2000"}, - {50, 7000, 0, " where id >= :id limit 50"}, - {100, 1000, 0, " where id >= :id limit 100"}, - {5000, 1, 0, " limit 5000"}, - {200, 1000, 0, " where id >= :id limit 200"}, - {500, 1000, 0, " where id >= :id limit 500"}, -} diff --git a/test/loadgen.py b/test/loadgen.py deleted file mode 100755 index f1ccc70ba6..0000000000 --- a/test/loadgen.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env python - -import json -import logging -import optparse -import os -import subprocess -import sys - -import utils - -from queryservice_tests import test_env - -create_table = """ -create table vt_load( - id bigint(20), - num1 int(10) unsigned, - char1 varchar(20), - char2 varchar(20), - num2 int(10) unsigned, - char3 varchar(140), - char4 char(1), - char5 varchar(40), - num3 int(10) unsigned, - char6 varchar(300), - char7 varchar(150), - date1 date, - char8 varchar(16), - char9 varchar(120), - num4 bigint(20), - char10 char(1), - num5 bigint(20), - num6 bigint(20), - num7 bigint(20), - num8 int(11), - char11 char(1), - num9 tinyint(3) unsigned, - num10 int(10) unsigned, - num11 bigint(20), - num12 bigint(20), - num13 bigint(20), - num14 int(10) unsigned, - char12 varchar(10), - num15 bigint(20) unsigned, - num16 bigint(20) unsigned, - num17 bigint(20) unsigned, - num18 bigint(20), - num19 int(10) unsigned, - PRIMARY KEY (id) -) ENGINE=InnoDB -""" - -insert = """ -insert into vt_load values( -%(id)s, -%(num1)s, -%(char1)s, -%(char2)s, -%(num2)s, -%(char3)s, -%(char4)s, -%(char5)s, -%(num3)s, -%(char6)s, -%(char7)s, -%(date1)s, -%(char8)s, -%(char9)s, -%(num4)s, -%(char10)s, -%(num5)s, -%(num6)s, -%(num7)s, -%(num8)s, -%(char11)s, -%(num9)s, -%(num10)s, -%(num11)s, -%(num12)s, -%(num13)s, -%(num14)s, -%(char12)s, -%(num15)s, -%(num16)s, -%(num17)s, -%(num18)s, -%(num19)s -) -""" - -fixed_values = { - "num1": 1114367205, - "char1": "abcdef", - "char2": None, - "num2": 8736, - "char3": "asdasdas@asdasd.asdas", - "char4": "a", - "char5": "11.22.33.44", - "num3": 0, - "char6": "Asdihdfiuevkdj", - "char7": "Basdfihjdsfoieuw", - "date1": "1995-02-06", - "char8": 98765, - "char9": "Poidsf", - "num4": 12323, - "char10": "b", - "num5": 4641528869078863271, - "num6": 123, - "num7": 12345, - "num8": 784233, - "char11": "A", - "num9": 94, - "num10": 1360128451, - "num11": -1328510013, - "num12": None, - "num13": 89343, - "num14": 384734, - "char12": "en_US", - "num15": 0, - "num16": None, - "num17": 12609028137540273996, - "num18": 329527359100, - "num19": 0, -} - -def init_data(env): - env.execute(create_table) - env.execute("begin") - for pk in range(10000): - fixed_values["id"] = pk + 1 - env.execute(insert, fixed_values) - env.execute("commit") - -if __name__ == "__main__": - parser = optparse.OptionParser(usage="usage: %prog [options] [test_names]") - # Options used by test_env - parser.add_option("-m", "--memcache", action="store_true", default=False, - help="starts a memcached, and tests rowcache") - parser.add_option("-e", "--env", 
default='vttablet,vtocc', - help="Environment that will be used. Valid options: vttablet, vtocc") - parser.add_option("-q", "--quiet", action="store_const", const=0, dest="verbose", default=1) - parser.add_option("-v", "--verbose", action="store_const", const=2, dest="verbose", default=1) - - # Options for the load generator - parser.add_option("--gomaxprocs", type="int", default="8") - parser.add_option("--goroutines", type="int", default=50) - parser.add_option("--connections", type="int", default=15000) - parser.add_option("--queries", type="int", default=3000000) - - (options, args) = parser.parse_args() - utils.options = options - logging.getLogger().setLevel(logging.ERROR) - - if options.env == 'vttablet': - env = test_env.VttabletTestEnv() - elif options.env == 'vtocc': - env = test_env.VtoccTestEnv() - else: - raise Exception("Valid options for -e: vtocc, vttablet") - - try: - os.environ["GOMAXPROCS"] = str(options.gomaxprocs) - env.setUp() - init_data(env) - subprocess.call([ - 'go', - 'run', - '%s/test/goloadgen/goloadgen.go' % env.vttop, - "--goroutines=%d" % options.goroutines, - "--connections=%d" % options.connections, - "--queries=%d" % options.queries]) - dvars = env.debug_vars() - print dvars["memstats"]["PauseNs"] - print dvars["memstats"]["BySize"] - dvars["memstats"]["PauseNs"] = None - dvars["memstats"]["BySize"] = None - json.dump(dvars, sys.stdout, indent=2, sort_keys=True) - print - json.dump(env.query_stats(), sys.stdout, indent=2) - print - finally: - env.tearDown() From f2d76eda2a9d830b9e047b1b5907f74449c72650 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 28 May 2015 15:17:19 -0700 Subject: [PATCH 115/128] Replacing use of 'vtctl Query' with direct vttablet connection. --- test/tabletmanager.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/test/tabletmanager.py b/test/tabletmanager.py index a577634a03..b50238aa06 100755 --- a/test/tabletmanager.py +++ b/test/tabletmanager.py @@ -95,12 +95,11 @@ class TestTabletManager(unittest.TestCase): tablet_62344.start_vttablet() # make sure the query service is started right away - result, _ = utils.run_vtctl(['Query', 'test_nj', 'test_keyspace', - 'select * from vt_select_test'], - mode=utils.VTCTL_VTCTL, trap_output=True) - rows = result.splitlines() - self.assertEqual(len(rows), 5, "expected 5 rows in vt_select_test: %s %s" % - (str(rows), result)) + conn = tablet_62344.conn() + results, rowcount, lastrowid, fields = conn._execute('select * from vt_select_test', {}) + self.assertEqual(len(results), 4, "expected 4 rows in vt_select_test: %s %s" % + (str(results), str(fields))) + conn.close() # make sure direct dba queries work query_result = utils.run_vtctl_json(['ExecuteFetchAsDba', '-want_fields', tablet_62344.tablet_alias, 'select * from vt_test_keyspace.vt_select_test']) From 880baf382891b379534ca39ff0e112c0d292ec03 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 28 May 2015 15:19:59 -0700 Subject: [PATCH 116/128] Removing obsolete 'vtctl Query'. 
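Remaining vtctl commands stay reachable programmatically through the
vtctlclient API that patch 112 introduced for tests. A minimal sketch of that
call shape, assuming a vtctl server at a placeholder address and an arbitrary
example command ('ListAllTablets test_nj' here) rather than anything this
patch prescribes:

package main

import (
	"fmt"
	"time"

	"github.com/youtube/vitess/go/vt/vtctl/vtctlclient"
	"golang.org/x/net/context"

	// Register the go rpc vtctl client implementation.
	_ "github.com/youtube/vitess/go/vt/vtctl/gorpcvtctlclient"
)

func main() {
	// Placeholder address of a running vtctl server.
	client, err := vtctlclient.New("localhost:15999", 30*time.Second)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Same call shape as VtctlPipe.Run: stream the log events,
	// then collect the final error.
	c, errFunc := client.ExecuteVtctlCommand(
		context.Background(), []string{"ListAllTablets", "test_nj"},
		30*time.Second, 10*time.Second)
	for le := range c {
		fmt.Println(le.String())
	}
	if err := errFunc(); err != nil {
		panic(err)
	}
}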
--- go/vt/vtctl/vtctl.go | 42 ------------------------------------------ 1 file changed, 42 deletions(-) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index c60678754f..db99554117 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -19,7 +19,6 @@ import ( "github.com/youtube/vitess/go/flagutil" "github.com/youtube/vitess/go/jscfg" "github.com/youtube/vitess/go/netutil" - "github.com/youtube/vitess/go/vt/client2" hk "github.com/youtube/vitess/go/vt/hook" "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/logutil" @@ -98,9 +97,6 @@ var commands = []commandGroup{ command{"HealthStream", commandHealthStream, "", "Streams the health status out of a tablet."}, - command{"Query", commandQuery, - " ", - "Send a SQL query to a tablet."}, command{"Sleep", commandSleep, " ", "Block the action queue for the specified duration (mostly for testing)."}, @@ -378,34 +374,6 @@ func dumpTablets(ctx context.Context, wr *wrangler.Wrangler, tabletAliases []top return nil } -func kquery(wr *wrangler.Wrangler, cell, keyspace, query string) error { - sconn, err := client2.Dial(wr.TopoServer(), cell, keyspace, "master", false, 5*time.Second) - if err != nil { - return err - } - rows, err := sconn.Exec(query, nil) - if err != nil { - return err - } - cols := rows.Columns() - wr.Logger().Printf("%v\n", strings.Join(cols, "\t")) - - rowStrs := make([]string, len(cols)+1) - for row := rows.Next(); row != nil; row = rows.Next() { - for i, value := range row { - switch value.(type) { - case []byte: - rowStrs[i] = fmt.Sprintf("%q", value) - default: - rowStrs[i] = fmt.Sprintf("%v", value) - } - } - - wr.Logger().Printf("%v\n", strings.Join(rowStrs, "\t")) - } - return nil -} - // getFileParam returns a string containing either flag is not "", // or the content of the file named flagFile func getFileParam(flag, flagFile, name string) (string, error) { @@ -857,16 +825,6 @@ func commandHealthStream(ctx context.Context, wr *wrangler.Wrangler, subFlags *f return errFunc() } -func commandQuery(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 3 { - return fmt.Errorf("action Query requires 3") - } - return kquery(wr, subFlags.Arg(0), subFlags.Arg(1), subFlags.Arg(2)) -} - func commandSleep(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { if err := subFlags.Parse(args); err != nil { return err From b84cc3405363904334a695751bcbe0d1e4634723 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 28 May 2015 15:24:22 -0700 Subject: [PATCH 117/128] Removing obsolete client2. --- go/vt/client2/deprecated_router.go | 290 ------------ go/vt/client2/deprecated_router_test.go | 105 ----- go/vt/client2/sharded.go | 592 ------------------------ go/vt/client2/tablet/tclient.go | 332 ------------- go/vt/client2/tablet/vclient.go | 178 ------- 5 files changed, 1497 deletions(-) delete mode 100644 go/vt/client2/deprecated_router.go delete mode 100644 go/vt/client2/deprecated_router_test.go delete mode 100644 go/vt/client2/sharded.go delete mode 100644 go/vt/client2/tablet/tclient.go delete mode 100644 go/vt/client2/tablet/vclient.go diff --git a/go/vt/client2/deprecated_router.go b/go/vt/client2/deprecated_router.go deleted file mode 100644 index accafeb057..0000000000 --- a/go/vt/client2/deprecated_router.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package client2 - -import ( - "fmt" - "strconv" - - "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/sqlparser" -) - -const ( - EID_NODE = iota - VALUE_NODE - LIST_NODE - OTHER_NODE -) - -type RoutingPlan struct { - criteria sqlparser.SQLNode -} - -func GetShardList(sql string, bindVariables map[string]interface{}, tabletKeys []key.KeyspaceId) (shardlist []int, err error) { - plan, err := buildPlan(sql) - if err != nil { - return nil, err - } - return shardListFromPlan(plan, bindVariables, tabletKeys) -} - -func buildPlan(sql string) (plan *RoutingPlan, err error) { - statement, err := sqlparser.Parse(sql) - if err != nil { - return nil, err - } - return getRoutingPlan(statement) -} - -func shardListFromPlan(plan *RoutingPlan, bindVariables map[string]interface{}, tabletKeys []key.KeyspaceId) (shardList []int, err error) { - if plan.criteria == nil { - return makeList(0, len(tabletKeys)), nil - } - - switch criteria := plan.criteria.(type) { - case sqlparser.Values: - index, err := findInsertShard(criteria, bindVariables, tabletKeys) - if err != nil { - return nil, err - } - return []int{index}, nil - case *sqlparser.ComparisonExpr: - switch criteria.Operator { - case "=", "<=>": - index, err := findShard(criteria.Right, bindVariables, tabletKeys) - if err != nil { - return nil, err - } - return []int{index}, nil - case "<", "<=": - index, err := findShard(criteria.Right, bindVariables, tabletKeys) - if err != nil { - return nil, err - } - return makeList(0, index+1), nil - case ">", ">=": - index, err := findShard(criteria.Right, bindVariables, tabletKeys) - if err != nil { - return nil, err - } - return makeList(index, len(tabletKeys)), nil - case "in": - return findShardList(criteria.Right, bindVariables, tabletKeys) - } - case *sqlparser.RangeCond: - if criteria.Operator == "between" { - start, err := findShard(criteria.From, bindVariables, tabletKeys) - if err != nil { - return nil, err - } - last, err := findShard(criteria.To, bindVariables, tabletKeys) - if err != nil { - return nil, err - } - if last < start { - start, last = last, start - } - return makeList(start, last+1), nil - } - } - return makeList(0, len(tabletKeys)), nil -} - -func getRoutingPlan(statement sqlparser.Statement) (plan *RoutingPlan, err error) { - plan = &RoutingPlan{} - if ins, ok := statement.(*sqlparser.Insert); ok { - if sel, ok := ins.Rows.(sqlparser.SelectStatement); ok { - return getRoutingPlan(sel) - } - plan.criteria, err = routingAnalyzeValues(ins.Rows.(sqlparser.Values)) - if err != nil { - return nil, err - } - return plan, nil - } - var where *sqlparser.Where - switch stmt := statement.(type) { - case *sqlparser.Select: - where = stmt.Where - case *sqlparser.Update: - where = stmt.Where - case *sqlparser.Delete: - where = stmt.Where - } - if where != nil { - plan.criteria = routingAnalyzeBoolean(where.Expr) - } - return plan, nil -} - -func routingAnalyzeValues(vals sqlparser.Values) (sqlparser.Values, error) { - // Analyze first value of every item in the list - for i := 0; i < len(vals); i++ { - switch tuple := vals[i].(type) { - case sqlparser.ValTuple: - result := routingAnalyzeValue(tuple[0]) - if result != VALUE_NODE { - return nil, fmt.Errorf("insert is too complex") - } - default: - return nil, fmt.Errorf("insert is too complex") - } - } - return vals, nil -} - -func routingAnalyzeBoolean(node sqlparser.BoolExpr) sqlparser.BoolExpr { - switch node := 
node.(type) { - case *sqlparser.AndExpr: - left := routingAnalyzeBoolean(node.Left) - right := routingAnalyzeBoolean(node.Right) - if left != nil && right != nil { - return nil - } else if left != nil { - return left - } else { - return right - } - case *sqlparser.ParenBoolExpr: - return routingAnalyzeBoolean(node.Expr) - case *sqlparser.ComparisonExpr: - switch { - case sqlparser.StringIn(node.Operator, "=", "<", ">", "<=", ">=", "<=>"): - left := routingAnalyzeValue(node.Left) - right := routingAnalyzeValue(node.Right) - if (left == EID_NODE && right == VALUE_NODE) || (left == VALUE_NODE && right == EID_NODE) { - return node - } - case node.Operator == "in": - left := routingAnalyzeValue(node.Left) - right := routingAnalyzeValue(node.Right) - if left == EID_NODE && right == LIST_NODE { - return node - } - } - case *sqlparser.RangeCond: - if node.Operator != "between" { - return nil - } - left := routingAnalyzeValue(node.Left) - from := routingAnalyzeValue(node.From) - to := routingAnalyzeValue(node.To) - if left == EID_NODE && from == VALUE_NODE && to == VALUE_NODE { - return node - } - } - return nil -} - -func routingAnalyzeValue(valExpr sqlparser.ValExpr) int { - switch node := valExpr.(type) { - case *sqlparser.ColName: - if string(node.Name) == "entity_id" { - return EID_NODE - } - case sqlparser.ValTuple: - for _, n := range node { - if routingAnalyzeValue(n) != VALUE_NODE { - return OTHER_NODE - } - } - return LIST_NODE - case sqlparser.StrVal, sqlparser.NumVal, sqlparser.ValArg: - return VALUE_NODE - } - return OTHER_NODE -} - -func findShardList(valExpr sqlparser.ValExpr, bindVariables map[string]interface{}, tabletKeys []key.KeyspaceId) ([]int, error) { - shardset := make(map[int]bool) - switch node := valExpr.(type) { - case sqlparser.ValTuple: - for _, n := range node { - index, err := findShard(n, bindVariables, tabletKeys) - if err != nil { - return nil, err - } - shardset[index] = true - } - } - shardlist := make([]int, len(shardset)) - index := 0 - for k := range shardset { - shardlist[index] = k - index++ - } - return shardlist, nil -} - -func findInsertShard(vals sqlparser.Values, bindVariables map[string]interface{}, tabletKeys []key.KeyspaceId) (int, error) { - index := -1 - for i := 0; i < len(vals); i++ { - first_value_expression := vals[i].(sqlparser.ValTuple)[0] - newIndex, err := findShard(first_value_expression, bindVariables, tabletKeys) - if err != nil { - return -1, err - } - if index == -1 { - index = newIndex - } else if index != newIndex { - return -1, fmt.Errorf("insert has multiple shard targets") - } - } - return index, nil -} - -func findShard(valExpr sqlparser.ValExpr, bindVariables map[string]interface{}, tabletKeys []key.KeyspaceId) (int, error) { - value, err := getBoundValue(valExpr, bindVariables) - if err != nil { - return -1, err - } - return key.FindShardForValue(value, tabletKeys), nil -} - -func getBoundValue(valExpr sqlparser.ValExpr, bindVariables map[string]interface{}) (string, error) { - switch node := valExpr.(type) { - case sqlparser.ValTuple: - if len(node) != 1 { - return "", fmt.Errorf("tuples not allowed as insert values") - } - // TODO: Change parser to create single value tuples into non-tuples. 
- return getBoundValue(node[0], bindVariables) - case sqlparser.StrVal: - return string(node), nil - case sqlparser.NumVal: - val, err := strconv.ParseInt(string(node), 10, 64) - if err != nil { - return "", err - } - return key.Uint64Key(val).String(), nil - case sqlparser.ValArg: - value, err := findBindValue(node, bindVariables) - if err != nil { - return "", err - } - return key.EncodeValue(value), nil - } - panic("Unexpected token") -} - -func findBindValue(valArg sqlparser.ValArg, bindVariables map[string]interface{}) (interface{}, error) { - if bindVariables == nil { - return nil, fmt.Errorf("No bind variable for " + string(valArg)) - } - value, ok := bindVariables[string(valArg[1:])] - if !ok { - return nil, fmt.Errorf("No bind variable for " + string(valArg)) - } - return value, nil -} - -func makeList(start, end int) []int { - list := make([]int, end-start) - for i := start; i < end; i++ { - list[i-start] = i - } - return list -} diff --git a/go/vt/client2/deprecated_router_test.go b/go/vt/client2/deprecated_router_test.go deleted file mode 100644 index 5d9bc57b34..0000000000 --- a/go/vt/client2/deprecated_router_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package client2 - -import ( - "bufio" - "fmt" - "io" - "os" - "sort" - "strings" - "testing" - - "github.com/youtube/vitess/go/testfiles" - "github.com/youtube/vitess/go/vt/key" -) - -func TestRouting(t *testing.T) { - tabletkeys := []key.KeyspaceId{ - "\x00\x00\x00\x00\x00\x00\x00\x02", - "\x00\x00\x00\x00\x00\x00\x00\x04", - "\x00\x00\x00\x00\x00\x00\x00\x06", - "a", - "b", - "d", - } - bindVariables := make(map[string]interface{}) - bindVariables["id0"] = 0 - bindVariables["id2"] = 2 - bindVariables["id3"] = 3 - bindVariables["id4"] = 4 - bindVariables["id6"] = 6 - bindVariables["id8"] = 8 - bindVariables["ids"] = []interface{}{1, 4} - bindVariables["a"] = "a" - bindVariables["b"] = "b" - bindVariables["c"] = "c" - bindVariables["d"] = "d" - bindVariables["e"] = "e" - for tcase := range iterateFiles("sqlparser_test/routing_cases.txt") { - if tcase.output == "" { - tcase.output = tcase.input - } - out, err := GetShardList(tcase.input, bindVariables, tabletkeys) - if err != nil { - if err.Error() != tcase.output { - t.Error(fmt.Sprintf("Line:%v\n%s\n%s", tcase.lineno, tcase.input, err)) - } - continue - } - sort.Ints(out) - outstr := fmt.Sprintf("%v", out) - if outstr != tcase.output { - t.Error(fmt.Sprintf("Line:%v\n%s\n%s", tcase.lineno, tcase.output, outstr)) - } - } -} - -// TODO(sougou): This is now duplicated in three plcaes. Refactor. 
-type testCase struct { - file string - lineno int - input string - output string -} - -func iterateFiles(pattern string) (testCaseIterator chan testCase) { - names := testfiles.Glob(pattern) - testCaseIterator = make(chan testCase) - go func() { - defer close(testCaseIterator) - for _, name := range names { - fd, err := os.OpenFile(name, os.O_RDONLY, 0) - if err != nil { - panic(fmt.Sprintf("Could not open file %s", name)) - } - - r := bufio.NewReader(fd) - lineno := 0 - for { - line, err := r.ReadString('\n') - lines := strings.Split(strings.TrimRight(line, "\n"), "#") - lineno++ - if err != nil { - if err != io.EOF { - panic(fmt.Sprintf("Error reading file %s: %s", name, err.Error())) - } - break - } - input := lines[0] - output := "" - if len(lines) > 1 { - output = lines[1] - } - if input == "" { - continue - } - testCaseIterator <- testCase{name, lineno, input, output} - } - } - }() - return testCaseIterator -} diff --git a/go/vt/client2/sharded.go b/go/vt/client2/sharded.go deleted file mode 100644 index 2ef7f08383..0000000000 --- a/go/vt/client2/sharded.go +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package client2 - -import ( - "fmt" - "net/url" - "path" - "strings" - "sync" - "time" - - "github.com/youtube/vitess/go/db" - mproto "github.com/youtube/vitess/go/mysql/proto" - "github.com/youtube/vitess/go/vt/client2/tablet" - "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/zktopo" - "github.com/youtube/vitess/go/zk" - "golang.org/x/net/context" -) - -// The sharded client handles writing to multiple shards across the -// database. -// -// The ShardedConn can handles several separate aspects: -// * loading/reloading tablet addresses on demand from zk -// * maintaining at most one connection to each tablet as required -// * transaction tracking across shards -// * preflight checking all transactions before attempting to commit -// (reduce partial commit probability) -// -// NOTE: Queries with aggregate results will not produce expected -// results right now. For instance, running a count(*) on a table -// across all tablets will return one row per tablet. In the future, -// the SQL parser and query engine can handle these more -// automatically. For now, clients will have to do the rollup at a -// higher level. - -var ( - ErrNotConnected = fmt.Errorf("vt: not connected") -) - -type VtClientError struct { - msg string - partial bool -} - -func (err VtClientError) Error() string { - return err.msg -} - -func (err VtClientError) Partial() bool { - return err.partial -} - -// Not thread safe, as per sql package. -type ShardedConn struct { - ts topo.Server - cell string - keyspace string - tabletType topo.TabletType - stream bool // Use streaming RPC - - srvKeyspace *topo.SrvKeyspace - // Keep a map per shard mapping tabletType to a real connection. - // connByType []map[string]*Conn - - // Sorted list of the max keys for each shard. - shardMaxKeys []key.KeyspaceId - conns []*tablet.VtConn - - timeout time.Duration // How long should we wait for a given operation? - - // Currently running transaction (or nil if not inside a transaction) - currentTransaction *MetaTx -} - -// FIXME(msolomon) Normally a connect method would actually connect up -// to the appropriate endpoints. In the distributed case, it's unclear -// that this is necessary. 
You have to deal with transient failures -// anyway, so the whole system degenerates to managing connections on -// demand. -func Dial(ts topo.Server, cell, keyspace string, tabletType topo.TabletType, stream bool, timeout time.Duration) (*ShardedConn, error) { - sc := &ShardedConn{ - ts: ts, - cell: cell, - keyspace: keyspace, - tabletType: tabletType, - stream: stream, - } - err := sc.readKeyspace() - if err != nil { - return nil, err - } - return sc, nil -} - -func (sc *ShardedConn) Close() error { - if sc.conns == nil { - return nil - } - if sc.currentTransaction != nil { - sc.rollback() - } - - for _, conn := range sc.conns { - if conn != nil { - conn.Close() - } - } - sc.conns = nil - sc.srvKeyspace = nil - sc.shardMaxKeys = nil - return nil -} - -func (sc *ShardedConn) readKeyspace() error { - ctx := context.TODO() - sc.Close() - var err error - sc.srvKeyspace, err = sc.ts.GetSrvKeyspace(ctx, sc.cell, sc.keyspace) - if err != nil { - return fmt.Errorf("vt: GetSrvKeyspace failed %v", err) - } - - sc.conns = make([]*tablet.VtConn, len(sc.srvKeyspace.Partitions[sc.tabletType].ShardReferences)) - sc.shardMaxKeys = make([]key.KeyspaceId, len(sc.srvKeyspace.Partitions[sc.tabletType].ShardReferences)) - - for i, shardReference := range sc.srvKeyspace.Partitions[sc.tabletType].ShardReferences { - sc.shardMaxKeys[i] = shardReference.KeyRange.End - } - - // Disabled for now. - // sc.connByType = make([]map[string]*Conn, len(sc.srvKeyspace.ShardReferences)) - // for i := 0; i < len(sc.connByType); i++ { - // sc.connByType[i] = make(map[string]*Conn, 8) - // } - return nil -} - -// A "transaction" that may be across and thus, not transactional at -// this point. -type MetaTx struct { - // The connections involved in this transaction, in the order they - // were added to the transaction. 
- shardedConn *ShardedConn - conns []*tablet.VtConn -} - -// makes sure the given transaction was issued a Begin() call -func (tx *MetaTx) begin(conn *tablet.VtConn) (err error) { - for _, v := range tx.conns { - if v == conn { - return - } - } - - _, err = conn.Begin() - if err != nil { - // the caller will need to take care of the rollback, - // and therefore issue a rollback on all pre-existing - // transactions - return err - } - tx.conns = append(tx.conns, conn) - return nil -} - -func (tx *MetaTx) Commit() (err error) { - if tx.shardedConn.currentTransaction == nil { - return tablet.ErrBadRollback - } - - commit := true - for _, conn := range tx.conns { - if commit { - if err = conn.Commit(); err != nil { - commit = false - } - } - if !commit { - conn.Rollback() - } - } - tx.shardedConn.currentTransaction = nil - return err -} - -func (tx *MetaTx) Rollback() error { - if tx.shardedConn.currentTransaction == nil { - return tablet.ErrBadRollback - } - var someErr error - for _, conn := range tx.conns { - if err := conn.Rollback(); err != nil { - someErr = err - } - } - tx.shardedConn.currentTransaction = nil - return someErr -} - -func (sc *ShardedConn) Begin() (db.Tx, error) { - if sc.srvKeyspace == nil { - return nil, ErrNotConnected - } - if sc.currentTransaction != nil { - return nil, tablet.ErrNoNestedTxn - } - tx := &MetaTx{sc, make([]*tablet.VtConn, 0, 32)} - sc.currentTransaction = tx - return tx, nil -} - -func (sc *ShardedConn) rollback() error { - if sc.currentTransaction == nil { - return tablet.ErrBadRollback - } - var someErr error - for _, conn := range sc.conns { - if conn.TransactionId != 0 { - if err := conn.Rollback(); err != nil { - someErr = err - } - } - } - sc.currentTransaction = nil - return someErr -} - -func (sc *ShardedConn) Exec(query string, bindVars map[string]interface{}) (db.Result, error) { - if sc.srvKeyspace == nil { - return nil, ErrNotConnected - } - shards, err := GetShardList(query, bindVars, sc.shardMaxKeys) - if err != nil { - return nil, err - } - if sc.stream { - return sc.execOnShardsStream(query, bindVars, shards) - } - return sc.execOnShards(query, bindVars, shards) -} - -// FIXME(msolomon) define key interface "Keyer" or force a concrete type? 
-func (sc *ShardedConn) ExecWithKey(query string, bindVars map[string]interface{}, keyVal interface{}) (db.Result, error) { - shardIdx, err := key.FindShardForKey(keyVal, sc.shardMaxKeys) - if err != nil { - return nil, err - } - if sc.stream { - return sc.execOnShardsStream(query, bindVars, []int{shardIdx}) - } - return sc.execOnShards(query, bindVars, []int{shardIdx}) -} - -type tabletResult struct { - error - *tablet.Result -} - -func (sc *ShardedConn) execOnShards(query string, bindVars map[string]interface{}, shards []int) (metaResult *tablet.Result, err error) { - rchan := make(chan tabletResult, len(shards)) - for _, shardIdx := range shards { - go func(shardIdx int) { - qr, err := sc.execOnShard(query, bindVars, shardIdx) - if err != nil { - rchan <- tabletResult{error: err} - } else { - rchan <- tabletResult{Result: qr.(*tablet.Result)} - } - }(shardIdx) - } - - results := make([]tabletResult, len(shards)) - rowCount := int64(0) - rowsAffected := int64(0) - lastInsertId := int64(0) - var hasError error - for i := range results { - results[i] = <-rchan - if results[i].error != nil { - hasError = results[i].error - continue - } - affected, _ := results[i].RowsAffected() - insertId, _ := results[i].LastInsertId() - rowsAffected += affected - if insertId > 0 { - if lastInsertId == 0 { - lastInsertId = insertId - } - // FIXME(msolomon) issue an error when you have multiple last inserts? - } - rowCount += results[i].RowsRetrieved() - } - - // FIXME(msolomon) allow partial result set? - if hasError != nil { - return nil, fmt.Errorf("vt: partial result set (%v)", hasError) - } - - for _, tr := range results { - if tr.error != nil { - return nil, tr.error - } - // FIXME(msolomon) This error message should be a const. Should this - // be deferred until we get a next query? - if tr.error != nil && tr.error.Error() == "retry: unavailable" { - sc.readKeyspace() - } - } - - var fields []mproto.Field - if len(results) > 0 { - fields = results[0].Fields() - } - - // check the schemas all match (both names and types) - if len(results) > 1 { - firstFields := results[0].Fields() - for _, r := range results[1:] { - fields := r.Fields() - if len(fields) != len(firstFields) { - return nil, fmt.Errorf("vt: column count mismatch: %v != %v", len(firstFields), len(fields)) - } - for i, name := range fields { - if name.Name != firstFields[i].Name { - return nil, fmt.Errorf("vt: column[%v] name mismatch: %v != %v", i, name.Name, firstFields[i].Name) - } - } - } - } - - // Combine results. - metaResult = tablet.NewResult(rowCount, rowsAffected, lastInsertId, fields) - curIndex := 0 - rows := metaResult.Rows() - for _, tr := range results { - for _, row := range tr.Rows() { - rows[curIndex] = row - curIndex++ - } - } - - return metaResult, nil -} - -func (sc *ShardedConn) execOnShard(query string, bindVars map[string]interface{}, shardIdx int) (db.Result, error) { - if sc.conns[shardIdx] == nil { - conn, err := sc.dial(shardIdx) - if err != nil { - return nil, err - } - sc.conns[shardIdx] = conn - } - conn := sc.conns[shardIdx] - - // if we haven't started the transaction on that shard and need to, now is the time - if sc.currentTransaction != nil { - err := sc.currentTransaction.begin(conn) - if err != nil { - return nil, err - } - } - - // Retries should have already taken place inside the tablet connection. - // At this point, all that's left are more sinister failures. - // FIXME(msolomon) reload just this shard unless the failure pertains to - // needing to reload the entire keyspace. 
- return conn.Exec(query, bindVars) -} - -// when doing a streaming query, we send this structure back -type streamTabletResult struct { - error - row []interface{} -} - -// our streaming result, just aggregates from all streaming results -// it implements both driver.Result and driver.Rows -type multiStreamResult struct { - cols []string - - // results flow through this, maybe with errors - rows chan streamTabletResult - - err error -} - -// driver.Result interface -func (*multiStreamResult) LastInsertId() (int64, error) { - return 0, tablet.ErrNoLastInsertId -} - -func (*multiStreamResult) RowsAffected() (int64, error) { - return 0, tablet.ErrNoRowsAffected -} - -// driver.Rows interface -func (sr *multiStreamResult) Columns() []string { - return sr.cols -} - -func (sr *multiStreamResult) Close() error { - close(sr.rows) - return nil -} - -// read from the stream and gets the next value -// if one of the go routines returns an error, we want to save it and return it -// eventually. (except if it's EOF, then we just know that routine is done) -func (sr *multiStreamResult) Next() (row []interface{}) { - for { - str, ok := <-sr.rows - if !ok { - return nil - } - if str.error != nil { - sr.err = str.error - continue - } - return str.row - } -} - -func (sr *multiStreamResult) Err() error { - return sr.err -} - -func (sc *ShardedConn) execOnShardsStream(query string, bindVars map[string]interface{}, shards []int) (msr *multiStreamResult, err error) { - // we synchronously do the exec on each shard - // so we can get the Columns from the first one - // and check the others match them - var cols []string - qrs := make([]db.Result, len(shards)) - for i, shardIdx := range shards { - qr, err := sc.execOnShard(query, bindVars, shardIdx) - if err != nil { - // FIXME(alainjobart) if the first queries went through - // we need to cancel them - return nil, err - } - - // we know the result is a tablet.StreamResult, - // and we use it as a driver.Rows - qrs[i] = qr.(db.Result) - - // save the columns or check they match - if i == 0 { - cols = qrs[i].Columns() - } else { - ncols := qrs[i].Columns() - if len(ncols) != len(cols) { - return nil, fmt.Errorf("vt: column count mismatch: %v != %v", len(ncols), len(cols)) - } - for i, name := range cols { - if name != ncols[i] { - return nil, fmt.Errorf("vt: column[%v] name mismatch: %v != %v", i, name, ncols[i]) - } - } - } - } - - // now we create the result, its channel, and run background - // routines to stream results - msr = &multiStreamResult{cols: cols, rows: make(chan streamTabletResult, 10*len(shards))} - var wg sync.WaitGroup - for i, shardIdx := range shards { - wg.Add(1) - go func(i, shardIdx int) { - defer wg.Done() - for row := qrs[i].Next(); row != nil; row = qrs[i].Next() { - msr.rows <- streamTabletResult{row: row} - } - if err := qrs[i].Err(); err != nil { - msr.rows <- streamTabletResult{error: err} - } - }(i, shardIdx) - } - - // Close channel once all data has been sent - go func() { - wg.Wait() - close(msr.rows) - }() - - return msr, nil -} - -/* -type ClientQuery struct { - Sql string - BindVariables map[string]interface{} -} - -// FIXME(msolomon) There are multiple options for an efficient ExecMulti. -// * Use a special stmt object, buffer all statements, connections, etc and send when it's ready. -// * Take a list of (sql, bind) pairs and just send that - have to parse and route that anyway. -// * Probably need separate support for the a MultiTx too. 
-func (sc *ShardedConn) ExecuteBatch(queryList []ClientQuery, keyVal interface{}) (*tabletserver.QueryResult, error) { - shardIdx, err := key.FindShardForKey(keyVal, sc.shardMaxKeys) - shards := []int{shardIdx} - - if err = sc.tabletPrepare(shardIdx); err != nil { - return nil, err - } - - reqs := make([]tabletserver.Query, len(queryList)) - for i, cq := range queryList { - reqs[i] = tabletserver.Query{ - Sql: cq.Sql, - BindVariables: cq.BindVariables, - TransactionId: sc.conns[shardIdx].TransactionId, - SessionId: sc.conns[shardIdx].SessionId, - } - } - res := new(tabletserver.QueryResult) - err = sc.conns[shardIdx].Call("SqlQuery.ExecuteBatch", reqs, res) - if err != nil { - return nil, err - } - return res, nil -} -*/ - -func (sc *ShardedConn) dial(shardIdx int) (conn *tablet.VtConn, err error) { - ctx := context.TODO() - shardReference := &(sc.srvKeyspace.Partitions[sc.tabletType].ShardReferences[shardIdx]) - addrs, err := sc.ts.GetEndPoints(ctx, sc.cell, sc.keyspace, shardReference.Name, sc.tabletType) - if err != nil { - return nil, fmt.Errorf("vt: GetEndPoints failed %v", err) - } - - srvs, err := topo.SrvEntries(addrs, "") - if err != nil { - return nil, err - } - - // Try to connect to any address. - for _, srv := range srvs { - name := topo.SrvAddr(srv) + "/" + sc.keyspace + "/" + shardReference.Name - conn, err = tablet.DialVtdb(name, sc.stream, tablet.DefaultTimeout) - if err == nil { - return conn, nil - } - } - return nil, err -} - -type sDriver struct { - ts topo.Server - stream bool -} - -// for direct zk connection: vtzk://host:port/cell/keyspace/tabletType -// we always use a MetaConn, host and port are ignored. -// the driver name dictates if we streaming or not -func (driver *sDriver) Open(name string) (sc db.Conn, err error) { - if !strings.HasPrefix(name, "vtzk://") { - // add a default protocol talking to zk - name = "vtzk://" + name - } - u, err := url.Parse(name) - if err != nil { - return nil, err - } - - dbi, tabletType := path.Split(u.Path) - dbi = strings.Trim(dbi, "/") - tabletType = strings.Trim(tabletType, "/") - cell, keyspace := path.Split(dbi) - cell = strings.Trim(cell, "/") - keyspace = strings.Trim(keyspace, "/") - return Dial(driver.ts, cell, keyspace, topo.TabletType(tabletType), driver.stream, tablet.DefaultTimeout) -} - -func RegisterShardedDrivers() { - // default topo server - ts := topo.GetServer() - db.Register("vtdb", &sDriver{ts, false}) - db.Register("vtdb-streaming", &sDriver{ts, true}) - - // forced zk topo server - zconn := zk.NewMetaConn() - zkts := zktopo.NewServer(zconn) - db.Register("vtdb-zk", &sDriver{zkts, false}) - db.Register("vtdb-zk-streaming", &sDriver{zkts, true}) -} diff --git a/go/vt/client2/tablet/tclient.go b/go/vt/client2/tablet/tclient.go deleted file mode 100644 index 4cd82e0901..0000000000 --- a/go/vt/client2/tablet/tclient.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tablet is an API compliant to the requirements of database/sql -// Open expects name to be "hostname:port/keyspace/shard" -// For query arguments, we assume place-holders in the query string -// in the form of :v0, :v1, etc. 
-package tablet - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" - - log "github.com/golang/glog" - "github.com/youtube/vitess/go/db" - mproto "github.com/youtube/vitess/go/mysql/proto" - "github.com/youtube/vitess/go/netutil" - "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" - "github.com/youtube/vitess/go/vt/topo" - "golang.org/x/net/context" -) - -var ( - ErrNoNestedTxn = errors.New("vt: no nested transactions") - ErrBadCommit = errors.New("vt: commit without corresponding begin") - ErrBadRollback = errors.New("vt: rollback without corresponding begin") - ErrNoLastInsertId = errors.New("vt: no LastInsertId available after streaming statement") - ErrNoRowsAffected = errors.New("vt: no RowsAffected available after streaming statement") - ErrFieldLengthMismatch = errors.New("vt: no RowsAffected available after streaming statement") -) - -type TabletError struct { - err error - addr string -} - -func (te TabletError) Error() string { - return fmt.Sprintf("vt: client error on %v %v", te.addr, te.err) -} - -// Not thread safe, as per sql package. -type Conn struct { - dbi *url.URL - stream bool - tabletConn tabletconn.TabletConn - TransactionId int64 - timeout time.Duration -} - -type Tx struct { - conn *Conn -} - -type StreamResult struct { - errFunc tabletconn.ErrFunc - sr <-chan *mproto.QueryResult - columns *mproto.QueryResult - // current result and index on it - qr *mproto.QueryResult - index int - err error -} - -func (conn *Conn) keyspace() string { - return strings.Split(conn.dbi.Path, "/")[1] -} - -func (conn *Conn) shard() string { - return strings.Split(conn.dbi.Path, "/")[2] -} - -// parseDbi parses the dbi and a URL. The dbi may or may not contain -// the scheme part. -func parseDbi(dbi string) (*url.URL, error) { - if !strings.HasPrefix(dbi, "vttp://") { - dbi = "vttp://" + dbi - } - return url.Parse(dbi) -} - -func DialTablet(dbi string, stream bool, timeout time.Duration) (conn *Conn, err error) { - conn = new(Conn) - if conn.dbi, err = parseDbi(dbi); err != nil { - return - } - conn.stream = stream - conn.timeout = timeout - if err = conn.dial(); err != nil { - return nil, conn.fmtErr(err) - } - return -} - -// Format error for exported methods to give callers more information. 
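// parseDbi above normalizes a DSN by prepending the default scheme
// before handing it to net/url. The same trick as a standalone sketch
// (assumes net/url and strings are imported):

func parseWithDefaultScheme(dbi, scheme string) (*url.URL, error) {
	if !strings.HasPrefix(dbi, scheme+"://") {
		dbi = scheme + "://" + dbi
	}
	return url.Parse(dbi)
}

// Both parseWithDefaultScheme("host:8080/ks/0", "vttp") and
// parseWithDefaultScheme("vttp://host:8080/ks/0", "vttp") parse to the
// same URL, so callers may omit the scheme.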
-func (conn *Conn) fmtErr(err error) error { - if err == nil { - return nil - } - return TabletError{err, conn.dbi.Host} -} - -func (conn *Conn) dial() (err error) { - // build the endpoint in the right format - host, port, err := netutil.SplitHostPort(conn.dbi.Host) - if err != nil { - return err - } - endPoint := topo.EndPoint{ - Host: host, - NamedPortMap: map[string]int{ - "vt": port, - }, - } - - // and dial - tabletConn, err := tabletconn.GetDialer()(context.TODO(), endPoint, conn.keyspace(), conn.shard(), conn.timeout) - if err != nil { - return err - } - conn.tabletConn = tabletConn - return -} - -func (conn *Conn) Close() error { - conn.tabletConn.Close() - return nil -} - -func (conn *Conn) Exec(query string, bindVars map[string]interface{}) (db.Result, error) { - if conn.stream { - sr, errFunc, err := conn.tabletConn.StreamExecute(context.TODO(), query, bindVars, conn.TransactionId) - if err != nil { - return nil, conn.fmtErr(err) - } - // read the columns, or grab the error - cols, ok := <-sr - if !ok { - return nil, conn.fmtErr(errFunc()) - } - return &StreamResult{errFunc, sr, cols, nil, 0, nil}, nil - } - - qr, err := conn.tabletConn.Execute(context.TODO(), query, bindVars, conn.TransactionId) - if err != nil { - return nil, conn.fmtErr(err) - } - return &Result{qr, 0, nil}, nil -} - -func (conn *Conn) Begin() (db.Tx, error) { - if conn.TransactionId != 0 { - return &Tx{}, ErrNoNestedTxn - } - if transactionId, err := conn.tabletConn.Begin(context.TODO()); err != nil { - return &Tx{}, conn.fmtErr(err) - } else { - conn.TransactionId = transactionId - } - return &Tx{conn}, nil -} - -func (conn *Conn) Commit() error { - if conn.TransactionId == 0 { - return ErrBadCommit - } - // NOTE(msolomon) Unset the transaction_id irrespective of the RPC's - // response. The intent of commit is that no more statements can be - // made on this transaction, so we guarantee that. Transient errors - // between the db and the client shouldn't affect this part of the - // bookkeeping. According to the Go Driver API, this will not be - // called concurrently. Defer this because we this affects the - // session referenced in the request. - defer func() { conn.TransactionId = 0 }() - return conn.fmtErr(conn.tabletConn.Commit(context.TODO(), conn.TransactionId)) -} - -func (conn *Conn) Rollback() error { - if conn.TransactionId == 0 { - return ErrBadRollback - } - // See note in Commit about the behavior of TransactionId. - defer func() { conn.TransactionId = 0 }() - return conn.fmtErr(conn.tabletConn.Rollback(context.TODO(), conn.TransactionId)) -} - -// driver.Tx interface (forwarded to Conn) -func (tx *Tx) Commit() error { - return tx.conn.Commit() -} - -func (tx *Tx) Rollback() error { - return tx.conn.Rollback() -} - -type Result struct { - qr *mproto.QueryResult - index int - err error -} - -// TODO(mberlin): Populate flags here as well (e.g. to correctly identify unsigned integer type)? 
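// Commit and Rollback above share one invariant: TransactionId is
// cleared via defer whether or not the RPC succeeds, so no further
// statements can ride on a possibly dead transaction. Distilled into a
// minimal sketch (txConn is a hypothetical stand-in, not the Vitess
// type; assumes the errors package is imported):

type txConn struct {
	transactionID int64
}

func (c *txConn) end(call func(txID int64) error) error {
	if c.transactionID == 0 {
		return errors.New("vt: not in a transaction")
	}
	// Unset unconditionally, mirroring the defer in Commit/Rollback.
	defer func() { c.transactionID = 0 }()
	return call(c.transactionID)
}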
-func NewResult(rowCount, rowsAffected, insertId int64, fields []mproto.Field) *Result { - return &Result{ - qr: &mproto.QueryResult{ - Rows: make([][]sqltypes.Value, int(rowCount)), - Fields: fields, - RowsAffected: uint64(rowsAffected), - InsertId: uint64(insertId), - }, - } -} - -func (result *Result) RowsRetrieved() int64 { - return int64(len(result.qr.Rows)) -} - -func (result *Result) LastInsertId() (int64, error) { - return int64(result.qr.InsertId), nil -} - -func (result *Result) RowsAffected() (int64, error) { - return int64(result.qr.RowsAffected), nil -} - -// driver.Rows interface -func (result *Result) Columns() []string { - cols := make([]string, len(result.qr.Fields)) - for i, f := range result.qr.Fields { - cols[i] = f.Name - } - return cols -} - -func (result *Result) Rows() [][]sqltypes.Value { - return result.qr.Rows -} - -// FIXME(msolomon) This should be intependent of the mysql module. -func (result *Result) Fields() []mproto.Field { - return result.qr.Fields -} - -func (result *Result) Close() error { - result.index = 0 - return nil -} - -func (result *Result) Next() (row []interface{}) { - if result.index >= len(result.qr.Rows) { - return nil - } - row = make([]interface{}, len(result.qr.Rows[result.index])) - for i, v := range result.qr.Rows[result.index] { - var err error - row[i], err = mproto.Convert(result.qr.Fields[i], v) - if err != nil { - panic(err) // unexpected - } - } - result.index++ - return row -} - -func (result *Result) Err() error { - return result.err -} - -// driver.Result interface -func (*StreamResult) LastInsertId() (int64, error) { - return 0, ErrNoLastInsertId -} - -func (*StreamResult) RowsAffected() (int64, error) { - return 0, ErrNoRowsAffected -} - -// driver.Rows interface -func (sr *StreamResult) Columns() (cols []string) { - cols = make([]string, len(sr.columns.Fields)) - for i, f := range sr.columns.Fields { - cols[i] = f.Name - } - return cols -} - -func (*StreamResult) Close() error { - return nil -} - -func (sr *StreamResult) Next() (row []interface{}) { - if sr.qr == nil { - // we need to read the next record that may contain - // multiple rows - qr, ok := <-sr.sr - if !ok { - if sr.errFunc() != nil { - log.Warningf("vt: error reading the next value %v", sr.errFunc()) - sr.err = sr.errFunc() - } - return nil - } - sr.qr = qr - sr.index = 0 - } - - row = make([]interface{}, len(sr.qr.Rows[sr.index])) - for i, v := range sr.qr.Rows[sr.index] { - var err error - row[i], err = mproto.Convert(sr.columns.Fields[i], v) - if err != nil { - panic(err) // unexpected - } - } - - sr.index++ - if sr.index == len(sr.qr.Rows) { - // we reached the end of our rows, nil it so next run - // will fetch the next one - sr.qr = nil - } - - return row -} - -func (sr *StreamResult) Err() error { - return sr.err -} diff --git a/go/vt/client2/tablet/vclient.go b/go/vt/client2/tablet/vclient.go deleted file mode 100644 index 50031e007e..0000000000 --- a/go/vt/client2/tablet/vclient.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tablet implements some additional error handling logic to -// make the client more robust in the face of transient problems with -// easy solutions. 
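// The "additional error handling logic" described above reduces to:
// classify each failure as fatal, retryable, or an application error,
// and loop only on the retryable ones, reconnecting between bounded
// attempts. A compact sketch of that control flow (illustrative names;
// assumes fmt is imported):

const maxAttempts = 2

func withRetry(op func() error, retryable func(error) bool, reconnect func() error) error {
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if !retryable(err) { // fatal or app-level: surface immediately
			return err
		}
		if rerr := reconnect(); rerr != nil {
			return rerr
		}
	}
	return fmt.Errorf("vt: max recovery attempts exceeded: %v", err)
}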
-package tablet - -import ( - "fmt" - "net" - "strings" - "time" - - log "github.com/golang/glog" - "github.com/youtube/vitess/go/db" -) - -const ( - ErrTypeFatal = 1 //errors.New("vt: fatal: reresolve endpoint") - ErrTypeRetry = 2 //errors.New("vt: retry: reconnect endpoint") - ErrTypeApp = 3 //errors.New("vt: app level error") -) - -const ( - DefaultReconnectDelay = 2 * time.Millisecond - DefaultMaxAttempts = 2 - DefaultTimeout = 30 * time.Second -) - -var zeroTime time.Time - -// Layer some logic on top of the basic tablet protocol to support -// fast retry when we can. - -type VtConn struct { - Conn - maxAttempts int // How many times should try each retriable operation? - timeFailed time.Time // This is the time a client transitioned from presumable health to failure. - reconnectDelay time.Duration -} - -// How long should we wait to try to recover? -// FIXME(msolomon) not sure if maxAttempts is still useful -func (vtc *VtConn) recoveryTimeout() time.Duration { - return vtc.timeout * 2 -} - -func (vtc *VtConn) handleErr(err error) (int, error) { - now := time.Now() - if vtc.timeFailed.IsZero() { - vtc.timeFailed = now - } else if now.Sub(vtc.timeFailed) > vtc.recoveryTimeout() { - vtc.Close() - return ErrTypeFatal, fmt.Errorf("vt: max recovery time exceeded: %v", err) - } - - errType := ErrTypeApp - if tabletErr, ok := err.(TabletError); ok { - msg := strings.ToLower(tabletErr.err.Error()) - if strings.HasPrefix(msg, "fatal") { - errType = ErrTypeFatal - } else if strings.HasPrefix(msg, "retry") { - errType = ErrTypeRetry - } - } else if netErr, ok := err.(net.Error); ok && netErr.Temporary() { - errType = ErrTypeRetry - } - - if errType == ErrTypeRetry && vtc.TransactionId != 0 { - errType = ErrTypeApp - err = fmt.Errorf("vt: cannot retry within a transaction: %v", err) - time.Sleep(vtc.reconnectDelay) - vtc.Close() - dialErr := vtc.dial() - log.Warningf("vt: redial error %v", dialErr) - } - - return errType, err -} - -func (vtc *VtConn) Exec(query string, bindVars map[string]interface{}) (db.Result, error) { - attempt := 0 - for { - result, err := vtc.Conn.Exec(query, bindVars) - if err == nil { - vtc.timeFailed = zeroTime - return result, nil - } - - errType, err := vtc.handleErr(err) - if errType != ErrTypeRetry { - return nil, err - } - for { - attempt++ - if attempt > vtc.maxAttempts { - return nil, fmt.Errorf("vt: max recovery attempts exceeded: %v", err) - } - vtc.Close() - time.Sleep(vtc.reconnectDelay) - if err := vtc.dial(); err == nil { - break - } - log.Warningf("vt: error dialing on exec %v", vtc.Conn.dbi.Host) - } - } -} - -func (vtc *VtConn) Begin() (db.Tx, error) { - attempt := 0 - for { - tx, err := vtc.Conn.Begin() - if err == nil { - vtc.timeFailed = zeroTime - return tx, nil - } - - errType, err := vtc.handleErr(err) - if errType != ErrTypeRetry { - return nil, err - } - for { - attempt++ - if attempt > vtc.maxAttempts { - return nil, fmt.Errorf("vt: max recovery attempts exceeded: %v", err) - } - vtc.Close() - time.Sleep(vtc.reconnectDelay) - if err := vtc.dial(); err == nil { - break - } - log.Warningf("vt: error dialing on begin %v", vtc.Conn.dbi.Host) - } - } -} - -func (vtc *VtConn) Commit() (err error) { - if err = vtc.Conn.Commit(); err == nil { - vtc.timeFailed = zeroTime - return nil - } - - // Not much we can do at this point, just annotate the error and return. 
- _, err = vtc.handleErr(err) - return err -} - -func DialVtdb(dbi string, stream bool, timeout time.Duration) (*VtConn, error) { - url, err := parseDbi(dbi) - if err != nil { - return nil, err - } - conn := &VtConn{ - Conn: Conn{dbi: url, stream: stream, timeout: timeout}, - maxAttempts: DefaultMaxAttempts, - reconnectDelay: DefaultReconnectDelay, - } - - if err := conn.dial(); err != nil { - return nil, err - } - return conn, nil -} - -type vDriver struct { - stream bool -} - -func (driver *vDriver) Open(name string) (db.Conn, error) { - return DialVtdb(name, driver.stream, DefaultTimeout) -} - -func init() { - db.Register("vttablet", &vDriver{}) - db.Register("vttablet-streaming", &vDriver{true}) -} From 8058dd0359e0d0155e4d1b43b6466292517973be Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 28 May 2015 16:16:28 -0700 Subject: [PATCH 118/128] Using right DBA abstraction in copy_schema_shard_test.go. Also using VtctlPipe. --- go/vt/mysqlctl/mysql_daemon.go | 3 - .../testlib/copy_schema_shard_test.go | 58 +++++++------------ 2 files changed, 20 insertions(+), 41 deletions(-) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index de32676dd4..9e4132f7f7 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -160,9 +160,6 @@ type FakeMysqlDaemon struct { // If nil we'll return an error. ApplySchemaChangeResult *proto.SchemaChangeResult - // DbaConnectionFactory is the factory for making fake dba connections - DbaConnectionFactory func() (dbconnpool.PoolConnection, error) - // DbAppConnectionFactory is the factory for making fake db app connections DbAppConnectionFactory func() (dbconnpool.PoolConnection, error) diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go index 4c153bf09d..a7d86e5b3f 100644 --- a/go/vt/wrangler/testlib/copy_schema_shard_test.go +++ b/go/vt/wrangler/testlib/copy_schema_shard_test.go @@ -6,12 +6,10 @@ package testlib import ( "fmt" - "sync/atomic" "testing" "time" mproto "github.com/youtube/vitess/go/mysql/proto" - "github.com/youtube/vitess/go/vt/dbconnpool" "github.com/youtube/vitess/go/vt/logutil" myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto" _ "github.com/youtube/vitess/go/vt/tabletmanager/gorpctmclient" @@ -21,7 +19,6 @@ import ( "github.com/youtube/vitess/go/vt/vttest/fakesqldb" "github.com/youtube/vitess/go/vt/wrangler" "github.com/youtube/vitess/go/vt/zktopo" - "golang.org/x/net/context" ) type ExpectedExecuteFetch struct { @@ -93,41 +90,12 @@ func (fpc *FakePoolConnection) Reconnect() error { return nil } -// on the destinations -func DestinationsFactory(t *testing.T) func() (dbconnpool.PoolConnection, error) { - var queryIndex int64 = -1 - - return func() (dbconnpool.PoolConnection, error) { - qi := atomic.AddInt64(&queryIndex, 1) - switch { - case qi == 0: - return NewFakePoolConnectionQuery(t, "CREATE DATABASE `vt_ks` /*!40100 DEFAULT CHARACTER SET utf8 */"), nil - case qi == 1: - return NewFakePoolConnectionQuery(t, "CREATE TABLE `vt_ks`.`resharding1` (\n"+ - " `id` bigint(20) NOT NULL AUTO_INCREMENT,\n"+ - " `msg` varchar(64) DEFAULT NULL,\n"+ - " `keyspace_id` bigint(20) unsigned NOT NULL,\n"+ - " PRIMARY KEY (`id`),\n"+ - " KEY `by_msg` (`msg`)\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8"), nil - case qi == 2: - return NewFakePoolConnectionQuery(t, "CREATE TABLE `view1` (\n"+ - " `id` bigint(20) NOT NULL AUTO_INCREMENT,\n"+ - " `msg` varchar(64) DEFAULT NULL,\n"+ - " `keyspace_id` bigint(20) unsigned NOT NULL,\n"+ - " 
PRIMARY KEY (`id`),\n"+ - " KEY `by_msg` (`msg`)\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8"), nil - } - - return nil, fmt.Errorf("Unexpected connection") - } -} - func TestCopySchemaShard(t *testing.T) { - fakesqldb.Register() + db := fakesqldb.Register() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) + vp := NewVtctlPipe(t, ts) + defer vp.Close() sourceMaster := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER, TabletKeyspaceShard(t, "ks", "-80")) @@ -158,10 +126,24 @@ func TestCopySchemaShard(t *testing.T) { }, } - destinationMaster.FakeMysqlDaemon.DbaConnectionFactory = DestinationsFactory(t) + db.AddQuery("USE vt_ks", &mproto.QueryResult{}) + db.AddQuery("CREATE DATABASE `vt_ks` /*!40100 DEFAULT CHARACTER SET utf8 */", &mproto.QueryResult{}) + db.AddQuery("CREATE TABLE `vt_ks`.`resharding1` (\n"+ + " `id` bigint(20) NOT NULL AUTO_INCREMENT,\n"+ + " `msg` varchar(64) DEFAULT NULL,\n"+ + " `keyspace_id` bigint(20) unsigned NOT NULL,\n"+ + " PRIMARY KEY (`id`),\n"+ + " KEY `by_msg` (`msg`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8", &mproto.QueryResult{}) + db.AddQuery("CREATE TABLE `view1` (\n"+ + " `id` bigint(20) NOT NULL AUTO_INCREMENT,\n"+ + " `msg` varchar(64) DEFAULT NULL,\n"+ + " `keyspace_id` bigint(20) unsigned NOT NULL,\n"+ + " PRIMARY KEY (`id`),\n"+ + " KEY `by_msg` (`msg`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8", &mproto.QueryResult{}) - if err := wr.CopySchemaShard(context.Background(), sourceRdonly.Tablet.Alias, nil, nil, true, "ks", "-40"); err != nil { + if err := vp.Run([]string{"CopySchemaShard", "-include-views", sourceRdonly.Tablet.Alias.String(), "ks/-40"}); err != nil { t.Fatalf("CopySchemaShard failed: %v", err) } - } From a02c96c73b3f43210c7f48901e4ac30527f5a0ef Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 29 May 2015 08:37:09 -0700 Subject: [PATCH 119/128] Deleting unused plugins. vtclient doesn't use topo. --- go/cmd/vtclient/plugin_etcdtopo.go | 11 ----------- go/cmd/vtclient/plugin_zktopo.go | 11 ----------- 2 files changed, 22 deletions(-) delete mode 100644 go/cmd/vtclient/plugin_etcdtopo.go delete mode 100644 go/cmd/vtclient/plugin_zktopo.go diff --git a/go/cmd/vtclient/plugin_etcdtopo.go b/go/cmd/vtclient/plugin_etcdtopo.go deleted file mode 100644 index 1bd833657b..0000000000 --- a/go/cmd/vtclient/plugin_etcdtopo.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -// This plugin imports etcdtopo to register the etcd implementation of TopoServer. - -import ( - _ "github.com/youtube/vitess/go/vt/etcdtopo" -) diff --git a/go/cmd/vtclient/plugin_zktopo.go b/go/cmd/vtclient/plugin_zktopo.go deleted file mode 100644 index 77409609bc..0000000000 --- a/go/cmd/vtclient/plugin_zktopo.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -// Imports and register the Zookeeper TopologyServer - -import ( - _ "github.com/youtube/vitess/go/vt/zktopo" -) From e83e214573b5044d07100c4be0e3eec8d2dba305 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 29 May 2015 09:01:09 -0700 Subject: [PATCH 120/128] Moving go client rows and streaming_rows out of vtgateconn and into the go client driver. 
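With rows.go and streaming_rows.go private to go/vt/client, callers no
longer touch the row iterators directly; everything goes through
database/sql. A sketch of the intended usage (the "vitess" driver name
and the :v0-style bind placeholders are assumptions for illustration,
not verified against this change):

    import (
        "database/sql"

        _ "github.com/youtube/vitess/go/vt/client" // registers the driver
    )

    func countRows(dsn string) (int, error) {
        db, err := sql.Open("vitess", dsn)
        if err != nil {
            return 0, err
        }
        defer db.Close()
        rows, err := db.Query("select id from t where id > :v0", 1)
        if err != nil {
            return 0, err
        }
        defer rows.Close()
        n := 0
        for rows.Next() {
            n++
        }
        return n, rows.Err()
    }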
--- go/vt/client/client.go | 4 +-- go/vt/{vtgate/vtgateconn => client}/rows.go | 6 ++--- .../vtgateconn => client}/rows_test.go | 12 ++++----- .../vtgateconn => client}/streaming_rows.go | 9 ++++--- .../streaming_rows_test.go | 27 ++++++++++--------- 5 files changed, 30 insertions(+), 28 deletions(-) rename go/vt/{vtgate/vtgateconn => client}/rows.go (94%) rename go/vt/{vtgate/vtgateconn => client}/rows_test.go (95%) rename go/vt/{vtgate/vtgateconn => client}/streaming_rows.go (88%) rename go/vt/{vtgate/vtgateconn => client}/streaming_rows_test.go (86%) diff --git a/go/vt/client/client.go b/go/vt/client/client.go index fef6a8594a..6835719a5d 100644 --- a/go/vt/client/client.go +++ b/go/vt/client/client.go @@ -150,7 +150,7 @@ func (s *stmt) Query(args []driver.Value) (driver.Rows, error) { defer cancel() if s.c.Streaming { qrc, errFunc := s.c.vtgateConn.StreamExecute(ctx, s.query, makeBindVars(args), s.c.TabletType) - return vtgateconn.NewStreamingRows(qrc, errFunc), nil + return newStreamingRows(qrc, errFunc), nil } var qr *mproto.QueryResult var err error @@ -162,7 +162,7 @@ func (s *stmt) Query(args []driver.Value) (driver.Rows, error) { if err != nil { return nil, err } - return vtgateconn.NewRows(qr), nil + return newRows(qr), nil } func makeBindVars(args []driver.Value) map[string]interface{} { diff --git a/go/vt/vtgate/vtgateconn/rows.go b/go/vt/client/rows.go similarity index 94% rename from go/vt/vtgate/vtgateconn/rows.go rename to go/vt/client/rows.go index 38aaba4684..22067778dd 100644 --- a/go/vt/vtgate/vtgateconn/rows.go +++ b/go/vt/client/rows.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package vtgateconn +package client import ( "database/sql/driver" @@ -20,8 +20,8 @@ type rows struct { index int } -// NewRows creates a new rows from qr. -func NewRows(qr *mproto.QueryResult) driver.Rows { +// newRows creates a new rows from qr. 
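// A database/sql/driver Rows implementation needs only Columns, Close,
// and Next; rows above satisfies them by walking qr.Rows with an index,
// exactly as this reduced sketch does (sliceRows is illustrative;
// assumes database/sql/driver and io are imported):

type sliceRows struct {
	cols []string
	data [][]driver.Value
	i    int
}

func (r *sliceRows) Columns() []string { return r.cols }

func (r *sliceRows) Close() error { r.i = 0; return nil }

func (r *sliceRows) Next(dest []driver.Value) error {
	if r.i >= len(r.data) {
		return io.EOF // tells database/sql the result set is exhausted
	}
	copy(dest, r.data[r.i])
	r.i++
	return nil
}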
+func newRows(qr *mproto.QueryResult) driver.Rows { return &rows{qr: qr} } diff --git a/go/vt/vtgate/vtgateconn/rows_test.go b/go/vt/client/rows_test.go similarity index 95% rename from go/vt/vtgate/vtgateconn/rows_test.go rename to go/vt/client/rows_test.go index 695afc1b16..f7d3d5a88c 100644 --- a/go/vt/vtgate/vtgateconn/rows_test.go +++ b/go/vt/client/rows_test.go @@ -1,4 +1,4 @@ -package vtgateconn +package client import ( "database/sql/driver" @@ -10,7 +10,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" ) -var result1 = mproto.QueryResult{ +var rowsResult1 = mproto.QueryResult{ Fields: []mproto.Field{ mproto.Field{ Name: "field1", @@ -70,7 +70,7 @@ func logMismatchedTypes(t *testing.T, gotRow, wantRow []driver.Value) { } func TestRows(t *testing.T) { - ri := NewRows(&result1) + ri := newRows(&rowsResult1) wantCols := []string{ "field1", "field2", @@ -148,7 +148,7 @@ var badResult2 = mproto.QueryResult{ } func TestRowsFail(t *testing.T) { - ri := NewRows(&badResult1) + ri := newRows(&badResult1) var dest []driver.Value err := ri.Next(dest) want := "length mismatch: dest is 0, fields are 1" @@ -156,7 +156,7 @@ func TestRowsFail(t *testing.T) { t.Errorf("Next: %v, want %s", err, want) } - ri = NewRows(&badResult1) + ri = newRows(&badResult1) dest = make([]driver.Value, 1) err = ri.Next(dest) want = "internal error: length mismatch: dest is 1, fields are 0" @@ -164,7 +164,7 @@ func TestRowsFail(t *testing.T) { t.Errorf("Next: %v, want %s", err, want) } - ri = NewRows(&badResult2) + ri = newRows(&badResult2) dest = make([]driver.Value, 1) err = ri.Next(dest) want = `conversion error: field: {field1 3 0}, val: value: strconv.ParseInt: parsing "value": invalid syntax` diff --git a/go/vt/vtgate/vtgateconn/streaming_rows.go b/go/vt/client/streaming_rows.go similarity index 88% rename from go/vt/vtgate/vtgateconn/streaming_rows.go rename to go/vt/client/streaming_rows.go index 74440f5f5b..5d05b0daba 100644 --- a/go/vt/vtgate/vtgateconn/streaming_rows.go +++ b/go/vt/client/streaming_rows.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package vtgateconn +package client import ( "database/sql/driver" @@ -10,21 +10,22 @@ import ( "io" mproto "github.com/youtube/vitess/go/mysql/proto" + "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" ) // streamingRows creates a database/sql/driver compliant Row iterator // for a streaming query. type streamingRows struct { qrc <-chan *mproto.QueryResult - errFunc ErrFunc + errFunc vtgateconn.ErrFunc failed error fields []mproto.Field qr *mproto.QueryResult index int } -// NewStreamingRows creates a new streamingRows from qrc and errFunc. -func NewStreamingRows(qrc <-chan *mproto.QueryResult, errFunc ErrFunc) driver.Rows { +// newStreamingRows creates a new streamingRows from qrc and errFunc. +func newStreamingRows(qrc <-chan *mproto.QueryResult, errFunc vtgateconn.ErrFunc) driver.Rows { return &streamingRows{qrc: qrc, errFunc: errFunc} } diff --git a/go/vt/vtgate/vtgateconn/streaming_rows_test.go b/go/vt/client/streaming_rows_test.go similarity index 86% rename from go/vt/vtgate/vtgateconn/streaming_rows_test.go rename to go/vt/client/streaming_rows_test.go index 4e22dd77e1..6515f35413 100644 --- a/go/vt/vtgate/vtgateconn/streaming_rows_test.go +++ b/go/vt/client/streaming_rows_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package vtgateconn +package client import ( "database/sql/driver" @@ -14,6 +14,7 @@ import ( mproto "github.com/youtube/vitess/go/mysql/proto" "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" ) var packet1 = mproto.QueryResult{ @@ -54,7 +55,7 @@ var packet3 = mproto.QueryResult{ } func TestStreamingRows(t *testing.T) { - qrc, errFunc := func() (<-chan *mproto.QueryResult, ErrFunc) { + qrc, errFunc := func() (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { ch := make(chan *mproto.QueryResult) go func() { ch <- &packet1 @@ -64,7 +65,7 @@ func TestStreamingRows(t *testing.T) { }() return ch, func() error { return nil } }() - ri := NewStreamingRows(qrc, errFunc) + ri := newStreamingRows(qrc, errFunc) wantCols := []string{ "field1", "field2", @@ -111,7 +112,7 @@ func TestStreamingRows(t *testing.T) { } func TestStreamingRowsReversed(t *testing.T) { - qrc, errFunc := func() (<-chan *mproto.QueryResult, ErrFunc) { + qrc, errFunc := func() (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { ch := make(chan *mproto.QueryResult) go func() { ch <- &packet1 @@ -121,7 +122,7 @@ func TestStreamingRowsReversed(t *testing.T) { }() return ch, func() error { return nil } }() - ri := NewStreamingRows(qrc, errFunc) + ri := newStreamingRows(qrc, errFunc) wantRow := []driver.Value{ int64(1), @@ -151,14 +152,14 @@ func TestStreamingRowsReversed(t *testing.T) { } func TestStreamingRowsError(t *testing.T) { - qrc, errFunc := func() (<-chan *mproto.QueryResult, ErrFunc) { + qrc, errFunc := func() (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { ch := make(chan *mproto.QueryResult) go func() { close(ch) }() return ch, func() error { return errors.New("error before fields") } }() - ri := NewStreamingRows(qrc, errFunc) + ri := newStreamingRows(qrc, errFunc) gotCols := ri.Columns() if gotCols != nil { t.Errorf("cols: %v, want nil", gotCols) @@ -171,7 +172,7 @@ func TestStreamingRowsError(t *testing.T) { } _ = ri.Close() - qrc, errFunc = func() (<-chan *mproto.QueryResult, ErrFunc) { + qrc, errFunc = func() (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { ch := make(chan *mproto.QueryResult) go func() { ch <- &packet1 @@ -179,7 +180,7 @@ func TestStreamingRowsError(t *testing.T) { }() return ch, func() error { return errors.New("error after fields") } }() - ri = NewStreamingRows(qrc, errFunc) + ri = newStreamingRows(qrc, errFunc) wantCols := []string{ "field1", "field2", @@ -202,7 +203,7 @@ func TestStreamingRowsError(t *testing.T) { } _ = ri.Close() - qrc, errFunc = func() (<-chan *mproto.QueryResult, ErrFunc) { + qrc, errFunc = func() (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { ch := make(chan *mproto.QueryResult) go func() { ch <- &packet1 @@ -211,7 +212,7 @@ func TestStreamingRowsError(t *testing.T) { }() return ch, func() error { return errors.New("error after rows") } }() - ri = NewStreamingRows(qrc, errFunc) + ri = newStreamingRows(qrc, errFunc) gotRow = make([]driver.Value, 3) err = ri.Next(gotRow) if err != nil { @@ -224,7 +225,7 @@ func TestStreamingRowsError(t *testing.T) { } _ = ri.Close() - qrc, errFunc = func() (<-chan *mproto.QueryResult, ErrFunc) { + qrc, errFunc = func() (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { ch := make(chan *mproto.QueryResult) go func() { ch <- &packet2 @@ -232,7 +233,7 @@ func TestStreamingRowsError(t *testing.T) { }() return ch, func() error { return nil } }() - ri = NewStreamingRows(qrc, errFunc) + ri = newStreamingRows(qrc, errFunc) gotRow = make([]driver.Value, 3) err = ri.Next(gotRow) wantErr = "first packet 
did not return fields" From b048ef916d07b76b03343c8cb73de2a584b7eafc Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 29 May 2015 10:25:33 -0700 Subject: [PATCH 121/128] Adding missing APIs to go client. --- go/vt/vtgate/fakerpcvtgateconn/conn.go | 41 +++++++ go/vt/vtgate/gorpcvtgateconn/conn.go | 161 +++++++++++++++++++++++++ go/vt/vtgate/vtgateconn/vtgateconn.go | 140 +++++++++++++++++++-- 3 files changed, 335 insertions(+), 7 deletions(-) diff --git a/go/vt/vtgate/fakerpcvtgateconn/conn.go b/go/vt/vtgate/fakerpcvtgateconn/conn.go index febde09bb3..e22d81b720 100644 --- a/go/vt/vtgate/fakerpcvtgateconn/conn.go +++ b/go/vt/vtgate/fakerpcvtgateconn/conn.go @@ -18,6 +18,7 @@ import ( mproto "github.com/youtube/vitess/go/mysql/proto" "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/key" tproto "github.com/youtube/vitess/go/vt/tabletserver/proto" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtgate/proto" @@ -149,6 +150,31 @@ func (conn *FakeVTGateConn) ExecuteShard(ctx context.Context, sql string, keyspa return &reply, s, nil } +// ExecuteKeyspaceIds please see vtgateconn.Impl.ExecuteKeyspaceIds +func (conn *FakeVTGateConn) ExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + panic("not implemented") +} + +// ExecuteKeyRanges please see vtgateconn.Impl.ExecuteKeyRanges +func (conn *FakeVTGateConn) ExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + panic("not implemented") +} + +// ExecuteEntityIds please see vtgateconn.Impl.ExecuteEntityIds +func (conn *FakeVTGateConn) ExecuteEntityIds(ctx context.Context, query string, keyspace string, entityColumnName string, entityKeyspaceIDs []proto.EntityId, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + panic("not implemented") +} + +// ExecuteBatchShard please see vtgateconn.Impl.ExecuteBatchShard +func (conn *FakeVTGateConn) ExecuteBatchShard(ctx context.Context, queries []tproto.BoundQuery, keyspace string, shards []string, tabletType topo.TabletType, session interface{}) ([]mproto.QueryResult, interface{}, error) { + panic("not implemented") +} + +// ExecuteBatchKeyspaceIds please see vtgateconn.Impl.ExecuteBatchKeyspaceIds +func (conn *FakeVTGateConn) ExecuteBatchKeyspaceIds(ctx context.Context, queries []tproto.BoundQuery, keyspace string, keyspaceIds []key.KeyspaceId, tabletType topo.TabletType, session interface{}) ([]mproto.QueryResult, interface{}, error) { + panic("not implemented") +} + // StreamExecute please see vtgateconn.Impl.StreamExecute func (conn *FakeVTGateConn) StreamExecute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { @@ -184,6 +210,21 @@ func (conn *FakeVTGateConn) StreamExecute(ctx context.Context, query string, bin return resultChan, nil } +// StreamExecuteShard please see vtgateconn.Impl.StreamExecuteShard +func (conn *FakeVTGateConn) StreamExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { + panic("not 
implemented") +} + +// StreamExecuteKeyRanges please see vtgateconn.Impl.StreamExecuteKeyRanges +func (conn *FakeVTGateConn) StreamExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { + panic("not implemented") +} + +// StreamExecuteKeyspaceIds please see vtgateconn.Impl.StreamExecuteKeyspaceIds +func (conn *FakeVTGateConn) StreamExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { + panic("not implemented") +} + // Begin please see vtgateconn.Impl.Begin func (conn *FakeVTGateConn) Begin(ctx context.Context) (interface{}, error) { return &proto.Session{ diff --git a/go/vt/vtgate/gorpcvtgateconn/conn.go b/go/vt/vtgate/gorpcvtgateconn/conn.go index 95030c85d7..2e78de0772 100644 --- a/go/vt/vtgate/gorpcvtgateconn/conn.go +++ b/go/vt/vtgate/gorpcvtgateconn/conn.go @@ -13,6 +13,7 @@ import ( mproto "github.com/youtube/vitess/go/mysql/proto" "github.com/youtube/vitess/go/rpcplus" "github.com/youtube/vitess/go/rpcwrap/bsonrpc" + "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/rpc" tproto "github.com/youtube/vitess/go/vt/tabletserver/proto" "github.com/youtube/vitess/go/vt/topo" @@ -85,6 +86,120 @@ func (conn *vtgateConn) ExecuteShard(ctx context.Context, query string, keyspace return result.Result, result.Session, nil } +func (conn *vtgateConn) ExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } + request := proto.KeyspaceIdQuery{ + Sql: query, + BindVariables: bindVars, + Keyspace: keyspace, + KeyspaceIds: keyspaceIds, + TabletType: tabletType, + Session: s, + } + var result proto.QueryResult + if err := conn.rpcConn.Call(ctx, "VTGate.ExecuteKeyspaceIds", request, &result); err != nil { + return nil, session, err + } + if result.Error != "" { + return nil, result.Session, errors.New(result.Error) + } + return result.Result, result.Session, nil +} + +func (conn *vtgateConn) ExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } + request := proto.KeyRangeQuery{ + Sql: query, + BindVariables: bindVars, + Keyspace: keyspace, + KeyRanges: keyRanges, + TabletType: tabletType, + Session: s, + } + var result proto.QueryResult + if err := conn.rpcConn.Call(ctx, "VTGate.ExecuteKeyRanges", request, &result); err != nil { + return nil, session, err + } + if result.Error != "" { + return nil, result.Session, errors.New(result.Error) + } + return result.Result, result.Session, nil +} + +func (conn *vtgateConn) ExecuteEntityIds(ctx context.Context, query string, keyspace string, entityColumnName string, entityKeyspaceIDs []proto.EntityId, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } + request := proto.EntityIdsQuery{ + Sql: 
query, + BindVariables: bindVars, + Keyspace: keyspace, + EntityColumnName: entityColumnName, + EntityKeyspaceIDs: entityKeyspaceIDs, + TabletType: tabletType, + Session: s, + } + var result proto.QueryResult + if err := conn.rpcConn.Call(ctx, "VTGate.ExecuteEntityIds", request, &result); err != nil { + return nil, session, err + } + if result.Error != "" { + return nil, result.Session, errors.New(result.Error) + } + return result.Result, result.Session, nil +} + +func (conn *vtgateConn) ExecuteBatchShard(ctx context.Context, queries []tproto.BoundQuery, keyspace string, shards []string, tabletType topo.TabletType, session interface{}) ([]mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } + request := proto.BatchQueryShard{ + Queries: queries, + Keyspace: keyspace, + Shards: shards, + TabletType: tabletType, + Session: s, + } + var result proto.QueryResultList + if err := conn.rpcConn.Call(ctx, "VTGate.ExecuteBatchShard", request, &result); err != nil { + return nil, session, err + } + if result.Error != "" { + return nil, result.Session, errors.New(result.Error) + } + return result.List, result.Session, nil +} + +func (conn *vtgateConn) ExecuteBatchKeyspaceIds(ctx context.Context, queries []tproto.BoundQuery, keyspace string, keyspaceIds []key.KeyspaceId, tabletType topo.TabletType, session interface{}) ([]mproto.QueryResult, interface{}, error) { + var s *proto.Session + if session != nil { + s = session.(*proto.Session) + } + request := proto.KeyspaceIdBatchQuery{ + Queries: queries, + Keyspace: keyspace, + KeyspaceIds: keyspaceIds, + TabletType: tabletType, + Session: s, + } + var result proto.QueryResultList + if err := conn.rpcConn.Call(ctx, "VTGate.ExecuteBatchKeyspaceIds", request, &result); err != nil { + return nil, session, err + } + if result.Error != "" { + return nil, result.Session, errors.New(result.Error) + } + return result.List, result.Session, nil +} + func (conn *vtgateConn) StreamExecute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { req := &proto.Query{ Sql: query, @@ -94,6 +209,52 @@ func (conn *vtgateConn) StreamExecute(ctx context.Context, query string, bindVar } sr := make(chan *proto.QueryResult, 10) c := conn.rpcConn.StreamGo("VTGate.StreamExecute", req, sr) + return sendStreamResults(c, sr) +} + +func (conn *vtgateConn) StreamExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { + req := &proto.QueryShard{ + Sql: query, + BindVariables: bindVars, + Keyspace: keyspace, + Shards: shards, + TabletType: tabletType, + Session: nil, + } + sr := make(chan *proto.QueryResult, 10) + c := conn.rpcConn.StreamGo("VTGate.StreamExecuteShard", req, sr) + return sendStreamResults(c, sr) +} + +func (conn *vtgateConn) StreamExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { + req := &proto.KeyRangeQuery{ + Sql: query, + BindVariables: bindVars, + Keyspace: keyspace, + KeyRanges: keyRanges, + TabletType: tabletType, + Session: nil, + } + sr := make(chan *proto.QueryResult, 10) + c := conn.rpcConn.StreamGo("VTGate.StreamExecuteKeyRanges", req, sr) + return sendStreamResults(c, sr) +} + +func (conn *vtgateConn) 
StreamExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { + req := &proto.KeyspaceIdQuery{ + Sql: query, + BindVariables: bindVars, + Keyspace: keyspace, + KeyspaceIds: keyspaceIds, + TabletType: tabletType, + Session: nil, + } + sr := make(chan *proto.QueryResult, 10) + c := conn.rpcConn.StreamGo("VTGate.StreamExecuteKeyspaceIds", req, sr) + return sendStreamResults(c, sr) +} + +func sendStreamResults(c *rpcplus.Call, sr chan *proto.QueryResult) (<-chan *mproto.QueryResult, vtgateconn.ErrFunc) { srout := make(chan *mproto.QueryResult, 1) go func() { defer close(srout) diff --git a/go/vt/vtgate/vtgateconn/vtgateconn.go b/go/vt/vtgate/vtgateconn/vtgateconn.go index 0dd3832d81..8143332328 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn.go @@ -11,6 +11,7 @@ import ( log "github.com/golang/glog" mproto "github.com/youtube/vitess/go/mysql/proto" + "github.com/youtube/vitess/go/vt/key" tproto "github.com/youtube/vitess/go/vt/tabletserver/proto" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtgate/proto" @@ -48,6 +49,7 @@ type VTGateConn struct { } // Execute executes a non-streaming query on vtgate. +// This is using v3 API. func (conn *VTGateConn) Execute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { res, _, err := conn.impl.Execute(ctx, query, bindVars, tabletType, nil) return res, err @@ -59,14 +61,67 @@ func (conn *VTGateConn) ExecuteShard(ctx context.Context, query string, keyspace return res, err } -// StreamExecute executes a streaming query on vtgate. It returns a channel, ErrFunc and error. -// If error is non-nil, it means that the StreamExecute failed to send the request. Otherwise, -// you can pull values from the channel till it's closed. Following this, you can call ErrFunc +// ExecuteKeyspaceIds executes a non-streaming query for multiple keyspace_ids. +func (conn *VTGateConn) ExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + res, _, err := conn.impl.ExecuteKeyspaceIds(ctx, query, keyspace, keyspaceIds, bindVars, tabletType, nil) + return res, err +} + +// ExecuteKeyRanges executes a non-streaming query on a key range. +func (conn *VTGateConn) ExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + res, _, err := conn.impl.ExecuteKeyRanges(ctx, query, keyspace, keyRanges, bindVars, tabletType, nil) + return res, err +} + +// ExecuteEntityIds executes a non-streaming query for multiple entities. +func (conn *VTGateConn) ExecuteEntityIds(ctx context.Context, query string, keyspace string, entityColumnName string, entityKeyspaceIDs []proto.EntityId, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + res, _, err := conn.impl.ExecuteEntityIds(ctx, query, keyspace, entityColumnName, entityKeyspaceIDs, bindVars, tabletType, nil) + return res, err +} + +// ExecuteBatchShard executes a set of non-streaming queries for multiple shards. 
+func (conn *VTGateConn) ExecuteBatchShard(ctx context.Context, queries []tproto.BoundQuery, keyspace string, shards []string, tabletType topo.TabletType) ([]mproto.QueryResult, error) { + res, _, err := conn.impl.ExecuteBatchShard(ctx, queries, keyspace, shards, tabletType, nil) + return res, err +} + +// ExecuteBatchKeyspaceIds executes a set of non-streaming queries for multiple keyspace ids. +func (conn *VTGateConn) ExecuteBatchKeyspaceIds(ctx context.Context, queries []tproto.BoundQuery, keyspace string, keyspaceIds []key.KeyspaceId, tabletType topo.TabletType) ([]mproto.QueryResult, error) { + res, _, err := conn.impl.ExecuteBatchKeyspaceIds(ctx, queries, keyspace, keyspaceIds, tabletType, nil) + return res, err +} + +// StreamExecute executes a streaming query on vtgate. It returns a channel, and ErrFunc. +// You can pull values from the channel till it's closed. Following this, you can call ErrFunc // to see if the stream ended normally or due to a failure. func (conn *VTGateConn) StreamExecute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) { return conn.impl.StreamExecute(ctx, query, bindVars, tabletType) } +// StreamExecuteShard executes a streaming query on vtgate, on a set of shards. +// It returns a channel, and ErrFunc. +// You can pull values from the channel till it's closed. Following this, you can call ErrFunc +// to see if the stream ended normally or due to a failure. +func (conn *VTGateConn) StreamExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) { + return conn.impl.StreamExecuteShard(ctx, query, keyspace, shards, bindVars, tabletType) +} + +// StreamExecuteKeyRanges executes a streaming query on vtgate, on a set of keyranges. +// It returns a channel, and ErrFunc. +// You can pull values from the channel till it's closed. Following this, you can call ErrFunc +// to see if the stream ended normally or due to a failure. +func (conn *VTGateConn) StreamExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) { + return conn.impl.StreamExecuteKeyRanges(ctx, query, keyspace, keyRanges, bindVars, tabletType) +} + +// StreamExecuteKeyspaceIds executes a streaming query on vtgate, for the given keyspaceIds. +// It returns a channel, and ErrFunc. +// You can pull values from the channel till it's closed. Following this, you can call ErrFunc +// to see if the stream ended normally or due to a failure. +func (conn *VTGateConn) StreamExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) { + return conn.impl.StreamExecuteKeyspaceIds(ctx, query, keyspace, keyspaceIds, bindVars, tabletType) +} + // Begin starts a transaction and returns a VTGateTX. func (conn *VTGateConn) Begin(ctx context.Context) (*VTGateTx, error) { session, err := conn.impl.Begin(ctx) @@ -119,6 +174,56 @@ func (tx *VTGateTx) ExecuteShard(ctx context.Context, query string, keyspace str return res, err } +// ExecuteKeyspaceIds executes a non-streaming query for multiple keyspace_ids. 
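// The (channel, ErrFunc) contract shared by every StreamExecute variant
// above: drain the channel to completion, then call ErrFunc once to
// learn whether the stream ended normally. A minimal consumer sketch
// (drainStream is illustrative; it assumes the imports already used in
// this file):

func drainStream(ctx context.Context, conn *VTGateConn, query string) error {
	qrc, errFunc := conn.StreamExecute(ctx, query, nil, topo.TYPE_MASTER)
	for qr := range qrc {
		_ = qr // first packet carries Fields, later packets carry Rows
	}
	return errFunc()
}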
+func (tx *VTGateTx) ExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + if tx.session == nil { + return nil, fmt.Errorf("executeKeyspaceIds: not in transaction") + } + res, session, err := tx.impl.ExecuteKeyspaceIds(ctx, query, keyspace, keyspaceIds, bindVars, tabletType, tx.session) + tx.session = session + return res, err +} + +// ExecuteKeyRanges executes a non-streaming query on a key range. +func (tx *VTGateTx) ExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + if tx.session == nil { + return nil, fmt.Errorf("executeKeyRanges: not in transaction") + } + res, session, err := tx.impl.ExecuteKeyRanges(ctx, query, keyspace, keyRanges, bindVars, tabletType, tx.session) + tx.session = session + return res, err +} + +// ExecuteEntityIds executes a non-streaming query for multiple entities. +func (tx *VTGateTx) ExecuteEntityIds(ctx context.Context, query string, keyspace string, entityColumnName string, entityKeyspaceIDs []proto.EntityId, bindVars map[string]interface{}, tabletType topo.TabletType) (*mproto.QueryResult, error) { + if tx.session == nil { + return nil, fmt.Errorf("executeEntityIds: not in transaction") + } + res, session, err := tx.impl.ExecuteEntityIds(ctx, query, keyspace, entityColumnName, entityKeyspaceIDs, bindVars, tabletType, tx.session) + tx.session = session + return res, err +} + +// ExecuteBatchShard executes a set of non-streaming queries for multiple shards. +func (tx *VTGateTx) ExecuteBatchShard(ctx context.Context, queries []tproto.BoundQuery, keyspace string, shards []string, tabletType topo.TabletType) ([]mproto.QueryResult, error) { + if tx.session == nil { + return nil, fmt.Errorf("executeBatchShard: not in transaction") + } + res, session, err := tx.impl.ExecuteBatchShard(ctx, queries, keyspace, shards, tabletType, tx.session) + tx.session = session + return res, err +} + +// ExecuteBatchKeyspaceIds executes a set of non-streaming queries for multiple keyspace ids. +func (tx *VTGateTx) ExecuteBatchKeyspaceIds(ctx context.Context, queries []tproto.BoundQuery, keyspace string, keyspaceIds []key.KeyspaceId, tabletType topo.TabletType) ([]mproto.QueryResult, error) { + if tx.session == nil { + return nil, fmt.Errorf("executeBatchKeyspaceIds: not in transaction") + } + res, session, err := tx.impl.ExecuteBatchKeyspaceIds(ctx, queries, keyspace, keyspaceIds, tabletType, tx.session) + tx.session = session + return res, err +} + // Commit commits the current transaction. func (tx *VTGateTx) Commit(ctx context.Context) error { if tx.session == nil { @@ -155,12 +260,33 @@ type Impl interface { // ExecuteShard executes a non-streaming query for multiple shards on vtgate. ExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) - // StreamExecute executes a streaming query on vtgate. It returns a channel, ErrFunc and error. - // If error is non-nil, it means that the StreamExecute failed to send the request. Otherwise, - // you can pull values from the channel till it's closed. Following this, you can call ErrFunc - // to see if the stream ended normally or due to a failure. 
+ // ExecuteKeyspaceIds executes a non-streaming query for multiple keyspace_ids. + ExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) + + // ExecuteKeyRanges executes a non-streaming query on a key range. + ExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) + + // ExecuteEntityIds executes a non-streaming query for multiple entities. + ExecuteEntityIds(ctx context.Context, query string, keyspace string, entityColumnName string, entityKeyspaceIDs []proto.EntityId, bindVars map[string]interface{}, tabletType topo.TabletType, session interface{}) (*mproto.QueryResult, interface{}, error) + + // ExecuteBatchShard executes a set of non-streaming queries for multiple shards. + ExecuteBatchShard(ctx context.Context, queries []tproto.BoundQuery, keyspace string, shards []string, tabletType topo.TabletType, session interface{}) ([]mproto.QueryResult, interface{}, error) + + // ExecuteBatchKeyspaceIds executes a set of non-streaming queries for multiple keyspace ids. + ExecuteBatchKeyspaceIds(ctx context.Context, queries []tproto.BoundQuery, keyspace string, keyspaceIds []key.KeyspaceId, tabletType topo.TabletType, session interface{}) ([]mproto.QueryResult, interface{}, error) + + // StreamExecute executes a streaming query on vtgate. StreamExecute(ctx context.Context, query string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) + // StreamExecuteShard executes a streaming query on vtgate, on a set of shards. + StreamExecuteShard(ctx context.Context, query string, keyspace string, shards []string, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) + + // StreamExecuteKeyRanges executes a streaming query on vtgate, on a set of keyranges. + StreamExecuteKeyRanges(ctx context.Context, query string, keyspace string, keyRanges []key.KeyRange, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) + + // StreamExecuteKeyspaceIds executes a streaming query on vtgate, for the given keyspaceIds. + StreamExecuteKeyspaceIds(ctx context.Context, query string, keyspace string, keyspaceIds []key.KeyspaceId, bindVars map[string]interface{}, tabletType topo.TabletType) (<-chan *mproto.QueryResult, ErrFunc) + // Begin starts a transaction and returns a VTGateTX. Begin(ctx context.Context) (interface{}, error) From dbcbbf2a9dc2898a398f2c0ef77f0c731e267994 Mon Sep 17 00:00:00 2001 From: sougou Date: Fri, 29 May 2015 11:03:07 -0700 Subject: [PATCH 122/128] Update Getting Started links --- README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index c1a4a5d9ff..6bf143b401 100644 --- a/README.md +++ b/README.md @@ -24,10 +24,9 @@ and a more [detailed presentation from @Scale '14](http://youtu.be/5yDO-tmIoXY). ### Using Vitess - * [Getting Started](http://vitess.io/getting-started/): - running Vitess on Kubernetes. - * [Building](http://vitess.io/doc/GettingStarted): - how to manually build Vitess. + * Getting Started + * [On Kubernetes](http://vitess.io/getting-started/). + * [From the ground up](http://vitess.io/doc/GettingStarted). 
* [Tools](http://vitess.io/doc/Tools): all Vitess tools and servers. * [vttablet/vtocc](http://vitess.io/doc/vtocc): From 52cf702ffd919a24cf0a6525bfd6523f9816a575 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 29 May 2015 12:51:28 -0700 Subject: [PATCH 123/128] Adding unit test for all non-streaming vtgate methods. --- go/vt/vtgate/vtgateconntest/client.go | 532 +++++++++++++++++++++++++- 1 file changed, 527 insertions(+), 5 deletions(-) diff --git a/go/vt/vtgate/vtgateconntest/client.go b/go/vt/vtgate/vtgateconntest/client.go index eab2200acf..0b2bb2489f 100644 --- a/go/vt/vtgate/vtgateconntest/client.go +++ b/go/vt/vtgate/vtgateconntest/client.go @@ -70,6 +70,15 @@ func (f *fakeVTGateService) ExecuteKeyspaceIds(ctx context.Context, query *proto if f.panics { panic(fmt.Errorf("test forced panic")) } + execCase, ok := execMap[query.Sql] + if !ok { + return fmt.Errorf("no match for: %s", query.Sql) + } + if !reflect.DeepEqual(query, execCase.keyspaceIdQuery) { + f.t.Errorf("Execute: %+v, want %+v", query, execCase.keyspaceIdQuery) + return nil + } + *reply = *execCase.reply return nil } @@ -78,6 +87,15 @@ func (f *fakeVTGateService) ExecuteKeyRanges(ctx context.Context, query *proto.K if f.panics { panic(fmt.Errorf("test forced panic")) } + execCase, ok := execMap[query.Sql] + if !ok { + return fmt.Errorf("no match for: %s", query.Sql) + } + if !reflect.DeepEqual(query, execCase.keyRangeQuery) { + f.t.Errorf("Execute: %+v, want %+v", query, execCase.keyRangeQuery) + return nil + } + *reply = *execCase.reply return nil } @@ -86,6 +104,15 @@ func (f *fakeVTGateService) ExecuteEntityIds(ctx context.Context, query *proto.E if f.panics { panic(fmt.Errorf("test forced panic")) } + execCase, ok := execMap[query.Sql] + if !ok { + return fmt.Errorf("no match for: %s", query.Sql) + } + if !reflect.DeepEqual(query, execCase.entityIdsQuery) { + f.t.Errorf("Execute: %+v, want %+v", query, execCase.entityIdsQuery) + return nil + } + *reply = *execCase.reply return nil } @@ -94,6 +121,19 @@ func (f *fakeVTGateService) ExecuteBatchShard(ctx context.Context, batchQuery *p if f.panics { panic(fmt.Errorf("test forced panic")) } + execCase, ok := execMap[batchQuery.Queries[0].Sql] + if !ok { + return fmt.Errorf("no match for: %s", batchQuery.Queries[0].Sql) + } + if !reflect.DeepEqual(batchQuery, execCase.batchQueryShard) { + f.t.Errorf("Execute: %+v, want %+v", batchQuery, execCase.batchQueryShard) + return nil + } + reply.Error = execCase.reply.Error + if reply.Error == "" { + reply.List = []mproto.QueryResult{*execCase.reply.Result} + } + reply.Session = execCase.reply.Session return nil } @@ -102,6 +142,19 @@ func (f *fakeVTGateService) ExecuteBatchKeyspaceIds(ctx context.Context, batchQu if f.panics { panic(fmt.Errorf("test forced panic")) } + execCase, ok := execMap[batchQuery.Queries[0].Sql] + if !ok { + return fmt.Errorf("no match for: %s", batchQuery.Queries[0].Sql) + } + if !reflect.DeepEqual(batchQuery, execCase.keyspaceIdBatchQuery) { + f.t.Errorf("Execute: %+v, want %+v", batchQuery, execCase.keyspaceIdBatchQuery) + return nil + } + reply.Error = execCase.reply.Error + if reply.Error == "" { + reply.List = []mproto.QueryResult{*execCase.reply.Result} + } + reply.Session = execCase.reply.Session return nil } @@ -143,6 +196,31 @@ func (f *fakeVTGateService) StreamExecuteShard(ctx context.Context, query *proto if f.panics { panic(fmt.Errorf("test forced panic")) } + execCase, ok := execMap[query.Sql] + if !ok { + return fmt.Errorf("no match for: %s", query.Sql) + } + if 
!reflect.DeepEqual(query, execCase.shardQuery) { + f.t.Errorf("Execute: %+v, want %+v", query, execCase.shardQuery) + return nil + } + if execCase.reply.Result != nil { + result := proto.QueryResult{Result: &mproto.QueryResult{}} + result.Result.Fields = execCase.reply.Result.Fields + if err := sendReply(&result); err != nil { + return err + } + for _, row := range execCase.reply.Result.Rows { + result := proto.QueryResult{Result: &mproto.QueryResult{}} + result.Result.Rows = [][]sqltypes.Value{row} + if err := sendReply(&result); err != nil { + return err + } + } + } + if execCase.reply.Error != "" { + return errors.New(execCase.reply.Error) + } return nil } @@ -151,6 +229,31 @@ func (f *fakeVTGateService) StreamExecuteKeyRanges(ctx context.Context, query *p if f.panics { panic(fmt.Errorf("test forced panic")) } + execCase, ok := execMap[query.Sql] + if !ok { + return fmt.Errorf("no match for: %s", query.Sql) + } + if !reflect.DeepEqual(query, execCase.keyRangeQuery) { + f.t.Errorf("Execute: %+v, want %+v", query, execCase.keyRangeQuery) + return nil + } + if execCase.reply.Result != nil { + result := proto.QueryResult{Result: &mproto.QueryResult{}} + result.Result.Fields = execCase.reply.Result.Fields + if err := sendReply(&result); err != nil { + return err + } + for _, row := range execCase.reply.Result.Rows { + result := proto.QueryResult{Result: &mproto.QueryResult{}} + result.Result.Rows = [][]sqltypes.Value{row} + if err := sendReply(&result); err != nil { + return err + } + } + } + if execCase.reply.Error != "" { + return errors.New(execCase.reply.Error) + } return nil } @@ -159,6 +262,31 @@ func (f *fakeVTGateService) StreamExecuteKeyspaceIds(ctx context.Context, query if f.panics { panic(fmt.Errorf("test forced panic")) } + execCase, ok := execMap[query.Sql] + if !ok { + return fmt.Errorf("no match for: %s", query.Sql) + } + if !reflect.DeepEqual(query, execCase.keyspaceIdQuery) { + f.t.Errorf("Execute: %+v, want %+v", query, execCase.keyspaceIdQuery) + return nil + } + if execCase.reply.Result != nil { + result := proto.QueryResult{Result: &mproto.QueryResult{}} + result.Result.Fields = execCase.reply.Result.Fields + if err := sendReply(&result); err != nil { + return err + } + for _, row := range execCase.reply.Result.Rows { + result := proto.QueryResult{Result: &mproto.QueryResult{}} + result.Result.Rows = [][]sqltypes.Value{row} + if err := sendReply(&result); err != nil { + return err + } + } + } + if execCase.reply.Error != "" { + return errors.New(execCase.reply.Error) + } return nil } @@ -229,6 +357,11 @@ func TestSuite(t *testing.T, impl vtgateconn.Impl, fakeServer vtgateservice.VTGa testExecute(t, conn) testExecuteShard(t, conn) + testExecuteKeyspaceIds(t, conn) + testExecuteKeyRanges(t, conn) + testExecuteEntityIds(t, conn) + testExecuteBatchShard(t, conn) + testExecuteBatchKeyspaceIds(t, conn) testStreamExecute(t, conn) testTxPass(t, conn) testTxFail(t, conn) @@ -238,6 +371,11 @@ func TestSuite(t *testing.T, impl vtgateconn.Impl, fakeServer vtgateservice.VTGa fakeServer.(*fakeVTGateService).panics = true testExecutePanic(t, conn) testExecuteShardPanic(t, conn) + testExecuteKeyspaceIdsPanic(t, conn) + testExecuteKeyRangesPanic(t, conn) + testExecuteEntityIdsPanic(t, conn) + testExecuteBatchShardPanic(t, conn) + testExecuteBatchKeyspaceIdsPanic(t, conn) testStreamExecutePanic(t, conn) testBeginPanic(t, conn) testSplitQueryPanic(t, conn) @@ -285,7 +423,7 @@ func testExecutePanic(t *testing.T, conn *vtgateconn.VTGateConn) { func testExecuteShard(t *testing.T, conn 
*vtgateconn.VTGateConn) { ctx := context.Background() execCase := execMap["request1"] - qr, err := conn.ExecuteShard(ctx, execCase.execQuery.Sql, "ks", []string{"1", "2"}, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) + qr, err := conn.ExecuteShard(ctx, execCase.shardQuery.Sql, execCase.shardQuery.Keyspace, execCase.shardQuery.Shards, execCase.shardQuery.BindVariables, execCase.shardQuery.TabletType) if err != nil { t.Error(err) } @@ -313,6 +451,166 @@ func testExecuteShardPanic(t *testing.T, conn *vtgateconn.VTGateConn) { expectPanic(t, err) } +func testExecuteKeyspaceIds(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + qr, err := conn.ExecuteKeyspaceIds(ctx, execCase.keyspaceIdQuery.Sql, execCase.keyspaceIdQuery.Keyspace, execCase.keyspaceIdQuery.KeyspaceIds, execCase.keyspaceIdQuery.BindVariables, execCase.keyspaceIdQuery.TabletType) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(qr, execCase.reply.Result) { + t.Errorf("Unexpected result from Execute: got %+v want %+v", qr, execCase.reply.Result) + } + + _, err = conn.ExecuteKeyspaceIds(ctx, "none", "", []key.KeyspaceId{}, nil, "") + want := "no match for: none" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("none request: %v, want %v", err, want) + } + + execCase = execMap["errorRequst"] + _, err = conn.ExecuteKeyspaceIds(ctx, execCase.keyspaceIdQuery.Sql, execCase.keyspaceIdQuery.Keyspace, execCase.keyspaceIdQuery.KeyspaceIds, execCase.keyspaceIdQuery.BindVariables, execCase.keyspaceIdQuery.TabletType) + want = "app error" + if err == nil || err.Error() != want { + t.Errorf("errorRequst: %v, want %v", err, want) + } +} + +func testExecuteKeyspaceIdsPanic(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + _, err := conn.ExecuteKeyspaceIds(ctx, execCase.keyspaceIdQuery.Sql, execCase.keyspaceIdQuery.Keyspace, execCase.keyspaceIdQuery.KeyspaceIds, execCase.keyspaceIdQuery.BindVariables, execCase.keyspaceIdQuery.TabletType) + expectPanic(t, err) +} + +func testExecuteKeyRanges(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + qr, err := conn.ExecuteKeyRanges(ctx, execCase.keyRangeQuery.Sql, execCase.keyRangeQuery.Keyspace, execCase.keyRangeQuery.KeyRanges, execCase.keyRangeQuery.BindVariables, execCase.keyRangeQuery.TabletType) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(qr, execCase.reply.Result) { + t.Errorf("Unexpected result from Execute: got %+v want %+v", qr, execCase.reply.Result) + } + + _, err = conn.ExecuteKeyRanges(ctx, "none", "", []key.KeyRange{}, nil, "") + want := "no match for: none" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("none request: %v, want %v", err, want) + } + + execCase = execMap["errorRequst"] + _, err = conn.ExecuteKeyRanges(ctx, execCase.keyRangeQuery.Sql, execCase.keyRangeQuery.Keyspace, execCase.keyRangeQuery.KeyRanges, execCase.keyRangeQuery.BindVariables, execCase.keyRangeQuery.TabletType) + want = "app error" + if err == nil || err.Error() != want { + t.Errorf("errorRequst: %v, want %v", err, want) + } +} + +func testExecuteKeyRangesPanic(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + _, err := conn.ExecuteKeyRanges(ctx, execCase.keyRangeQuery.Sql, execCase.keyRangeQuery.Keyspace, execCase.keyRangeQuery.KeyRanges, execCase.keyRangeQuery.BindVariables, 
execCase.keyRangeQuery.TabletType) + expectPanic(t, err) +} + +func testExecuteEntityIds(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + qr, err := conn.ExecuteEntityIds(ctx, execCase.entityIdsQuery.Sql, execCase.entityIdsQuery.Keyspace, execCase.entityIdsQuery.EntityColumnName, execCase.entityIdsQuery.EntityKeyspaceIDs, execCase.entityIdsQuery.BindVariables, execCase.entityIdsQuery.TabletType) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(qr, execCase.reply.Result) { + t.Errorf("Unexpected result from Execute: got %+v want %+v", qr, execCase.reply.Result) + } + + _, err = conn.ExecuteEntityIds(ctx, "none", "", "", []proto.EntityId{}, nil, "") + want := "no match for: none" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("none request: %v, want %v", err, want) + } + + execCase = execMap["errorRequst"] + _, err = conn.ExecuteEntityIds(ctx, execCase.entityIdsQuery.Sql, execCase.entityIdsQuery.Keyspace, execCase.entityIdsQuery.EntityColumnName, execCase.entityIdsQuery.EntityKeyspaceIDs, execCase.entityIdsQuery.BindVariables, execCase.entityIdsQuery.TabletType) + want = "app error" + if err == nil || err.Error() != want { + t.Errorf("errorRequst: %v, want %v", err, want) + } +} + +func testExecuteEntityIdsPanic(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + _, err := conn.ExecuteEntityIds(ctx, execCase.entityIdsQuery.Sql, execCase.entityIdsQuery.Keyspace, execCase.entityIdsQuery.EntityColumnName, execCase.entityIdsQuery.EntityKeyspaceIDs, execCase.entityIdsQuery.BindVariables, execCase.entityIdsQuery.TabletType) + expectPanic(t, err) +} + +func testExecuteBatchShard(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + ql, err := conn.ExecuteBatchShard(ctx, execCase.batchQueryShard.Queries, execCase.batchQueryShard.Keyspace, execCase.batchQueryShard.Shards, execCase.batchQueryShard.TabletType) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(&ql[0], execCase.reply.Result) { + t.Errorf("Unexpected result from Execute: got %+v want %+v", ql, execCase.reply.Result) + } + + _, err = conn.ExecuteBatchShard(ctx, []tproto.BoundQuery{tproto.BoundQuery{Sql: "none"}}, "", []string{}, "") + want := "no match for: none" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("none request: %v, want %v", err, want) + } + + execCase = execMap["errorRequst"] + _, err = conn.ExecuteBatchShard(ctx, execCase.batchQueryShard.Queries, execCase.batchQueryShard.Keyspace, execCase.batchQueryShard.Shards, execCase.batchQueryShard.TabletType) + want = "app error" + if err == nil || err.Error() != want { + t.Errorf("errorRequst: %v, want %v", err, want) + } +} + +func testExecuteBatchShardPanic(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + _, err := conn.ExecuteBatchShard(ctx, execCase.batchQueryShard.Queries, execCase.batchQueryShard.Keyspace, execCase.batchQueryShard.Shards, execCase.batchQueryShard.TabletType) + expectPanic(t, err) +} + +func testExecuteBatchKeyspaceIds(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + ql, err := conn.ExecuteBatchKeyspaceIds(ctx, execCase.keyspaceIdBatchQuery.Queries, execCase.keyspaceIdBatchQuery.Keyspace, execCase.keyspaceIdBatchQuery.KeyspaceIds, execCase.keyspaceIdBatchQuery.TabletType) + if err != nil { + 
t.Error(err) + } + if !reflect.DeepEqual(&ql[0], execCase.reply.Result) { + t.Errorf("Unexpected result from Execute: got %+v want %+v", ql, execCase.reply.Result) + } + + _, err = conn.ExecuteBatchKeyspaceIds(ctx, []tproto.BoundQuery{tproto.BoundQuery{Sql: "none"}}, "", []key.KeyspaceId{}, "") + want := "no match for: none" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("none request: %v, want %v", err, want) + } + + execCase = execMap["errorRequst"] + _, err = conn.ExecuteBatchKeyspaceIds(ctx, execCase.keyspaceIdBatchQuery.Queries, execCase.keyspaceIdBatchQuery.Keyspace, execCase.keyspaceIdBatchQuery.KeyspaceIds, execCase.keyspaceIdBatchQuery.TabletType) + want = "app error" + if err == nil || err.Error() != want { + t.Errorf("errorRequst: %v, want %v", err, want) + } +} + +func testExecuteBatchKeyspaceIdsPanic(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + _, err := conn.ExecuteBatchKeyspaceIds(ctx, execCase.keyspaceIdBatchQuery.Queries, execCase.keyspaceIdBatchQuery.Keyspace, execCase.keyspaceIdBatchQuery.KeyspaceIds, execCase.keyspaceIdBatchQuery.TabletType) + expectPanic(t, err) +} + func testStreamExecute(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() execCase := execMap["request1"] @@ -470,10 +768,15 @@ func testSplitQueryPanic(t *testing.T, conn *vtgateconn.VTGateConn) { } var execMap = map[string]struct { - execQuery *proto.Query - shardQuery *proto.QueryShard - reply *proto.QueryResult - err error + execQuery *proto.Query + shardQuery *proto.QueryShard + keyspaceIdQuery *proto.KeyspaceIdQuery + keyRangeQuery *proto.KeyRangeQuery + entityIdsQuery *proto.EntityIdsQuery + batchQueryShard *proto.BatchQueryShard + keyspaceIdBatchQuery *proto.KeyspaceIdBatchQuery + reply *proto.QueryResult + err error }{ "request1": { execQuery: &proto.Query{ @@ -494,6 +797,79 @@ var execMap = map[string]struct { TabletType: topo.TYPE_RDONLY, Session: nil, }, + keyspaceIdQuery: &proto.KeyspaceIdQuery{ + Sql: "request1", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + KeyspaceIds: []key.KeyspaceId{ + key.KeyspaceId("a"), + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + keyRangeQuery: &proto.KeyRangeQuery{ + Sql: "request1", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + KeyRanges: []key.KeyRange{ + key.KeyRange{ + Start: key.KeyspaceId("s"), + End: key.KeyspaceId("e"), + }, + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + entityIdsQuery: &proto.EntityIdsQuery{ + Sql: "request1", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + EntityColumnName: "column", + EntityKeyspaceIDs: []proto.EntityId{ + proto.EntityId{ + ExternalID: []byte{105, 100, 49}, + KeyspaceID: key.KeyspaceId("k"), + }, + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + batchQueryShard: &proto.BatchQueryShard{ + Queries: []tproto.BoundQuery{ + tproto.BoundQuery{ + Sql: "request1", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + }, + }, + Keyspace: "ks", + Shards: []string{"-80", "80-"}, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + keyspaceIdBatchQuery: &proto.KeyspaceIdBatchQuery{ + Queries: []tproto.BoundQuery{ + tproto.BoundQuery{ + Sql: "request1", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + }, + }, + Keyspace: "ks", + KeyspaceIds: []key.KeyspaceId{ + key.KeyspaceId("ki1"), + }, + TabletType: topo.TYPE_RDONLY, + Session: 
nil, + }, reply: &proto.QueryResult{ Result: &result1, Session: nil, @@ -515,6 +891,79 @@ var execMap = map[string]struct { Shards: []string{}, Session: nil, }, + keyspaceIdQuery: &proto.KeyspaceIdQuery{ + Sql: "errorRequst", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + KeyspaceIds: []key.KeyspaceId{ + key.KeyspaceId("a"), + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + keyRangeQuery: &proto.KeyRangeQuery{ + Sql: "errorRequst", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + KeyRanges: []key.KeyRange{ + key.KeyRange{ + Start: key.KeyspaceId("s"), + End: key.KeyspaceId("e"), + }, + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + entityIdsQuery: &proto.EntityIdsQuery{ + Sql: "errorRequst", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + EntityColumnName: "column", + EntityKeyspaceIDs: []proto.EntityId{ + proto.EntityId{ + ExternalID: []byte{105, 100, 49}, + KeyspaceID: key.KeyspaceId("k"), + }, + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + batchQueryShard: &proto.BatchQueryShard{ + Queries: []tproto.BoundQuery{ + tproto.BoundQuery{ + Sql: "errorRequst", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + }, + }, + Keyspace: "ks", + Shards: []string{"-80", "80-"}, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + keyspaceIdBatchQuery: &proto.KeyspaceIdBatchQuery{ + Queries: []tproto.BoundQuery{ + tproto.BoundQuery{ + Sql: "errorRequst", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + }, + }, + Keyspace: "ks", + KeyspaceIds: []key.KeyspaceId{ + key.KeyspaceId("ki1"), + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, reply: &proto.QueryResult{ Result: nil, Session: nil, @@ -536,6 +985,79 @@ var execMap = map[string]struct { Shards: []string{}, Session: session1, }, + keyspaceIdQuery: &proto.KeyspaceIdQuery{ + Sql: "txRequest", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + KeyspaceIds: []key.KeyspaceId{ + key.KeyspaceId("a"), + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + keyRangeQuery: &proto.KeyRangeQuery{ + Sql: "txRequest", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + KeyRanges: []key.KeyRange{ + key.KeyRange{ + Start: key.KeyspaceId("s"), + End: key.KeyspaceId("e"), + }, + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + entityIdsQuery: &proto.EntityIdsQuery{ + Sql: "txRequest", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + Keyspace: "ks", + EntityColumnName: "column", + EntityKeyspaceIDs: []proto.EntityId{ + proto.EntityId{ + ExternalID: []byte{105, 100, 49}, + KeyspaceID: key.KeyspaceId("k"), + }, + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + batchQueryShard: &proto.BatchQueryShard{ + Queries: []tproto.BoundQuery{ + tproto.BoundQuery{ + Sql: "txRequest", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + }, + }, + Keyspace: "ks", + Shards: []string{"-80", "80-"}, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, + keyspaceIdBatchQuery: &proto.KeyspaceIdBatchQuery{ + Queries: []tproto.BoundQuery{ + tproto.BoundQuery{ + Sql: "txRequest", + BindVariables: map[string]interface{}{ + "bind1": int64(0), + }, + }, + }, + Keyspace: "ks", + KeyspaceIds: []key.KeyspaceId{ + key.KeyspaceId("ki1"), + }, + TabletType: topo.TYPE_RDONLY, + Session: nil, + }, reply: &proto.QueryResult{ Result: nil, Session: session2, From 
340e2c00b09145183882d406095c72927e585f74 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 29 May 2015 13:38:26 -0700 Subject: [PATCH 124/128] Adding test for streaming queries and sessions. --- go/vt/vtgate/vtgateconntest/client.go | 297 +++++++++++++++++++++++++- 1 file changed, 288 insertions(+), 9 deletions(-) diff --git a/go/vt/vtgate/vtgateconntest/client.go b/go/vt/vtgate/vtgateconntest/client.go index 0b2bb2489f..875840ded7 100644 --- a/go/vt/vtgate/vtgateconntest/client.go +++ b/go/vt/vtgate/vtgateconntest/client.go @@ -130,7 +130,7 @@ func (f *fakeVTGateService) ExecuteBatchShard(ctx context.Context, batchQuery *p return nil } reply.Error = execCase.reply.Error - if reply.Error == "" { + if reply.Error == "" && execCase.reply.Result != nil { reply.List = []mproto.QueryResult{*execCase.reply.Result} } reply.Session = execCase.reply.Session @@ -151,7 +151,7 @@ func (f *fakeVTGateService) ExecuteBatchKeyspaceIds(ctx context.Context, batchQu return nil } reply.Error = execCase.reply.Error - if reply.Error == "" { + if reply.Error == "" && execCase.reply.Result != nil { reply.List = []mproto.QueryResult{*execCase.reply.Result} } reply.Session = execCase.reply.Session @@ -363,6 +363,9 @@ func TestSuite(t *testing.T, impl vtgateconn.Impl, fakeServer vtgateservice.VTGa testExecuteBatchShard(t, conn) testExecuteBatchKeyspaceIds(t, conn) testStreamExecute(t, conn) + testStreamExecuteShard(t, conn) + testStreamExecuteKeyRanges(t, conn) + testStreamExecuteKeyspaceIds(t, conn) testTxPass(t, conn) testTxFail(t, conn) testSplitQuery(t, conn) @@ -377,6 +380,9 @@ func TestSuite(t *testing.T, impl vtgateconn.Impl, fakeServer vtgateservice.VTGa testExecuteBatchShardPanic(t, conn) testExecuteBatchKeyspaceIdsPanic(t, conn) testStreamExecutePanic(t, conn) + testStreamExecuteShardPanic(t, conn) + testStreamExecuteKeyRangesPanic(t, conn) + testStreamExecuteKeyspaceIdsPanic(t, conn) testBeginPanic(t, conn) testSplitQueryPanic(t, conn) } @@ -667,13 +673,186 @@ func testStreamExecutePanic(t *testing.T, conn *vtgateconn.VTGateConn) { expectPanic(t, err) } +func testStreamExecuteShard(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + packets, errFunc := conn.StreamExecuteShard(ctx, execCase.shardQuery.Sql, execCase.shardQuery.Keyspace, execCase.shardQuery.Shards, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) + var qr mproto.QueryResult + for packet := range packets { + if len(packet.Fields) != 0 { + qr.Fields = packet.Fields + } + if len(packet.Rows) != 0 { + qr.Rows = append(qr.Rows, packet.Rows...) 
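+			// (each row arrives in its own packet after the fields packet)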
+ } + } + wantResult := *execCase.reply.Result + wantResult.RowsAffected = 0 + wantResult.InsertId = 0 + if !reflect.DeepEqual(qr, wantResult) { + t.Errorf("Unexpected result from Execute: got %+v want %+v", qr, wantResult) + } + err := errFunc() + if err != nil { + t.Error(err) + } + + packets, errFunc = conn.StreamExecuteShard(ctx, "none", "", []string{}, nil, "") + for packet := range packets { + t.Errorf("packet: %+v, want none", packet) + } + err = errFunc() + want := "no match for: none" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("none request: %v, want %v", err, want) + } + + execCase = execMap["errorRequst"] + packets, errFunc = conn.StreamExecuteShard(ctx, execCase.shardQuery.Sql, execCase.shardQuery.Keyspace, execCase.shardQuery.Shards, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) + for packet := range packets { + t.Errorf("packet: %+v, want none", packet) + } + err = errFunc() + want = "app error" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("errorRequst: %v, want %v", err, want) + } +} + +func testStreamExecuteShardPanic(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + packets, errFunc := conn.StreamExecuteShard(ctx, execCase.shardQuery.Sql, execCase.shardQuery.Keyspace, execCase.shardQuery.Shards, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) + if _, ok := <-packets; ok { + t.Fatalf("Received packets instead of panic?") + } + err := errFunc() + expectPanic(t, err) +} + +func testStreamExecuteKeyRanges(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + packets, errFunc := conn.StreamExecuteKeyRanges(ctx, execCase.keyRangeQuery.Sql, execCase.keyRangeQuery.Keyspace, execCase.keyRangeQuery.KeyRanges, execCase.keyRangeQuery.BindVariables, execCase.keyRangeQuery.TabletType) + var qr mproto.QueryResult + for packet := range packets { + if len(packet.Fields) != 0 { + qr.Fields = packet.Fields + } + if len(packet.Rows) != 0 { + qr.Rows = append(qr.Rows, packet.Rows...) 
+ } + } + wantResult := *execCase.reply.Result + wantResult.RowsAffected = 0 + wantResult.InsertId = 0 + if !reflect.DeepEqual(qr, wantResult) { + t.Errorf("Unexpected result from Execute: got %+v want %+v", qr, wantResult) + } + err := errFunc() + if err != nil { + t.Error(err) + } + + packets, errFunc = conn.StreamExecuteKeyRanges(ctx, "none", "", []key.KeyRange{}, nil, "") + for packet := range packets { + t.Errorf("packet: %+v, want none", packet) + } + err = errFunc() + want := "no match for: none" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("none request: %v, want %v", err, want) + } + + execCase = execMap["errorRequst"] + packets, errFunc = conn.StreamExecuteKeyRanges(ctx, execCase.keyRangeQuery.Sql, execCase.keyRangeQuery.Keyspace, execCase.keyRangeQuery.KeyRanges, execCase.keyRangeQuery.BindVariables, execCase.keyRangeQuery.TabletType) + for packet := range packets { + t.Errorf("packet: %+v, want none", packet) + } + err = errFunc() + want = "app error" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("errorRequst: %v, want %v", err, want) + } +} + +func testStreamExecuteKeyRangesPanic(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + packets, errFunc := conn.StreamExecuteKeyRanges(ctx, execCase.keyRangeQuery.Sql, execCase.keyRangeQuery.Keyspace, execCase.keyRangeQuery.KeyRanges, execCase.keyRangeQuery.BindVariables, execCase.keyRangeQuery.TabletType) + if _, ok := <-packets; ok { + t.Fatalf("Received packets instead of panic?") + } + err := errFunc() + expectPanic(t, err) +} + +func testStreamExecuteKeyspaceIds(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + packets, errFunc := conn.StreamExecuteKeyspaceIds(ctx, execCase.keyspaceIdQuery.Sql, execCase.keyspaceIdQuery.Keyspace, execCase.keyspaceIdQuery.KeyspaceIds, execCase.keyspaceIdQuery.BindVariables, execCase.keyspaceIdQuery.TabletType) + var qr mproto.QueryResult + for packet := range packets { + if len(packet.Fields) != 0 { + qr.Fields = packet.Fields + } + if len(packet.Rows) != 0 { + qr.Rows = append(qr.Rows, packet.Rows...) 
+ } + } + wantResult := *execCase.reply.Result + wantResult.RowsAffected = 0 + wantResult.InsertId = 0 + if !reflect.DeepEqual(qr, wantResult) { + t.Errorf("Unexpected result from Execute: got %+v want %+v", qr, wantResult) + } + err := errFunc() + if err != nil { + t.Error(err) + } + + packets, errFunc = conn.StreamExecuteKeyspaceIds(ctx, "none", "", []key.KeyspaceId{}, nil, "") + for packet := range packets { + t.Errorf("packet: %+v, want none", packet) + } + err = errFunc() + want := "no match for: none" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("none request: %v, want %v", err, want) + } + + execCase = execMap["errorRequst"] + packets, errFunc = conn.StreamExecuteKeyspaceIds(ctx, execCase.keyspaceIdQuery.Sql, execCase.keyspaceIdQuery.Keyspace, execCase.keyspaceIdQuery.KeyspaceIds, execCase.keyspaceIdQuery.BindVariables, execCase.keyspaceIdQuery.TabletType) + for packet := range packets { + t.Errorf("packet: %+v, want none", packet) + } + err = errFunc() + want = "app error" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("errorRequst: %v, want %v", err, want) + } +} + +func testStreamExecuteKeyspaceIdsPanic(t *testing.T, conn *vtgateconn.VTGateConn) { + ctx := context.Background() + execCase := execMap["request1"] + packets, errFunc := conn.StreamExecuteKeyspaceIds(ctx, execCase.keyspaceIdQuery.Sql, execCase.keyspaceIdQuery.Keyspace, execCase.keyspaceIdQuery.KeyspaceIds, execCase.keyspaceIdQuery.BindVariables, execCase.keyspaceIdQuery.TabletType) + if _, ok := <-packets; ok { + t.Fatalf("Received packets instead of panic?") + } + err := errFunc() + expectPanic(t, err) +} + func testTxPass(t *testing.T, conn *vtgateconn.VTGateConn) { ctx := context.Background() + execCase := execMap["txRequest"] + + // Execute tx, err := conn.Begin(ctx) if err != nil { t.Error(err) } - execCase := execMap["txRequest"] _, err = tx.Execute(ctx, execCase.execQuery.Sql, execCase.execQuery.BindVariables, execCase.execQuery.TabletType) if err != nil { t.Error(err) @@ -683,11 +862,11 @@ func testTxPass(t *testing.T, conn *vtgateconn.VTGateConn) { t.Error(err) } + // ExecuteShard tx, err = conn.Begin(ctx) if err != nil { t.Error(err) } - execCase = execMap["txRequest"] _, err = tx.ExecuteShard(ctx, execCase.shardQuery.Sql, execCase.shardQuery.Keyspace, execCase.shardQuery.Shards, execCase.shardQuery.BindVariables, execCase.shardQuery.TabletType) if err != nil { t.Error(err) @@ -696,6 +875,76 @@ func testTxPass(t *testing.T, conn *vtgateconn.VTGateConn) { if err != nil { t.Error(err) } + + // ExecuteKeyspaceIds + tx, err = conn.Begin(ctx) + if err != nil { + t.Error(err) + } + _, err = tx.ExecuteKeyspaceIds(ctx, execCase.keyspaceIdQuery.Sql, execCase.keyspaceIdQuery.Keyspace, execCase.keyspaceIdQuery.KeyspaceIds, execCase.keyspaceIdQuery.BindVariables, execCase.keyspaceIdQuery.TabletType) + if err != nil { + t.Error(err) + } + err = tx.Rollback(ctx) + if err != nil { + t.Error(err) + } + + // ExecuteKeyRanges + tx, err = conn.Begin(ctx) + if err != nil { + t.Error(err) + } + _, err = tx.ExecuteKeyRanges(ctx, execCase.keyRangeQuery.Sql, execCase.keyRangeQuery.Keyspace, execCase.keyRangeQuery.KeyRanges, execCase.keyRangeQuery.BindVariables, execCase.keyRangeQuery.TabletType) + if err != nil { + t.Error(err) + } + err = tx.Rollback(ctx) + if err != nil { + t.Error(err) + } + + // ExecuteEntityIds + tx, err = conn.Begin(ctx) + if err != nil { + t.Error(err) + } + _, err = tx.ExecuteEntityIds(ctx, execCase.entityIdsQuery.Sql, execCase.entityIdsQuery.Keyspace, 
execCase.entityIdsQuery.EntityColumnName, execCase.entityIdsQuery.EntityKeyspaceIDs, execCase.entityIdsQuery.BindVariables, execCase.entityIdsQuery.TabletType)
+	if err != nil {
+		t.Error(err)
+	}
+	err = tx.Rollback(ctx)
+	if err != nil {
+		t.Error(err)
+	}
+
+	// ExecuteBatchShard
+	tx, err = conn.Begin(ctx)
+	if err != nil {
+		t.Error(err)
+	}
+	_, err = tx.ExecuteBatchShard(ctx, execCase.batchQueryShard.Queries, execCase.batchQueryShard.Keyspace, execCase.batchQueryShard.Shards, execCase.batchQueryShard.TabletType)
+	if err != nil {
+		t.Error(err)
+	}
+	err = tx.Rollback(ctx)
+	if err != nil {
+		t.Error(err)
+	}
+
+	// ExecuteBatchKeyspaceIds
+	tx, err = conn.Begin(ctx)
+	if err != nil {
+		t.Error(err)
+	}
+	_, err = tx.ExecuteBatchKeyspaceIds(ctx, execCase.keyspaceIdBatchQuery.Queries, execCase.keyspaceIdBatchQuery.Keyspace, execCase.keyspaceIdBatchQuery.KeyspaceIds, execCase.keyspaceIdBatchQuery.TabletType)
+	if err != nil {
+		t.Error(err)
+	}
+	err = tx.Rollback(ctx)
+	if err != nil {
+		t.Error(err)
+	}
 }
 
 func testBeginPanic(t *testing.T, conn *vtgateconn.VTGateConn) {
@@ -728,6 +977,36 @@ func testTxFail(t *testing.T, conn *vtgateconn.VTGateConn) {
 		t.Errorf("ExecuteShard: %v, want %v", err, want)
 	}
 
+	_, err = tx.ExecuteKeyspaceIds(ctx, "", "", nil, nil, "")
+	want = "executeKeyspaceIds: not in transaction"
+	if err == nil || err.Error() != want {
+		t.Errorf("ExecuteKeyspaceIds: %v, want %v", err, want)
+	}
+
+	_, err = tx.ExecuteKeyRanges(ctx, "", "", nil, nil, "")
+	want = "executeKeyRanges: not in transaction"
+	if err == nil || err.Error() != want {
+		t.Errorf("ExecuteKeyRanges: %v, want %v", err, want)
+	}
+
+	_, err = tx.ExecuteEntityIds(ctx, "", "", "", nil, nil, "")
+	want = "executeEntityIds: not in transaction"
+	if err == nil || err.Error() != want {
+		t.Errorf("ExecuteEntityIds: %v, want %v", err, want)
+	}
+
+	_, err = tx.ExecuteBatchShard(ctx, nil, "", nil, "")
+	want = "executeBatchShard: not in transaction"
+	if err == nil || err.Error() != want {
+		t.Errorf("ExecuteBatchShard: %v, want %v", err, want)
+	}
+
+	_, err = tx.ExecuteBatchKeyspaceIds(ctx, nil, "", nil, "")
+	want = "executeBatchKeyspaceIds: not in transaction"
+	if err == nil || err.Error() != want {
+		t.Errorf("ExecuteBatchKeyspaceIds: %v, want %v", err, want)
+	}
+
 	err = tx.Commit(ctx)
 	want = "commit: not in transaction"
 	if err == nil || err.Error() != want {
@@ -995,7 +1274,7 @@ var execMap = map[string]struct {
 				key.KeyspaceId("a"),
 			},
 			TabletType: topo.TYPE_RDONLY,
-			Session:    nil,
+			Session:    session1,
 		},
 		keyRangeQuery: &proto.KeyRangeQuery{
 			Sql: "txRequest",
@@ -1010,7 +1289,7 @@ var execMap = map[string]struct {
 				},
 			},
 			TabletType: topo.TYPE_RDONLY,
-			Session:    nil,
+			Session:    session1,
 		},
 		entityIdsQuery: &proto.EntityIdsQuery{
 			Sql: "txRequest",
@@ -1026,7 +1305,7 @@ var execMap = map[string]struct {
 				},
 			},
 			TabletType: topo.TYPE_RDONLY,
-			Session:    nil,
+			Session:    session1,
 		},
 		batchQueryShard: &proto.BatchQueryShard{
 			Queries: []tproto.BoundQuery{
@@ -1040,7 +1319,7 @@ var execMap = map[string]struct {
 			Keyspace:   "ks",
 			Shards:     []string{"-80", "80-"},
 			TabletType: topo.TYPE_RDONLY,
-			Session:    nil,
+			Session:    session1,
 		},
 		keyspaceIdBatchQuery: &proto.KeyspaceIdBatchQuery{
 			Queries: []tproto.BoundQuery{
@@ -1056,7 +1335,7 @@ var execMap = map[string]struct {
 				key.KeyspaceId("ki1"),
 			},
 			TabletType: topo.TYPE_RDONLY,
-			Session:    nil,
+			Session:    session1,
 		},
 		reply: &proto.QueryResult{
 			Result: nil,

From fc3079860e6ce651b0e5b2acb5856b21c236c307 Mon Sep 17 00:00:00 2001
From: Alain Jobart
Date: Fri, 29 May 2015 14:12:27 -0700
Subject: [PATCH 125/128] 
Running a few commands through VtctlPipe. That adds unit test coverage to vtctl.go. --- go/vt/wrangler/testlib/emergency_reparent_shard_test.go | 5 +++-- go/vt/wrangler/testlib/init_shard_master_test.go | 4 +++- go/vt/wrangler/testlib/planned_reparent_shard_test.go | 6 +++--- go/vt/wrangler/testlib/reparent_external_test.go | 4 +++- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index c826581ed0..14a826011d 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -20,9 +20,10 @@ import ( ) func TestEmergencyReparentShard(t *testing.T) { - ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) + vp := NewVtctlPipe(t, ts) + defer vp.Close() // Create a master, a couple good slaves oldMaster := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) @@ -93,7 +94,7 @@ func TestEmergencyReparentShard(t *testing.T) { defer goodSlave2.StopActionLoop(t) // run EmergencyReparentShard - if err := wr.EmergencyReparentShard(ctx, newMaster.Tablet.Keyspace, newMaster.Tablet.Shard, newMaster.Tablet.Alias, 10*time.Second); err != nil { + if err := vp.Run([]string{"EmergencyReparentShard", "-wait_slave_timeout", "10s", newMaster.Tablet.Keyspace + "/" + newMaster.Tablet.Shard, newMaster.Tablet.Alias.String()}); err != nil { t.Fatalf("EmergencyReparentShard failed: %v", err) } diff --git a/go/vt/wrangler/testlib/init_shard_master_test.go b/go/vt/wrangler/testlib/init_shard_master_test.go index ce9df831a7..915da49319 100644 --- a/go/vt/wrangler/testlib/init_shard_master_test.go +++ b/go/vt/wrangler/testlib/init_shard_master_test.go @@ -25,6 +25,8 @@ func TestInitMasterShard(t *testing.T) { ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) + vp := NewVtctlPipe(t, ts) + defer vp.Close() // Create a master, a couple good slaves master := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) @@ -90,7 +92,7 @@ func TestInitMasterShard(t *testing.T) { defer goodSlave2.StopActionLoop(t) // run InitShardMaster - if err := wr.InitShardMaster(ctx, master.Tablet.Keyspace, master.Tablet.Shard, master.Tablet.Alias, false /*force*/, 10*time.Second); err != nil { + if err := vp.Run([]string{"InitShardMaster", "-wait_slave_timeout", "10s", master.Tablet.Keyspace + "/" + master.Tablet.Shard, master.Tablet.Alias.String()}); err != nil { t.Fatalf("InitShardMaster failed: %v", err) } diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index 8ec6d505d2..6279041241 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -15,15 +15,15 @@ import ( "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/wrangler" "github.com/youtube/vitess/go/vt/zktopo" - "golang.org/x/net/context" "time" ) func TestPlannedReparentShard(t *testing.T) { - ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) + vp := NewVtctlPipe(t, ts) + defer vp.Close() // Create a master, a couple good slaves oldMaster := 
NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) @@ -95,7 +95,7 @@ func TestPlannedReparentShard(t *testing.T) { defer goodSlave2.StopActionLoop(t) // run PlannedReparentShard - if err := wr.PlannedReparentShard(ctx, newMaster.Tablet.Keyspace, newMaster.Tablet.Shard, newMaster.Tablet.Alias, 10*time.Second); err != nil { + if err := vp.Run([]string{"PlannedReparentShard", "-wait_slave_timeout", "10s", newMaster.Tablet.Keyspace + "/" + newMaster.Tablet.Shard, newMaster.Tablet.Alias.String()}); err != nil { t.Fatalf("PlannedReparentShard failed: %v", err) } diff --git a/go/vt/wrangler/testlib/reparent_external_test.go b/go/vt/wrangler/testlib/reparent_external_test.go index e406364686..90e5bb3d38 100644 --- a/go/vt/wrangler/testlib/reparent_external_test.go +++ b/go/vt/wrangler/testlib/reparent_external_test.go @@ -37,6 +37,8 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { ctx := context.Background() ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"}) wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second) + vp := NewVtctlPipe(t, ts) + defer vp.Close() // Create an old master, a new master, two good slaves, one bad slave oldMaster := NewFakeTablet(t, wr, "cell1", 0, topo.TYPE_MASTER) @@ -121,7 +123,7 @@ func testTabletExternallyReparented(t *testing.T, fast bool) { if err != nil { t.Fatalf("GetTablet failed: %v", err) } - if err := tmc.TabletExternallyReparented(context.Background(), ti, ""); err != nil { + if err := vp.Run([]string{"TabletExternallyReparented", oldMaster.Tablet.Alias.String()}); err != nil { t.Fatalf("TabletExternallyReparented(same master) should have worked") } From 4be492cc15edbc50dbebff4fe3af697e38a39371 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 29 May 2015 15:10:50 -0700 Subject: [PATCH 126/128] Adding unit test for MigrateServedFrom. --- .../testlib/migrate_served_from_test.go | 175 ++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 go/vt/wrangler/testlib/migrate_served_from_test.go diff --git a/go/vt/wrangler/testlib/migrate_served_from_test.go b/go/vt/wrangler/testlib/migrate_served_from_test.go new file mode 100644 index 0000000000..003517c1a8 --- /dev/null +++ b/go/vt/wrangler/testlib/migrate_served_from_test.go @@ -0,0 +1,175 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
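+//
+// This test drives MigrateServedFrom through vtctl: it migrates the
+// rdonly, replica, and master served types from a source keyspace to
+// a destination keyspace, checking topology state after each step.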
+
+package testlib
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	mproto "github.com/youtube/vitess/go/mysql/proto"
+	"github.com/youtube/vitess/go/sqltypes"
+	"github.com/youtube/vitess/go/vt/logutil"
+	myproto "github.com/youtube/vitess/go/vt/mysqlctl/proto"
+	"github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
+	"github.com/youtube/vitess/go/vt/topo"
+	"github.com/youtube/vitess/go/vt/wrangler"
+	"github.com/youtube/vitess/go/vt/zktopo"
+	"golang.org/x/net/context"
+)
+
+func TestMigrateServedFrom(t *testing.T) {
+	ctx := context.Background()
+	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
+	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second)
+	vp := NewVtctlPipe(t, ts)
+	defer vp.Close()
+
+	// create the source keyspace tablets
+	sourceMaster := NewFakeTablet(t, wr, "cell1", 10, topo.TYPE_MASTER,
+		TabletKeyspaceShard(t, "source", "0"))
+	sourceReplica := NewFakeTablet(t, wr, "cell1", 11, topo.TYPE_REPLICA,
+		TabletKeyspaceShard(t, "source", "0"))
+	sourceRdonly := NewFakeTablet(t, wr, "cell1", 12, topo.TYPE_RDONLY,
+		TabletKeyspaceShard(t, "source", "0"))
+
+	// create the destination keyspace, served from source,
+	// and double-check it has all entries in the map
+	if err := vp.Run([]string{"CreateKeyspace", "-served_from", "master:source,replica:source,rdonly:source", "dest"}); err != nil {
+		t.Fatalf("CreateKeyspace(dest) failed: %v", err)
+	}
+	ki, err := ts.GetKeyspace(ctx, "dest")
+	if err != nil {
+		t.Fatalf("GetKeyspace failed: %v", err)
+	}
+	if len(ki.ServedFromMap) != 3 {
+		t.Fatalf("bad initial dest ServedFrom: %v", ki.ServedFromMap)
+	}
+
+	// create the destination keyspace tablets
+	destMaster := NewFakeTablet(t, wr, "cell1", 20, topo.TYPE_MASTER,
+		TabletKeyspaceShard(t, "dest", "0"))
+	destReplica := NewFakeTablet(t, wr, "cell1", 21, topo.TYPE_REPLICA,
+		TabletKeyspaceShard(t, "dest", "0"))
+	destRdonly := NewFakeTablet(t, wr, "cell1", 22, topo.TYPE_RDONLY,
+		TabletKeyspaceShard(t, "dest", "0"))
+
+	// sourceRdonly will see the refresh
+	sourceRdonly.StartActionLoop(t, wr)
+	defer sourceRdonly.StopActionLoop(t)
+
+	// sourceReplica will see the refresh
+	sourceReplica.StartActionLoop(t, wr)
+	defer sourceReplica.StopActionLoop(t)
+
+	// sourceMaster will see the refresh, has to respond to it, and will
+	// also be asked about its replication position.
+	sourceMaster.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{
+		GTIDSet: myproto.MariadbGTID{
+			Domain:   5,
+			Server:   456,
+			Sequence: 892,
+		},
+	}
+	sourceMaster.StartActionLoop(t, wr)
+	defer sourceMaster.StopActionLoop(t)
+
+	// destRdonly will see the refresh
+	destRdonly.StartActionLoop(t, wr)
+	defer destRdonly.StopActionLoop(t)
+
+	// destReplica will see the refresh
+	destReplica.StartActionLoop(t, wr)
+	defer destReplica.StopActionLoop(t)
+
+	// destMaster will see the refresh, and has to respond to it.
+	// It will also need to respond to WaitBlpPosition, saying it's already caught up.
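+	// The canned result below answers the blp_checkpoint query with the
+	// source master's current position (encoded) and empty flags.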
+	destMaster.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*mproto.QueryResult{
+		"SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=0": &mproto.QueryResult{
+			Rows: [][]sqltypes.Value{
+				[]sqltypes.Value{
+					sqltypes.MakeString([]byte(myproto.EncodeReplicationPosition(sourceMaster.FakeMysqlDaemon.CurrentMasterPosition))),
+					sqltypes.MakeString([]byte("")),
+				},
+			},
+		},
+	}
+	destMaster.StartActionLoop(t, wr)
+	defer destMaster.StopActionLoop(t)
+
+	// simulate the clone by fixing the dest shard record
+	if err := vp.Run([]string{"SourceShardAdd", "--tables", "gone1,gone2", "dest/0", "0", "source/0"}); err != nil {
+		t.Fatalf("SourceShardAdd failed: %v", err)
+	}
+
+	// migrate rdonly over
+	if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "rdonly"}); err != nil {
+		t.Fatalf("MigrateServedFrom(rdonly) failed: %v", err)
+	}
+
+	// check it's gone from the keyspace
+	ki, err = ts.GetKeyspace(ctx, "dest")
+	if err != nil {
+		t.Fatalf("GetKeyspace failed: %v", err)
+	}
+	if _, ok := ki.ServedFromMap[topo.TYPE_RDONLY]; len(ki.ServedFromMap) != 2 || ok {
+		t.Fatalf("bad dest ServedFrom after rdonly migration: %v", ki.ServedFromMap)
+	}
+
+	// check the source shard has the right blacklisted tables
+	si, err := ts.GetShard(ctx, "source", "0")
+	if err != nil {
+		t.Fatalf("GetShard failed: %v", err)
+	}
+	if len(si.TabletControlMap) != 1 || !reflect.DeepEqual(si.TabletControlMap[topo.TYPE_RDONLY].BlacklistedTables, []string{"gone1", "gone2"}) {
+		t.Fatalf("rdonly type doesn't have the right blacklisted tables")
+	}
+
+	// migrate replica over
+	if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "replica"}); err != nil {
+		t.Fatalf("MigrateServedFrom(replica) failed: %v", err)
+	}
+
+	// check it's gone from the keyspace
+	ki, err = ts.GetKeyspace(ctx, "dest")
+	if err != nil {
+		t.Fatalf("GetKeyspace failed: %v", err)
+	}
+	if _, ok := ki.ServedFromMap[topo.TYPE_REPLICA]; len(ki.ServedFromMap) != 1 || ok {
+		t.Fatalf("bad dest ServedFrom after replica migration: %v", ki.ServedFromMap)
+	}
+
+	// check the source shard has the right blacklisted tables
+	si, err = ts.GetShard(ctx, "source", "0")
+	if err != nil {
+		t.Fatalf("GetShard failed: %v", err)
+	}
+	if len(si.TabletControlMap) != 2 || !reflect.DeepEqual(si.TabletControlMap[topo.TYPE_REPLICA].BlacklistedTables, []string{"gone1", "gone2"}) {
+		t.Fatalf("replica type doesn't have the right blacklisted tables")
+	}
+
+	// migrate master over
+	if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "master"}); err != nil {
+		t.Fatalf("MigrateServedFrom(master) failed: %v", err)
+	}
+
+	// make sure ServedFromMap is empty
+	ki, err = ts.GetKeyspace(ctx, "dest")
+	if err != nil {
+		t.Fatalf("GetKeyspace failed: %v", err)
+	}
+	if len(ki.ServedFromMap) > 0 {
+		t.Fatalf("dest keyspace is still ServedFrom: %v", ki.ServedFromMap)
+	}
+
+	// check the source shard has the right blacklisted tables
+	si, err = ts.GetShard(ctx, "source", "0")
+	if err != nil {
+		t.Fatalf("GetShard failed: %v", err)
+	}
+	if len(si.TabletControlMap) != 3 || !reflect.DeepEqual(si.TabletControlMap[topo.TYPE_MASTER].BlacklistedTables, []string{"gone1", "gone2"}) {
+		t.Fatalf("master type doesn't have the right blacklisted tables")
+	}
+}

From 20ed05cc65b6c3a9a8dd8113ef63ad279c4e3cfb Mon Sep 17 00:00:00 2001
From: Shengzhe Yao
Date: Fri, 29 May 2015 15:32:38 -0700
Subject: [PATCH 127/128] update vtctl code comments

---
 go/vt/vtctl/vtctl.go | 526 +++++++++++++++++++++++++------------------
 1 file changed, 303 insertions(+), 223 deletions(-)

diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go
index db99554117..6fa7634a12 100644
--- a/go/vt/vtctl/vtctl.go
+++ b/go/vt/vtctl/vtctl.go
@@ -2,6 +2,85 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+// The following comment section contains definitions for command arguments.
+/*
+COMMAND ARGUMENT DEFINITIONS
+
+- cell, cell name: A cell is a location for a service. Generally, a cell
+        resides in only one cluster. In Vitess, the terms "cell" and
+        "data center" are interchangeable. The argument value is a
+        string that does not contain whitespace.
+
+- tablet alias: A Tablet Alias uniquely identifies a vttablet. The argument
+                value is in the format
+                <cell name>-<uid>.
+
+- keyspace, keyspace name: The name of a sharded database that contains one
+        or more tables. Vitess distributes keyspace shards into multiple
+        machines and provides an SQL interface to query the data. The
+        argument value must be a string that does not contain whitespace.
+
+- port name: A port number. The argument value should be an integer between
+             0 and 65535, inclusive.
+
+- shard, shard name: The name of a shard. The argument value is typically in
+        the format <range start>-<range end>.
+
+- keyspace/shard: The name of a sharded database that contains one or more
+                  tables as well as the shard associated with the command.
+                  The keyspace must be identified by a string that does not
+                  contain whitespace, while the shard is typically identified
+                  by a string in the format
+                  <range start>-<range end>.
+
+- duration: The amount of time that the action queue should be blocked.
+            The value is a string that contains a possibly signed sequence
+            of decimal numbers, each with optional fraction and a unit
+            suffix, such as "300ms" or "1h45m". See the definition of the
+            Go language's ParseDuration
+            function for more details. Note that, in practice, the value
+            should be a positively signed value.
+
+- db type, tablet type: The vttablet's role. Valid values are:
+  -- backup: A slaved copy of data that is offline to queries other than
+             for backup purposes
+  -- batch: A slaved copy of data for OLAP load patterns (typically for
+            MapReduce jobs)
+  -- checker: A tablet that is running a checker process. The tablet is likely
+              lagging in replication.
+  -- experimental: A slaved copy of data that is ready but not serving query
+                   traffic. The value indicates a special characteristic of
+                   the tablet that means it should not be considered a
+                   potential master. Vitess also does not worry about lag
+                   for experimental tablets when reparenting.
+  -- idle: An idle vttablet that does not have a keyspace, shard
+           or type assigned
+  -- lag: A slaved copy of data intentionally lagged for pseudo-backup.
+  -- lag_orphan: A tablet in the midst of a reparenting process. During that
+                 process, the tablet goes into a lag_orphan state
+                 until it is reparented properly.
+  -- master: A primary copy of data
+  -- rdonly: A slaved copy of data for OLAP load patterns
+  -- replica: A slaved copy of data ready to be promoted to master
+  -- restore: A tablet that has not been in the replication graph and is
+              restoring from a snapshot. Typically, a tablet progresses from
+              the idle state to the restore state
+              and then to the spare state.
+  -- schema_apply: A slaved copy of data that had been serving query traffic
+                   but that is now applying a schema change. Following the
+                   change, the tablet will revert to its serving type.
+  -- scrap: A tablet that contains data that needs to be wiped.
+ -- snapshot_source: A slaved copy of data where mysqld is not + running and where Vitess is serving data files to + clone slaves. Use this command to enter this mode: +