Speed up HybridScanForDeltaLakeTest (#326)

This commit is contained in:
Terry Kim 2021-01-19 20:47:48 -08:00 committed by GitHub
Parent efe33ec596
Commit 39781c5eb0
No known key found for this signature
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 12 additions and 8 deletions

View File

@@ -92,6 +92,10 @@ fork in Test := true
javaOptions in Test ++= Seq(
"-Dspark.ui.enabled=false",
"-Dspark.ui.showConsoleProgress=false",
"-Dspark.databricks.delta.snapshotPartitions=2",
"-Dspark.sql.shuffle.partitions=5",
"-Ddelta.log.cacheSize=3",
"-Dspark.sql.sources.parallelPartitionDiscovery.parallelism=5",
"-Xmx1024m"
)
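
For context, a minimal sketch of how the -D flags above take effect (illustrative only, not part of this commit; the object name and master URL are assumptions): fork in Test := true makes sbt launch a separate JVM for tests with these options, Spark folds every spark.* system property into its default configuration, and Delta Lake reads delta.log.cacheSize directly as a plain Java system property.

import org.apache.spark.sql.SparkSession

// Illustrative sketch: the forked test JVM is started with the -D flags from build.sbt,
// so the session below picks up those values without any explicit .config(...) calls.
object JavaOptionsCheck {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("javaOptions-check")
      .getOrCreate()

    // Expected to print 5 when the JVM was launched with -Dspark.sql.shuffle.partitions=5.
    println(spark.conf.get("spark.sql.shuffle.partitions"))

    // delta.log.cacheSize is not a spark.* conf; Delta Lake reads it as a Java system
    // property, so it only affects suites that actually open a DeltaLog.
    println(sys.props.get("delta.log.cacheSize"))

    spark.stop()
  }
}

Fewer shuffle partitions and Delta snapshot partitions mean far fewer tasks per stage on the tiny datasets used in tests, which is the likely source of the speedup this commit targets.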

View File

@@ -91,12 +91,12 @@ class ExplainTest extends SparkFunSuite with HyperspaceSuite {
*=============================================================
* SortMergeJoin [Col1#11], [Col1#21], Inner
* <----:- *(2) Sort [Col1#11 ASC NULLS FIRST], false, 0---->
- * <----: +- Exchange hashpartitioning(Col1#11, 200)---->
+ * <----: +- Exchange hashpartitioning(Col1#11, 5)---->
* <----: +- *(1) Project [Col1#11, Col2#12]---->
* <----: +- *(1) Filter isnotnull(Col1#11)---->
* <----: +- *(1) FileScan parquet [Col1#11,Col2#12] Batched: true, Format: Parquet, Location: InMemoryFileIndex[src/test/resources/sampleparquet], PartitionFilters: [], PushedFilters: [IsNotNull(Col1)], ReadSchema: struct<Col1:string,Col2:int>---->
* <----+- *(4) Sort [Col1#21 ASC NULLS FIRST], false, 0---->
- * <----+- ReusedExchange [Col1#21, Col2#22], Exchange hashpartitioning(Col1#11, 200)---->
+ * <----+- ReusedExchange [Col1#21, Col2#22], Exchange hashpartitioning(Col1#11, 5)---->
*
*=============================================================
*Indexes used:
@@ -163,7 +163,7 @@ class ExplainTest extends SparkFunSuite with HyperspaceSuite {
.append(defaultDisplayMode.newLine)
.append("<----:- *(2) Sort [Col1#11 ASC NULLS FIRST], false, 0---->")
.append(defaultDisplayMode.newLine)
.append("<----: +- Exchange hashpartitioning(Col1#11, 200)---->")
.append("<----: +- Exchange hashpartitioning(Col1#11, 5)---->")
.append(defaultDisplayMode.newLine)
.append("<----: +- *(1) Project [Col1#11, Col2#12]---->")
.append(defaultDisplayMode.newLine)
@@ -175,7 +175,7 @@ class ExplainTest extends SparkFunSuite with HyperspaceSuite {
.append(defaultDisplayMode.newLine)
.append("<----+- *(4) Sort [Col1#21 ASC NULLS FIRST], false, 0---->")
.append(defaultDisplayMode.newLine)
.append(" <----+- ReusedExchange [Col1#21, Col2#22], Exchange hashpartitioning(Col1#11, 200)---->")
.append(" <----+- ReusedExchange [Col1#21, Col2#22], Exchange hashpartitioning(Col1#11, 5)---->")
.append(defaultDisplayMode.newLine)
.append(defaultDisplayMode.newLine)
.append("=============================================================")
@@ -465,12 +465,12 @@ class ExplainTest extends SparkFunSuite with HyperspaceSuite {
*=============================================================
* SortMergeJoin [Col1#11], [Col1#21], Inner
* <----:- *(2) Sort [Col1#11 ASC NULLS FIRST], false, 0---->
- * <----: +- Exchange hashpartitioning(Col1#11, 200)---->
+ * <----: +- Exchange hashpartitioning(Col1#11, 5)---->
* <----: +- *(1) Project [Col1#11, Col2#12]---->
* <----: +- *(1) Filter isnotnull(Col1#11)---->
* <----: +- *(1) FileScan parquet [Col1#11,Col2#12] Batched: true, Format: Parquet, Location: InMemoryFileIndex[src/test/resources/sampleparquet], PartitionFilters: [], PushedFilters: [IsNotNull(Col1)], ReadSchema: struct<Col1:string,Col2:int>---->
* <----+- *(4) Sort [Col1#21 ASC NULLS FIRST], false, 0---->
- * <----+- ReusedExchange [Col1#21, Col2#22], Exchange hashpartitioning(Col1#11, 200)---->
+ * <----+- ReusedExchange [Col1#21, Col2#22], Exchange hashpartitioning(Col1#11, 5)---->
*
*=============================================================
*Indexes used:
@@ -537,7 +537,7 @@ class ExplainTest extends SparkFunSuite with HyperspaceSuite {
.append(defaultDisplayMode.newLine)
.append("<----:- *(2) Sort [Col1#11 ASC NULLS FIRST], false, 0---->")
.append(defaultDisplayMode.newLine)
.append("<----: +- Exchange hashpartitioning(Col1#11, 200)---->")
.append("<----: +- Exchange hashpartitioning(Col1#11, 5)---->")
.append(defaultDisplayMode.newLine)
.append("<----: +- *(1) Project [Col1#11, Col2#12]---->")
.append(defaultDisplayMode.newLine)
@@ -549,7 +549,7 @@ class ExplainTest extends SparkFunSuite with HyperspaceSuite {
.append(defaultDisplayMode.newLine)
.append("<----+- *(4) Sort [Col1#21 ASC NULLS FIRST], false, 0---->")
.append(defaultDisplayMode.newLine)
.append(" <----+- ReusedExchange [Col1#21, Col2#22], Exchange hashpartitioning(Col1#11, 200)---->")
.append(" <----+- ReusedExchange [Col1#21, Col2#22], Exchange hashpartitioning(Col1#11, 5)---->")
.append(defaultDisplayMode.newLine)
.append(defaultDisplayMode.newLine)
.append("=============================================================")