Mirror of https://github.com/microsoft/spark.git

Minor formatting fixes

This commit is contained in:
Parent: 70f02fa912
Commit: 716e10ca32
@@ -42,7 +42,7 @@ class HadoopWriter(@transient jobConf: JobConf) extends Logging with Serializable
   setConfParams()
 
   val jCtxt = getJobContext()
-  getOutputCommitter().setupJob(jCtxt)
+  getOutputCommitter().setupJob(jCtxt)
 }
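For context, getOutputCommitter().setupJob(jCtxt) is the first step of
Hadoop's output-commit protocol, which HadoopWriter drives around each
write. A minimal sketch of that lifecycle follows; LoggingCommitter is a
made-up stub, not Spark or Hadoop code, and assumes only Hadoop's old
mapred API on the classpath.

import org.apache.hadoop.mapred.{JobContext, OutputCommitter, TaskAttemptContext}

// Stub committer that traces the hooks a writer invokes:
// setupJob once per job, then setupTask/commitTask per task attempt.
class LoggingCommitter extends OutputCommitter {
  // Called once, before any task writes output (the setupJob call above).
  override def setupJob(ctx: JobContext): Unit = println("setupJob")
  // Called once per task attempt, before it writes.
  override def setupTask(ctx: TaskAttemptContext): Unit = println("setupTask")
  // Asks whether this attempt produced output worth promoting.
  override def needsTaskCommit(ctx: TaskAttemptContext): Boolean = true
  // Promotes a successful attempt's output to the final location.
  override def commitTask(ctx: TaskAttemptContext): Unit = println("commitTask")
  // Discards a failed attempt's output.
  override def abortTask(ctx: TaskAttemptContext): Unit = println("abortTask")
}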
@@ -147,7 +147,6 @@ private[spark] class ShuffleMapTask(
       val blockId = "shuffle_" + dep.shuffleId + "_" + partition + "_" + i
       // Get a scala iterator from java map
       val iter: Iterator[(Any, Any)] = bucketIterators(i)
-      // TODO: This should probably be DISK_ONLY
       blockManager.put(blockId, iter, StorageLevel.DISK_ONLY, false)
     }
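The blockId string above encodes which shuffle, which map partition, and
which reduce bucket a stored block belongs to. A minimal sketch of that
naming scheme; shuffleBlockId is a hypothetical helper, not Spark API:

// Mirrors the "shuffle_<shuffleId>_<mapPartition>_<bucket>" scheme above.
def shuffleBlockId(shuffleId: Int, partition: Int, bucket: Int): String =
  "shuffle_" + shuffleId + "_" + partition + "_" + bucket

// Example: map partition 5 of shuffle 2, reduce bucket 0.
println(shuffleBlockId(2, 5, 0))  // prints "shuffle_2_5_0"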
@@ -119,7 +119,7 @@ class AccumulatorSuite extends FunSuite with ShouldMatchers with BeforeAndAfter
     import SetAccum._
     val maxI = 1000
     for (nThreads <- List(1, 10)) { //test single & multi-threaded
-      sc = new SparkContext("local[" + nThreads + "]", "test")
+      sc = new SparkContext("local[" + nThreads + "]", "test")
       val acc: Accumulable[mutable.Set[Any], Any] = sc.accumulable(new mutable.HashSet[Any]())
       val groupedInts = (1 to (maxI/20)).map {x => (20 * (x - 1) to 20 * x).toSet}
       val d = sc.parallelize(groupedInts)
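The test depends on an implicit AccumulableParam, pulled in by
import SetAccum._, which tells Spark how to add one element to a set and
how to merge partial sets from different tasks. Below is a minimal sketch
of such a param, assuming this era's spark.AccumulableParam trait with
addAccumulator/addInPlace/zero; the object and method bodies are
illustrative, not the suite's actual SetAccum:

import scala.collection.mutable
import spark.AccumulableParam

object SetAccumSketch {
  // Illustrative stand-in for whatever SetAccum provides.
  implicit object SetParam extends AccumulableParam[mutable.Set[Any], Any] {
    // Fold one element into a task-local set.
    def addAccumulator(acc: mutable.Set[Any], elem: Any): mutable.Set[Any] = {
      acc += elem
      acc
    }
    // Merge two partial sets as task results reach the driver.
    def addInPlace(a: mutable.Set[Any], b: mutable.Set[Any]): mutable.Set[Any] = {
      a ++= b
      a
    }
    // The empty value accumulation starts from.
    def zero(initial: mutable.Set[Any]): mutable.Set[Any] = new mutable.HashSet[Any]()
  }
}

// Usage mirrors the test: import SetAccumSketch._, after which
// sc.accumulable(new mutable.HashSet[Any]()) picks up SetParam implicitly.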