diff --git a/core/src/main/scala/spark/HadoopWriter.scala b/core/src/main/scala/spark/HadoopWriter.scala
index 12b6a0954c..ebb51607e6 100644
--- a/core/src/main/scala/spark/HadoopWriter.scala
+++ b/core/src/main/scala/spark/HadoopWriter.scala
@@ -42,7 +42,7 @@ class HadoopWriter(@transient jobConf: JobConf) extends Logging with Serializabl
     setConfParams()
     val jCtxt = getJobContext()
-    getOutputCommitter().setupJob(jCtxt) 
+    getOutputCommitter().setupJob(jCtxt)
   }
diff --git a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala
index 966a5e173a..387aac3c1f 100644
--- a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala
+++ b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala
@@ -147,7 +147,6 @@ private[spark] class ShuffleMapTask(
        val blockId = "shuffle_" + dep.shuffleId + "_" + partition + "_" + i
        // Get a scala iterator from java map
        val iter: Iterator[(Any, Any)] = bucketIterators(i)
-       // TODO: This should probably be DISK_ONLY
        blockManager.put(blockId, iter, StorageLevel.DISK_ONLY, false)
      }
diff --git a/core/src/test/scala/spark/AccumulatorSuite.scala b/core/src/test/scala/spark/AccumulatorSuite.scala
index 403e675f37..b43730468e 100644
--- a/core/src/test/scala/spark/AccumulatorSuite.scala
+++ b/core/src/test/scala/spark/AccumulatorSuite.scala
@@ -119,7 +119,7 @@ class AccumulatorSuite extends FunSuite with ShouldMatchers with BeforeAndAfter
     import SetAccum._
     val maxI = 1000
     for (nThreads <- List(1, 10)) { //test single & multi-threaded
-      sc = new SparkContext("local[" + nThreads + "]", "test") 
+      sc = new SparkContext("local[" + nThreads + "]", "test")
       val acc: Accumulable[mutable.Set[Any], Any] = sc.accumulable(new mutable.HashSet[Any]())
       val groupedInts = (1 to (maxI/20)).map {x => (20 * (x - 1) to 20 * x).toSet}
       val d = sc.parallelize(groupedInts)