Mirror of https://github.com/microsoft/spark.git
165 lines
5.3 KiB
Bash
Executable File
#!/bin/bash

SCALA_VERSION=2.9.2

# Figure out where the Scala framework is installed
FWDIR="$(cd "$(dirname "$0")"; pwd)"

# Export this as SPARK_HOME
export SPARK_HOME="$FWDIR"

# Load environment variables from conf/spark-env.sh, if it exists
if [ -e "$FWDIR/conf/spark-env.sh" ] ; then
  . "$FWDIR/conf/spark-env.sh"
fi
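
# For example, a minimal conf/spark-env.sh might contain (illustrative values;
# all of these variables are read further down in this script):
#
#   export SCALA_HOME=/usr/local/scala-2.9.2
#   export SPARK_MEM=1g
#   export SPARK_DAEMON_MEMORY=512m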

if [ -z "$1" ]; then
  echo "Usage: run <spark-class> [<args>]" >&2
  exit 1
fi
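
# For example (the class name is illustrative; any main class on the Spark
# classpath works):
#
#   ./run spark.examples.SparkPi local[2]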

# If this is a standalone cluster daemon, reset SPARK_JAVA_OPTS and SPARK_MEM to
# reasonable values for that role; a daemon doesn't need much memory
if [ "$1" = "spark.deploy.master.Master" -o "$1" = "spark.deploy.worker.Worker" ]; then
  SPARK_MEM=${SPARK_DAEMON_MEMORY:-512m}
  SPARK_JAVA_OPTS=$SPARK_DAEMON_JAVA_OPTS # Empty by default
fi
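
# (For example, exporting SPARK_DAEMON_MEMORY=1g in conf/spark-env.sh gives the
# master and worker daemons a 1 GB heap instead of the 512 MB default above.)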

# Add java opts for the master, worker, and executor. The opts may be empty
case "$1" in
  'spark.deploy.master.Master')
    SPARK_JAVA_OPTS+=" $SPARK_MASTER_OPTS"
    ;;
  'spark.deploy.worker.Worker')
    SPARK_JAVA_OPTS+=" $SPARK_WORKER_OPTS"
    ;;
  'spark.executor.StandaloneExecutorBackend')
    SPARK_JAVA_OPTS+=" $SPARK_EXECUTOR_OPTS"
    ;;
  'spark.executor.MesosExecutorBackend')
    SPARK_JAVA_OPTS+=" $SPARK_EXECUTOR_OPTS"
    ;;
  'spark.repl.Main')
    SPARK_JAVA_OPTS+=" $SPARK_REPL_OPTS"
    ;;
esac
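
# (For example, exporting SPARK_WORKER_OPTS="-Dspark.worker.timeout=60" in
# conf/spark-env.sh adds that flag only when this script launches
# spark.deploy.worker.Worker; the property name is illustrative.)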

if [ "$SPARK_LAUNCH_WITH_SCALA" == "1" ]; then
  if [ -n "$(command -v scala)" ]; then
    RUNNER="scala"
  else
    if [ -z "$SCALA_HOME" ]; then
      echo "SCALA_HOME is not set" >&2
      exit 1
    fi
    RUNNER="${SCALA_HOME}/bin/scala"
  fi
else
  if [ -n "$(command -v java)" ]; then
    RUNNER="java"
  else
    if [ -z "$JAVA_HOME" ]; then
      echo "JAVA_HOME is not set" >&2
      exit 1
    fi
    RUNNER="${JAVA_HOME}/bin/java"
  fi
  if [ -z "$SCALA_LIBRARY_PATH" ]; then
    if [ -z "$SCALA_HOME" ]; then
      echo "SCALA_HOME is not set" >&2
      exit 1
    fi
    SCALA_LIBRARY_PATH="$SCALA_HOME/lib"
  fi
fi

# Figure out how much memory to use per executor and set it as an environment
# variable so that our process sees it and can report it to Mesos
if [ -z "$SPARK_MEM" ] ; then
  SPARK_MEM="512m"
fi
export SPARK_MEM
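
# (For example, `SPARK_MEM=2g ./run <spark-class>` gives the launched JVM a
# 2 GB heap via the -Xms/-Xmx flags assembled below.)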

# Set JAVA_OPTS to be able to load native libraries and to set heap size
JAVA_OPTS="$SPARK_JAVA_OPTS"
JAVA_OPTS+=" -Djava.library.path=$SPARK_LIBRARY_PATH"
JAVA_OPTS+=" -Xms$SPARK_MEM -Xmx$SPARK_MEM"
# Load extra JAVA_OPTS from conf/java-opts, if it exists
if [ -e "$FWDIR/conf/java-opts" ] ; then
  JAVA_OPTS+=" $(cat "$FWDIR/conf/java-opts")"
fi
export JAVA_OPTS
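
# (For example, a conf/java-opts file containing `-verbose:gc -XX:+PrintGCDetails`
# appends those JVM flags verbatim; the flags shown are illustrative.)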

CORE_DIR="$FWDIR/core"
REPL_DIR="$FWDIR/repl"
EXAMPLES_DIR="$FWDIR/examples"
BAGEL_DIR="$FWDIR/bagel"
STREAMING_DIR="$FWDIR/streaming"
PYSPARK_DIR="$FWDIR/python"

# Exit if the user hasn't compiled Spark
if [ ! -e "$REPL_DIR/target" ]; then
  echo "Failed to find Spark classes in $REPL_DIR/target" >&2
  echo "You need to compile Spark before running this program" >&2
  exit 1
fi

# Build up classpath
CLASSPATH="$SPARK_CLASSPATH"
CLASSPATH+=":$FWDIR/conf"
CLASSPATH+=":$CORE_DIR/target/scala-$SCALA_VERSION/classes"
if [ -n "$SPARK_TESTING" ] ; then
  CLASSPATH+=":$CORE_DIR/target/scala-$SCALA_VERSION/test-classes"
  CLASSPATH+=":$STREAMING_DIR/target/scala-$SCALA_VERSION/test-classes"
fi
CLASSPATH+=":$CORE_DIR/src/main/resources"
CLASSPATH+=":$REPL_DIR/target/scala-$SCALA_VERSION/classes"
CLASSPATH+=":$EXAMPLES_DIR/target/scala-$SCALA_VERSION/classes"
CLASSPATH+=":$STREAMING_DIR/target/scala-$SCALA_VERSION/classes"
CLASSPATH+=":$STREAMING_DIR/lib/org/apache/kafka/kafka/0.7.2-spark/*" # <-- our in-project Kafka Jar
if [ -e "$FWDIR/lib_managed" ]; then
  CLASSPATH+=":$FWDIR/lib_managed/jars/*"
  CLASSPATH+=":$FWDIR/lib_managed/bundles/*"
fi
CLASSPATH+=":$REPL_DIR/lib/*"
if [ -e "$FWDIR/repl-bin/target" ]; then
  for jar in $(find "$FWDIR/repl-bin/target" -name 'spark-repl-*-shaded-hadoop*.jar'); do
    CLASSPATH+=":$jar"
  done
fi
CLASSPATH+=":$BAGEL_DIR/target/scala-$SCALA_VERSION/classes"
for jar in $(find "$PYSPARK_DIR/lib" -name '*.jar'); do
  CLASSPATH+=":$jar"
done
export CLASSPATH # Needed for spark-shell
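
# (To inspect the result, `echo "$CLASSPATH" | tr ':' '\n'` prints one entry
# per line.)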

# Figure out the JAR file that our examples were packaged into. This includes a bit of a hack
# to avoid the -sources and -doc packages that are built by publish-local.
if [ -e "$EXAMPLES_DIR/target/scala-$SCALA_VERSION/spark-examples"*[0-9T].jar ]; then
  # Use the JAR from the SBT build
  export SPARK_EXAMPLES_JAR=$(ls "$EXAMPLES_DIR/target/scala-$SCALA_VERSION/spark-examples"*[0-9T].jar)
fi
if [ -e "$EXAMPLES_DIR/target/spark-examples-"*hadoop[12].jar ]; then
  # Use the JAR from the Maven build
  export SPARK_EXAMPLES_JAR=$(ls "$EXAMPLES_DIR/target/spark-examples-"*hadoop[12].jar)
fi
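
# (In the globs above, [0-9T] matches names ending in a digit or 'T', i.e.
# version- or timestamp-suffixed JARs, which excludes the -sources and -doc
# JARs mentioned above; hadoop[12] picks the Hadoop 1 or Hadoop 2 variant.)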

# Figure out whether to run our class with java or with the scala launcher.
# In most cases, we'd prefer to execute our process with java because scala
# creates a shell script as the parent of its Java process, which makes it
# hard to kill the child with stuff like Process.destroy(). However, for
# the Spark shell, the wrapper is necessary to properly reset the terminal
# when we exit, so we allow it to set a variable to launch with scala.
if [ "$SPARK_LAUNCH_WITH_SCALA" == "1" ]; then
  EXTRA_ARGS="" # Java options will be passed to scala as JAVA_OPTS
else
  CLASSPATH+=":$SCALA_LIBRARY_PATH/scala-library.jar"
  CLASSPATH+=":$SCALA_LIBRARY_PATH/scala-compiler.jar"
  CLASSPATH+=":$SCALA_LIBRARY_PATH/jline.jar"
  # The JVM doesn't read JAVA_OPTS by default so we need to pass it in
  EXTRA_ARGS="$JAVA_OPTS"
fi
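
# (For example, a REPL wrapper script would run `SPARK_LAUNCH_WITH_SCALA=1 ./run
# spark.repl.Main` so the shell gets the scala launcher and its terminal reset.)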

exec "$RUNNER" -cp "$CLASSPATH" $EXTRA_ARGS "$@"