Added option to configure number of worker instances and to set SPARK_MASTER_OPTS

Depends on: https://github.com/mesos/spark-ec2/pull/46

Author: Allan Douglas R. de Oliveira <allan@chaordicsystems.com>

Closes #612 from douglaz/ec2_configurable_workers and squashes the following commits:

d6c5d65 [Allan Douglas R. de Oliveira] Added master opts parameter
6c34671 [Allan Douglas R. de Oliveira] Use number of worker instances as string on template
ba528b9 [Allan Douglas R. de Oliveira] Added SPARK_WORKER_INSTANCES parameter
Allan Douglas R. de Oliveira 2014-05-03 16:52:19 -07:00, committed by Patrick Wendell
Parent 34719ba32e
Commit 4669a84ab1
2 changed files with 12 additions and 2 deletions


@@ -28,3 +28,5 @@ export SPARK_VERSION="{{spark_version}}"
 export SHARK_VERSION="{{shark_version}}"
 export HADOOP_MAJOR_VERSION="{{hadoop_major_version}}"
 export SWAP_MB="{{swap}}"
+export SPARK_WORKER_INSTANCES="{{spark_worker_instances}}"
+export SPARK_MASTER_OPTS="{{spark_master_opts}}"
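
These two exports mirror Spark's standard standalone-mode settings: SPARK_WORKER_INSTANCES sets how many Worker processes each slave node runs, and SPARK_MASTER_OPTS carries extra JVM options for the master daemon. The scripts in the mesos/spark-ec2 pull request this change depends on are expected to read them during cluster setup; the {{...}} placeholders themselves are filled in by deploy_files in spark_ec2.py, shown in the last hunk below.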


@@ -103,6 +103,12 @@ def parse_args():
       help="When destroying a cluster, delete the security groups that were created")
   parser.add_option("--use-existing-master", action="store_true", default=False,
       help="Launch fresh slaves, but use an existing stopped master if possible")
+  parser.add_option("--worker-instances", type="int", default=1,
+      help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: 1)")
+  parser.add_option("--master-opts", type="string", default="",
+      help="Extra options to give to master through SPARK_MASTER_OPTS variable (e.g -Dspark.worker.timeout=180)")
   (opts, args) = parser.parse_args()
   if len(args) != 2:
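
As a quick, self-contained sketch of how the two new options behave (this is not part of the commit; the command line and cluster name below are made up for illustration):

    from optparse import OptionParser

    # Hypothetical invocation being parsed:
    #   ./spark_ec2.py --worker-instances 2 --master-opts="-Dspark.worker.timeout=180" launch my-cluster
    parser = OptionParser()
    parser.add_option("--worker-instances", type="int", default=1)
    parser.add_option("--master-opts", type="string", default="")
    (opts, args) = parser.parse_args(
        ["--worker-instances", "2",
         "--master-opts=-Dspark.worker.timeout=180",
         "launch", "my-cluster"])
    print opts.worker_instances  # 2 (coerced to int by type="int")
    print opts.master_opts       # -Dspark.worker.timeout=180
    print args                   # ['launch', 'my-cluster']

Leaving both options off keeps the old behavior: one worker instance per node and an empty SPARK_MASTER_OPTS.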
@@ -223,7 +229,7 @@ def launch_cluster(conn, opts, cluster_name):
     sys.exit(1)
   if opts.key_pair is None:
     print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
-    sys.exit(1)
+    sys.exit(1)
   print "Setting up security groups..."
   master_group = get_or_make_group(conn, cluster_name + "-master")
   slave_group = get_or_make_group(conn, cluster_name + "-slaves")
@@ -551,7 +557,9 @@ def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
     "modules": '\n'.join(modules),
     "spark_version": spark_v,
     "shark_version": shark_v,
-    "hadoop_major_version": opts.hadoop_major_version
+    "hadoop_major_version": opts.hadoop_major_version,
+    "spark_worker_instances": "%d" % opts.worker_instances,
+    "spark_master_opts": opts.master_opts
   }
   # Create a temp directory in which we will place all the files to be
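
For context on where these values end up: deploy_files renders the deploy templates by substituting each {{key}} placeholder with the matching template_vars entry, so the ec2-variables.sh template from the first hunk comes out with concrete exports. Note that worker_instances is formatted as a string ("%d") before substitution, per the second squashed commit. A simplified sketch of that moustache-style substitution (not the exact deploy_files code; the sample values are made up):

    # Each "{{key}}" placeholder is replaced with its template_vars entry.
    template_vars = {
        "spark_worker_instances": "%d" % 2,                  # from opts.worker_instances
        "spark_master_opts": "-Dspark.worker.timeout=180",   # from opts.master_opts
    }

    lines = ['export SPARK_WORKER_INSTANCES="{{spark_worker_instances}}"',
             'export SPARK_MASTER_OPTS="{{spark_master_opts}}"']
    text = "\n".join(lines)
    for key, value in template_vars.items():
        text = text.replace("{{" + key + "}}", value)
    print text
    # export SPARK_WORKER_INSTANCES="2"
    # export SPARK_MASTER_OPTS="-Dspark.worker.timeout=180"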