-
Notifications
You must be signed in to change notification settings - Fork 27
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
6 changed files
with
173 additions
and
20 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,50 @@ | ||
#!/usr/bin/env bash | ||
################################################################################ | ||
# myspark.sh - start or stop a standalone Spark cluster. Cannot use the control | ||
# scripts provided with Spark 0.9.0 due to various bugs and lack of | ||
# functionality necessary to create transient Spark clusters. | ||
# | ||
# Glenn K. Lockwood, San Diego Supercomputer Center April 2014 | ||
################################################################################ | ||
|
||
#######################################
# Print a prefixed status message.
# Arguments: message text; all arguments are joined into one line.
#            Backslash escapes such as \n are expanded (%b), because
#            callers pass messages containing literal "\n".
# Outputs:   writes "mySpark: <message>" to stdout
#######################################
# Fix: the original used `echo -e "mySpark: $@"` — `$@` inside a string
# should be `$*` (join into one word), and `echo -e` is non-portable;
# printf's %b directive gives the same escape expansion reliably.
function mh_print {
    printf 'mySpark: %b\n' "$*"
}
|
||
if [ "z$1" == "zstart" ]; then | ||
action="Starting" | ||
elif [ "z$1" == "zstop" ]; then | ||
action="Stopping" | ||
else | ||
echo "Syntax: $0 <start|stop>" >&2 | ||
exit 1 | ||
fi | ||
|
||
### Establish the slaves file containing our worker nodes.  Honor a
### SPARK_SLAVES already set (and non-empty) in the environment; otherwise
### fall back to the copy shipped in $SPARK_CONF_DIR.
SPARK_SLAVES=${SPARK_SLAVES:-$SPARK_CONF_DIR/slaves}
mh_print "Using $SPARK_SLAVES as our slaves file"
|
||
### Read in our cluster's unique configuration variables (e.g.
### SPARK_MASTER_IP / SPARK_MASTER_PORT used when building the worker
### command below).
# Fix: the status message previously said "as our slaves file" — a
# copy-paste of the message above; this is the environment file.  Also
# quote the sourced path so a space in SPARK_CONF_DIR cannot split it.
source "$SPARK_CONF_DIR/spark-env.sh"
mh_print "Reading in $SPARK_CONF_DIR/spark-env.sh as our environment file"
|
||
### mySpark does not currently support multiple worker instances per node.
### Select the master-node command (cmd_ma) and the per-slave command
### (cmd_sl) for the action chosen earlier; bail out defensively if
### $action somehow holds anything else.
case "$action" in
    Starting)
        cmd_ma="$SPARK_HOME/sbin/start-master.sh"
        cmd_sl="$SPARK_HOME/sbin/spark-daemon.sh --config $SPARK_CONF_DIR start org.apache.spark.deploy.worker.Worker 1 spark://$SPARK_MASTER_IP:$SPARK_MASTER_PORT"
        ;;
    Stopping)
        cmd_ma="$SPARK_HOME/sbin/stop-master.sh"
        cmd_sl="$SPARK_HOME/sbin/spark-daemon.sh --config $SPARK_CONF_DIR stop org.apache.spark.deploy.worker.Worker 1"
        ;;
    *)
        exit 1
        ;;
esac
|
||
### Apply action across master + slave nodes: run the master command
### locally, then ssh into each unique slave and run the worker command.
# $cmd_ma is deliberately unquoted — it is a command string that must
# undergo word-splitting to execute.
$cmd_ma
# Fix: quote "$SPARK_SLAVES" and "$slave" (SC2086) so a path or hostname
# containing whitespace/glob characters cannot be mangled.  The for-loop
# (rather than a while-read pipeline) is kept so ssh does not consume the
# loop's stdin.
for slave in $(sort -u -- "$SPARK_SLAVES")
do
    mh_print "$action worker on $slave:\n $cmd_sl"
    ssh "$slave" "$cmd_sl"
done
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,66 @@ | ||
#!/usr/bin/env bash
################################################################################
# etc/myhadoop.conf - a list of environment variables that can be set system-
#   wide to alter the default behavior of a myHadoop installation.  These will
#   be overridden by anything provided by the user as a myHadoop option when
#   myhadoop-configure.sh is executed.  However these will NOT be overridden
#   by anything in the user's environment.
#
#   This particular configuration is designed for FutureGrid's Sierra resource
#
#   Glenn K. Lockwood, San Diego Supercomputer Center             April 2014
################################################################################

################################################################################
#  Variable: HADOOP_HOME
#  Command-line override: -h
#
#  This is the base installation of Hadoop on the system.  Note that if
#  this is defined here, it will override any HADOOP_HOME that may exist in
#  the user's environment, effectively railroading the user into using a
#  specific Hadoop version when using this installation of myHadoop unless
#  they override on the command line.
#
HADOOP_HOME=/N/u/glock/apps/hadoop-2.2.0

################################################################################
#  Variable: MH_IPOIB_TRANSFORM
#  Command-line override: -i
#
#  This is the regex substitution to be applied to all of the hosts in the node
#  list before constructing the masters/slaves list.  For example, if
#  "node-0-1" can be accessed via IP over InfiniBand by "node-0-1.ibnet0",
#  the transform would be "s/$/.ibnet0/"
#
#  NOTE(review): the doubled backslashes below would reach sed literally as
#  "\\(" rather than "\(" since this is a single-quoted string — confirm
#  whether "s/\([^.]*\).*$/\1ib/" was intended.
MH_IPOIB_TRANSFORM='s/\\([^.]*\\).*$/\\1ib/'

################################################################################
#  Variable: MH_SCRATCH_DIR
#  Command-line override: -s
#
#  This is the location of the node-local scratch space for a system.  You
#  may include variables such as $USER and $SLURM_JOBID which will be evaluated
#  within the context of the user's myHadoop execution environment.  This is
#  normally defined using the "-s" option when calling myhadoop-configure.
#
MH_SCRATCH_DIR=/scratch/$USER/$PBS_JOBID

################################################################################
#  Variable: MH_PERSIST_DIR
#  Command-line override: -p
#
#  This specifies the location of a shared filesystem on which persistent
#  myHadoop instantiations should be stored when myhadoop-configure is called
#  in persistent mode.  This is normally specified with the "-p" option when
#  running myhadoop-configure.  NOTE THAT IF YOU SET THIS, ALL JOBS WILL BE
#  RUN IN PERSISTENT MODE unless the user explicitly requests -p ''
#
#MH_PERSIST_DIR=

################################################################################
#  Variable: HADOOP_CONF_DIR
#  Command-line override: -c
#
#  This is the location of the user's per-job Hadoop configuration directory.
#
#HADOOP_CONF_DIR=$HOME/hadoop-conf.$(cut -d. -f1 <<< $PBS_JOBID)