Commit fee3a90f authored by Steven P. Hamilton

Updating to jsrun.

parent 8d93e37d
@@ -3,7 +3,7 @@
 #BSUB -J __jobname__
 #BSUB -o __resultsdir__/__jobname__.o%J
 #BSUB -e __resultsdir__/__jobname__.e%J
-#BSUB -n __numberprocessors__
+#BSUB -nnodes __numbernodes__
 #BSUB -W __walltime__
 #BSUB -P __projectid__
@@ -13,11 +13,10 @@
 #-----------------------------------------------------
 source __rgtenvironmentalfile__
 module load __nccstestharnessmodule__
-module load gcc
+module load gcc/4.8.5
 module load python/3.5.2
 module load py-h5py/2.6.0-py3
-export PYTHONPATH=/autofs/nccs-svm1_sw/summitdev/.swci/1-compute/opt/spack/20170405/linux-rhel7-ppc64le/gcc-4.8.5/py-h5py-2.6.0-xypyoyacn6tdvaq4grvk2hvcijlkik45/lib/python3.5/site-packages:$PYTHONPATH
-module load spectrum_mpi
+module load spectrum-mpi
 module list
 #-----------------------------------------------------
@@ -31,6 +30,7 @@ RESULTSDIR="__resultsdir__"
 UNIQUE_ID_STRING="__unique_id_string__"
 INPUTDIR=${STARTINGDIRECTORY}/../Inputs
 NUMPROCS="__numberprocessors__"
+NUM_NODES="__numbernodes__"
 echo "EXECUTABLE"
 echo ${EXECUTABLE}
@@ -44,6 +44,8 @@ echo "STARTINGDIRECTORY"
 echo ${STARTINGDIRECTORY}
 echo "NUMPROCS"
 echo ${NUMPROCS}
+echo "NUM_NODES"
+echo ${NUM_NODES}
 #-----------------------------------------------------
 # Ensure that we are in the correct starting -
......
@@ -109,12 +109,13 @@ def make_batch_script(batch_recursive_mode,path_to_workspace,test_id_string,sche
     workdir = os.path.join(path_to_workspace,"workdir")
     resubmitme = batch_recursive_mode
     walltime = "01:00"
-    joblaunchcommand = "mpirun --oversubscribe -np ${NUMPROCS} ${EXECUTABLE} -i c5g7_3d_cpu_1node.xml | tee stdout.txt"
+    joblaunchcommand = "jsrun --nrs ${NUM_NODES} --cpu_per_rs 20 --tasks_per_rs 160 --rs_per_host 1 -E LD_LIBRARY_PATH ${EXECUTABLE} -i c5g7_3d_cpu_1node.xml | tee stdout.txt"
     rg_array = [
         (re.compile("__jobname__"),jobname),
         (re.compile("__walltime__"),walltime),
         (re.compile("__numberprocessors__"),ranks),
+        (re.compile("__numbernodes__"),nodes),
         (re.compile("__nccstestharnessmodule__"),nccstestharnessmodule),
         (re.compile("__rgtenvironmentalfile__"),rgtenvironmentalfile),
         (re.compile("__batchqueue__"),batchqueue),
@@ -223,8 +224,8 @@ def send_to_scheduler(batchfilename,scheduler):
         submit_command = "qsub "
         qcommand = submit_command + batchfilename
     elif sched_id == 1:
-        submit_command = "bsub"
-        qcommand = submit_command
+        submit_command = "bsub "
+        qcommand = submit_command + batchfilename
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
@@ -238,8 +239,8 @@ def send_to_scheduler(batchfilename,scheduler):
         p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
         (my_stdout, my_stderr) = p.communicate()
     elif sched_id == 1:
-        p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
-        (my_stdout, my_stderr) = p.communicate(input=data)
+        p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
+        (my_stdout, my_stderr) = p.communicate()
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
......
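For readers following the launch-line change above: jsrun describes a job as resource sets rather than a flat rank count, so the sketch below relates the two forms for the CPU tests. It is illustrative only and not part of the commit; the helper name is hypothetical, and it assumes one resource set per host on SummitDev-style nodes (20 cores, 160 hardware threads), matching the flags in the diff.

# Illustrative sketch (not from the commit): build the new CPU launch line.
def jsrun_cpu_command(num_nodes, executable, input_xml):
    # Total MPI ranks = nrs * tasks_per_rs = num_nodes * 160, the same
    # count the old script handed to "mpirun --oversubscribe -np ${NUMPROCS}".
    return ("jsrun --nrs {0} --cpu_per_rs 20 --tasks_per_rs 160 "
            "--rs_per_host 1 -E LD_LIBRARY_PATH {1} -i {2} "
            "| tee stdout.txt".format(num_nodes, executable, input_xml))

print(jsrun_cpu_command(1, "${EXECUTABLE}", "c5g7_3d_cpu_1node.xml"))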
@@ -3,7 +3,7 @@
 #BSUB -J __jobname__
 #BSUB -o __resultsdir__/__jobname__.o%J
 #BSUB -e __resultsdir__/__jobname__.e%J
-#BSUB -n __numberprocessors__
+#BSUB -nnodes __numbernodes__
 #BSUB -W __walltime__
 #BSUB -P __projectid__
@@ -13,11 +13,10 @@
 #-----------------------------------------------------
 source __rgtenvironmentalfile__
 module load __nccstestharnessmodule__
-module load gcc
+module load gcc/4.8.5
 module load python/3.5.2
 module load py-h5py/2.6.0-py3
-export PYTHONPATH=/autofs/nccs-svm1_sw/summitdev/.swci/1-compute/opt/spack/20170405/linux-rhel7-ppc64le/gcc-4.8.5/py-h5py-2.6.0-xypyoyacn6tdvaq4grvk2hvcijlkik45/lib/python3.5/site-packages:$PYTHONPATH
-module load spectrum_mpi
+module load spectrum-mpi
 module list
 #-----------------------------------------------------
@@ -31,6 +30,7 @@ RESULTSDIR="__resultsdir__"
 UNIQUE_ID_STRING="__unique_id_string__"
 INPUTDIR=${STARTINGDIRECTORY}/../Inputs
 NUMPROCS="__numberprocessors__"
+NUM_NODES="__numbernodes__"
 echo "EXECUTABLE"
 echo ${EXECUTABLE}
@@ -44,6 +44,8 @@ echo "STARTINGDIRECTORY"
 echo ${STARTINGDIRECTORY}
 echo "NUMPROCS"
 echo ${NUMPROCS}
+echo "NUM_NODES"
+echo ${NUM_NODES}
 #-----------------------------------------------------
 # Ensure that we are in the correct starting -
......
@@ -109,12 +109,13 @@ def make_batch_script(batch_recursive_mode,path_to_workspace,test_id_string,sche
     workdir = os.path.join(path_to_workspace,"workdir")
     resubmitme = batch_recursive_mode
     walltime = "01:00"
-    joblaunchcommand = "mpirun --oversubscribe --map-by ppr:160:node ${EXECUTABLE} -i c5g7_3d_cpu_32node.xml | tee stdout.txt"
+    joblaunchcommand = "jsrun --nrs ${NUM_NODES} --cpu_per_rs 20 --tasks_per_rs 160 --rs_per_host 1 -E LD_LIBRARY_PATH ${EXECUTABLE} -i c5g7_3d_cpu_32node.xml | tee stdout.txt"
     rg_array = [
         (re.compile("__jobname__"),jobname),
         (re.compile("__walltime__"),walltime),
         (re.compile("__numberprocessors__"),ranks),
+        (re.compile("__numbernodes__"),nodes),
         (re.compile("__nccstestharnessmodule__"),nccstestharnessmodule),
         (re.compile("__rgtenvironmentalfile__"),rgtenvironmentalfile),
         (re.compile("__batchqueue__"),batchqueue),
@@ -223,8 +224,8 @@ def send_to_scheduler(batchfilename,scheduler):
         submit_command = "qsub "
         qcommand = submit_command + batchfilename
     elif sched_id == 1:
-        submit_command = "bsub"
-        qcommand = submit_command
+        submit_command = "bsub "
+        qcommand = submit_command + batchfilename
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
@@ -238,8 +239,8 @@ def send_to_scheduler(batchfilename,scheduler):
         p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
         (my_stdout, my_stderr) = p.communicate()
     elif sched_id == 1:
-        p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
-        (my_stdout, my_stderr) = p.communicate(input=data)
+        p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
+        (my_stdout, my_stderr) = p.communicate()
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
......
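Each rg_array hunk above follows the same pattern: every batch-script placeholder (__jobname__, __numbernodes__, and so on) is paired with a compiled regex and substituted into the template text. A minimal standalone sketch of that mechanism follows; the fill_template helper and the toy template string are hypothetical, not part of the harness.

import re

# Toy template standing in for the real batch script (hypothetical content).
template = '#BSUB -nnodes __numbernodes__\nNUMPROCS="__numberprocessors__"'

nodes = "32"
ranks = str(int(nodes) * 160)    # mirrors ranks = str(int(nodes) * int(ppn))

rg_array = [
    (re.compile("__numbernodes__"), nodes),
    (re.compile("__numberprocessors__"), ranks),
]

def fill_template(text, pairs):
    # Apply each (compiled_regex, replacement) pair in turn.
    for regex, value in pairs:
        text = regex.sub(value, text)
    return text

print(fill_template(template, rg_array))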
@@ -3,7 +3,7 @@
 #BSUB -J __jobname__
 #BSUB -o __resultsdir__/__jobname__.o%J
 #BSUB -e __resultsdir__/__jobname__.e%J
-#BSUB -n __numberprocessors__
+#BSUB -nnodes __numbernodes__
 #BSUB -W __walltime__
 #BSUB -P __projectid__
@@ -13,10 +13,10 @@
 #-----------------------------------------------------
 source __rgtenvironmentalfile__
 module load __nccstestharnessmodule__
-module load gcc
+module load gcc/4.8.5
 module load python/3.5.2
-module load py-h5py/2.6.0
-module load spectrum_mpi
+module load py-h5py/2.6.0-py3
+module load spectrum-mpi
 module list
 #-----------------------------------------------------
@@ -30,6 +30,7 @@ RESULTSDIR="__resultsdir__"
 UNIQUE_ID_STRING="__unique_id_string__"
 INPUTDIR=${STARTINGDIRECTORY}/../Inputs
 NUMPROCS="__numberprocessors__"
+NUM_NODES="__numbernodes__"
 echo "EXECUTABLE"
 echo ${EXECUTABLE}
@@ -43,6 +44,8 @@ echo "STARTINGDIRECTORY"
 echo ${STARTINGDIRECTORY}
 echo "NUMPROCS"
 echo ${NUMPROCS}
+echo "NUM_NODES"
+echo ${NUM_NODES}
 #-----------------------------------------------------
 # Ensure that we are in the correct starting -
......
@@ -99,7 +99,7 @@ def make_batch_script(batch_recursive_mode,path_to_workspace,test_id_string,sche
     rgtenvironmentalfile = os.environ["RGT_ENVIRONMENTAL_FILE"]
     jobname = "profugus_cpu_short"
     nodes = "1"
-    ppn = "160"
+    ppn = "20"
     ranks = str(int(nodes) * int(ppn))
     batchqueue = "batch"
     pbsaccountid = os.environ["RGT_PBS_JOB_ACCNT_ID"]
@@ -109,11 +109,12 @@ def make_batch_script(batch_recursive_mode,path_to_workspace,test_id_string,sche
     workdir = os.path.join(path_to_workspace,"workdir")
     resubmitme = batch_recursive_mode
     walltime = "01:00"
-    joblaunchcommand = "mpirun --oversubscribe -np ${NUMPROCS} ${EXECUTABLE} -i c5g7_3d_cpu_short.xml | tee stdout.txt"
+    joblaunchcommand = "jsrun --nrs ${NUM_NODES} --cpu_per_rs 20 --tasks_per_rs 160 --rs_per_host 1 -E LD_LIBRARY_PATH ${EXECUTABLE} -i c5g7_3d_cpu_short.xml | tee stdout.txt"
     rg_array = [
         (re.compile("__jobname__"),jobname),
         (re.compile("__walltime__"),walltime),
+        (re.compile("__numbernodes__"),nodes),
         (re.compile("__numberprocessors__"),ranks),
         (re.compile("__nccstestharnessmodule__"),nccstestharnessmodule),
         (re.compile("__rgtenvironmentalfile__"),rgtenvironmentalfile),
@@ -223,8 +224,8 @@ def send_to_scheduler(batchfilename,scheduler):
         submit_command = "qsub "
         qcommand = submit_command + batchfilename
     elif sched_id == 1:
-        submit_command = "bsub"
-        qcommand = submit_command
+        submit_command = "bsub "
+        qcommand = submit_command + batchfilename
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
@@ -233,13 +234,13 @@ def send_to_scheduler(batchfilename,scheduler):
     # Execute the command as a subprocess
     args = shlex.split(qcommand)
     my_stdout = None
     my_stderr = None
     my_stderr = None
     if sched_id == 0:
         p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
         (my_stdout, my_stderr) = p.communicate()
     elif sched_id == 1:
-        p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
-        (my_stdout, my_stderr) = p.communicate(input=data)
+        p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
+        (my_stdout, my_stderr) = p.communicate()
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
......
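The send_to_scheduler hunks fix LSF submission in two places: the command now includes the batch file name ("bsub " + batchfilename, mirroring the qsub branch), and the Popen call no longer opens a stdin pipe or feeds the script via communicate(input=data). Below is a minimal sketch of the corrected path; the function name is hypothetical and error handling is omitted. (bsub can also accept a script on stdin, but the harness now uses the argument form.)

import shlex
import subprocess

def submit_lsf(batchfilename):
    # Distillation of the corrected sched_id == 1 branch: pass the script
    # path to bsub as an argument and capture stdout/stderr.
    args = shlex.split("bsub " + batchfilename)
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    my_stdout, my_stderr = p.communicate()
    return p.returncode, my_stdout, my_stderr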
@@ -3,7 +3,7 @@
 #BSUB -J __jobname__
 #BSUB -o __resultsdir__/__jobname__.o%J
 #BSUB -e __resultsdir__/__jobname__.e%J
-#BSUB -n __numberprocessors__
+#BSUB -nnodes __numbernodes__
 #BSUB -W __walltime__
 #BSUB -P __projectid__
@@ -13,11 +13,10 @@
 #-----------------------------------------------------
 source __rgtenvironmentalfile__
 module load __nccstestharnessmodule__
-module load gcc
+module load gcc/4.8.5
 module load python/3.5.2
 module load py-h5py/2.6.0-py3
-export PYTHONPATH=/autofs/nccs-svm1_sw/summitdev/.swci/1-compute/opt/spack/20170405/linux-rhel7-ppc64le/gcc-4.8.5/py-h5py-2.6.0-xypyoyacn6tdvaq4grvk2hvcijlkik45/lib/python3.5/site-packages:$PYTHONPATH
-module load spectrum_mpi
+module load spectrum-mpi
 module list
 #-----------------------------------------------------
@@ -31,6 +30,7 @@ RESULTSDIR="__resultsdir__"
 UNIQUE_ID_STRING="__unique_id_string__"
 INPUTDIR=${STARTINGDIRECTORY}/../Inputs
 NUMPROCS="__numberprocessors__"
+NUM_NODES="__numbernodes__"
 echo "EXECUTABLE"
 echo ${EXECUTABLE}
@@ -44,6 +44,8 @@ echo "STARTINGDIRECTORY"
 echo ${STARTINGDIRECTORY}
 echo "NUMPROCS"
 echo ${NUMPROCS}
+echo "NUM_NODES"
+echo ${NUM_NODES}
 #-----------------------------------------------------
 # Ensure that we are in the correct starting -
......
@@ -99,7 +99,7 @@ def make_batch_script(batch_recursive_mode,path_to_workspace,test_id_string,sche
     rgtenvironmentalfile = os.environ["RGT_ENVIRONMENTAL_FILE"]
     jobname = "profugus_gpu_1node"
     nodes = "1"
-    ppn = "20"
+    ppn = "160"
     ranks = str(int(nodes) * int(ppn))
     batchqueue = "batch"
     pbsaccountid = os.environ["RGT_PBS_JOB_ACCNT_ID"]
@@ -109,12 +109,13 @@ def make_batch_script(batch_recursive_mode,path_to_workspace,test_id_string,sche
     workdir = os.path.join(path_to_workspace,"workdir")
     resubmitme = batch_recursive_mode
     walltime = "01:00"
-    joblaunchcommand = "mpirun --oversubscribe --map-by ppr:4:node ${EXECUTABLE} -i c5g7_3d_gpu_1node.xml | tee stdout.txt"
+    joblaunchcommand = "jsrun --nrs ${NUM_NODES} --cpu_per_rs 20 --gpu_per_rs 4 --tasks_per_rs 4 --rs_per_host 1 -E LD_LIBRARY_PATH ${EXECUTABLE} -i c5g7_3d_gpu_1node.xml | tee stdout.txt"
     rg_array = [
         (re.compile("__jobname__"),jobname),
         (re.compile("__walltime__"),walltime),
         (re.compile("__numberprocessors__"),ranks),
+        (re.compile("__numbernodes__"),nodes),
         (re.compile("__nccstestharnessmodule__"),nccstestharnessmodule),
         (re.compile("__rgtenvironmentalfile__"),rgtenvironmentalfile),
         (re.compile("__batchqueue__"),batchqueue),
@@ -223,8 +224,8 @@ def send_to_scheduler(batchfilename,scheduler):
         submit_command = "qsub "
         qcommand = submit_command + batchfilename
     elif sched_id == 1:
-        submit_command = "bsub"
-        qcommand = submit_command
+        submit_command = "bsub "
+        qcommand = submit_command + batchfilename
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
@@ -238,8 +239,8 @@ def send_to_scheduler(batchfilename,scheduler):
         p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
         (my_stdout, my_stderr) = p.communicate()
     elif sched_id == 1:
-        p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
-        (my_stdout, my_stderr) = p.communicate(input=data)
+        p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
+        (my_stdout, my_stderr) = p.communicate()
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
......
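For the GPU tests the resource-set arithmetic differs from the CPU case: each node is one resource set with 4 GPUs and 4 MPI tasks, so the rank count scales with GPUs rather than hardware threads (the old line expressed the same layout as --map-by ppr:4:node). Note that the jsrun line takes its node count from ${NUM_NODES}, so __numberprocessors__ no longer drives the launch itself. A sketch of that accounting, illustrative only and not part of the commit:

def gpu_launch_geometry(num_nodes, rs_per_host=1, tasks_per_rs=4, gpu_per_rs=4):
    # Mirrors the jsrun flags in the GPU launch lines above.
    resource_sets = num_nodes * rs_per_host
    return {
        "resource_sets": resource_sets,
        "ranks": resource_sets * tasks_per_rs,  # one rank per GPU here
        "gpus": resource_sets * gpu_per_rs,
    }

# 1-node GPU test: 1 resource set, 4 ranks, 4 GPUs.
print(gpu_launch_geometry(1))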
@@ -3,7 +3,7 @@
 #BSUB -J __jobname__
 #BSUB -o __resultsdir__/__jobname__.o%J
 #BSUB -e __resultsdir__/__jobname__.e%J
-#BSUB -n __numberprocessors__
+#BSUB -nnodes __numbernodes__
 #BSUB -W __walltime__
 #BSUB -P __projectid__
@@ -13,11 +13,10 @@
 #-----------------------------------------------------
 source __rgtenvironmentalfile__
 module load __nccstestharnessmodule__
-module load gcc
+module load gcc/4.8.5
 module load python/3.5.2
 module load py-h5py/2.6.0-py3
-export PYTHONPATH=/autofs/nccs-svm1_sw/summitdev/.swci/1-compute/opt/spack/20170405/linux-rhel7-ppc64le/gcc-4.8.5/py-h5py-2.6.0-xypyoyacn6tdvaq4grvk2hvcijlkik45/lib/python3.5/site-packages:$PYTHONPATH
-module load spectrum_mpi
+module load spectrum-mpi
 module list
 #-----------------------------------------------------
@@ -31,6 +30,7 @@ RESULTSDIR="__resultsdir__"
 UNIQUE_ID_STRING="__unique_id_string__"
 INPUTDIR=${STARTINGDIRECTORY}/../Inputs
 NUMPROCS="__numberprocessors__"
+NUM_NODES="__numbernodes__"
 echo "EXECUTABLE"
 echo ${EXECUTABLE}
@@ -44,6 +44,8 @@ echo "STARTINGDIRECTORY"
 echo ${STARTINGDIRECTORY}
 echo "NUMPROCS"
 echo ${NUMPROCS}
+echo "NUM_NODES"
+echo ${NUM_NODES}
 #-----------------------------------------------------
 # Ensure that we are in the correct starting -
@@ -80,7 +82,6 @@ echo "Changed to working directory"
 pwd
 ls -l
 #-----------------------------------------------------
 # Link input files into current directory
 # -
......
@@ -109,12 +109,13 @@ def make_batch_script(batch_recursive_mode,path_to_workspace,test_id_string,sche
     workdir = os.path.join(path_to_workspace,"workdir")
     resubmitme = batch_recursive_mode
     walltime = "01:00"
-    joblaunchcommand = "mpirun --oversubscribe --map-by ppr:4:node ${EXECUTABLE} -i c5g7_3d_gpu_32node.xml | tee stdout.txt"
+    joblaunchcommand = "jsrun --nrs ${NUM_NODES} --cpu_per_rs 20 --gpu_per_rs 4 --tasks_per_rs 4 --rs_per_host 1 -E LD_LIBRARY_PATH ${EXECUTABLE} -i c5g7_3d_gpu_32node.xml | tee stdout.txt"
     rg_array = [
         (re.compile("__jobname__"),jobname),
         (re.compile("__walltime__"),walltime),
         (re.compile("__numberprocessors__"),ranks),
+        (re.compile("__numbernodes__"),nodes),
         (re.compile("__nccstestharnessmodule__"),nccstestharnessmodule),
         (re.compile("__rgtenvironmentalfile__"),rgtenvironmentalfile),
         (re.compile("__batchqueue__"),batchqueue),
@@ -223,8 +224,8 @@ def send_to_scheduler(batchfilename,scheduler):
         submit_command = "qsub "
         qcommand = submit_command + batchfilename
     elif sched_id == 1:
-        submit_command = "bsub"
-        qcommand = submit_command
+        submit_command = "bsub "
+        qcommand = submit_command + batchfilename
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
@@ -238,8 +239,8 @@ def send_to_scheduler(batchfilename,scheduler):
         p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
         (my_stdout, my_stderr) = p.communicate()
     elif sched_id == 1:
-        p = subprocess.Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
-        (my_stdout, my_stderr) = p.communicate(input=data)
+        p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
+        (my_stdout, my_stderr) = p.communicate()
     else:
         print("Unsupported scheduler (sched_id = " + str(sched_id) + ")")
......
@@ -30,7 +30,7 @@ module load cmake
 module load git
 module load hdf5
 module load cuda
-module load spectrum_mpi
+module load spectrum-mpi
 module load netlib-lapack
 rm -rf CMake*
......