Skip to content
Snippets Groups Projects
Commit 251a418a authored by John Chilton's avatar John Chilton
Browse files

Merge pull request #95 from natefoo/jetstream-fixes

Fixes for things encountered when running jobs on Jetstream
parents d9a603c3 cadfc5ab
No related branches found
No related tags found
No related merge requests found
......@@ -3,6 +3,7 @@ Base Classes and Infrastructure Supporting Concrete Manager Implementations.
"""
from collections import deque
import errno
import logging
import os
from os.path import exists, isdir, join, basename
......@@ -369,7 +370,11 @@ class DirectoryMaker(object):
makedir_args = [path]
if self.mode is not None:
makedir_args.append(self.mode)
if recursive:
makedirs(*makedir_args)
else:
os.mkdir(*makedir_args)
try:
if recursive:
makedirs(*makedir_args)
else:
os.mkdir(*makedir_args)
except (OSError, IOError) as exc:
if exc.errno != errno.EEXIST:
raise
export GALAXY_SLOTS_CONFIGURED="1"
if [ -n "$SLURM_NTASKS" ]; then
# May want to multiply this by ${SLURM_CPUS_PER_TASK:-1}.
    # SLURM_NTASKS is total tasks over all nodes, so it
    # shouldn't be used for multi-node requests.
GALAXY_SLOTS="$SLURM_NTASKS"
if [ -n "$SLURM_CPUS_ON_NODE" ]; then
# This should be valid on SLURM except in the case that srun is used to
# submit additional job steps under an existing allocation, which we do not
# currently do.
GALAXY_SLOTS="$SLURM_CPUS_ON_NODE"
elif [ -n "$SLURM_NTASKS" ] || [ -n "$SLURM_CPUS_PER_TASK" ]; then
# $SLURM_CPUS_ON_NODE should be set correctly on SLURM (even on old
# installations), but keep the $SLURM_NTASKS logic as a backup since this
# was the previous method under SLURM.
#
# Multiply these values since SLURM_NTASKS is total tasks over all nodes.
# GALAXY_SLOTS maps to CPUS on a single node and shouldn't be used for
# multi-node requests.
GALAXY_SLOTS=`expr "${SLURM_NTASKS:-1}" \* "${SLURM_CPUS_PER_TASK:-1}"`
elif [ -n "$NSLOTS" ]; then
GALAXY_SLOTS="$NSLOTS"
elif [ -n "$PBS_NCPUS" ]; then
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment