diff --git a/galaxy/jobs/metrics/instrumenters/core.py b/galaxy/jobs/metrics/instrumenters/core.py
index 1e0bdae167d192ec999902de739bec83d66f37d1..80be7855cb385629766f3e63feb36ccd718b5c4c 100644
--- a/galaxy/jobs/metrics/instrumenters/core.py
+++ b/galaxy/jobs/metrics/instrumenters/core.py
@@ -82,4 +82,4 @@ class CorePlugin( InstrumentPlugin ):
             pass
         return value
 
-__all__ = [ CorePlugin ]
+__all__ = [ 'CorePlugin' ]
diff --git a/galaxy/jobs/metrics/instrumenters/cpuinfo.py b/galaxy/jobs/metrics/instrumenters/cpuinfo.py
index 80adb2050e4934a6ccb912d7d07573003b58b37e..ca341337abbab2c6aed056ffc2ff5f320831f366 100644
--- a/galaxy/jobs/metrics/instrumenters/cpuinfo.py
+++ b/galaxy/jobs/metrics/instrumenters/cpuinfo.py
@@ -59,4 +59,4 @@ class CpuInfoPlugin( InstrumentPlugin ):
     def __instrument_cpuinfo_path( self, job_directory ):
         return self._instrument_file_path( job_directory, "cpuinfo" )
 
-__all__ = [ CpuInfoPlugin ]
+__all__ = [ 'CpuInfoPlugin' ]
diff --git a/galaxy/jobs/metrics/instrumenters/env.py b/galaxy/jobs/metrics/instrumenters/env.py
index 0610e8ca900eee25e3236c7f08ee16f1f041fa5d..3130591630dd4bb8f99ebfd4f3848c33a7b6d306 100644
--- a/galaxy/jobs/metrics/instrumenters/env.py
+++ b/galaxy/jobs/metrics/instrumenters/env.py
@@ -68,4 +68,4 @@ class EnvPlugin( InstrumentPlugin ):
     def __env_file( self, job_directory ):
         return self._instrument_file_path( job_directory, "vars" )
 
-__all__ = [ EnvPlugin ]
+__all__ = [ 'EnvPlugin' ]
diff --git a/galaxy/jobs/metrics/instrumenters/meminfo.py b/galaxy/jobs/metrics/instrumenters/meminfo.py
index e845b4879788c30ba04779c29b2c6e7376dce5ea..ea10c1e7e554806cbb716c925e60d95cc4737d35 100644
--- a/galaxy/jobs/metrics/instrumenters/meminfo.py
+++ b/galaxy/jobs/metrics/instrumenters/meminfo.py
@@ -56,4 +56,4 @@ class MemInfoPlugin( InstrumentPlugin ):
     def __instrument_meminfo_path( self, job_directory ):
         return self._instrument_file_path( job_directory, "meminfo" )
 
-__all__ = [ MemInfoPlugin ]
+__all__ = [ 'MemInfoPlugin' ]
diff --git a/galaxy/jobs/metrics/instrumenters/uname.py b/galaxy/jobs/metrics/instrumenters/uname.py
index 7568ac210f1f7d597d263b9d81d03ead6625e4da..0744ffef282900e8bdc61c0b67d5759addaa2449 100644
--- a/galaxy/jobs/metrics/instrumenters/uname.py
+++ b/galaxy/jobs/metrics/instrumenters/uname.py
@@ -31,4 +31,4 @@ class UnamePlugin( InstrumentPlugin ):
         return self._instrument_file_path( job_directory, "uname" )
 
 
-__all__ = [ UnamePlugin ]
+__all__ = [ 'UnamePlugin' ]
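
Note on the __all__ changes above: __all__ is expected to list exported names as strings, not the objects themselves. With class objects in the list, star imports fail with a TypeError and static tooling cannot resolve the exports. A minimal illustration (module name is arbitrary):

# core.py -- illustrative
class CorePlugin(object):
    pass

# Wrong: "from core import *" raises TypeError, linters cannot check it
# __all__ = [CorePlugin]

# Right: export the name, not the object
__all__ = ['CorePlugin']
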
diff --git a/galaxy/objectstore/s3.py b/galaxy/objectstore/s3.py
index a6b97ab46683c76798cc13b582b4dbec7ac1d47e..361c40a915a20911c8a9b7beac3212c17faa9779 100644
--- a/galaxy/objectstore/s3.py
+++ b/galaxy/objectstore/s3.py
@@ -13,13 +13,14 @@ import time
 from datetime import datetime
 
 from galaxy.exceptions import ObjectNotFound
-from galaxy.util import umask_fix_perms
+from galaxy.util import string_as_bool, umask_fix_perms
 from galaxy.util.directory_hash import directory_hash_id
 from galaxy.util.sleeper import Sleeper
 from .s3_multipart_upload import multipart_upload
 from ..objectstore import ObjectStore, convert_bytes
 
 try:
+    # Imports are done this way to allow objectstore code to be used outside of Galaxy.
     import boto
     from boto.s3.key import Key
     from boto.s3.connection import S3Connection
@@ -27,7 +28,8 @@ try:
 except ImportError:
     boto = None
 
-NO_BOTO_ERROR_MESSAGE = "S3/Swift object store configured, but no boto dependency available. Please install and properly configure boto or modify object store configuration."
+NO_BOTO_ERROR_MESSAGE = ("S3/Swift object store configured, but no boto dependency available."
+                         "Please install and properly configure boto or modify object store configuration.")
 
 log = logging.getLogger( __name__ )
 logging.getLogger('boto').setLevel(logging.INFO)  # Otherwise boto is quite noisy
@@ -76,7 +78,8 @@ class S3ObjectStore(ObjectStore):
             self.secret_key = a_xml.get('secret_key')
             b_xml = config_xml.findall('bucket')[0]
             self.bucket = b_xml.get('name')
-            self.use_rr = b_xml.get('use_reduced_redundancy', False)
+            self.use_rr = string_as_bool(b_xml.get('use_reduced_redundancy', "False"))
+            self.max_chunk_size = int(b_xml.get('max_chunk_size', 250))
             cn_xml = config_xml.findall('connection')
             if not cn_xml:
                 cn_xml = {}
@@ -84,11 +87,20 @@ class S3ObjectStore(ObjectStore):
                 cn_xml = cn_xml[0]
             self.host = cn_xml.get('host', None)
             self.port = int(cn_xml.get('port', 6000))
-            self.is_secure = cn_xml.get('is_secure', True)
+            self.multipart = string_as_bool(cn_xml.get('multipart', 'True'))
+            self.is_secure = string_as_bool(cn_xml.get('is_secure', 'True'))
             self.conn_path = cn_xml.get('conn_path', '/')
             c_xml = config_xml.findall('cache')[0]
             self.cache_size = float(c_xml.get('size', -1))
-            self.cache_path = c_xml.get('path')
+            # for multipart upload
+            self.s3server = {'access_key': self.access_key,
+                             'secret_key': self.secret_key,
+                             'is_secure': self.is_secure,
+                             'max_chunk_size': self.max_chunk_size,
+                             'host': self.host,
+                             'port': self.port,
+                             'use_rr': self.use_rr,
+                             'conn_path': self.conn_path}
         except Exception:
             # Toss it back up after logging, we can't continue loading at this point.
             log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
@@ -100,23 +112,23 @@ class S3ObjectStore(ObjectStore):
             total_size = 0
             # Is this going to be too expensive of an operation to be done frequently?
             file_list = []
-            for dirpath, dirnames, filenames in os.walk(self.staging_path):
-                for f in filenames:
-                    fp = os.path.join(dirpath, f)
-                    file_size = os.path.getsize(fp)
+            for dirpath, _, filenames in os.walk(self.staging_path):
+                for filename in filenames:
+                    filepath = os.path.join(dirpath, filename)
+                    file_size = os.path.getsize(filepath)
                     total_size += file_size
                     # Get the time given file was last accessed
-                    last_access_time = time.localtime(os.stat(fp)[7])
+                    last_access_time = time.localtime(os.stat(filepath)[7])
                     # Compose a tuple of the access time and the file path
-                    file_tuple = last_access_time, fp, file_size
+                    file_tuple = last_access_time, filepath, file_size
                     file_list.append(file_tuple)
             # Sort the file list (based on access time)
             file_list.sort()
             # Initiate cleaning once within 10% of the defined cache size?
             cache_limit = self.cache_size * 0.9
             if total_size > cache_limit:
-                log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s"
-                         % (convert_bytes(total_size), convert_bytes(cache_limit)))
+                log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
+                         convert_bytes(total_size), convert_bytes(cache_limit))
                 # How much to delete? If simply deleting up to the cache-10% limit,
                 # is likely to be deleting frequently and may run the risk of hitting
                 # the limit - maybe delete additional #%?
@@ -144,10 +156,10 @@ class S3ObjectStore(ObjectStore):
         # exceed delete_this_much; start deleting from the front of the file list,
         # which assumes the oldest files come first on the list.
         deleted_amount = 0
-        for i, f in enumerate(file_list):
+        for entry in file_list:
             if deleted_amount < delete_this_much:
-                deleted_amount += f[2]
-                os.remove(f[1])
+                deleted_amount += entry[2]
+                os.remove(entry[1])
                 # Debugging code for printing deleted files' stats
                 # folder, file_name = os.path.split(f[1])
                 # file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
@@ -155,7 +167,7 @@ class S3ObjectStore(ObjectStore):
                 #     % (i, file_name, convert_bytes(f[2]), file_date, \
                 #     convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
             else:
-                log.debug("Cache cleaning done. Total space freed: %s" % convert_bytes(deleted_amount))
+                log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
                 return
 
     def _get_bucket(self, bucket_name):
@@ -164,14 +176,14 @@ class S3ObjectStore(ObjectStore):
         for i in range(5):
             try:
                 bucket = self.conn.get_bucket(bucket_name)
-                log.debug("Using cloud object store with bucket '%s'" % bucket.name)
+                log.debug("Using cloud object store with bucket '%s'", bucket.name)
                 return bucket
             except S3ResponseError:
                 try:
-                    log.debug("Bucket not found, creating s3 bucket with handle '%s'" % bucket_name)
+                    log.debug("Bucket not found, creating s3 bucket with handle '%s'", bucket_name)
                     self.conn.create_bucket(bucket_name)
                 except S3ResponseError:
-                    log.exception("Could not get bucket '%s', attempt %s/5" % (bucket_name, i + 1))
+                    log.exception("Could not get bucket '%s', attempt %s/5", bucket_name, i + 1)
                     time.sleep(2)
         # All the attempts have been exhausted and connection was not established,
         # raise error
@@ -179,10 +191,10 @@ class S3ObjectStore(ObjectStore):
 
     def _fix_permissions(self, rel_path):
         """ Set permissions on rel_path"""
-        for basedir, dirs, files in os.walk(rel_path):
+        for basedir, _, files in os.walk(rel_path):
             umask_fix_perms(basedir, self.config.umask, 0777, self.config.gid)
-            for f in files:
-                path = os.path.join(basedir, f)
+            for filename in files:
+                path = os.path.join(basedir, filename)
                 # Ignore symlinks
                 if os.path.islink(path):
                     continue
@@ -212,11 +224,9 @@ class S3ObjectStore(ObjectStore):
             key = self.bucket.get_key(rel_path)
             if key:
                 return key.size
-        except S3ResponseError, ex:
-            log.error("Could not get size of key '%s' from S3: %s" % (rel_path, ex))
-        except Exception, ex:
-            log.error("Could not get reference to the key object '%s'; returning -1 for key size: %s" % (rel_path, ex))
-        return -1
+        except S3ResponseError:
+            log.exception("Could not get size of key '%s' from S3", rel_path)
+        return -1
 
     def _key_exists(self, rel_path):
         exists = False
@@ -224,16 +234,16 @@ class S3ObjectStore(ObjectStore):
             # A hackish way of testing if the rel_path is a folder vs a file
             is_dir = rel_path[-1] == '/'
             if is_dir:
-                rs = self.bucket.get_all_keys(prefix=rel_path)
-                if len(rs) > 0:
+                keyresult = self.bucket.get_all_keys(prefix=rel_path)
+                if len(keyresult) > 0:
                     exists = True
                 else:
                     exists = False
             else:
                 key = Key(self.bucket, rel_path)
                 exists = key.exists()
-        except S3ResponseError, ex:
-            log.error("Trouble checking existence of S3 key '%s': %s" % (rel_path, ex))
+        except S3ResponseError:
+            log.exception("Trouble checking existence of S3 key '%s'", rel_path)
             return False
         if rel_path[0] == '/':
             raise
@@ -274,36 +284,36 @@ class S3ObjectStore(ObjectStore):
         if not os.path.exists(self._get_cache_path(rel_path_dir)):
             os.makedirs(self._get_cache_path(rel_path_dir))
         # Now pull in the file
-        ok = self._download(rel_path)
+        file_ok = self._download(rel_path)
         self._fix_permissions(self._get_cache_path(rel_path_dir))
-        return ok
+        return file_ok
 
     def _transfer_cb(self, complete, total):
         self.transfer_progress += 10
 
     def _download(self, rel_path):
         try:
-            log.debug("Pulling key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
+            log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
             key = self.bucket.get_key(rel_path)
             # Test if cache is large enough to hold the new file
             if self.cache_size > 0 and key.size > self.cache_size:
-                log.critical("File %s is larger (%s) than the cache size (%s). Cannot download."
-                             % (rel_path, key.size, self.cache_size))
+                log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
+                             rel_path, key.size, self.cache_size)
                 return False
             if self.use_axel:
-                log.debug("Parallel pulled key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
+                log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
                 ncores = multiprocessing.cpu_count()
                 url = key.generate_url(7200)
                 ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url))
                 if ret_code == 0:
                     return True
             else:
-                log.debug("Pulled key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
+                log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
                 self.transfer_progress = 0  # Reset transfer progress counter
                 key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10)
                 return True
-        except S3ResponseError, ex:
-            log.error("Problem downloading key '%s' from S3 bucket '%s': %s" % (rel_path, self.bucket.name, ex))
+        except S3ResponseError:
+            log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
         return False
 
     def _push_to_os(self, rel_path, source_file=None, from_string=None):
@@ -319,31 +329,32 @@ class S3ObjectStore(ObjectStore):
             if os.path.exists(source_file):
                 key = Key(self.bucket, rel_path)
                 if os.path.getsize(source_file) == 0 and key.exists():
-                    log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping." % (source_file, rel_path))
+                    log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file, rel_path)
                     return True
                 if from_string:
                     key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr)
-                    log.debug("Pushed data from string '%s' to key '%s'" % (from_string, rel_path))
+                    log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
                 else:
                     start_time = datetime.now()
-                    log.debug("Pushing cache file '%s' of size %s bytes to key '%s'" % (source_file, os.path.getsize(source_file), rel_path))
+                    log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file, os.path.getsize(source_file), rel_path)
                     mb_size = os.path.getsize(source_file) / 1e6
-                    if mb_size < 10 or type(self) == SwiftObjectStore:
+                    if mb_size < 10 or (not self.multipart):
                         self.transfer_progress = 0  # Reset transfer progress counter
                         key.set_contents_from_filename(source_file,
                                                        reduced_redundancy=self.use_rr,
                                                        cb=self._transfer_cb,
                                                        num_cb=10)
                     else:
-                        multipart_upload(self.bucket, key.name, source_file, mb_size, self.access_key, self.secret_key, use_rr=self.use_rr)
+                        multipart_upload(self.s3server, self.bucket, key.name, source_file, mb_size)
                     end_time = datetime.now()
-                    log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)" % (source_file, rel_path, os.path.getsize(source_file), end_time - start_time))
+                    log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
+                              source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
                 return True
             else:
-                log.error("Tried updating key '%s' from source file '%s', but source file does not exist."
-                          % (rel_path, source_file))
-        except S3ResponseError, ex:
-            log.error("Trouble pushing S3 key '%s' from file '%s': %s" % (rel_path, source_file, ex))
+                log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
+                          rel_path, source_file)
+        except S3ResponseError:
+            log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
         return False
 
     def file_ready(self, obj, **kwargs):
@@ -356,8 +367,8 @@ class S3ObjectStore(ObjectStore):
         if self._in_cache(rel_path):
             if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
                 return True
-            log.debug("Waiting for dataset {0} to transfer from OS: {1}/{2}".format(rel_path,
-                      os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path)))
+            log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
+                      os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path))
         return False
 
     def exists(self, obj, **kwargs):
@@ -429,10 +440,10 @@ class S3ObjectStore(ObjectStore):
             try:
                 return os.path.getsize(self._get_cache_path(rel_path))
             except OSError, ex:
-                log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s" % (rel_path, ex))
+                log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s", rel_path, ex)
         elif self.exists(obj, **kwargs):
             return self._get_size_in_s3(rel_path)
-        log.warning("Did not find dataset '%s', returning 0 for size" % rel_path)
+        log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
         return 0
 
     def delete(self, obj, entire_dir=False, **kwargs):
@@ -445,9 +456,9 @@ class S3ObjectStore(ObjectStore):
             # but requires iterating through each individual key in S3 and deleing it.
             if entire_dir and extra_dir:
                 shutil.rmtree(self._get_cache_path(rel_path))
-                rs = self.bucket.get_all_keys(prefix=rel_path)
-                for key in rs:
-                    log.debug("Deleting key %s" % key.name)
+                results = self.bucket.get_all_keys(prefix=rel_path)
+                for key in results:
+                    log.debug("Deleting key %s", key.name)
                     key.delete()
                 return True
             else:
@@ -455,14 +466,14 @@ class S3ObjectStore(ObjectStore):
                 os.unlink(self._get_cache_path(rel_path))
                 # Delete from S3 as well
                 if self._key_exists(rel_path):
-                        key = Key(self.bucket, rel_path)
-                        log.debug("Deleting key %s" % key.name)
-                        key.delete()
-                        return True
-        except S3ResponseError, ex:
-            log.error("Could not delete key '%s' from S3: %s" % (rel_path, ex))
-        except OSError, ex:
-            log.error('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
+                    key = Key(self.bucket, rel_path)
+                    log.debug("Deleting key %s", key.name)
+                    key.delete()
+                    return True
+        except S3ResponseError:
+            log.exception("Could not delete key '%s' from S3", rel_path)
+        except OSError:
+            log.exception('%s delete error', self.get_filename(obj, **kwargs))
         return False
 
     def get_data(self, obj, start=0, count=-1, **kwargs):
@@ -522,8 +533,8 @@ class S3ObjectStore(ObjectStore):
                         # FIXME? Should this be a `move`?
                         shutil.copy2(source_file, cache_file)
                     self._fix_permissions(cache_file)
-                except OSError, ex:
-                    log.error("Trouble copying source file '%s' to cache '%s': %s" % (source_file, cache_file, ex))
+                except OSError:
+                    log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
             else:
                 source_file = self._get_cache_path(rel_path)
             # Update the file on S3
@@ -538,8 +549,8 @@ class S3ObjectStore(ObjectStore):
             try:
                 key = Key(self.bucket, rel_path)
                 return key.generate_url(expires_in=86400)  # 24hrs
-            except S3ResponseError, ex:
-                log.warning("Trouble generating URL for dataset '%s': %s" % (rel_path, ex))
+            except S3ResponseError:
+                log.exception("Trouble generating URL for dataset '%s'", rel_path)
         return None
 
     def get_store_usage_percent(self):
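
Note on the string_as_bool() wrappers in _parse_config_xml above: ElementTree attribute lookups return strings, so the old b_xml.get('use_reduced_redundancy', False) yielded the truthy string "False" whenever the attribute was present and set to "False". A small sketch of the difference (XML snippet is illustrative):

from xml.etree import ElementTree
from galaxy.util import string_as_bool  # alias of asbool, added below in galaxy/util/__init__.py

b_xml = ElementTree.fromstring('<bucket name="data" use_reduced_redundancy="False"/>')

raw = b_xml.get('use_reduced_redundancy', False)                      # "False" -- a non-empty, truthy string
fixed = string_as_bool(b_xml.get('use_reduced_redundancy', "False"))  # False

assert bool(raw) and not fixed
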
diff --git a/galaxy/objectstore/s3_multipart_upload.py b/galaxy/objectstore/s3_multipart_upload.py
index 236996cfcb969e5a92cefe20aab2eb887ff72586..623fdc8e439c3b91a1a25db6bd7c694571af69d5 100644
--- a/galaxy/objectstore/s3_multipart_upload.py
+++ b/galaxy/objectstore/s3_multipart_upload.py
@@ -34,13 +34,23 @@ def map_wrap(f):
     return wrapper
 
 
-def mp_from_ids(mp_id, mp_keyname, mp_bucketname, aws_access_key_id, aws_secret_access_key):
+def mp_from_ids(s3server, mp_id, mp_keyname, mp_bucketname):
     """Get the multipart upload from the bucket and multipart IDs.
 
     This allows us to reconstitute a connection to the upload
     from within multiprocessing functions.
     """
-    conn = S3Connection(aws_access_key_id, aws_secret_access_key)
+    if s3server['host']:
+        conn = boto.connect_s3(aws_access_key_id=s3server['access_key'],
+                                    aws_secret_access_key=s3server['secret_key'],
+                                    is_secure=s3server['is_secure'],
+                                    host=s3server['host'],
+                                    port=s3server['port'],
+                                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+                                    path=s3server['conn_path'])
+    else:
+        conn = S3Connection(s3server['access_key'], s3server['secret_key'])
+
     bucket = conn.lookup(mp_bucketname)
     mp = boto.s3.multipart.MultiPartUpload(bucket)
     mp.key_name = mp_keyname
@@ -49,33 +59,36 @@ def mp_from_ids(mp_id, mp_keyname, mp_bucketname, aws_access_key_id, aws_secret_
 
 
 @map_wrap
-def transfer_part(mp_id, mp_keyname, mp_bucketname, i, part, aws_access_key_id, aws_secret_access_key):
+def transfer_part(s3server, mp_id, mp_keyname, mp_bucketname, i, part):
     """Transfer a part of a multipart upload. Designed to be run in parallel.
     """
-    mp = mp_from_ids(mp_id, mp_keyname, mp_bucketname, aws_access_key_id, aws_secret_access_key)
+    mp = mp_from_ids(s3server, mp_id, mp_keyname, mp_bucketname)
     with open(part) as t_handle:
         mp.upload_part_from_file(t_handle, i + 1)
     os.remove(part)
 
 
-def multipart_upload(bucket, s3_key_name, tarball, mb_size, aws_access_key_id, aws_secret_access_key, use_rr=True):
+def multipart_upload(s3server, bucket, s3_key_name, tarball, mb_size):
     """Upload large files using Amazon's multipart upload functionality.
     """
     cores = multiprocessing.cpu_count()
 
     def split_file(in_file, mb_size, split_num=5):
         prefix = os.path.join(os.path.dirname(in_file),
-                              "%sS3PART" % (os.path.basename(s3_key_name)))
-        # Split chunks so they are 5MB < chunk < 250MB
-        split_size = int(max(min(mb_size / (split_num * 2.0), 250), 5))
+                                "%sS3PART" % (os.path.basename(s3_key_name)))
+        max_chunk = s3server['max_chunk_size']
+        # Split chunks so they are 5MB < chunk < max_chunk_size (default 250MB)
+        split_size = int(max(min(mb_size / (split_num * 2.0), max_chunk), 5))
         if not os.path.exists("%saa" % prefix):
             cl = ["split", "-b%sm" % split_size, in_file, prefix]
             subprocess.check_call(cl)
         return sorted(glob.glob("%s*" % prefix))
 
-    mp = bucket.initiate_multipart_upload(s3_key_name, reduced_redundancy=use_rr)
+    mp = bucket.initiate_multipart_upload(s3_key_name,
+                                          reduced_redundancy=s3server['use_rr'])
+
     with multimap(cores) as pmap:
-        for _ in pmap(transfer_part, ((mp.id, mp.key_name, mp.bucket_name, i, part, aws_access_key_id, aws_secret_access_key)
+        for _ in pmap(transfer_part, ((s3server, mp.id, mp.key_name, mp.bucket_name, i, part)
                                       for (i, part) in
                                       enumerate(split_file(tarball, mb_size, cores)))):
             pass
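
Note on the reworked multipart helpers: passing a single s3server dict instead of separate credential arguments keeps the signatures stable and lets mp_from_ids() reconnect from worker processes to non-AWS endpoints (Swift or any custom host) as well as to plain S3. A sketch of the dict the object store now builds and hands to multipart_upload(); all values here are illustrative placeholders:

s3server = {
    'access_key': 'AKIA...',     # placeholder credentials
    'secret_key': 'secret',
    'is_secure': True,
    'max_chunk_size': 250,       # MB; upper bound used by split_file()
    'host': None,                # None -> plain S3Connection; set a hostname for Swift/custom endpoints
    'port': 6000,
    'use_rr': False,
    'conn_path': '/',
}
# multipart_upload(s3server, bucket, key.name, source_file, mb_size)
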
diff --git a/galaxy/tools/deps/__init__.py b/galaxy/tools/deps/__init__.py
index 3f4944b02e128793533f31e923b1798eb65367dc..f72b917ba1bfdb505c6c57281ee679daa562ced3 100644
--- a/galaxy/tools/deps/__init__.py
+++ b/galaxy/tools/deps/__init__.py
@@ -14,7 +14,7 @@ from galaxy.util import plugin_config
 
 
 def build_dependency_manager( config ):
-    if config.use_tool_dependencies:
+    if getattr( config, "use_tool_dependencies", False ):
         dependency_manager_kwds = {
             'default_base_path': config.tool_dependency_dir,
             'conf_file': config.dependency_resolvers_config_file,
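
Note: getattr(config, "use_tool_dependencies", False) lets build_dependency_manager() accept minimal config objects that simply omit the attribute (for example when this package is embedded outside Galaxy) instead of raising AttributeError. Roughly:

class MinimalConfig(object):
    pass  # no use_tool_dependencies attribute at all

config = MinimalConfig()
# config.use_tool_dependencies                     -> AttributeError
# getattr(config, "use_tool_dependencies", False)  -> False; dependency resolution is skipped
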
diff --git a/galaxy/tools/deps/brew_exts.py b/galaxy/tools/deps/brew_exts.py
index fdf543b964a81f26e07507dcc539c00754a12dd7..ca0cd80398058437da2405d2977e883096ceca10 100755
--- a/galaxy/tools/deps/brew_exts.py
+++ b/galaxy/tools/deps/brew_exts.py
@@ -21,13 +21,17 @@
 
 from __future__ import print_function
 
-import argparse
+try:
+    import argparse
+except ImportError:
+    argparse = None
 import contextlib
 import json
 import glob
 import os
 import re
 import sys
+import string
 import subprocess
 
 WHITESPACE_PATTERN = re.compile("[\s]+")
@@ -43,6 +47,7 @@ NO_BREW_ERROR_MESSAGE = "Could not find brew on PATH, please place on path or pa
 CANNOT_DETERMINE_TAP_ERROR_MESSAGE = "Cannot determine tap of specified recipe - please use fully qualified recipe (e.g. homebrew/science/samtools)."
 VERBOSE = False
 RELAXED = False
+BREW_ARGS = []
 
 
 class BrewContext(object):
@@ -104,6 +109,7 @@ class RecipeContext(object):
 def main():
     global VERBOSE
     global RELAXED
+    global BREW_ARGS
     parser = argparse.ArgumentParser(description=DESCRIPTION)
     parser.add_argument("--brew", help="Path to linuxbrew 'brew' executable to target")
     actions = ["vinstall", "vuninstall", "vdeps", "vinfo", "env"]
@@ -114,11 +120,13 @@ def main():
     parser.add_argument('version', metavar='version', help="Version for action (e.g. 0.1.19).")
     parser.add_argument('--relaxed', action='store_true', help="Relaxed processing - for instance allow use of env on non-vinstall-ed recipes.")
     parser.add_argument('--verbose', action='store_true', help="Verbose output")
+    parser.add_argument('restargs', nargs=argparse.REMAINDER)
     args = parser.parse_args()
     if args.verbose:
         VERBOSE = True
     if args.relaxed:
         RELAXED = True
+    BREW_ARGS = args.restargs
     if not action:
         action = args.action
     brew_context = BrewContext(args)
@@ -159,7 +167,7 @@ class CommandLineException(Exception):
         return self.message
 
 
-def versioned_install(recipe_context, package=None, version=None):
+def versioned_install(recipe_context, package=None, version=None, installed_deps=[]):
     if package is None:
         package = recipe_context.recipe
         version = recipe_context.version
@@ -176,10 +184,15 @@ def versioned_install(recipe_context, package=None, version=None):
             versioned = version_info[2]
             if versioned:
                 dep_to_version[dep] = dep_version
+                if dep in installed_deps:
+                    continue
                 versioned_install(recipe_context, dep, dep_version)
+                installed_deps.append(dep)
             else:
                 # Install latest.
                 dep_to_version[dep] = None
+                if dep in installed_deps:
+                    continue
                 unversioned_install(dep)
         try:
             for dep in deps:
@@ -198,7 +211,16 @@ def versioned_install(recipe_context, package=None, version=None):
                 }
                 deps_metadata.append(dep_metadata)
 
-            brew_execute(["install", package])
+            cellar_root = recipe_context.brew_context.homebrew_cellar
+            cellar_path = recipe_context.cellar_path
+            env_actions = build_env_actions(deps_metadata, cellar_root, cellar_path, custom_only=True)
+            env = EnvAction.build_env(env_actions)
+            args = ["install"]
+            if VERBOSE:
+                args.append("--verbose")
+            args.extend(BREW_ARGS)
+            args.append(package)
+            brew_execute(args, env=env)
             deps = brew_execute(["deps", package])
             deps = [d.strip() for d in deps.split("\n") if d]
             metadata = {
@@ -278,10 +300,10 @@ def attempt_unlink(package):
         pass
 
 
-def brew_execute(args):
+def brew_execute(args, env=None):
     os.environ["HOMEBREW_NO_EMOJI"] = "1"  # simplify brew parsing.
     cmds = ["brew"] + args
-    return execute(cmds)
+    return execute(cmds, env=env)
 
 
 def build_env_statements_from_recipe_context(recipe_context, **kwds):
@@ -290,11 +312,20 @@ def build_env_statements_from_recipe_context(recipe_context, **kwds):
     return env_statements
 
 
-def build_env_statements(cellar_root, cellar_path, relaxed=None):
+def build_env_statements(cellar_root, cellar_path, relaxed=None, custom_only=False):
     deps = load_versioned_deps(cellar_path, relaxed=relaxed)
+    actions = build_env_actions(deps, cellar_root, cellar_path, relaxed, custom_only)
+    env_statements = []
+    for action in actions:
+        env_statements.extend(action.to_statements())
+    return "\n".join(env_statements)
+
+
+def build_env_actions(deps, cellar_root, cellar_path, relaxed=None, custom_only=False):
 
     path_appends = []
     ld_path_appends = []
+    actions = []
 
     def handle_keg(cellar_path):
         bin_path = os.path.join(cellar_path, "bin")
@@ -303,6 +334,14 @@ def build_env_statements(cellar_root, cellar_path, relaxed=None):
         lib_path = os.path.join(cellar_path, "lib")
         if os.path.isdir(lib_path):
             ld_path_appends.append(lib_path)
+        env_path = os.path.join(cellar_path, "platform_environment.json")
+        if os.path.exists(env_path):
+            with open(env_path, "r") as f:
+                env_metadata = json.load(f)
+                if "actions" in env_metadata:
+                    def to_action(desc):
+                        return EnvAction(cellar_path, desc)
+                    actions.extend(map(to_action, env_metadata["actions"]))
 
     for dep in deps:
         package = dep['name']
@@ -311,14 +350,54 @@ def build_env_statements(cellar_root, cellar_path, relaxed=None):
         handle_keg( dep_cellar_path )
 
     handle_keg( cellar_path )
-    env_statements = []
-    if path_appends:
-        env_statements.append("PATH=" + ":".join(path_appends) + ":$PATH")
-        env_statements.append("export PATH")
-    if ld_path_appends:
-        env_statements.append("LD_LIBRARY_PATH=" + ":".join(ld_path_appends) + ":$LD_LIBRARY_PATH")
-        env_statements.append("export LD_LIBRARY_PATH")
-    return "\n".join(env_statements)
+    if not custom_only:
+        if path_appends:
+            actions.append(EnvAction(cellar_path, {"action": "prepend", "variable": "PATH", "value": ":".join(path_appends)}))
+        if ld_path_appends:
+            actions.append(EnvAction(cellar_path, {"action": "prepend", "variable": "LD_LIBRARY_PATH", "value": ":".join(path_appends)}))
+    return actions
+
+
+class EnvAction(object):
+
+    def __init__(self, keg_root, action_description):
+        self.variable = action_description["variable"]
+        self.action = action_description["action"]
+        self.value = string.Template(action_description["value"]).safe_substitute({
+            'KEG_ROOT': keg_root,
+        })
+
+    @staticmethod
+    def build_env(env_actions):
+        new_env = os.environ.copy()
+        map(lambda env_action: env_action.modify_environ(new_env), env_actions)
+        return new_env
+
+    def modify_environ(self, environ):
+        if self.action == "set" or not environ.get(self.variable, ""):
+            environ[self.variable] = self.__eval("${value}")
+        elif self.action == "prepend":
+            environ[self.variable] = self.__eval("${value}:%s" % environ[self.variable])
+        else:
+            environ[self.variable] = self.__eval("%s:${value}" % environ[self.variable])
+
+    def __eval(self, template):
+        return string.Template(template).safe_substitute(
+            variable=self.variable,
+            value=self.value,
+        )
+
+    def to_statements(self):
+        if self.action == "set":
+            template = '''${variable}="${value}"'''
+        elif self.action == "prepend":
+            template = '''${variable}="${value}:$$${variable}"'''
+        else:
+            template = '''${variable}="$$${variable}:${value}"'''
+        return [
+            self.__eval(template),
+            "export %s" % self.variable
+        ]
 
 
 @contextlib.contextmanager
@@ -350,8 +429,15 @@ def git_execute(args):
     return execute(cmds)
 
 
-def execute(cmds):
-    p = subprocess.Popen(cmds, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+def execute(cmds, env=None):
+    subprocess_kwds = dict(
+        shell=False,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
+    if env:
+        subprocess_kwds["env"] = env
+    p = subprocess.Popen(cmds, **subprocess_kwds)
     #log = p.stdout.read()
     global VERBOSE
     stdout, stderr = p.communicate()
@@ -363,7 +449,10 @@ def execute(cmds):
 
 
 def brew_deps(package):
-    stdout = brew_execute(["deps", package])
+    args = ["deps"]
+    args.extend(BREW_ARGS)
+    args.append(package)
+    stdout = brew_execute(args)
     return [p.strip() for p in stdout.split("\n") if p]
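
Note on the EnvAction machinery added to brew_exts.py: a keg may now ship a platform_environment.json whose "actions" are either rendered into export statements (for the env action) or applied to the environment passed to brew install. A hypothetical example of such a file, based on the keys the code reads ("action", "variable", "value") and the ${KEG_ROOT} substitution it performs:

import json

env_metadata = {
    "actions": [
        # action is "set", "prepend", or anything else (treated as append);
        # ${KEG_ROOT} is replaced with the keg's cellar path via
        # string.Template.safe_substitute().
        {"action": "prepend", "variable": "PATH", "value": "${KEG_ROOT}/bin"},
        {"action": "set", "variable": "SAMTOOLS_ROOT", "value": "${KEG_ROOT}"},
    ]
}
print(json.dumps(env_metadata, indent=2))
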
 
 
diff --git a/galaxy/tools/deps/containers.py b/galaxy/tools/deps/containers.py
index c2f1ce4f4eea3e3ba33ffde59b497412b228038f..65334dd046c85a958586376271a68b5e3120e6b3 100644
--- a/galaxy/tools/deps/containers.py
+++ b/galaxy/tools/deps/containers.py
@@ -244,7 +244,8 @@ class DockerContainer(Container):
             working_directory=working_directory,
             net=prop("net", "none"),  # By default, docker instance has networking disabled
             auto_rm=asbool(prop("auto_rm", docker_util.DEFAULT_AUTO_REMOVE)),
-            set_user=asbool(prop("set_user", docker_util.DEFAULT_SET_USER)),
+            set_user=prop("set_user", docker_util.DEFAULT_SET_USER),
+            run_extra_arguments=prop("run_extra_arguments", docker_util.DEFAULT_RUN_EXTRA_ARGUMENTS),
             **docker_host_props
         )
         return "%s\n%s" % (cache_command, run_command)
diff --git a/galaxy/tools/deps/docker_util.py b/galaxy/tools/deps/docker_util.py
index 91830debacbe96ea50a5ee25476a464e76cbd2d5..38bd51874f5ac0892c0bccb7683808ea995fb060 100644
--- a/galaxy/tools/deps/docker_util.py
+++ b/galaxy/tools/deps/docker_util.py
@@ -10,7 +10,8 @@ DEFAULT_NET = None
 DEFAULT_MEMORY = None
 DEFAULT_VOLUMES_FROM = None
 DEFAULT_AUTO_REMOVE = True
-DEFAULT_SET_USER = True
+DEFAULT_SET_USER = "$UID"
+DEFAULT_RUN_EXTRA_ARGUMENTS = None
 
 
 class DockerVolume(object):
@@ -116,6 +117,7 @@ def build_docker_run_command(
     working_directory=DEFAULT_WORKING_DIRECTORY,
     name=None,
     net=DEFAULT_NET,
+    run_extra_arguments=DEFAULT_RUN_EXTRA_ARGUMENTS,
     docker_cmd=DEFAULT_DOCKER_COMMAND,
     sudo=DEFAULT_SUDO,
     sudo_cmd=DEFAULT_SUDO_COMMAND,
@@ -148,8 +150,13 @@ def build_docker_run_command(
         command_parts.extend(["--net", net])
     if auto_rm:
         command_parts.append("--rm")
+    if run_extra_arguments:
+        command_parts.append(run_extra_arguments)
     if set_user:
-        command_parts.extend(["-u", str(os.geteuid())])
+        user = set_user
+        if set_user == DEFAULT_SET_USER:
+            user = str(os.geteuid())
+        command_parts.extend(["-u", user])
     full_image = image
     if tag:
         full_image = "%s:%s" % (full_image, tag)
diff --git a/galaxy/tools/deps/requirements.py b/galaxy/tools/deps/requirements.py
index 73fa7f5f4e09b41c5bb36a418d0a874d4d13c057..452d9557e29cd584a7c632882faa945cfbe39aab 100644
--- a/galaxy/tools/deps/requirements.py
+++ b/galaxy/tools/deps/requirements.py
@@ -45,6 +45,12 @@ class ContainerDescription( object ):
         return ContainerDescription( identifier=identifier, type=type )
 
 
+def parse_requirements_from_dict( root_dict ):
+    requirements = root_dict.get("requirements", [])
+    containers = root_dict.get("containers", [])
+    return map(ToolRequirement.from_dict, requirements), map(ContainerDescription.from_dict, containers)
+
+
 def parse_requirements_from_xml( xml_root ):
     """
 
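
Note: parse_requirements_from_dict() is the dict/JSON counterpart of parse_requirements_from_xml(), for callers that already hold a decoded tool description. The expected shape is roughly the following; the field names are assumptions based on the corresponding from_dict() constructors:

root_dict = {
    "requirements": [
        {"name": "samtools", "type": "package", "version": "0.1.19"},
    ],
    "containers": [
        {"identifier": "busybox:ubuntu-14.04", "type": "docker"},
    ],
}
requirements, containers = parse_requirements_from_dict(root_dict)
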
diff --git a/galaxy/tools/deps/resolvers/galaxy_packages.py b/galaxy/tools/deps/resolvers/galaxy_packages.py
index 7f2504793152c3dffe1cc2f8b208ddc8a9fc2001..18e2ffe2790d355b0ae54e235900c7fd257db5c5 100644
--- a/galaxy/tools/deps/resolvers/galaxy_packages.py
+++ b/galaxy/tools/deps/resolvers/galaxy_packages.py
@@ -1,12 +1,13 @@
-from os.path import join, islink, realpath, basename, exists, abspath
+from os.path import join, islink, realpath, basename, exists
 
 from ..resolvers import DependencyResolver, INDETERMINATE_DEPENDENCY, Dependency
+from .resolver_mixins import UsesToolDependencyDirMixin
 
 import logging
 log = logging.getLogger( __name__ )
 
 
-class GalaxyPackageDependencyResolver(DependencyResolver):
+class GalaxyPackageDependencyResolver(DependencyResolver, UsesToolDependencyDirMixin):
     resolver_type = "galaxy_packages"
 
     def __init__(self, dependency_manager, **kwds):
@@ -16,7 +17,7 @@ class GalaxyPackageDependencyResolver(DependencyResolver):
         ## resolver that will just grab 'default' version of exact version
         ## unavailable.
         self.versionless = str(kwds.get('versionless', "false")).lower() == "true"
-        self.base_path = abspath( kwds.get('base_path', dependency_manager.default_base_path) )
+        self._init_base_path( dependency_manager, **kwds )
 
     def resolve( self, name, version, type, **kwds ):
         """
@@ -70,4 +71,4 @@ class GalaxyPackageDependency(Dependency):
             commands = 'PACKAGE_BASE=%s; export PACKAGE_BASE; . %s' % ( base_path, self.script )
         return commands
 
-__all__ = [GalaxyPackageDependencyResolver, GalaxyPackageDependency]
+__all__ = ['GalaxyPackageDependencyResolver', 'GalaxyPackageDependency']
diff --git a/galaxy/tools/deps/resolvers/homebrew.py b/galaxy/tools/deps/resolvers/homebrew.py
index 962697de0b367c75c2af334e1a6288a81c2b3587..b3261ee9ffa132d4f6b95c200e85d1068febc06c 100644
--- a/galaxy/tools/deps/resolvers/homebrew.py
+++ b/galaxy/tools/deps/resolvers/homebrew.py
@@ -12,20 +12,19 @@ This is still an experimental module and there will almost certainly be backward
 incompatible changes coming.
 """
 
-import os
 
-from ..brew_exts import DEFAULT_HOMEBREW_ROOT, recipe_cellar_path, build_env_statements
-from ..resolvers import DependencyResolver, INDETERMINATE_DEPENDENCY, Dependency
+from .resolver_mixins import UsesHomebrewMixin
+from ..resolvers import DependencyResolver, INDETERMINATE_DEPENDENCY
 
 # TODO: Implement prefer version linked...
 PREFER_VERSION_LINKED = 'linked'
 PREFER_VERSION_LATEST = 'latest'
-UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE = "HomebrewDependencyResolver prefer_version must be latest %s"
+UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE = "HomebrewDependencyResolver prefer_version must be %s"
 UNKNOWN_PREFER_VERSION_MESSAGE = UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE % (PREFER_VERSION_LATEST)
 DEFAULT_PREFER_VERSION = PREFER_VERSION_LATEST
 
 
-class HomebrewDependencyResolver(DependencyResolver):
+class HomebrewDependencyResolver(DependencyResolver, UsesHomebrewMixin):
     resolver_type = "homebrew"
 
     def __init__(self, dependency_manager, **kwds):
@@ -37,12 +36,8 @@ class HomebrewDependencyResolver(DependencyResolver):
 
         if self.versionless and self.prefer_version not in [PREFER_VERSION_LATEST]:
             raise Exception(UNKNOWN_PREFER_VERSION_MESSAGE)
-        
-        cellar_root = kwds.get('cellar', None)
-        if cellar_root is None:
-            cellar_root = os.path.join(DEFAULT_HOMEBREW_ROOT, "Cellar")
 
-        self.cellar_root = cellar_root
+        self._init_homebrew(**kwds)
 
     def resolve(self, name, version, type, **kwds):
         if type != "package":
@@ -53,44 +48,9 @@ class HomebrewDependencyResolver(DependencyResolver):
         else:
             return self._find_dep_versioned(name, version)
 
-    def _find_dep_versioned(self, name, version):
-        recipe_path = recipe_cellar_path(self.cellar_root, name, version)
-        if not os.path.exists(recipe_path) or not os.path.isdir(recipe_path):
-            return INDETERMINATE_DEPENDENCY
-
-        commands = build_env_statements(self.cellar_root, recipe_path, relaxed=True)
-        return HomebrewDependency(commands)
-
-    def _find_dep_default(self, name, version):
-        installed_versions = self._installed_versions(name)
-        if not installed_versions:
-            return INDETERMINATE_DEPENDENCY
-
-        # Just grab newest installed version - may make sense some day to find
-        # the linked version instead.
-        default_version = sorted(installed_versions, reverse=True)[0]
-        return self._find_dep_versioned(name, default_version)
-
-    def _installed_versions(self, recipe):
-        recipe_base_path = os.path.join(self.cellar_root, recipe)
-        if not os.path.exists(recipe_base_path):
-            return []
-
-        names = os.path.listdir(recipe_base_path)
-        return filter(lambda n: os.path.isdir(os.path.join(recipe_base_path, n)), names)
-
-
-class HomebrewDependency(Dependency):
-
-    def __init__(self, commands):
-        self.commands = commands
-
-    def shell_commands(self, requirement):
-        return self.commands
-
 
 def _string_as_bool( value ):
     return str( value ).lower() == "true"
 
 
-__all__ = [HomebrewDependencyResolver]
+__all__ = ['HomebrewDependencyResolver']
diff --git a/galaxy/tools/deps/resolvers/modules.py b/galaxy/tools/deps/resolvers/modules.py
index 13bfe1f8f78704b054740d5a16a3d62175db2f9c..a07edcdb7f259110342733ccbe124dbf11c0af70 100644
--- a/galaxy/tools/deps/resolvers/modules.py
+++ b/galaxy/tools/deps/resolvers/modules.py
@@ -153,4 +153,4 @@ class ModuleDependency(Dependency):
 def _string_as_bool( value ):
     return str( value ).lower() == "true"
 
-__all__ = [ModuleDependencyResolver]
+__all__ = ['ModuleDependencyResolver']
diff --git a/galaxy/tools/deps/resolvers/tool_shed_packages.py b/galaxy/tools/deps/resolvers/tool_shed_packages.py
index d72b4d6a20be457007d32c9b2a4067e865f37ef2..334a5b4b7c94f2c29c77861a3755e7e03295bd38 100644
--- a/galaxy/tools/deps/resolvers/tool_shed_packages.py
+++ b/galaxy/tools/deps/resolvers/tool_shed_packages.py
@@ -1,10 +1,11 @@
 from os.path import abspath, join, exists
 
+from .resolver_mixins import UsesInstalledRepositoriesMixin
 from .galaxy_packages import GalaxyPackageDependencyResolver, GalaxyPackageDependency
 from ..resolvers import INDETERMINATE_DEPENDENCY
 
 
-class ToolShedPackageDependencyResolver(GalaxyPackageDependencyResolver):
+class ToolShedPackageDependencyResolver(GalaxyPackageDependencyResolver, UsesInstalledRepositoriesMixin):
     resolver_type = "tool_shed_packages"
 
     def __init__(self, dependency_manager, **kwds):
@@ -12,9 +13,8 @@ class ToolShedPackageDependencyResolver(GalaxyPackageDependencyResolver):
 
     def _find_dep_versioned( self, name, version, type='package', **kwds ):
         installed_tool_dependency = self._get_installed_dependency( name, type, version=version, **kwds )
-        base_path = self.base_path
         if installed_tool_dependency:
-            path = self._get_package_installed_dependency_path( installed_tool_dependency, base_path, name, version )
+            path = self._get_package_installed_dependency_path( installed_tool_dependency, name, version )
             return self._galaxy_package_dep(path, version)
         else:
             return INDETERMINATE_DEPENDENCY
@@ -29,26 +29,17 @@ class ToolShedPackageDependencyResolver(GalaxyPackageDependencyResolver):
                     return GalaxyPackageDependency(dependency.script, dependency.path, None)
         return INDETERMINATE_DEPENDENCY
 
-    def _get_installed_dependency( self, name, type, version=None, **kwds ):
-        installed_tool_dependencies = kwds.get("installed_tool_dependencies", [])
-        for installed_tool_dependency in (installed_tool_dependencies or []):
-            name_and_type_equal = installed_tool_dependency.name == name and installed_tool_dependency.type == type
-            if version:
-                if name_and_type_equal and installed_tool_dependency.version == version:
-                    return installed_tool_dependency
-            else:
-                if name_and_type_equal:
-                    return installed_tool_dependency
-        return None
-
-    def _get_package_installed_dependency_path( self, installed_tool_dependency, base_path, name, version ):
+    def _get_package_installed_dependency_path( self, installed_tool_dependency, name, version ):
         tool_shed_repository = installed_tool_dependency.tool_shed_repository
-        return join( base_path,
-                             name,
-                             version,
-                             tool_shed_repository.owner,
-                             tool_shed_repository.name,
-                             tool_shed_repository.installed_changeset_revision )
+        base_path = self.base_path
+        return join(
+            base_path,
+            name,
+            version,
+            tool_shed_repository.owner,
+            tool_shed_repository.name,
+            tool_shed_repository.installed_changeset_revision
+        )
 
     def _get_set_environment_installed_dependency_script_path( self, installed_tool_dependency, name ):
         tool_shed_repository = installed_tool_dependency.tool_shed_repository
@@ -64,4 +55,5 @@ class ToolShedPackageDependencyResolver(GalaxyPackageDependencyResolver):
             return GalaxyPackageDependency(script, path, None)
         return INDETERMINATE_DEPENDENCY
 
-__all__ = [ToolShedPackageDependencyResolver]
+
+__all__ = ['ToolShedPackageDependencyResolver']
diff --git a/galaxy/util/__init__.py b/galaxy/util/__init__.py
index d69832ef0447d80ae2b896acf7c9f87cf90e23d9..27e5e9c926b0dbebae5d80e8986ea9df4bb5892c 100644
--- a/galaxy/util/__init__.py
+++ b/galaxy/util/__init__.py
@@ -142,6 +142,9 @@ def asbool(obj):
     return bool(obj)
 
 
+string_as_bool = asbool
+
+
 def force_symlink(source, link_name):
     try:
         os.symlink(source, link_name)