lib/galaxy/job_execution/setup.py  +12 −8

@@ -213,22 +213,26 @@ class JobIO(Dictifiable):
         for value in ds.metadata.values():
             if isinstance(value, MetadataFile):
                 filenames.append(value.get_file_name())
+        if ds.dataset and ds.dataset.extra_files_path_exists():
+            filenames.append(ds.dataset.extra_files_path)
         return filenames

-    def get_input_fnames(self) -> List[str]:
+    def get_input_datasets(self) -> List[DatasetInstance]:
         job = self.job
+        return [
+            da.dataset for da in job.input_datasets + job.input_library_datasets if da.dataset
+        ]  # da is JobToInputDatasetAssociation object
+
+    def get_input_fnames(self) -> List[str]:
         filenames = []
-        for da in job.input_datasets + job.input_library_datasets:  # da is JobToInputDatasetAssociation object
-            if da.dataset:
-                filenames.extend(self.get_input_dataset_fnames(da.dataset))
+        for ds in self.get_input_datasets():
+            filenames.extend(self.get_input_dataset_fnames(ds))
         return filenames

     def get_input_paths(self) -> List[DatasetPath]:
-        job = self.job
         paths = []
-        for da in job.input_datasets + job.input_library_datasets:  # da is JobToInputDatasetAssociation object
-            if da.dataset:
-                paths.append(self.get_input_path(da.dataset))
+        for ds in self.get_input_datasets():
+            paths.append(self.get_input_path(ds))
         return paths

     def get_input_path(self, dataset: DatasetInstance) -> DatasetPath:
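In the hunk above, get_input_fnames() and get_input_paths() now share the new get_input_datasets() helper instead of each repeating the association-filtering loop. A minimal standalone sketch of that pattern, using hypothetical stub classes rather than Galaxy's actual models:

from dataclasses import dataclass
from typing import List, Optional


@dataclass
class StubDataset:
    # Hypothetical stand-in for a Galaxy DatasetInstance.
    file_name: str


@dataclass
class StubAssociation:
    # Hypothetical stand-in for a JobToInputDatasetAssociation; dataset may be None.
    dataset: Optional[StubDataset]


def get_input_datasets(associations: List[StubAssociation]) -> List[StubDataset]:
    # Single place that drops associations with no dataset attached.
    return [a.dataset for a in associations if a.dataset]


def get_input_fnames(associations: List[StubAssociation]) -> List[str]:
    # Derived view: filenames are computed from the already-filtered datasets.
    return [ds.file_name for ds in get_input_datasets(associations)]


associations = [StubAssociation(StubDataset("/data/input_1.dat")), StubAssociation(None)]
print(get_input_datasets(associations))  # the empty association is skipped
print(get_input_fnames(associations))    # ['/data/input_1.dat']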
lib/galaxy/jobs/splitters/basic.py  +1 −1

@@ -13,7 +13,7 @@ def set_basic_defaults(job_wrapper):
 def do_split(job_wrapper):
-    if len(job_wrapper.job_io.get_input_fnames()) > 1 or len(job_wrapper.job_io.get_output_fnames()) > 1:
+    if len(job_wrapper.job_io.get_input_datasets()) > 1 or len(job_wrapper.job_io.get_output_fnames()) > 1:
         log.error("The basic splitter is not capable of handling jobs with multiple inputs or outputs.")
         raise Exception("Job Splitting Failed, the basic splitter only handles tools with one input and one output")
     # add in the missing information for splitting the one input and merging the one output
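Worth noting for the change above: as the setup.py hunk shows, get_input_fnames() can return several filenames for a single dataset (metadata files and the extra-files directory), so counting filenames can overstate the number of inputs; the splitter now counts datasets instead. A tiny illustration with made-up values:

# Hypothetical single input dataset that expands to several staged files.
input_datasets = ["dataset_1"]
input_fnames = ["dataset_1.dat", "dataset_1_metadata.xml", "dataset_1_files/"]

print(len(input_fnames) > 1)    # True  -> the old check would reject a single-input job
print(len(input_datasets) > 1)  # False -> the new check counts actual inputs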
lib/galaxy/managers/users.py  +1 −1

@@ -616,7 +616,7 @@ class UserManager(base.ModelManager, deletable.PurgableManagerMixin):
         reset_user = get_user_by_email(trans.sa_session, email, self.app.model.User)
         if not reset_user and email != email.lower():
             reset_user = self._get_user_by_email_case_insensitive(trans.sa_session, email)
-        if reset_user:
+        if reset_user and not reset_user.deleted:
             prt = self.app.model.PasswordResetToken(reset_user)
             trans.sa_session.add(prt)
             with transaction(trans.sa_session):
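A minimal sketch of the tightened guard above, with a hypothetical stub standing in for Galaxy's User model:

from dataclasses import dataclass
from typing import Optional


@dataclass
class StubUser:
    # Hypothetical stand-in for a Galaxy User row.
    email: str
    deleted: bool


def should_issue_reset_token(reset_user: Optional[StubUser]) -> bool:
    # Mirrors the new condition: the account must exist and must not be deleted.
    return reset_user is not None and not reset_user.deleted


print(should_issue_reset_token(StubUser("active@example.org", deleted=False)))  # True
print(should_issue_reset_token(StubUser("removed@example.org", deleted=True)))  # False
print(should_issue_reset_token(None))                                           # False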
lib/galaxy/managers/workflows.py  +3 −3

@@ -1416,7 +1416,7 @@ class WorkflowContentsManager(UsesAnnotations):
         If `allow_upgrade`, the workflow and sub-workflows might use updated tool versions when refactoring.
         """
         annotation_str = ""
-        tag_str = ""
+        tags_list = []
         annotation_owner = None
         if stored is not None:
             if stored.id:
@@ -1427,7 +1427,7 @@ class WorkflowContentsManager(UsesAnnotations):
                     or self.get_item_annotation_str(trans.sa_session, annotation_owner, stored)
                     or ""
                 )
-                tag_str = stored.make_tag_string_list()
+                tags_list = stored.make_tag_string_list()
             else:
                 # dry run with flushed workflow objects, just use the annotation
                 annotations = stored.annotations
@@ -1440,7 +1440,7 @@ class WorkflowContentsManager(UsesAnnotations):
         data["format-version"] = "0.1"
         data["name"] = workflow.name
         data["annotation"] = annotation_str
-        data["tags"] = tag_str
+        data["tags"] = tags_list
         if workflow.uuid is not None:
             data["uuid"] = str(workflow.uuid)
         steps: Dict[int, Dict[str, Any]] = {}
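Besides the rename, the default above changes from an empty string to an empty list, so data["tags"] in the exported workflow dictionary keeps the same type whether or not the stored workflow has tags. A small sketch of the resulting shape, with hypothetical tag values:

# Hypothetical tag values for illustration.
exported_with_tags = {"tags": ["rna-seq", "qc"]}
exported_without_tags = {"tags": []}  # previously this default leaked through as ""

print(exported_with_tags, exported_without_tags)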
lib/galaxy/workflow/refactor/execute.py  +3 −0

@@ -531,6 +531,9 @@ class WorkflowRefactorExecutor:
                 if upgrade_input["name"] == input_name:
                     matching_input = upgrade_input
                     break
+                elif step.when_expression and f"inputs.{input_name}" in step.when_expression:
+                    # TODO: eventually track step inputs more formally
+                    matching_input = upgrade_input

             # In the future check parameter type, format, mapping status...
             if matching_input is None:
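The added elif treats an input as still referenced when the upgraded step's when_expression mentions it as inputs.<name>, even though (per the TODO) such references are not yet tracked as formal step inputs. A standalone sketch of just that check, with hypothetical names:

# Hypothetical input name and conditional-execution (when) expression.
input_name = "threshold"
when_expression = "inputs.threshold > 0.5"

# Same test the new branch adds: the input counts as referenced if the
# when expression mentions it as inputs.<name>.
referenced_in_when = bool(when_expression) and f"inputs.{input_name}" in when_expression
print(referenced_in_when)  # True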