Unverified Commit 368eeaa1 authored by Marius van den Beek's avatar Marius van den Beek Committed by GitHub
Browse files

Merge pull request #18462 from mvdbeek/add_input_extra_files_path

[24.0] Add input extra files to `get_input_fnames`
parents 6b8c7f75 7eaa54a0
Loading
Loading
Loading
Loading
+12 −8
Original line number Diff line number Diff line
@@ -213,22 +213,26 @@ class JobIO(Dictifiable):
        for value in ds.metadata.values():
            if isinstance(value, MetadataFile):
                filenames.append(value.get_file_name())
        if ds.dataset and ds.dataset.extra_files_path_exists():
            filenames.append(ds.dataset.extra_files_path)
        return filenames

    def get_input_datasets(self) -> List[DatasetInstance]:
        """Return the dataset instances used as inputs to this job.

        Covers both history inputs (``input_datasets``) and library inputs
        (``input_library_datasets``); associations with no dataset attached
        are skipped.
        """
        job = self.job
        # da is a JobToInputDatasetAssociation object; the `if da.dataset`
        # guard drops associations whose dataset was never set.
        return [
            da.dataset for da in job.input_datasets + job.input_library_datasets if da.dataset
        ]

    def get_input_fnames(self) -> List[str]:
        """Return all filesystem paths associated with the job's input datasets.

        For each input dataset this includes the primary file plus whatever
        extra paths ``get_input_dataset_fnames`` reports (metadata files,
        extra_files_path).
        """
        filenames: List[str] = []
        for ds in self.get_input_datasets():
            filenames.extend(self.get_input_dataset_fnames(ds))
        return filenames

    def get_input_paths(self) -> List[DatasetPath]:
        """Return a ``DatasetPath`` for each input dataset of the job.

        Delegates dataset discovery to ``get_input_datasets`` and path
        construction to ``get_input_path``.
        """
        # NOTE: the unused `job = self.job` local left over from the earlier
        # implementation has been removed; iteration now goes through
        # get_input_datasets() so history and library inputs are handled in
        # one place.
        return [self.get_input_path(ds) for ds in self.get_input_datasets()]

    def get_input_path(self, dataset: DatasetInstance) -> DatasetPath:
+1 −1
Original line number Diff line number Diff line
@@ -13,7 +13,7 @@ def set_basic_defaults(job_wrapper):


def do_split(job_wrapper):
    """Split a job's single input for parallel execution.

    The basic splitter only supports exactly one input dataset and one
    output; anything else is logged and rejected with an exception.

    :raises Exception: if the job has multiple inputs or multiple outputs.
    """
    if len(job_wrapper.job_io.get_input_datasets()) > 1 or len(job_wrapper.job_io.get_output_fnames()) > 1:
        log.error("The basic splitter is not capable of handling jobs with multiple inputs or outputs.")
        raise Exception("Job Splitting Failed, the basic splitter only handles tools with one input and one output")
    # add in the missing information for splitting the one input and merging the one output