Unverified commit 65e702b5 authored by John Chilton, committed by GitHub

Merge pull request #12088 from mvdbeek/fix_fetch_data_dicovered_output_metadata_setting

[21.01] Fix set metadata for primary discovered outputs
parents ae3c49a0 bd5596f3
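
In short, per the diff below: `set_metadata_portable()` now builds `unnamed_id_to_path`, a map from each primary discovered ("unnamed") output's object id to the file it was collected from. The dataset's `external_filename` is resolved through that map, and the eager `set_meta` call is skipped for those datasets so `collect_dynamic_outputs` can set metadata later with more contextual information. The `SessionlessJobContext` construction moves out of the `extended_metadata_collection` branch so the map is always available, and the stdout/stderr `ExpressionContext` is renamed to `expression_context` since `job_context` now refers to the `SessionlessJobContext`. A new API test fetches a BAM file from a URL with an explicit `ext` to cover the fixed path.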
+30 −16
@@ -139,6 +139,7 @@ def set_metadata_portable():
     version_string = ""
 
     export_store = None
+    final_job_state = Job.states.OK
     if extended_metadata_collection:
         tool_dict = metadata_params["tool"]
         stdio_exit_code_dicts, stdio_regex_dicts = tool_dict["stdio_exit_codes"], tool_dict["stdio_regexes"]
@@ -185,7 +186,7 @@
         if os.path.exists(COMMAND_VERSION_FILENAME):
             version_string = open(COMMAND_VERSION_FILENAME).read()
 
-        job_context = ExpressionContext(dict(stdout=tool_stdout, stderr=tool_stderr))
+        expression_context = ExpressionContext(dict(stdout=tool_stdout, stderr=tool_stderr))
 
         # Load outputs.
         export_store = store.DirectoryModelExportStore('metadata/outputs_populated', serialize_dataset_objects=True, for_edit=True, strip_metadata_files=False, serialize_jobs=False)
@@ -195,6 +196,27 @@
         # Remove in 21.09, this should only happen for jobs that started on <= 20.09 and finish now
         import_model_store = None
 
+    job_context = SessionlessJobContext(
+        metadata_params,
+        tool_provided_metadata,
+        object_store,
+        export_store,
+        import_model_store,
+        os.path.join(tool_job_working_directory, "working"),
+        final_job_state=final_job_state,
+    )
+
+    unnamed_id_to_path = {}
+    for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs():
+        destination = unnamed_output_dict["destination"]
+        elements = unnamed_output_dict["elements"]
+        destination_type = destination["type"]
+        if destination_type == 'hdas':
+            for element in elements:
+                filename = element.get('filename')
+                if filename:
+                    unnamed_id_to_path[element['object_id']] = os.path.join(job_context.job_working_directory, filename)
+
     for output_name, output_dict in outputs.items():
         dataset_instance_id = output_dict["id"]
         klass = getattr(galaxy.model, output_dict.get('model_class', 'HistoryDatasetAssociation'))
@@ -219,7 +241,7 @@
         # Same block as below...
         set_meta_kwds = stringify_dictionary_keys(json.load(open(filename_kwds)))  # load kwds; need to ensure our keywords are not unicode
         try:
-            dataset.dataset.external_filename = dataset_filename_override
+            dataset.dataset.external_filename = unnamed_id_to_path.get(dataset_instance_id, dataset_filename_override)
             store_by = output_dict.get("object_store_store_by", legacy_object_store_store_by)
             extra_files_dir_name = "dataset_%s_files" % getattr(dataset.dataset, store_by)
             files_path = os.path.abspath(os.path.join(tool_job_working_directory, "working", extra_files_dir_name))
@@ -235,14 +257,17 @@
                 setattr(dataset.metadata, metadata_name, metadata_file_override)
             if output_dict.get("validate", False):
                 set_validated_state(dataset)
-            set_meta(dataset, file_dict)
+            if dataset_instance_id not in unnamed_id_to_path:
+                # We're going to run through set_metadata in collect_dynamic_outputs with more contextual metadata,
+                # so skip set_meta here.
+                set_meta(dataset, file_dict)
 
             if extended_metadata_collection:
                 meta = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid)
                 if meta:
-                    context = ExpressionContext(meta, job_context)
+                    context = ExpressionContext(meta, expression_context)
                 else:
-                    context = job_context
+                    context = expression_context
 
                 # Lazy and unattached
                 # if getattr(dataset, "hidden_beneath_collection_instance", None):
@@ -301,17 +326,6 @@
 
     if extended_metadata_collection:
         # discover extra outputs...
-
-        job_context = SessionlessJobContext(
-            metadata_params,
-            tool_provided_metadata,
-            object_store,
-            export_store,
-            import_model_store,
-            os.path.join(tool_job_working_directory, "working"),
-            final_job_state=final_job_state,
-        )
-
         output_collections = {}
         for name, output_collection in metadata_params["output_collections"].items():
             output_collections[name] = import_model_store.sa_session.query(HistoryDatasetCollectionAssociation).find(output_collection["id"])
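
Taken together, the changes above do two things: build `unnamed_id_to_path`, mapping each primary discovered output's object id to the file it was collected from, then use that map both to resolve `external_filename` and to skip the eager `set_meta` call, deferring metadata to `collect_dynamic_outputs`. A minimal runnable sketch of that logic, with toy data standing in for Galaxy's real `tool_provided_metadata` and job objects:

```python
import os

def build_unnamed_id_to_path(unnamed_outputs, job_working_directory):
    """Map discovered-output object ids to the files they were collected from."""
    unnamed_id_to_path = {}
    for unnamed_output_dict in unnamed_outputs:
        if unnamed_output_dict["destination"]["type"] != "hdas":
            continue  # only history datasets ("hdas") carry per-element files here
        for element in unnamed_output_dict["elements"]:
            filename = element.get("filename")
            if filename:
                unnamed_id_to_path[element["object_id"]] = os.path.join(
                    job_working_directory, filename)
    return unnamed_id_to_path

# Toy stand-in for tool_provided_metadata.get_unnamed_outputs() (values made up):
unnamed_outputs = [{
    "destination": {"type": "hdas"},
    "elements": [{"object_id": 42, "filename": "1.bam"}],
}]
unnamed_id_to_path = build_unnamed_id_to_path(unnamed_outputs, "/jobs/1/working")

# Mirrors the per-output loop in the diff:
dataset_instance_id = 42
dataset_filename_override = "/objects/dataset_42.dat"

# 1. Prefer the discovered file's path over the generic override.
external_filename = unnamed_id_to_path.get(dataset_instance_id, dataset_filename_override)
assert external_filename == "/jobs/1/working/1.bam"

# 2. Defer metadata for discovered outputs: collect_dynamic_outputs will
#    run set_metadata later with more context (e.g. the requested "ext").
if dataset_instance_id not in unnamed_id_to_path:
    pass  # set_meta(dataset, file_dict) runs here only for regular outputs
```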
+22 −0
@@ -322,6 +322,28 @@ class ToolsUploadTestCase(ApiTestCase):
         assert output0["state"] == "ok"
         assert output1["state"] == "error"
 
+    @uses_test_history(require_new=False)
+    def test_fetch_bam_file_from_url_with_extension_set(self, history_id):
+        destination = {"type": "hdas"}
+        targets = [{
+            "destination": destination,
+            "items": [
+                {
+                    "src": "url",
+                    "url": "https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test-data/1.bam",
+                    "ext": "bam"
+                },
+            ]
+        }]
+        payload = {
+            "history_id": history_id,
+            "targets": json.dumps(targets),
+        }
+        fetch_response = self.dataset_populator.fetch(payload)
+        self._assert_status_code_is(fetch_response, 200)
+        outputs = fetch_response.json()["outputs"]
+        self.dataset_populator.get_history_dataset_details(history_id, dataset=outputs[0], assert_ok=True)
+
     @skip_without_datatype("velvet")
     def test_composite_datatype(self):
         with self.dataset_populator.test_history() as history_id:
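
The new test exercises the fixed path end to end: the fetch API creates a primary discovered output, and the explicit `"ext": "bam"` must survive metadata setting. For reference, a standalone sketch of the request body the test sends (the history id below is a made-up placeholder; in the test it comes from the `@uses_test_history` fixture):

```python
import json

history_id = "f2db41e1fa331b3e"  # hypothetical id; supplied by the test fixture
targets = [{
    "destination": {"type": "hdas"},
    "items": [{
        "src": "url",
        "url": "https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test-data/1.bam",
        "ext": "bam",  # the extension that must be honored for the discovered output
    }],
}]
# The fetch endpoint expects `targets` as a JSON-encoded string field.
payload = {"history_id": history_id, "targets": json.dumps(targets)}
print(json.dumps(payload, indent=2))
```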