Unverified Commit 13f9977d authored by mvdbeek's avatar mvdbeek
Browse files

Merge branch 'release_24.0' into release_24.1

parents e1599f3d 31c0eba7
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -286,7 +286,7 @@ class JobManager:

    def stop(self, job, message=None):
        """Stop an unfinished job, marking it (and its outputs) deleted.

        :param job: the Job to stop; a no-op if the job already finished.
        :param message: optional user-facing text forwarded to
            ``job.mark_deleted`` to record why the job was stopped
            (``mark_deleted`` supplies a default when this is None).
        """
        if not job.finished:
            # Pass the message through so mark_deleted can record it on the
            # job and its output datasets.
            job.mark_deleted(self.app.config.track_jobs_in_database, message)
            session = self.app.model.session
            with transaction(session):
                session.commit()
+15 −8
Original line number Diff line number Diff line
@@ -1880,7 +1880,7 @@ class Job(Base, JobLike, UsesCreateAndUpdateTime, Dictifiable, Serializable):
        else:
            self.state = Job.states.STOPPED

    def mark_deleted(self, track_jobs_in_database=False):
    def mark_deleted(self, track_jobs_in_database=False, message=None):
        """
        Mark this job as deleted, and mark any output datasets as discarded.
        """
@@ -1891,7 +1891,8 @@ class Job(Base, JobLike, UsesCreateAndUpdateTime, Dictifiable, Serializable):
            self.state = Job.states.DELETING
        else:
            self.state = Job.states.DELETED
        self.info = "Job output deleted by user before job completed."
        info = message or "Job output deleted by user before job completed."
        self.info = info
        for jtoda in self.output_datasets:
            output_hda = jtoda.dataset
            output_hda.deleted = True
@@ -1901,7 +1902,7 @@ class Job(Base, JobLike, UsesCreateAndUpdateTime, Dictifiable, Serializable):
                shared_hda.deleted = True
                shared_hda.blurb = "deleted"
                shared_hda.peek = "Job deleted"
                shared_hda.info = "Job output deleted by user before job completed"
                shared_hda.info = info

    def mark_failed(self, info="Job execution failed", blurb=None, peek=None):
        """
@@ -4905,11 +4906,16 @@ class DatasetInstance(RepresentById, UsesCreateAndUpdateTime, _HasTable):
    def display_info(self):
        return self.datatype.display_info(self)

    def get_converted_files_by_type(self, file_type, include_errored=False):
        """Return the first usable implicit conversion of this dataset to ``file_type``.

        Scans ``implicitly_converted_datasets`` for a non-deleted association of
        the requested type whose target dataset is itself not deleted and is in
        an acceptable state.

        :param file_type: target datatype extension of the conversion.
        :param include_errored: when True, also accept converted datasets in the
            ERROR state — callers can then let the user retry or inspect the error.
        :returns: the converted dataset (HDA or LDDA), or None if no usable
            conversion exists.
        """
        for assoc in self.implicitly_converted_datasets:
            if not assoc.deleted and assoc.type == file_type:
                item = assoc.dataset or assoc.dataset_ldda
                # Widen the accepted states only when the caller opted in.
                valid_states = (
                    (Dataset.states.ERROR, *Dataset.valid_input_states)
                    if include_errored
                    else Dataset.valid_input_states
                )
                if not item.deleted and item.state in valid_states:
                    return item
        return None

@@ -4924,7 +4930,7 @@ class DatasetInstance(RepresentById, UsesCreateAndUpdateTime, _HasTable):
            depends_list = []
        return {dep: self.get_converted_dataset(trans, dep) for dep in depends_list}

    def get_converted_dataset(self, trans, target_ext, target_context=None, history=None):
    def get_converted_dataset(self, trans, target_ext, target_context=None, history=None, include_errored=False):
        """
        Return converted dataset(s) if they exist, along with a dict of dependencies.
        If not converted yet, do so and return None (the first time). If unconvertible, raise exception.
@@ -4936,7 +4942,7 @@ class DatasetInstance(RepresentById, UsesCreateAndUpdateTime, _HasTable):
        converted_dataset = self.get_metadata_dataset(target_ext)
        if converted_dataset:
            return converted_dataset
        converted_dataset = self.get_converted_files_by_type(target_ext)
        converted_dataset = self.get_converted_files_by_type(target_ext, include_errored=include_errored)
        if converted_dataset:
            return converted_dataset
        deps = {}
@@ -5160,7 +5166,8 @@ class DatasetInstance(RepresentById, UsesCreateAndUpdateTime, _HasTable):

        # Get converted dataset; this will start the conversion if necessary.
        try:
            converted_dataset = self.get_converted_dataset(trans, target_type)
            # Include errored datasets here, we let the user choose to retry or view the error
            converted_dataset = self.get_converted_dataset(trans, target_type, include_errored=True)
        except NoConverterException:
            return self.conversion_messages.NO_CONVERTER
        except ConverterDependencyException as dep_error:
+18 −0
Original line number Diff line number Diff line
@@ -1062,6 +1062,24 @@ steps:
        self._assert_status_code_is(empty_search_response, 200)
        assert len(empty_search_response.json()) == 0

    @pytest.mark.require_new_history
    def test_delete_job_with_message(self, history_id):
        """Deleting a job with a message should stamp that message on its outputs."""
        # Run a simple tool so there is a live job to delete.
        input_dataset_id = self.__history_with_ok_dataset(history_id)
        tool_inputs = json.dumps({"input1": {"src": "hda", "id": input_dataset_id}})
        run_payload = self._search_payload(history_id=history_id, tool_id="cat1", inputs=tool_inputs)
        run_response = self._post("tools", data=run_payload).json()
        job_id = run_response["jobs"][0]["id"]
        output_dataset_id = run_response["outputs"][0]["id"]
        # Delete the job, supplying a custom message.
        expected_message = "test message"
        delete_response = self._delete(f"jobs/{job_id}", data={"message": expected_message}, json=True)
        self._assert_status_code_is(delete_response, 200)
        # The output dataset must be deleted and carry the message in misc_info.
        dataset_details = self._get(f"histories/{history_id}/contents/{output_dataset_id}").json()
        assert dataset_details["deleted"] is True
        assert dataset_details["misc_info"] == expected_message

    @pytest.mark.require_new_history
    def test_destination_params(self, history_id):
        dataset_id = self.__history_with_ok_dataset(history_id)