Commit e3af76e3 authored by Cage, Gregory's avatar Cage, Gregory
Browse files

Merge branch 'update-dev-with-upstream-interactive-tool-updates' into 'dev'

Cherry-pick interactive tool updates from upstream Galaxy

See merge request !70
parents de5ebecd 1ac8ec77
Loading
Loading
Loading
Loading
Loading
+13 −2
Original line number Diff line number Diff line
@@ -2,18 +2,29 @@ gravity:
  gx_it_proxy:
    enable: true
    port: 4002

  #handlers:
  #  handler:
  #    processes: 3
  #    pools:
  #      - job-handlers
  #      - workflow-schedulers

galaxy:
  interactivetools_enable: true
  interactivetools_map: database/interactivetools_map.sqlite

  # outputs_to_working_directory will provide you with a better level of isolation. It is highly recommended to set
  # this parameter with InteractiveTools.
  outputs_to_working_directory: true
  interactivetools_map: database/interactivetools_map.sqlite

  # `galaxy_infrastructure_url` needs to be reachable from IT containers.
  # For local development you can map arbitrary hostnames. See `job_conf.xml.interactivetools`
  # For local development you can map arbitrary hostnames. See `job_conf.yml.interactivetools`
  # for an example.
  # In the local development case you should use the `http` protocol (e.g http://localhost:8080) to access
  # your Galaxy, so saving notebooks doesn't fail due to invalid certificates.
  galaxy_infrastructure_url: http://localhost:8080

  # Do not set the following 2 options if you are using an upstream proxy server like nginx
  interactivetools_upstream_proxy: false
  interactivetools_proxy_host: localhost:4002
+58 −0
Original line number Diff line number Diff line
## A sample job config for InteractiveTools using local runner. ##

runners:
  local:
    load: galaxy.jobs.runners.local:LocalJobRunner
    workers: 4

# Uncomment if dynamic handlers are defined in "gravity:handlers" section in galaxy.yml
#
#handling:
#  assign:
#    - db-skip-locked

execution:
  default: docker_dispatch
  environments:
    local:
      runner: local

    docker_local:
      runner: local
      docker_enabled: true
      #docker_volumes: $defaults,/mnt/galaxyData/libraries:ro,/mnt/galaxyData/indices:ro
      #docker_volumes_from: parent_container_name
      #docker_memory: 24G
      #docker_sudo: false
      #docker_sudo_cmd: /usr/bin/sudo -extra_param
      #docker_net: bridge
      #docker_auto_rm: true
      #docker_set_user: $UID
      # Explicit null (equivalent to leaving the value empty): do not force a
      # user inside the container.
      docker_set_user: null

      # InteractiveTools do need real hostnames or URLs to work - simply specifying IPs will not work.
      # If you develop interactive tools on your 'localhost' and don't have a proper domain name
      # you need to tell all Docker containers a hostname where Galaxy is running.
      # This can be done via the --add-host parameter during the `docker run` command.
      # 'localhost' here is an arbitrary hostname that matches the IP address of your
      # Galaxy host. Make sure this hostname ('localhost') is also set in your galaxy.yml file, e.g.
      # `galaxy_infrastructure_url: http://localhost:8080`.
      #docker_run_extra_arguments: --add-host localhost:host-gateway

      #docker_cmd: /usr/local/custom_docker/docker
      #docker_host:
      #docker_container_id_override: busybox:ubuntu-14.04
      #docker_default_container_id: busybox:ubuntu-14.04
      #require_container: true
      #container_monitor: true
      #container_monitor_result: file
      #container_monitor_command: python /path/to/galaxy/lib/galaxy_ext/container_monitor/monitor.py
      #container_monitor_get_ip_method: null
      #container_resolvers_config_file: null
      #container_resolvers:

    docker_dispatch:
      runner: dynamic
      type: docker_dispatch
      docker_destination_id: docker_local
      default_destination_id: local
 No newline at end of file
+38 −0
Original line number Diff line number Diff line
## A sample job config for InteractiveTools using local runner, configured for Podman ##

runners:
  local:
    load: galaxy.jobs.runners.local:LocalJobRunner
    workers: 4

# Uncomment if dynamic handlers are defined in "gravity:handlers" section in galaxy.yml
#
#handling:
#  assign:
#    - db-skip-locked

execution:
  default: docker_dispatch
  environments:
    local:
      runner: local

    docker_local:
      runner: local
      docker_enabled: true

      # Explicit null (equivalent to leaving the value empty): do not force a
      # user inside the container.
      docker_set_user: null

      # For containers running as root (on the inside)
      docker_run_extra_arguments: --security-opt label=disable
      # Should work for containers with non-root user (on the inside)
      #docker_run_extra_arguments: --userns=keep-id --security-opt label=disable

      # Change to home directory of the galaxy user, not the directory of the galaxy installation
      docker_cmd: HOME="/home/galaxy"; podman

    docker_dispatch:
      runner: dynamic
      type: docker_dispatch
      docker_destination_id: docker_local
      default_destination_id: local
 No newline at end of file
+0 −12
Original line number Diff line number Diff line
@@ -1962,18 +1962,6 @@
:Type: str


~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``interactivetools_shorten_url``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

:Description:
    Shorten the uuid portion of the subdomain or path for interactive
    tools. Especially useful for avoiding the need for wildcard
    certificates by keeping subdomain under 63 chars
:Default: ``false``
:Type: bool


~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``retry_interactivetool_metadata_internally``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+2 −10
Original line number Diff line number Diff line
@@ -376,16 +376,8 @@ gravity:
```

In this example 4 processes will be started in total:
3 processes will act as job handlers and workflow schedulers, and one process will be dedicated to handling jobs for the `special` tag only. With the `job_conf.xml` configuration above these would be jobs created by the `test1` tool.
You can omit the `pools` argument, this will then default to:

```yaml
        ...
        pools:
          - job-handlers
          - workflow-schedulers
        ...
```
3 processes will act as job handlers and workflow schedulers, and one process will be dedicated to handling jobs for the 
`special` tag only. With the `job_conf.xml` configuration above these would be jobs created by the `test1` tool.

If you omit the `processes` argument this will default to a single process.
You can further customize the handler names using the `name_template` section,
Loading