Unverified commit 40594831, authored by Matt Pryor, committed by GitHub
Browse files

Merge pull request #167 from ESGF/future-architecture

Logging changes
parents 332bfd10 30a92ac6
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -95,6 +95,13 @@ build:jre:
    CONTEXT_DIR: $CI_PROJECT_DIR/images/jre
  needs: ["build:base"]

# Build the logstash image from images/logstash; depends on the base image
# built in the "build:base" job.
build:logstash:
  extends: .docker-build
  stage: build-2
  variables:
    CONTEXT_DIR: $CI_PROJECT_DIR/images/logstash
  needs: ["build:base"]

build:rsync:
  extends: .docker-build
  stage: build-2
+10 −8
Original line number Diff line number Diff line
@@ -11,17 +11,19 @@ Vagrant.configure(2) do |config|
  config.vm.network :private_network, ip: "192.168.100.100"

  # Set some virtualbox flags to improve time synchronisation between host and guest
  config.vm.provider :virtualbox do |virtualbox|
  config.vm.provider :virtualbox do |v|
    # 512MB RAM is not really enough
    v.memory = 4096
    # sync time every 10 seconds
    virtualbox.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-interval", 10000 ]
    v.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-interval", 10000 ]
    # adjustments if drift > 100 ms
    virtualbox.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-min-adjust", 100 ]
    v.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-min-adjust", 100 ]
    # sync time on restore
    virtualbox.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-set-on-restore", 1 ]
    v.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-set-on-restore", 1 ]
    # sync time on start
    virtualbox.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-set-start", 1 ]
    v.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-set-start", 1 ]
    # at 1 second drift, the time will be set and not "smoothly" adjusted
    virtualbox.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold", 1000 ]
    v.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold", 1000 ]
  end

  config.vm.provision :shell, inline: <<-SHELL
@@ -52,7 +54,7 @@ Vagrant.configure(2) do |config|
      {
        name: "CORDEX",
        path: "esg_cordex",
        location: "/test_data/group_workspaces/jasmin2/cp4cds1/data/c3s-cordex"
        location: "/test_data/group_workspaces/jasmin2/cp4cds1/vol1/data/c3s-cordex"
      }
    ]
    solr_replicas = [
@@ -66,7 +68,7 @@ Vagrant.configure(2) do |config|
      "index" => ["default"],
      "all:vars" => {
        "hostname" => "192.168.100.100.nip.io",
        "image_tag" => "issue-115-esg-search",
        "image_tag" => "future-architecture",
      },
      "data:vars" => {
        "data_mounts" => "#{data_mounts.to_json}",
+36 −0
Original line number Diff line number Diff line
@@ -50,6 +50,7 @@ data_datasets: []
#
# If a special user or groups are required to access data, they can be set here, which
# will result in the data node containers launching with the specified UID and GIDs
#
# By default, use the ESGF user
data_security_context_user: "1000"
# Due to permissions set inside the container, the user *must* belong to group 1000
@@ -73,3 +74,38 @@ fileserver_image_prefix: "{{ image_prefix }}"
fileserver_image_tag: "{{ image_tag }}"
fileserver_image_pull: "{{ image_pull }}"
fileserver_image_repository: nginx

# Configuration for the logstash stats exporter
# Whether the logstash log-shipping container is deployed (off by default)
logstash_enabled: false
# Settings for the logstash stats exporter image
logstash_image_prefix: "{{ image_prefix }}"
logstash_image_tag: "{{ image_tag }}"
logstash_image_pull: "{{ image_pull }}"
logstash_image_repository: logstash
# Settings for the stats server that logs are shipped to
logstash_stats_server: proxy02.cmcc.it
logstash_stats_port: 5044
# PEM certificate for the stats server — presumably used to verify the TLS
# connection; confirm against the stats pipeline configuration.
# NOTE(review): this is a fixed self-issued certificate with a finite validity
# window — it will need rotating when it expires.
logstash_stats_certificate: |
  -----BEGIN CERTIFICATE-----
  MIIDyjCCArICCQDSgl2omUxgLTANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMC
  SVQxDjAMBgNVBAgMBUl0YWx5MQ4wDAYDVQQHDAVMZWNjZTEYMBYGA1UECgwPQ01D
  QyBGb3VuZGF0aW9uMRgwFgYDVQQLDA9DTUNDIEZvdW5kYXRpb24xGDAWBgNVBAMM
  D3Byb3h5MDIuY21jYy5pdDEpMCcGCSqGSIb3DQEJARYac3RhdGlzdGljcy1zdXBw
  b3J0QGNtY2MuaXQwHhcNMjEwMTE4MTQ0MzUxWhcNMjIwMTE4MTQ0MzUxWjCBpjEL
  MAkGA1UEBhMCSVQxDjAMBgNVBAgMBUl0YWx5MQ4wDAYDVQQHDAVMZWNjZTEYMBYG
  A1UECgwPQ01DQyBGb3VuZGF0aW9uMRgwFgYDVQQLDA9DTUNDIEZvdW5kYXRpb24x
  GDAWBgNVBAMMD3Byb3h5MDIuY21jYy5pdDEpMCcGCSqGSIb3DQEJARYac3RhdGlz
  dGljcy1zdXBwb3J0QGNtY2MuaXQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
  AoIBAQDgvKp/26Raq/BNtycxGk9mWfC/o4XO6GiFk7vSB2WrinMWR60vwwoFxx9u
  cAcm4YsagvC/RIJ7uRZuNDMZloPhA/Eb8qzA9lvLi9msf3W8a5GPKI+KWQ8xoRzt
  aOGCApsFr4Kd2KYJNx/KvcR8aVOOTpHR39aA9ne8L25HpSA96Uw8HwX99yO5Dlh7
  VDC6u8+j3s57jxmGdpCq9bKJzpClnueLjy83L3bnumEhLN1j7rhSUANuBBoPyibS
  T3ngKZJZ/ZdEb0TzAwltv11XvepFcy1SAMoOjcFhfpD1EIIGnOdefvLc0DiB6mD5
  zwTWDlAWANQmZDdU1YsxIsdW8OHfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAM/u
  gs9qOKzVLiaSqIc2N+++x9wsPnzr0sSXR5liAtpG2PbnCcl9Q9ajsFTJOloaAYRC
  rXX7GaeLhgjZvtP45D6UDJB0NwfAam5udH0wen8DM2eG9y/0rvSyfmg4mARZcnVE
  eXfihMNBiA/+01NrPdNCdBfeLL9aAu2uigek6Uyyk5a7yXck0rJOpFTKYX3XS2q6
  i37PvE8vpwCoFJ1qaxq/i9pQQfCWUnY+Dt+Odn7+xZn2B1M0GOTfgLbLhKH3i8jP
  pi/9dHkTcvpE8o52xb8CbYFfL2cXp5uGqwxB2grNJFIgxfaVXZbL2NxSoGlN5scU
  82kdgbLq+v/uzhNxT7E=
  -----END CERTIFICATE-----
+36 −1
Original line number Diff line number Diff line
@@ -14,6 +14,36 @@
    src: fileserver.conf.j2
    dest: /esg/config/fileserver/fileserver.conf

# If the access log exporter is enabled, we use a FIFO pipe for the Nginx access log
# This will be followed by the exporter to get the access logs
# Unfortunately, in order to get the container to pick this up we have to splat
# the whole logs directory, so we also need to set up the other logs as symlinks to stdout/err
- name: Set up fileserver logs directory
  block:
    - name: Ensure fileserver logs directory exists
      file:
        path: /esg/logs/fileserver
        state: directory

    # Keep error output visible in `docker logs` even though the whole
    # directory is bind-mounted over /var/log/nginx
    - name: Link error log to stderr
      file:
        path: /esg/logs/fileserver/error.log
        state: link
        src: /dev/stderr
        # Replace any pre-existing regular error.log with the symlink
        force: yes

    - name: Make the access log pipe
      command: mkfifo /esg/logs/fileserver/access.log
      args:
        # Makes the task idempotent: skip if the pipe already exists
        creates: /esg/logs/fileserver/access.log

    # Recursive chown so the non-root container user can open the FIFO
    # and the error-log symlink
    - name: Transfer ownership of logs to security context user
      file:
        path: /esg/logs/fileserver
        recurse: yes
        owner: "{{ data_security_context_user }}"
  when: logstash_enabled | bool

- name: Start fileserver container
  docker_container:
    name: fileserver
@@ -33,9 +63,14 @@
    volumes: >-
      [
        "/esg/config/fileserver:/etc/nginx/conf.d:ro",
        {# If logstash is enabled, mount the log directory.
           Jinja comment syntax is required inside this folded block scalar:
           a YAML '#' line here is scalar *content*, not a comment, and would
           be rendered into the value and corrupt the resulting list. #}
        {% if logstash_enabled %}
        "/esg/logs/fileserver:/var/log/nginx",
        {% endif %}
        {# Then the data mounts #}
        {% for mount in data_mounts %}
        "{{ mount.host_path }}:{{ mount.mount_path }}:ro",
        {% endfor %}
      ]
    state: started
    restart: yes
    recreate: yes
+46 −0
Original line number Diff line number Diff line
---
# Deploy the logstash container that follows the THREDDS and fileserver logs
# under /esg/logs and ships them to the remote stats server.

- name: Create Docker network
  docker_network:
    name: esgf

- name: Ensure logstash config directories exist
  file:
    path: /esg/config/logstash/{{ item }}
    state: directory
  loop:
    - certs
    - pipelines

- name: Install certificate for stats server
  copy:
    content: "{{ logstash_stats_certificate }}"
    dest: /esg/config/logstash/certs/lumberjack.cert

- name: Install stats logstash configuration
  template:
    src: stats.conf.j2
    dest: /esg/config/logstash/pipelines/stats.conf

# Configure logstash to follow the THREDDS and fileserver logs
- name: Start logstash container
  docker_container:
    name: logstash
    image: "{{ logstash_image_prefix }}/{{ logstash_image_repository }}:{{ logstash_image_tag }}"
    pull: "{{ logstash_image_pull }}"
    detach: yes
    restart_policy: unless-stopped
    networks:
      - name: esgf
    networks_cli_compatible: yes
    # Run as the data security-context user so the container can read the
    # chowned log directory (notably the fileserver access-log FIFO).
    # NOTE(review): data_security_context_groups is not defined in the
    # defaults shown alongside this role — confirm it is set elsewhere.
    user: "{{ data_security_context_user }}"
    groups: "{{ data_security_context_groups }}"
    volumes:
      # Logstash certificates
      - "/esg/config/logstash/certs:/etc/logstash/certs:ro"
      # Logstash configuration
      - "/esg/config/logstash/pipelines:/etc/logstash/conf.d:ro"
      # Log directory
      - "/esg/logs:/esg/logs:ro"
    state: started
    recreate: yes
Loading