diff --git a/aws_jira_dc_node.yml b/aws_jira_dc_node.yml
index 41b7be6..b0470fb 100644
--- a/aws_jira_dc_node.yml
+++ b/aws_jira_dc_node.yml
@@ -24,5 +24,6 @@
     - role: product_common
     - role: product_install
    - role: database_init
+    - role: restore_backups
     - role: jira_config
     - role: product_startup
diff --git a/bin/install-ansible b/bin/install-ansible
index b515cdf..196562e 100755
--- a/bin/install-ansible
+++ b/bin/install-ansible
@@ -6,8 +6,12 @@ source /etc/os-release
 if [[ $ID = "amzn" ]]; then
     yum install -y \
         python3-devel \
-        python3-pip
+        python3-pip \
+        python2-boto3 \
+        python2-botocore
+
 else
+    # FIXME: Currently assumes Debian-based
     apt-get update && \
     apt-get install -y \
         python3-dev \
diff --git a/bitbucket-pipelines.yml b/bitbucket-pipelines.yml
index ec993c6..d28b140 100644
--- a/bitbucket-pipelines.yml
+++ b/bitbucket-pipelines.yml
@@ -14,7 +14,7 @@ pipelines:
       - step:
           name: Pre Parallelization stage
           script:
-            - echo "Running tests in 28 batches"
+            - echo "Running tests in 29 batches"
       - step:
           name: Check if number of batches match actual number of scenarios
           script:
@@ -251,4 +251,12 @@ pipelines:
             - apt-get update && ./bin/install-ansible --dev
             - ./bin/run-tests-in-batches --batch 28
+      - step:
+          name: Molecule Test Batch - 29
+          services:
+            - docker
+          script:
+            - apt-get update && ./bin/install-ansible --dev
+            - ./bin/run-tests-in-batches --batch 29
+
diff --git a/group_vars/aws_node_local.yml b/group_vars/aws_node_local.yml
index aff781d..bb380b9 100644
--- a/group_vars/aws_node_local.yml
+++ b/group_vars/aws_node_local.yml
@@ -132,3 +132,4 @@ atl_rds_subnet_group_name: "{{ lookup('env', 'ATL_RDS_SUBNET_GROUP_NAME') }}"
 atl_rds_security_group: "{{ lookup('env', 'ATL_RDS_SECURITY_GROUP') }}"

 atl_backup_manifest_url: "{{ lookup('env', 'ATL_BACKUP_MANIFEST_URL') }}"
+atl_restore_required: "{{ atl_backup_manifest_url is defined and atl_backup_manifest_url != '' }}"
diff --git a/roles/database_init/tasks/main.yml b/roles/database_init/tasks/main.yml
index 99638f7..8827f99 100644
--- a/roles/database_init/tasks/main.yml
+++ b/roles/database_init/tasks/main.yml
@@ -1,40 +1,40 @@
 ---
-- name: Create application DB user
-  postgresql_user:
-    login_host: "{{ atl_db_host }}"
-    login_user: "{{ atl_db_root_user }}"
-    login_password: "{{ atl_db_root_password }}"
-    port: "{{ atl_db_port }}"
-    name: "{{ atl_jdbc_user }}"
-    password: "{{ atl_jdbc_password }}"
-    expires: 'infinity'
-  tags:
-    - new_only
+- block:

-- name: Update root privs for new user
-  postgresql_privs:
-    login_host: "{{ atl_db_host }}"
-    login_user: "{{ atl_db_root_user }}"
-    login_password: "{{ atl_db_root_password }}"
-    database: postgres
-    roles: "{{ atl_db_root_user }}"
-    objs: "{{ atl_jdbc_user }}"
-    type: group
-  tags:
-    - new_only
+    - name: Create application DB user
+      postgresql_user:
+        login_host: "{{ atl_db_host }}"
+        login_user: "{{ atl_db_root_user }}"
+        login_password: "{{ atl_db_root_password }}"
+        port: "{{ atl_db_port }}"
+        name: "{{ atl_jdbc_user }}"
+        password: "{{ atl_jdbc_password }}"
+        expires: 'infinity'
+
+    - name: Update root privs for new user
+      postgresql_privs:
+        login_host: "{{ atl_db_host }}"
+        login_user: "{{ atl_db_root_user }}"
+        login_password: "{{ atl_db_root_password }}"
+        database: postgres
+        roles: "{{ atl_db_root_user }}"
+        objs: "{{ atl_jdbc_user }}"
+        type: group
+
+    - name: Create new application database
+      postgresql_db:
+        login_host: "{{ atl_db_host }}"
+        login_user: "{{ atl_db_root_user }}"
+        login_password: "{{ atl_db_root_password }}"
+        port: "{{ atl_db_port }}"
+        name: "{{ atl_jdbc_db_name }}"
+        owner: "{{ atl_jdbc_user }}"
+        encoding: "{{ atl_jdbc_encoding }}"
+        lc_collate: "{{ atl_jdbc_collation }}"
+        lc_ctype: "{{ atl_jdbc_ctype }}"
+        template: "{{ atl_jdbc_template }}"
+      register: db_created

-- name: Create application database
-  postgresql_db:
-    login_host: "{{ atl_db_host }}"
-    login_user: "{{ atl_db_root_user }}"
-    login_password: "{{ atl_db_root_password }}"
-    port: "{{ atl_db_port }}"
-    name: "{{ atl_jdbc_db_name }}"
-    owner: "{{ atl_jdbc_user }}"
-    encoding: "{{ atl_jdbc_encoding }}"
-    lc_collate: "{{ atl_jdbc_collation }}"
-    lc_ctype: "{{ atl_jdbc_ctype }}"
-    template: "{{ atl_jdbc_template }}"
   tags:
     - new_only
diff --git a/roles/fetch_backups/tasks/main.yml b/roles/fetch_backups/tasks/main.yml
deleted file mode 100644
index 45d7e1f..0000000
--- a/roles/fetch_backups/tasks/main.yml
+++ /dev/null
@@ -1,69 +0,0 @@
----
-
-# This role will attempt to fetch and load the backup manifest from a
-# remote HTTP or S3 URL. On successful completion the contents of JSON
-# or YAML document will be in the var `atl_backup_manifest`.
-
-- block:
-
-    - name: Ensure temp directory is present
-      file:
-        path: "{{ atl_installer_temp }}"
-        state: directory
-        mode: 0750
-        owner: "{{ atl_product_user }}"
-        group: "{{ atl_product_user }}"
-      changed_when: false  # For Molecule idempotence check
-
-    - name: Parse the manifest URL
-      set_fact:
-        atl_backup_manifest_url: "{{ atl_backup_manifest_url | urlsplit }}"
-
-    - name: Extract manifest file information
-      set_fact:
-        atl_backup_manifest_bucket: "{{ atl_backup_manifest_url.hostname }}"
-        atl_backup_manifest_path: "{{ atl_backup_manifest_url.path }}"
-        atl_backup_manifest_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest_url.path | basename }}"
-
-    - name: Fetch the manifest from S3
-      aws_s3:
-        mode: get
-        bucket: "{{ atl_backup_manifest_bucket }}"
-        object: "{{ atl_backup_manifest_path }}"
-        dest: "{{ atl_backup_manifest_dest }}"
-      when: atl_backup_manifest_url.scheme == 's3'
-
-    - name: Fetch the manifest from remote host
-      get_url:
-        url: "{{ atl_backup_manifest_url }}"
-        dest: "{{ atl_backup_manifest_dest }}"
-      when: atl_backup_manifest_url.scheme != 's3'
-
-    - name: Load parameters from manifest
-      include_vars:
-        file: "{{ atl_backup_manifest_dest }}"
-        name: atl_backup_manifest
-
-    - name: Define the DB and home dump destinations
-      set_fact:
-        atl_backup_db_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest.db_dump | basename }}"
-        atl_backup_home_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest.shared_home_dump | basename }}"
-
-    # FIXME: Here we fetch the backups. However we may wish to stream
-    # these directly from S3 to the target DB/FS to avoid requiring
-    # disk-space for the intermediate files.
-    - name: Fetch DB backup from S3
-      aws_s3:
-        mode: get
-        bucket: "{{ atl_backup_manifest.db_dump | urlsplit('hostname') }}"
-        object: "{{ atl_backup_manifest.db_dump | urlsplit('path') }}"
-        dest: "{{ atl_backup_db_dest }}"
-
-    - name: Fetch Home backup from S3
-      aws_s3:
-        mode: get
-        bucket: "{{ atl_backup_manifest.shared_home_dump | urlsplit('hostname') }}"
-        object: "{{ atl_backup_manifest.shared_home_dump | urlsplit('path') }}"
-        dest: "{{ atl_backup_home_dest }}"
-
-  when: atl_backup_manifest_url is defined and atl_backup_manifest_url != ''
diff --git a/roles/restore_backups/.yamllint b/roles/restore_backups/.yamllint
new file mode 100644
index 0000000..a87f8ff
--- /dev/null
+++ b/roles/restore_backups/.yamllint
@@ -0,0 +1,12 @@
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  line-length: disable
+  truthy: disable
+  trailing-spaces: false
diff --git a/roles/restore_backups/defaults/main.yml b/roles/restore_backups/defaults/main.yml
new file mode 100644
index 0000000..6561c7e
--- /dev/null
+++ b/roles/restore_backups/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+
+atl_backup_home_restore_canary_filename: ".slingshot_home_restore"
+atl_backup_home_restore_canary_path: "{{ atl_product_home_shared }}/{{ atl_backup_home_restore_canary_filename }}"
diff --git a/roles/restore_backups/molecule/default/Dockerfile.j2 b/roles/restore_backups/molecule/default/Dockerfile.j2
new file mode 100644
index 0000000..e6aa95d
--- /dev/null
+++ b/roles/restore_backups/molecule/default/Dockerfile.j2
@@ -0,0 +1,14 @@
+# Molecule managed
+
+{% if item.registry is defined %}
+FROM {{ item.registry.url }}/{{ item.image }}
+{% else %}
+FROM {{ item.image }}
+{% endif %}
+
+RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
+    elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \
+    elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
+    elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \
+    elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \
+    elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi
diff --git a/roles/restore_backups/molecule/default/molecule.yml b/roles/restore_backups/molecule/default/molecule.yml
new file mode 100644
index 0000000..7f082f6
--- /dev/null
+++ b/roles/restore_backups/molecule/default/molecule.yml
@@ -0,0 +1,36 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint:
+  name: yamllint
+platforms:
+  - name: amazon_linux2
+    image: amazonlinux:2
+    groups:
+      - aws_node_local
+    ulimits:
+      - nofile:262144:262144
+  - name: ubuntu_lts
+    image: ubuntu:bionic
+    groups:
+      - aws_node_local
+    ulimits:
+      - nofile:262144:262144
+provisioner:
+  name: ansible
+  options:
+    skip-tags: runtime_pkg
+  lint:
+    name: ansible-lint
+    options:
+      x: ["701"]
+  inventory:
+    links:
+      group_vars: ../../../../group_vars/
+verifier:
+  name: testinfra
+  lint:
+    name: flake8
+    enabled: false
diff --git a/roles/restore_backups/molecule/default/playbook.yml b/roles/restore_backups/molecule/default/playbook.yml
new file mode 100644
index 0000000..ffd0c12
--- /dev/null
+++ b/roles/restore_backups/molecule/default/playbook.yml
@@ -0,0 +1,10 @@
+---
+- name: Converge
+  hosts: all
+  vars:
+    atl_backup_manifest_url: ''
+    atl_backup_home_restore_canary_path: '/tmp/canary.tmp'
+
+  roles:
+    # Should be no-op
+    - role: restore_backups
diff --git a/roles/restore_backups/molecule/default/tests/test_default.py b/roles/restore_backups/molecule/default/tests/test_default.py
new file mode 100644
index 0000000..0a7276f
--- /dev/null
+++ b/roles/restore_backups/molecule/default/tests/test_default.py
@@ -0,0 +1,10 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_no_canary_file(host):
+    assert not host.file('/tmp/canary.tmp').exists
diff --git a/roles/restore_backups/tasks/amazon.yml b/roles/restore_backups/tasks/amazon.yml
new file mode 100644
index 0000000..bf32125
--- /dev/null
+++ b/roles/restore_backups/tasks/amazon.yml
@@ -0,0 +1,7 @@
+---
+
+# Amazon Linux 2 supplies extra packages via a special command.
+- name: Enable Postgresql from 'extras'
+  command: amazon-linux-extras install -y "postgresql{{ postgres_version }}"
+  args:
+    creates: /usr/bin/psql
diff --git a/roles/restore_backups/tasks/main.yml b/roles/restore_backups/tasks/main.yml
new file mode 100644
index 0000000..4d6865f
--- /dev/null
+++ b/roles/restore_backups/tasks/main.yml
@@ -0,0 +1,133 @@
+---
+
+# This role will attempt to fetch and load the backup manifest from a
+# remote S3 URL. On successful completion the contents of the JSON or
+# YAML document will be in the var `atl_backup_manifest`.
+#
+# PREREQUISITES:
+#  * `atl_backup_manifest_url` points at the manifest.
+#  * The shared home filesystem is mounted if necessary (e.g. NFS/EFS).
+#  * The database has been created and the variable `db_created` is
+#    registered with the result (i.e. `register: db_created`).
+#
+# NOTE: The actual DB/FS restore operations could potentially be split
+# out into discrete roles, but currently that is not required.
+#
+# TODO: Support HTTPS with authentication. Deferred until after the
+# initial testing release.
+
+- block:
+
+    - name: Ensure temp directory is present
+      file:
+        path: "{{ atl_installer_temp }}"
+        state: directory
+        mode: 0750
+        owner: "{{ atl_product_user }}"
+        group: "{{ atl_product_user }}"
+      changed_when: false  # For Molecule idempotence check
+
+    - name: Parse the manifest URL
+      set_fact:
+        atl_backup_manifest_url: "{{ atl_backup_manifest_url | urlsplit }}"
+
+    - name: Extract manifest file information
+      set_fact:
+        atl_backup_manifest_bucket: "{{ atl_backup_manifest_url.hostname }}"
+        atl_backup_manifest_path: "{{ atl_backup_manifest_url.path }}"
+        atl_backup_manifest_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest_url.path | basename }}"
+
+    - name: Fetch the manifest from S3
+      aws_s3:
+        mode: get
+        overwrite: different
+        bucket: "{{ atl_backup_manifest_bucket }}"
+        object: "{{ atl_backup_manifest_path }}"
+        dest: "{{ atl_backup_manifest_dest }}"
+      when: atl_backup_manifest_url.scheme == 's3'
+
+    - name: Load parameters from manifest
+      include_vars:
+        file: "{{ atl_backup_manifest_dest }}"
+        name: atl_backup_manifest
+
+    - name: Define the DB and home dump destinations
+      set_fact:
+        # FIXME: The manifest format is still undecided so the
+        # following usages will need to be updated once it settles.
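+        #
+        # For illustration only, a manifest shape that would satisfy the
+        # lookups below; every key shown is an assumption until the
+        # format is finalised:
+        #
+        #   name: jira-backup-001
+        #   artifacts:
+        #     db:
+        #       location:
+        #         value: "s3://example-bucket/jira-backup-001/db.dump"
+        #     sharedHome:
+        #       location:
+        #         value: "s3://example-bucket/jira-backup-001/shared-home.tar.gz"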
+        atl_backup_id: "{{ atl_backup_manifest.name }}"
+        atl_backup_db_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest.artifacts.db.location.value | basename }}"
+        atl_backup_home_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest.artifacts.sharedHome.location.value | basename }}"
+
+    # FIXME: Here we fetch the backups. However we may wish to stream
+    # these directly from S3 to the target DB/FS to avoid requiring
+    # disk-space for the intermediate files.
+    - name: Fetch DB backup from S3
+      aws_s3:
+        mode: get
+        overwrite: different
+        bucket: "{{ atl_backup_manifest.artifacts.db.location.value | urlsplit('hostname') }}"
+        object: "{{ atl_backup_manifest.artifacts.db.location.value | urlsplit('path') }}"
+        dest: "{{ atl_backup_db_dest }}"
+
+    - name: Fetch Home backup from S3
+      aws_s3:
+        mode: get
+        overwrite: different
+        bucket: "{{ atl_backup_manifest.artifacts.sharedHome.location.value | urlsplit('hostname') }}"
+        object: "{{ atl_backup_manifest.artifacts.sharedHome.location.value | urlsplit('path') }}"
+        dest: "{{ atl_backup_home_dest }}"
+
+    - name: Install distro-specific restore support packages
+      include_tasks: "{{ ansible_distribution|lower }}.yml"
+
+
+    - name: Restore application database
+      postgresql_db:
+        login_host: "{{ atl_db_host }}"
+        login_user: "{{ atl_db_root_user }}"
+        login_password: "{{ atl_db_root_password }}"
+        port: "{{ atl_db_port }}"
+        name: "{{ atl_jdbc_db_name }}"
+        owner: "{{ atl_jdbc_user }}"
+        encoding: "{{ atl_jdbc_encoding }}"
+        lc_collate: "{{ atl_jdbc_collation }}"
+        lc_ctype: "{{ atl_jdbc_ctype }}"
+        template: "{{ atl_jdbc_template }}"
+        # Depends on the DB dump fetched by the tasks above
+        state: restore
+        target: "{{ atl_backup_db_dest }}"
+      when: db_created.changed and atl_backup_db_dest is defined
+
+
+    - name: Check for the restore canary file
+      stat:
+        path: "{{ atl_backup_home_restore_canary_path }}"
+      register: restore_canary
+
+    - block:
+
+        - name: Create shared home if necessary
+          file:
+            path: "{{ atl_product_home_shared }}"
+            state: directory
+            mode: 0750
+            owner: "{{ atl_product_user }}"
+            group: "{{ atl_product_user }}"
+
+        - name: Restore the shared-home backup
+          unarchive:
+            src: "{{ atl_backup_home_dest }}"
+            dest: "{{ atl_product_home_shared }}"
+            owner: "{{ atl_product_user }}"
+            group: "{{ atl_product_user }}"
+
+        - name: Create restore-canary if necessary
+          copy:
+            dest: "{{ atl_backup_home_restore_canary_path }}"
+            content: "{{ atl_backup_id }}"
+
+      when: not restore_canary.stat.exists
+
+
+  when: atl_restore_required
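
End-to-end, a restore under this change is triggered by pointing the manifest
URL environment variable at a backup manifest. A minimal sketch (the bucket
and key are illustrative; only `ATL_BACKUP_MANIFEST_URL` and the playbook
file come from this change):

    export ATL_BACKUP_MANIFEST_URL="s3://example-bucket/jira-backup-001/manifest.yaml"
    ansible-playbook aws_jira_dc_node.yml

When the variable is unset or empty, `atl_restore_required` evaluates to
false and the `restore_backups` role is a no-op, which is the case the
Molecule converge playbook (`atl_backup_manifest_url: ''`) exercises.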