Merged in DCD-686-backup-restore-operations (pull request #36)

DCD-686: Slingshot restore operations

Approved-by: Adam Brokes <abrokes@atlassian.com>
Steve Smith
2019-10-31 00:29:18 +00:00
14 changed files with 276 additions and 105 deletions

View File

@@ -1,40 +1,40 @@
---
- name: Create application DB user
postgresql_user:
login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}"
login_password: "{{ atl_db_root_password }}"
port: "{{ atl_db_port }}"
name: "{{ atl_jdbc_user }}"
password: "{{ atl_jdbc_password }}"
expires: 'infinity'
tags:
- new_only
- block:
- name: Update root privs for new user
postgresql_privs:
login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}"
login_password: "{{ atl_db_root_password }}"
database: postgres
roles: "{{ atl_db_root_user }}"
objs: "{{ atl_jdbc_user }}"
type: group
tags:
- new_only
- name: Create application DB user
postgresql_user:
login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}"
login_password: "{{ atl_db_root_password }}"
port: "{{ atl_db_port }}"
name: "{{ atl_jdbc_user }}"
password: "{{ atl_jdbc_password }}"
expires: 'infinity'
- name: Update root privs for new user
postgresql_privs:
login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}"
login_password: "{{ atl_db_root_password }}"
database: postgres
roles: "{{ atl_db_root_user }}"
objs: "{{ atl_jdbc_user }}"
type: group
- name: Create new application database
postgresql_db:
login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}"
login_password: "{{ atl_db_root_password }}"
port: "{{ atl_db_port }}"
name: "{{ atl_jdbc_db_name }}"
owner: "{{ atl_jdbc_user }}"
encoding: "{{ atl_jdbc_encoding }}"
lc_collate: "{{ atl_jdbc_collation }}"
lc_ctype: "{{ atl_jdbc_ctype }}"
template: "{{ atl_jdbc_template }}"
register: db_created
- name: Create application database
postgresql_db:
login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}"
login_password: "{{ atl_db_root_password }}"
port: "{{ atl_db_port }}"
name: "{{ atl_jdbc_db_name }}"
owner: "{{ atl_jdbc_user }}"
encoding: "{{ atl_jdbc_encoding }}"
lc_collate: "{{ atl_jdbc_collation }}"
lc_ctype: "{{ atl_jdbc_ctype }}"
template: "{{ atl_jdbc_template }}"
tags:
- new_only

View File

@@ -1,69 +0,0 @@
---
# This role will attempt to fetch and load the backup manifest from a
# remote HTTP or S3 URL. On successful completion the contents of the
# JSON or YAML document will be in the var `atl_backup_manifest`.
- block:
- name: Ensure temp directory is present
file:
path: "{{ atl_installer_temp }}"
state: directory
mode: 0750
owner: "{{ atl_product_user }}"
group: "{{ atl_product_user }}"
changed_when: false # For Molecule idempotence check
- name: Parse the manifest URL
set_fact:
atl_backup_manifest_url: "{{ atl_backup_manifest_url | urlsplit }}"
- name: Extract manifest file information
set_fact:
atl_backup_manifest_bucket: "{{ atl_backup_manifest_url.hostname }}"
atl_backup_manifest_path: "{{ atl_backup_manifest_url.path }}"
atl_backup_manifest_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest_url.path | basename }}"
- name: Fetch the manifest from S3
aws_s3:
mode: get
bucket: "{{ atl_backup_manifest_bucket }}"
object: "{{ atl_backup_manifest_path }}"
dest: "{{ atl_backup_manifest_dest }}"
when: atl_backup_manifest_url.scheme == 's3'
- name: Fetch the manifest from remote host
get_url:
url: "{{ atl_backup_manifest_url }}"
dest: "{{ atl_backup_manifest_dest }}"
when: atl_backup_manifest_url.scheme != 's3'
- name: Load parameters from manifest
include_vars:
file: "{{ atl_backup_manifest_dest }}"
name: atl_backup_manifest
- name: Define the DB and home dump destinations
set_fact:
atl_backup_db_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest.db_dump | basename }}"
atl_backup_home_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest.shared_home_dump | basename }}"
# FIXME: Here we fetch the backups. However we may wish to stream
# these directly from S3 to the target DB/FS to avoid requiring
# disk-space for the intermediate files.
- name: Fetch DB backup from S3
aws_s3:
mode: get
bucket: "{{ atl_backup_manifest.db_dump | urlsplit('hostname') }}"
object: "{{ atl_backup_manifest.db_dump | urlsplit('path') }}"
dest: "{{ atl_backup_db_dest }}"
- name: Fetch Home backup from S3
aws_s3:
mode: get
bucket: "{{ atl_backup_manifest.shared_home_dump | urlsplit('hostname') }}"
object: "{{ atl_backup_manifest.shared_home_dump | urlsplit('path') }}"
dest: "{{ atl_backup_home_dest }}"
when: atl_backup_manifest_url is defined and atl_backup_manifest_url != ''

View File

@@ -0,0 +1,12 @@
extends: default
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
line-length: disable
truthy: disable
trailing-spaces: false

View File

@@ -0,0 +1,4 @@
---
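# The canary file is dropped into the shared home once a restore has
# completed; its presence on later runs tells the role to skip the
# shared-home restore rather than unpack over existing data.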
atl_backup_home_restore_canary_filename: ".slingshot_home_restore"
atl_backup_home_restore_canary_path: "{{ atl_product_home_shared }}/{{ atl_backup_home_restore_canary_filename }}"

View File

@@ -0,0 +1,14 @@
# Molecule managed
{% if item.registry is defined %}
FROM {{ item.registry.url }}/{{ item.image }}
{% else %}
FROM {{ item.image }}
{% endif %}
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \
elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \
elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi

View File

@@ -0,0 +1,36 @@
---
dependency:
name: galaxy
driver:
name: docker
lint:
name: yamllint
platforms:
- name: amazon_linux2
image: amazonlinux:2
groups:
- aws_node_local
ulimits:
- nofile:262144:262144
- name: ubuntu_lts
image: ubuntu:bionic
groups:
- aws_node_local
ulimits:
- nofile:262144:262144
provisioner:
name: ansible
options:
skip-tags: runtime_pkg
lint:
name: ansible-lint
options:
x: ["701"]
inventory:
links:
group_vars: ../../../../group_vars/
verifier:
name: testinfra
lint:
name: flake8
enabled: false

View File

@@ -0,0 +1,10 @@
---
- name: Converge
hosts: all
vars:
atl_backup_manifest_url: ''
atl_backup_home_restore_canary_path: '/tmp/canary.tmp'
roles:
# Should be no-op
- role: restore_backups

View File

@@ -0,0 +1,10 @@
import os

import testinfra.utils.ansible_runner

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


def test_no_canary_file(host):
    # The converge play sets the canary path to /tmp/canary.tmp and runs
    # the role as a no-op, so the canary file must not have been created.
    assert not host.file('/tmp/canary.tmp').exists

View File

@@ -0,0 +1,7 @@
---
# Amazon Linux 2 supplies extra packages via a special command.
- name: Enable Postgresql from 'extras'
command: amazon-linux-extras install -y "postgresql{{ postgres_version }}"
args:
creates: /usr/bin/psql

View File

@@ -0,0 +1,133 @@
---
# This role will attempt to fetch and load the backup manifest from a
# remote S3 URL. On successful completion the contents of the JSON or
# YAML document will be in the var `atl_backup_manifest`.
#
# PREREQUISITES:
# * `atl_backup_manifest_url` points at the manifest.
# * The shared home filesystem is mounted if necessary (e.g. NFS/EFS).
# * The database has been created and the variable `db_created` is
# registered with the result (i.e. `register: db_created`).
#
# NOTE: The actual DB/FS restore operations could potentially be split
# out into discrete roles, but currently that is not required.
#
# TODO: Support HTTPS with authentication. Deferred until after the
# initial testing release.
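#
# For illustration only (the format is still in flux, per the FIXME
# further down), the lookups below assume a manifest shaped roughly
# like the following; the bucket and key names are placeholders:
#
#   name: some-backup-id
#   artifacts:
#     db:
#       location:
#         value: "s3://example-bucket/backups/db.dump"
#     sharedHome:
#       location:
#         value: "s3://example-bucket/backups/shared-home.tar.gz"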
- block:
- name: Ensure temp directory is present
file:
path: "{{ atl_installer_temp }}"
state: directory
mode: 0750
owner: "{{ atl_product_user }}"
group: "{{ atl_product_user }}"
changed_when: false # For Molecule idempotence check
- name: Parse the manifest URL
set_fact:
atl_backup_manifest_url: "{{ atl_backup_manifest_url | urlsplit }}"
- name: Extract manifest file information
set_fact:
atl_backup_manifest_bucket: "{{ atl_backup_manifest_url.hostname }}"
atl_backup_manifest_path: "{{ atl_backup_manifest_url.path }}"
atl_backup_manifest_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest_url.path | basename }}"
- name: Fetch the manifest from S3
aws_s3:
mode: get
overwrite: different
bucket: "{{ atl_backup_manifest_bucket }}"
object: "{{ atl_backup_manifest_path }}"
dest: "{{ atl_backup_manifest_dest }}"
when: atl_backup_manifest_url.scheme == 's3'
- name: Load parameters from manifest
include_vars:
file: "{{ atl_backup_manifest_dest }}"
name: atl_backup_manifest
- name: Define the DB and home dump destinations
set_fact:
# FIXME: The manifest format is still undecided, so the
# following usages will need to be updated once it settles.
atl_backup_id: "{{ atl_backup_manifest.name }}"
atl_backup_db_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest.artifacts.db.location.value | basename }}"
atl_backup_home_dest: "{{ atl_installer_temp }}/{{ atl_backup_manifest.artifacts.sharedHome.location.value | basename }}"
# FIXME: Here we fetch the backups. However we may wish to stream
# these directly from S3 to the target DB/FS to avoid requiring
# disk-space for the intermediate files.
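# One possible (untested) shape for such a streaming step, sketched here
# only as an idea; it assumes the aws CLI and psql are available on the
# node and that the dump is plain SQL:
#
#   - name: Stream DB dump straight into Postgres (hypothetical)
#     shell: >
#       aws s3 cp "{{ atl_backup_manifest.artifacts.db.location.value }}" -
#       | PGPASSWORD="{{ atl_db_root_password }}" psql
#       -h "{{ atl_db_host }}" -p "{{ atl_db_port }}"
#       -U "{{ atl_db_root_user }}" "{{ atl_jdbc_db_name }}"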
- name: Fetch DB backup from S3
aws_s3:
mode: get
overwrite: different
bucket: "{{ atl_backup_manifest.artifacts.db.location.value | urlsplit('hostname') }}"
object: "{{ atl_backup_manifest.artifacts.db.location.value | urlsplit('path') }}"
dest: "{{ atl_backup_db_dest }}"
- name: Fetch Home backup from S3
aws_s3:
mode: get
overwrite: different
bucket: "{{ atl_backup_manifest.artifacts.sharedHome.location.value | urlsplit('hostname') }}"
object: "{{ atl_backup_manifest.artifacts.sharedHome.location.value | urlsplit('path') }}"
dest: "{{ atl_backup_home_dest }}"
- name: Install distro-specific restore support packages
include_tasks: "{{ ansible_distribution|lower }}.yml"
- name: Restore application database
postgresql_db:
login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}"
login_password: "{{ atl_db_root_password }}"
port: "{{ atl_db_port }}"
name: "{{ atl_jdbc_db_name }}"
owner: "{{ atl_jdbc_user }}"
encoding: "{{ atl_jdbc_encoding }}"
lc_collate: "{{ atl_jdbc_collation }}"
lc_ctype: "{{ atl_jdbc_ctype }}"
template: "{{ atl_jdbc_template }}"
# Depends on fetch_backup roles
state: restore
target: "{{ atl_backup_db_dest }}"
when: db_created.changed and atl_backup_db_dest is defined
- name: Check for the restore canary file
stat:
path: "{{ atl_backup_home_restore_canary_path }}"
register: restore_canary
- block:
- name: Create shared home if necessary
file:
path: "{{ atl_product_home_shared }}"
state: directory
mode: 0750
owner: "{{ atl_product_user }}"
group: "{{ atl_product_user }}"
- name: Restore the shared-home backup
unarchive:
src: "{{ atl_backup_home_dest }}"
dest: "{{ atl_product_home_shared }}"
owner: "{{ atl_product_user }}"
group: "{{ atl_product_user }}"
- name: Create restore-canary if necessary
copy:
dest: "{{ atl_backup_home_restore_canary_path }}"
content: "{{ atl_backup_id }}"
when: not restore_canary.stat.exists
when: atl_restore_required
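
For reference, a minimal sketch of how a caller would satisfy the prerequisites listed at the top of the restore tasks: create the database, register the result as `db_created`, then apply the `restore_backups` role. The play below is illustrative only; parameters are trimmed from the database-creation task shown in the first file of this change, and whether the restore actually runs is governed by `atl_restore_required`.

---
- hosts: all
  tasks:
    - name: Create application database
      postgresql_db:
        login_host: "{{ atl_db_host }}"
        name: "{{ atl_jdbc_db_name }}"
        owner: "{{ atl_jdbc_user }}"
      register: db_created          # consumed by the restore tasks above
    - name: Restore from backup when requested
      include_role:
        name: restore_backups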