AZURE-210 Deploy Crowd DC to Azure

This commit is contained in:
dbacon
2020-05-19 11:13:11 +01:00
parent 0fbc7609a1
commit e78af09b8b
37 changed files with 1396 additions and 200 deletions

1
.gitignore vendored
View File

@@ -7,3 +7,4 @@ __pycache__
\#*\# \#*\#
.idea .idea
.vscode .vscode
*.iml

35
az_crowd_dc_node.yml Normal file
View File

@@ -0,0 +1,35 @@
---
# Playbook: provision a Crowd Data Center node on Azure.
# Runs locally on the node (see inv/az_node_local).
- hosts: az_node_local
  become: true

  vars:
    atl_product_family: "crowd"
    atl_product_user: "crowd"
    atl_product_edition: "crowd"
    atl_use_system_jdk: true
    atl_download_format: "tarball"
    # Crowd picks up its DB connection from these environment entries,
    # injected into the product's systemd unit.
    # NOTE(review): JDBC_USER is the root login — presumably intentional for
    # Azure (user@host form via atl_db_root_user_login); confirm.
    atl_startup_systemd_params:
      - "Environment=JDBC_DRIVER={{ atl_db_driver }}"
      - "Environment=JDBC_DIALECT=org.hibernate.dialect.PostgreSQLDialect"
      - "Environment=JDBC_USER={{ atl_db_root_user_login }}"
      - "Environment=JDBC_PASSWORD={{ atl_jdbc_password }}"
      - "Environment=JDBC_URL={{ atl_jdbc_url }}"
    # Crowd-specific database creation parameters (override group_vars/all.yml).
    atl_jdbc_encoding: 'UNICODE'
    atl_jdbc_collation: 'C'
    atl_jdbc_ctype: 'C'
    atl_jdbc_template: 'template0'

  roles:
    - role: linux_common
    - role: az_common
    - role: az_shared_fs_config
    - role: product_common
    # Install and DB init only run on first provision, not on stack updates.
    - role: product_install
      tags: [skip_on_stack_update]
    - role: database_init
      tags: [skip_on_stack_update]
    - role: crowd_config
    - role: az_app_insights_install
    - role: product_startup

View File

@@ -18,7 +18,7 @@ pipelines:
- step: - step:
name: Pre Parallelization stage name: Pre Parallelization stage
script: script:
- echo "Running tests in 37 batches" - echo "Running tests in 39 batches"
- step: - step:
name: Check if number of batches match actual number of scenarios name: Check if number of batches match actual number of scenarios
script: script:
@@ -327,4 +327,20 @@ pipelines:
- apt-get update && ./bin/install-ansible --dev - apt-get update && ./bin/install-ansible --dev
- ./bin/run-tests-in-batches --batch 37 - ./bin/run-tests-in-batches --batch 37
- step:
name: Molecule Test Batch - 38
services:
- docker
script:
- apt-get update && ./bin/install-ansible --dev
- ./bin/run-tests-in-batches --batch 38
- step:
name: Molecule Test Batch - 39
services:
- docker
script:
- apt-get update && ./bin/install-ansible --dev
- ./bin/run-tests-in-batches --batch 39

160
group_vars/all.yml Normal file
View File

@@ -0,0 +1,160 @@
---
# This file is the place for cross-role defaults for all products, and
# common parameters from the infrastructure-deployment phase of the
# build (e.g. CloudFormation). Variables defined here will override
# those defined in `<role>/defaults/main.yml`, although defaults
# should usually be defined there too. For variable precedence
# information see:
#
# https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html
java_version: "1.8.0"
postgres_version: "9.6"
git_version: "2.14.4"
atl_shared_mountpoint: "/media/atl"
# Jinja does not interpolate nested moustaches; build the fallback path with
# the `~` concatenation operator instead of an inner "{{ java_version }}".
java_home: "{{ lookup('env', 'JAVA_HOME') or '/usr/lib/jvm/jre-' ~ java_version ~ '-openjdk' }}"
# Simplify NFS mapping by using a fixed UID
atl_product_user_uid: "{{ lookup('env', 'ATL_PRODUCT_USER_UID') or '2001' }}"
# FIXME: Some of these should be overridden from the environment?
atl_home_base: "/var/atlassian/application-data"
atl_product_home: "{{ atl_home_base }}/{{ atl_product_family }}"
# Per-product relative path of the shared home under atl_shared_mountpoint.
atl_product_shared_home_map:
  confluence: "confluence/shared-home"
  jira: "jira/shared"
  stash: "bitbucket/shared"
  crowd: "crowd/shared"
atl_product_home_shared: "{{ atl_shared_mountpoint }}/{{ atl_product_shared_home_map[atl_product_family] }}"
atl_product_shared_plugins: "{{ atl_product_home_shared }}/plugins/installed-plugins"
atl_installation_base: "/opt/atlassian"
atl_product_installation_base: "{{ atl_installation_base }}/{{ atl_product_edition }}"
atl_product_installation_versioned: "{{ atl_product_installation_base }}/{{ atl_product_version }}"
atl_product_installation_current: "{{ atl_product_installation_base }}/current"
atl_installer_temp: "{{ atl_installation_base }}/tmp"
# Default log locations, shared by every product via the anchor below.
atl_product_logs_default: &logs_default
  - path: "{{ atl_product_installation_current }}/logs/*"
    type: product
  - path: "{{ atl_product_home }}/logs/*"
    type: product
  - path: "{{ atl_product_home }}/log/*"
    type: product
  - path: "{{ atl_product_home }}/logs/audit/*"
    type: audit
  - path: "{{ atl_product_home }}/log/audit/*"
    type: audit
  - path: "/var/log/ansible-bootstrap.log"
    type: provisioning
  - path: "/var/log/cfn-*.log"
    type: provisioning
atl_product_logs:
  confluence: *logs_default
  jira: *logs_default
  stash: *logs_default
  crowd: *logs_default
# The following are imports from the environment. These are generally
# set in /etc/atl by the CloudFormation template and sourced before
# Ansible is run. See bin/ansible-with-atl-env for a convenient wrapper
atl_product_version: "{{ lookup('env', 'ATL_PRODUCT_VERSION') | lower }}"
atl_efs_id: "{{ lookup('env', 'ATL_EFS_ID') }}"
atl_aws_stack_name: "{{ lookup('env', 'ATL_AWS_STACK_NAME') }}"
atl_aws_region: "{{ lookup('env', 'ATL_AWS_REGION') }}"
atl_aws_iam_role: "{{ lookup('env', 'ATL_AWS_IAM_ROLE') }}"
atl_aws_iam_role_arn: "{{ lookup('env', 'ATL_AWS_IAM_ROLE_ARN') }}"
atl_aws_enable_cloudwatch: "{{ lookup('env', 'ATL_AWS_ENABLE_CLOUDWATCH')|bool or false }}"
atl_aws_enable_cloudwatch_logs: "{{ lookup('env', 'ATL_AWS_ENABLE_CLOUDWATCH_LOGS')|bool or false }}"
atl_db_engine: "{{ lookup('env', 'ATL_DB_ENGINE') }}"
atl_db_host: "{{ lookup('env', 'ATL_DB_HOST') }}"
atl_db_port: "{{ lookup('env', 'ATL_DB_PORT') or '5432' }}"
atl_db_root_db_name: "{{ lookup('env', 'ATL_DB_ROOT_DB_NAME') or 'postgres' }}"
atl_db_root_user: "{{ lookup('env', 'ATL_DB_ROOT_USER') or 'postgres' }}"
atl_db_root_password: "{{ lookup('env', 'ATL_DB_ROOT_PASSWORD') }}"
atl_db_driver: "{{ lookup('env', 'ATL_DB_DRIVER') or 'org.postgresql.Driver' }}"
atl_db_poolminsize: "{{ lookup('env', 'ATL_DB_POOLMINSIZE') or '20' }}"
atl_db_poolmaxsize: "{{ lookup('env', 'ATL_DB_POOLMAXSIZE') or '100' }}"
atl_db_minidle: "{{ lookup('env', 'ATL_DB_MINIDLE') or '10' }}"
atl_db_maxidle: "{{ lookup('env', 'ATL_DB_MAXIDLE') or '20' }}"
atl_db_maxwaitmillis: "{{ lookup('env', 'ATL_DB_MAXWAITMILLIS') or '30000' }}"
atl_db_timebetweenevictionrunsmillis: "{{ lookup('env', 'ATL_DB_TIMEBETWEENEVICTIONRUNSMILLIS') or '30000' }}"
atl_db_minevictableidletimemillis: "{{ lookup('env', 'ATL_DB_MINEVICTABLEIDLETIMEMILLIS') or '5000' }}"
atl_db_removeabandoned: "{{ lookup('env', 'ATL_DB_REMOVEABANDONED') or 'true' }}"
atl_db_removeabandonedtimeout: "{{ lookup('env', 'ATL_DB_REMOVEABANDONEDTIMEOUT') or '300' }}"
atl_db_testwhileidle: "{{ lookup('env', 'ATL_DB_TESTWHILEIDLE') or 'true' }}"
atl_db_testonborrow: "{{ lookup('env', 'ATL_DB_TESTONBORROW') or 'false' }}"
# Map DB engine name to the product's internal database "type" identifier.
atl_db_engine_to_db_type_map:
  aurora_postgres: "postgresaurora96"
  rds_postgres: "postgres72"
atl_db_type: "{{ atl_db_engine_to_db_type_map[atl_db_engine] | default('postgres72') }}"
atl_jdbc_db_name: "{{ lookup('env', 'ATL_JDBC_DB_NAME') }}"
atl_jdbc_user: "{{ lookup('env', 'ATL_JDBC_USER') }}"
atl_jdbc_password: "{{ lookup('env', 'ATL_JDBC_PASSWORD') }}"
atl_jdbc_encoding: "{{ lookup('env', 'ATL_JDBC_ENCODING') or 'UTF-8' }}"
atl_jdbc_collation: "{{ lookup('env', 'ATL_JDBC_COLLATION') or 'en_US.UTF-8' }}"
atl_jdbc_ctype: "{{ lookup('env', 'ATL_JDBC_CTYPE') or 'en_US.UTF-8' }}"
atl_jdbc_template: "{{ lookup('env', 'ATL_JDBC_TEMPLATE') or 'template1' }}"
# Extra JDBC URL query parameters, keyed by engine (empty for unknown engines).
atl_jdbc_query_params_for_engine:
  aurora_postgres: "?targetServerType=master"
atl_jdbc_url: "jdbc:postgresql://{{ atl_db_host }}:{{ atl_db_port }}/{{ atl_jdbc_db_name }}{{ atl_jdbc_query_params_for_engine[atl_db_engine]| default('') }}"
atl_jvm_heap: "{{ lookup('env', 'ATL_JVM_HEAP') or '2048m' }}"
atl_jvm_opts: "{{ lookup('env', 'ATL_JVM_OPTS') or '' }}"
atl_catalina_opts: "{{ lookup('env', 'ATL_CATALINA_OPTS') or '' }}"
atl_proxy_name: "{{ lookup('env', 'ATL_PROXY_NAME') | lower }}"
atl_proxy_port: "{{ lookup('env', 'ATL_TOMCAT_PROXYPORT') }}"
atl_tomcat_port: "{{ lookup('env', 'ATL_TOMCAT_DEFAULTCONNECTORPORT') or '8080' }}"
atl_ssl_proxy: "{{ lookup('env', 'ATL_SSL_PROXY') or 'false' }}"
atl_tomcat_acceptcount: "{{ lookup('env', 'ATL_TOMCAT_ACCEPTCOUNT') or '10' }}"
atl_tomcat_connectiontimeout: "{{ lookup('env', 'ATL_TOMCAT_CONNECTIONTIMEOUT') or '20000' }}"
atl_tomcat_contextpath: "{{ lookup('env', 'ATL_TOMCAT_CONTEXTPATH') or '' }}"
atl_tomcat_enablelookups: "{{ lookup('env', 'ATL_TOMCAT_ENABLELOOKUPS') or 'false' }}"
atl_tomcat_maxthreads: "{{ lookup('env', 'ATL_TOMCAT_MAXTHREADS') or '200' }}"
atl_tomcat_minsparethreads: "{{ lookup('env', 'ATL_TOMCAT_MINSPARETHREADS') or '10' }}"
atl_tomcat_protocol: "{{ lookup('env', 'ATL_TOMCAT_PROTOCOL') or 'HTTP/1.1' }}"
atl_tomcat_redirectport: "{{ lookup('env', 'ATL_TOMCAT_REDIRECTPORT') or '' }}"
atl_tomcat_scheme: "{{ lookup('env', 'ATL_TOMCAT_SCHEME') or 'http' }}"
atl_tomcat_secure: "{{ lookup('env', 'ATL_TOMCAT_SECURE') or 'false' }}"
atl_fileserver_host: "{{ lookup('env', 'ATL_FILESERVER_IP') }}"
atl_elasticsearch_host: "{{ lookup('env', 'ATL_ELASTICSEARCH_HOST') }}"
atl_elasticsearch_endpoint: "http://{{ atl_elasticsearch_host }}"
atl_elasticsearch_s3_bucket: "{{ lookup('env', 'ATL_ELASTICSEARCH_S3_BUCKET') }}"
atl_nfs_server_device: "{{ lookup('env', 'ATL_NFS_SERVER_DEVICE') }}"
atl_nfs_disk_volume_type: "{{ lookup('env', 'ATL_NFS_DISK_VOLUME_TYPE') }}"
atl_nfs_disk_volume_iops: "{{ lookup('env', 'ATL_NFS_DISK_VOLUME_IOPS') }}"
atl_rds_instance_id: "{{ lookup('env', 'ATL_RDS_INSTANCE_ID') }}"
atl_rds_instance_class: "{{ lookup('env', 'ATL_RDS_INSTANCE_CLASS') }}"
atl_rds_multi_az: "{{ lookup('env', 'ATL_RDS_MULTI_AZ') }}"
atl_rds_subnet_group_name: "{{ lookup('env', 'ATL_RDS_SUBNET_GROUP_NAME') }}"
atl_rds_security_group: "{{ lookup('env', 'ATL_RDS_SECURITY_GROUP') }}"
atl_backup_manifest_url: "{{ lookup('env', 'ATL_BACKUP_MANIFEST_URL') }}"
atl_restore_required: "{{ atl_backup_manifest_url is defined and atl_backup_manifest_url != '' }}"
atl_bitbucket_license_key: "{{ lookup('env', 'ATL_BB_LICENSEKEY') }}"
atl_bitbucket_admin_password: "{{ lookup('env', 'ATL_BB_ADMIN_PASSWORD') }}"
atl_bitbucket_dataset_url: "{{ lookup('env', 'ATL_DATASET_URL') }}"
atl_bitbucket_baseurl: "{{ lookup('env', 'ATL_BB_BASEURL') }}"
# DB login names as presented to the product; overridden per-cloud
# (see the Azure group_vars, which append '@<host>' for Azure Postgres).
atl_db_root_user_login: "{{ atl_db_root_user }}"
atl_jdbc_user_login: "{{ atl_jdbc_user }}"

View File

@@ -1,6 +1,6 @@
--- ---
# This file is the place for cross-role defaults for all products, and # This file is the place for AWS cross-role defaults for all products, and
# common parameters from the infrastructure-deployment phase of the # common parameters from the infrastructure-deployment phase of the
# build (e.g. CloudFormation). Variables defined here will override # build (e.g. CloudFormation). Variables defined here will override
# those defined in `<role>/defaults/main.yml`, although defaults # those defined in `<role>/defaults/main.yml`, although defaults
@@ -9,147 +9,5 @@
# #
# https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html # https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html
java_version: "1.8.0"
postgres_version: "9.6"
git_version: "2.14.4"
atl_shared_mountpoint: "/media/atl"
# Simplify NFS mapping by using a fixed UID
atl_product_user_uid: "{{ lookup('env', 'ATL_PRODUCT_USER_UID') or '2001' }}"
# FIXME: Some of these should be overridden from the environment?
atl_home_base: "/var/atlassian/application-data"
atl_product_home: "{{ atl_home_base }}/{{ atl_product_family }}"
atl_product_shared_home_map:
confluence: "confluence/shared-home"
jira: "jira/shared"
stash: "bitbucket/shared"
crowd: "crowd/shared"
atl_product_home_shared: "{{ atl_shared_mountpoint }}/{{ atl_product_shared_home_map[atl_product_family] }}"
atl_product_shared_plugins: "{{ atl_product_home_shared }}/plugins/installed-plugins"
atl_installation_base: "/opt/atlassian"
atl_product_installation_base: "{{ atl_installation_base }}/{{ atl_product_edition }}"
atl_product_installation_versioned: "{{ atl_product_installation_base }}/{{ atl_product_version }}"
atl_product_installation_current: "{{ atl_product_installation_base }}/current"
atl_installer_temp: "{{ atl_installation_base }}/tmp"
atl_product_logs_default: &logs_default
- path: "{{ atl_product_installation_current }}/logs/*"
type: product
- path: "{{ atl_product_home }}/logs/*"
type: product
- path: "{{ atl_product_home }}/log/*"
type: product
- path: "{{ atl_product_home }}/logs/audit/*"
type: audit
- path: "{{ atl_product_home }}/log/audit/*"
type: audit
- path: "/var/log/ansible-bootstrap.log"
type: provisioning
- path: "/var/log/cfn-*.log"
type: provisioning
atl_product_logs:
confluence:
*logs_default
jira:
*logs_default
stash:
*logs_default
crowd:
*logs_default
# The following are imports from the environment. These are generally
# set in /etc/atl by the CloudFormation template and sourced before
# Ansible is run. See bin/ansible-with-atl-env for a convenient wrapper
atl_product_version: "{{ lookup('env', 'ATL_PRODUCT_VERSION') | lower }}"
atl_efs_id: "{{ lookup('env', 'ATL_EFS_ID') }}"
atl_aws_stack_name: "{{ lookup('env', 'ATL_AWS_STACK_NAME') }}"
atl_aws_region: "{{ lookup('env', 'ATL_AWS_REGION') }}"
atl_aws_iam_role: "{{ lookup('env', 'ATL_AWS_IAM_ROLE') }}"
atl_aws_iam_role_arn: "{{ lookup('env', 'ATL_AWS_IAM_ROLE_ARN') }}"
atl_aws_enable_cloudwatch: "{{ lookup('env', 'ATL_AWS_ENABLE_CLOUDWATCH')|bool or false }}"
atl_aws_enable_cloudwatch_logs: "{{ lookup('env', 'ATL_AWS_ENABLE_CLOUDWATCH_LOGS')|bool or false }}"
atl_db_engine: "{{ lookup('env', 'ATL_DB_ENGINE') }}"
atl_db_host: "{{ lookup('env', 'ATL_DB_HOST') }}"
atl_db_port: "{{ lookup('env', 'ATL_DB_PORT') or '5432' }}"
atl_db_root_db_name: "{{ lookup('env', 'ATL_DB_ROOT_DB_NAME') or 'postgres' }}"
atl_db_root_user: "{{ lookup('env', 'ATL_DB_ROOT_USER') or 'postgres' }}"
atl_db_root_password: "{{ lookup('env', 'ATL_DB_ROOT_PASSWORD') }}"
atl_db_driver: "{{ lookup('env', 'ATL_DB_DRIVER') or 'org.postgresql.Driver' }}"
atl_db_poolminsize: "{{ lookup('env', 'ATL_DB_POOLMINSIZE') or '20' }}"
atl_db_poolmaxsize: "{{ lookup('env', 'ATL_DB_POOLMAXSIZE') or '100' }}"
atl_db_minidle: "{{ lookup('env', 'ATL_DB_MINIDLE') or '10' }}"
atl_db_maxidle: "{{ lookup('env', 'ATL_DB_MAXIDLE') or '20' }}"
atl_db_maxwaitmillis: "{{ lookup('env', 'ATL_DB_MAXWAITMILLIS') or '30000' }}"
atl_db_timebetweenevictionrunsmillis: "{{ lookup('env', 'ATL_DB_TIMEBETWEENEVICTIONRUNSMILLIS') or '30000' }}"
atl_db_minevictableidletimemillis: "{{ lookup('env', 'ATL_DB_MINEVICTABLEIDLETIMEMILLIS') or '5000' }}"
atl_db_removeabandoned: "{{ lookup('env', 'ATL_DB_REMOVEABANDONED') or 'true' }}"
atl_db_removeabandonedtimeout: "{{ lookup('env', 'ATL_DB_REMOVEABANDONEDTIMEOUT') or '300' }}"
atl_db_testwhileidle: "{{ lookup('env', 'ATL_DB_TESTWHILEIDLE') or 'true'}}"
atl_db_testonborrow: "{{ lookup('env', 'ATL_DB_TESTONBORROW') or 'false' }}"
atl_db_engine_to_db_type_map:
aurora_postgres: "postgresaurora96"
rds_postgres: "postgres72"
atl_db_type: "{{ atl_db_engine_to_db_type_map[atl_db_engine] | default('postgres72') }}"
atl_jdbc_db_name: "{{ lookup('env', 'ATL_JDBC_DB_NAME') }}"
atl_jdbc_user: "{{ lookup('env', 'ATL_JDBC_USER') }}"
atl_jdbc_password: "{{ lookup('env', 'ATL_JDBC_PASSWORD') }}"
atl_jdbc_encoding: "{{ lookup('env', 'ATL_JDBC_ENCODING') or 'UTF-8' }}"
atl_jdbc_collation: "{{ lookup('env', 'ATL_JDBC_COLLATION') or 'en_US.UTF-8' }}"
atl_jdbc_ctype: "{{ lookup('env', 'ATL_JDBC_CTYPE') or 'en_US.UTF-8' }}"
atl_jdbc_template: "{{ lookup('env', 'ATL_JDBC_TEMPLATE') or 'template1' }}"
atl_jdbc_query_params_for_engine: atl_jdbc_query_params_for_engine:
aurora_postgres: "?targetServerType=master" aurora_postgres: "?targetServerType=master"
atl_jdbc_url: "jdbc:postgresql://{{ atl_db_host }}:{{ atl_db_port }}/{{ atl_jdbc_db_name }}{{ atl_jdbc_query_params_for_engine[atl_db_engine]| default('') }}"
atl_jvm_heap: "{{ lookup('env', 'ATL_JVM_HEAP') or '2048m' }}"
atl_jvm_opts: "{{ lookup('env', 'ATL_JVM_OPTS') or '' }}"
atl_catalina_opts: "{{ lookup('env', 'ATL_CATALINA_OPTS') or '' }}"
atl_proxy_name: "{{ lookup('env', 'ATL_PROXY_NAME') | lower }}"
atl_proxy_port: "{{ lookup('env', 'ATL_TOMCAT_PROXYPORT') }}"
atl_tomcat_port: "{{ lookup('env', 'ATL_TOMCAT_DEFAULTCONNECTORPORT') or '8080' }}"
atl_ssl_proxy: "{{ lookup('env', 'ATL_SSL_PROXY') or 'false' }}"
atl_tomcat_acceptcount: "{{ lookup('env', 'ATL_TOMCAT_ACCEPTCOUNT') or '10' }}"
atl_tomcat_connectiontimeout: "{{ lookup('env', 'ATL_TOMCAT_CONNECTIONTIMEOUT') or '20000' }}"
atl_tomcat_contextpath: "{{ lookup('env', 'ATL_TOMCAT_CONTEXTPATH') or '' }}"
atl_tomcat_enablelookups: "{{ lookup('env', 'ATL_TOMCAT_ENABLELOOKUPS') or 'false' }}"
atl_tomcat_maxthreads: "{{ lookup('env', 'ATL_TOMCAT_MAXTHREADS') or '200' }}"
atl_tomcat_minsparethreads: "{{ lookup('env', 'ATL_TOMCAT_MINSPARETHREADS') or '10' }}"
atl_tomcat_protocol: "{{ lookup('env', 'ATL_TOMCAT_PROTOCOL') or 'HTTP/1.1' }}"
atl_tomcat_redirectport: "{{ lookup('env', 'ATL_TOMCAT_REDIRECTPORT') or '' }}"
atl_tomcat_scheme: "{{ lookup('env', 'ATL_TOMCAT_SCHEME') or 'http' }}"
atl_tomcat_secure: "{{ lookup('env', 'ATL_TOMCAT_SECURE') or 'false' }}"
atl_fileserver_host: "{{ lookup('env', 'ATL_FILESERVER_IP') }}"
atl_elasticsearch_host: "{{ lookup('env', 'ATL_ELASTICSEARCH_HOST') }}"
atl_elasticsearch_endpoint: "http://{{ atl_elasticsearch_host }}"
atl_elasticsearch_s3_bucket: "{{ lookup('env', 'ATL_ELASTICSEARCH_S3_BUCKET') }}"
atl_nfs_server_device: "{{ lookup('env', 'ATL_NFS_SERVER_DEVICE') }}"
atl_nfs_disk_volume_type: "{{ lookup('env', 'ATL_NFS_DISK_VOLUME_TYPE') }}"
atl_nfs_disk_volume_iops: "{{ lookup('env', 'ATL_NFS_DISK_VOLUME_IOPS') }}"
atl_rds_instance_id: "{{ lookup('env', 'ATL_RDS_INSTANCE_ID') }}"
atl_rds_instance_class: "{{ lookup('env', 'ATL_RDS_INSTANCE_CLASS') }}"
atl_rds_multi_az: "{{ lookup('env', 'ATL_RDS_MULTI_AZ') }}"
atl_rds_subnet_group_name: "{{ lookup('env', 'ATL_RDS_SUBNET_GROUP_NAME') }}"
atl_rds_security_group: "{{ lookup('env', 'ATL_RDS_SECURITY_GROUP') }}"
atl_backup_manifest_url: "{{ lookup('env', 'ATL_BACKUP_MANIFEST_URL') }}"
atl_restore_required: "{{ atl_backup_manifest_url is defined and atl_backup_manifest_url != '' }}"
atl_bitbucket_license_key: "{{ lookup('env', 'ATL_BB_LICENSEKEY') }}"
atl_bitbucket_admin_password: "{{ lookup('env', 'ATL_BB_ADMIN_PASSWORD') }}"
atl_bitbucket_dataset_url: "{{ lookup('env', 'ATL_DATASET_URL') }}"
atl_bitbucket_baseurl: "{{ lookup('env', 'ATL_BB_BASEURL') }}"

View File

@@ -0,0 +1,4 @@
# Azure-specific overrides of the cross-role defaults in group_vars/all.yml.
# Azure Database for PostgreSQL requires SSL and 'user@host' login names.
atl_jdbc_query_params_for_engine:
  rds_postgres: "?sslmode=require"
# NOTE(review): `search()` is a regex match, so the unescaped dots also match
# any character — harmless here, but escape them if hostnames get creative.
atl_db_root_user_login: "{{ atl_db_root_user+'@'+atl_db_host if atl_db_host is search('postgres.database.azure.com') else atl_db_root_user }}"
atl_jdbc_user_login: "{{ atl_jdbc_user+'@'+atl_db_host if atl_db_host is search('postgres.database.azure.com') else atl_jdbc_user }}"

2
inv/az_node_local Normal file
View File

@@ -0,0 +1,2 @@
# Local-connection inventory: the playbook provisions the Azure node it
# is running on, so no SSH is involved.
[az_node_local]
localhost ansible_connection=local

View File

@@ -10,7 +10,7 @@
- name: Use EC2 instance ID for cluster node ID - name: Use EC2 instance ID for cluster node ID
set_fact: set_fact:
atl_cluster_node_id: "{{ ansible_ec2_instance_id }}" atl_cluster_node_id: "{{ ansible_ec2_instance_id }}-{{ ansible_ec2_local_ipv4 }}"
atl_local_ipv4: "{{ ansible_ec2_local_ipv4 | default(ansible_default_ipv4.address) }}" atl_local_ipv4: "{{ ansible_ec2_local_ipv4 | default(ansible_default_ipv4.address) }}"
- name: Generate CloudWatch config - name: Generate CloudWatch config

View File

@@ -0,0 +1,12 @@
---
# yamllint configuration: start from the default ruleset and relax the
# rules that fight with templated Ansible YAML.
extends: default
rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  line-length: disable
  truthy: disable
  trailing-spaces: false

View File

@@ -0,0 +1,5 @@
---
# Defaults for the az_app_insights_install role.
# Application Insights collectd-writer release to download.
app_insights_version: "{{ lookup('env', 'APPINSIGHTS_VER') or '2.3.1' }}"
# 'XXX' is a placeholder; the real key is injected via the
# APPINSIGHTS_INSTRUMENTATION_KEY environment variable.
app_insights_instrumentation_key: "{{ lookup('env', 'APPINSIGHTS_INSTRUMENTATION_KEY') or 'XXX' }}"
# Version of the JAXB API jar placed on collectd's Java plugin classpath
# (see tasks/main.yml "Install JAXB").
app_insights_jaxb_version: 2.3.1

View File

@@ -0,0 +1,3 @@
---
# Handlers for the az_app_insights_install role.
# Triggered (via notify) after collectd.conf is (re)templated.
- name: Restart Collectd
  # Block-style module args are preferred over 'key=value' strings.
  service:
    name: collectd
    state: restarted

View File

@@ -0,0 +1,14 @@
# Molecule managed
{% if item.registry is defined %}
FROM {{ item.registry.url }}/{{ item.image }}
{% else %}
FROM {{ item.image }}
{% endif %}
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \
elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \
elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi

View File

@@ -0,0 +1,28 @@
---
# Molecule scenario configuration for the az_app_insights_install role.
dependency:
  name: galaxy
driver:
  name: docker
lint:
  name: yamllint
platforms:
  - name: ubuntu_lts
    image: ubuntu:bionic
    groups:
      - az_node_local
provisioner:
  name: ansible
  options:
    skip-tags: runtime_pkg
  lint:
    name: ansible-lint
    options:
      # 701: "meta/main.yml should contain relevant galaxy_info" — not a
      # published role, so skip it.
      x: ["701"]
  inventory:
    links:
      group_vars: ../../../../group_vars/
verifier:
  name: testinfra
  lint:
    name: flake8
    # NOTE(review): assumed this disables flake8 linting of the test files
    # (not the verifier itself, whose tests exist) — confirm against the
    # original nesting.
    enabled: false

View File

@@ -0,0 +1,13 @@
---
# Molecule converge playbook: applies the roles under test with the same
# Crowd product vars the Azure node playbook uses.
- name: Converge
  hosts: all
  vars:
    atl_product_family: "crowd"
    atl_product_edition: "crowd"
    atl_product_user: "crowd"
    atl_download_format: "tarball"
  roles:
    - role: linux_common
    - role: product_common
    - role: product_install
    - role: az_app_insights_install

View File

@@ -0,0 +1,38 @@
"""Testinfra checks for the az_app_insights_install role.

Verifies collectd is installed and configured with the Application
Insights writer plugin and its JAXB dependency.
"""
import os
import pytest  # kept: used by the commented-out parametrize case below
import testinfra.utils.ansible_runner

# Run the checks against every host in the Molecule-generated inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


def test_collectd_installed(host):
    """The collectd package is installed."""
    package = host.package('collectd')
    assert package.is_installed


def test_collectd_file(host):
    """collectd.conf is templated with the (default) instrumentation key."""
    f = host.file('/etc/collectd/collectd.conf')
    assert f.exists
    # 'XXX' is the placeholder key from the role defaults.
    assert f.contains('InstrumentationKey "XXX"')
    assert f.mode == 0o0644


def test_jaxb_installed(host):
    """The JAXB API jar was placed on collectd's Java plugin classpath."""
    f = host.file('/usr/share/collectd/java/jaxb-api-2.3.1.jar')
    assert f.exists


# @pytest.mark.parametrize('filename', [
#     '/opt/atlassian/crowd/current/crowd-webapp/WEB-INF/lib/applicationinsights-core-2.3.1.jar',
#     '/opt/atlassian/crowd/current/crowd-webapp/WEB-INF/lib/applicationinsights-web-2.3.1.jar',
#     '/opt/atlassian/crowd/current/crowd-webapp/WEB-INF/lib/applicationinsights-collectd-2.3.1.jar'
# ])
# def test_app_insight_jars_downloaded(host, filename):
#     f = host.file(filename)
#     assert f.exists


def test_app_insights_collectd_file(host):
    """The Application Insights collectd writer jar was downloaded."""
    f = host.file('/usr/share/collectd/java/applicationinsights-collectd-2.3.1.jar')
    assert f.exists


# def test_applicationinsights_xml_installed(host):
#     f = host.file('/opt/atlassian/crowd/current/crowd-webapp/WEB-INF/classes/ApplicationInsights.xml')
#     assert f.exists

View File

@@ -0,0 +1,46 @@
---
# Tasks: install collectd and wire it up to Azure Application Insights.
- name: Install collectd for app insights
  package:
    name:
      - collectd

- name: Configure Collectd
  # Block-style module args are preferred over 'key=value' strings.
  template:
    src: collectd.conf.j2
    dest: /etc/collectd/collectd.conf
  notify:
    - Restart Collectd

- name: Change collectd.conf permissions
  file:
    path: /etc/collectd/collectd.conf
    mode: '+r'

# JAXB is placed on collectd's Java plugin classpath (see collectd.conf.j2).
- name: Install JAXB
  get_url:
    url: "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/{{ app_insights_jaxb_version }}/jaxb-api-{{ app_insights_jaxb_version }}.jar"
    dest: "/usr/share/collectd/java/"

# - name: Download App Insights jars
#   get_url:
#     url: "https://github.com/Microsoft/ApplicationInsights-Java/releases/download/{{ app_insights_version }}/{{ item }}"
#     dest: "{{ atl_product_installation_versioned }}/crowd-webapp/WEB-INF/lib/"
#   with_items:
#     - "applicationinsights-core-{{ app_insights_version }}.jar"
#     - "applicationinsights-web-{{ app_insights_version }}.jar"
#     - "applicationinsights-collectd-{{ app_insights_version }}.jar"
#
# - name: Copy applicationinsights-collectd to collectd
#   copy:
#     src: "{{ atl_product_installation_versioned }}/crowd-webapp/WEB-INF/lib/applicationinsights-collectd-{{ app_insights_version }}.jar"
#     dest: "/usr/share/collectd/java/applicationinsights-collectd-{{ app_insights_version }}.jar"
#     remote_src: true
#
# - name: Add ApplicationInsights.xml configuration
#   template: src=ApplicationInsights.xml.j2 dest={{ atl_product_installation_versioned }}/crowd-webapp/WEB-INF/classes/ApplicationInsights.xml

# NOTE(review): the with_items loop is redundant (dest is a fixed filename
# and the list has one entry) but preserved as-is.
- name: Download applicationinsights-collectd to collectd
  get_url:
    url: "https://github.com/Microsoft/ApplicationInsights-Java/releases/download/{{ app_insights_version }}/{{ item }}"
    dest: "/usr/share/collectd/java/applicationinsights-collectd-{{ app_insights_version }}.jar"
  with_items:
    - "applicationinsights-collectd-{{ app_insights_version }}.jar"

View File

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="utf-8"?>
<ApplicationInsights xmlns="http://schemas.microsoft.com/ApplicationInsights/2013/Settings" schemaVersion="2014-05-30">
  <!-- The key from the portal: -->
  <InstrumentationKey>{{ app_insights_instrumentation_key }}</InstrumentationKey>
  <!-- HTTP request component (not required for bare API) -->
  <TelemetryModules>
    <Add type="com.microsoft.applicationinsights.web.extensibility.modules.WebRequestTrackingTelemetryModule"/>
    <Add type="com.microsoft.applicationinsights.web.extensibility.modules.WebSessionTrackingTelemetryModule"/>
    <Add type="com.microsoft.applicationinsights.web.extensibility.modules.WebUserTrackingTelemetryModule"/>
  </TelemetryModules>
  <!-- Events correlation (not required for bare API) -->
  <!-- These initializers add context data to each event -->
  <TelemetryInitializers>
    <Add type="com.microsoft.applicationinsights.web.extensibility.initializers.WebOperationIdTelemetryInitializer"/>
    <Add type="com.microsoft.applicationinsights.web.extensibility.initializers.WebOperationNameTelemetryInitializer"/>
    <Add type="com.microsoft.applicationinsights.web.extensibility.initializers.WebSessionTelemetryInitializer"/>
    <Add type="com.microsoft.applicationinsights.web.extensibility.initializers.WebUserTelemetryInitializer"/>
    <Add type="com.microsoft.applicationinsights.web.extensibility.initializers.WebUserAgentTelemetryInitializer"/>
  </TelemetryInitializers>
</ApplicationInsights>

View File

@@ -0,0 +1,764 @@
FQDNLookup true
BaseDir "/var/lib/collectd"
PIDFile "/var/run/collectd.pid"
PluginDir "/usr/lib/collectd"
TypesDB "/usr/share/collectd/types.db"
Interval 10
Timeout 2
ReadThreads 5
WriteThreads 5
LoadPlugin logfile
<Plugin "logfile">
LogLevel "info"
File "/var/log/collectd.log"
Timestamp true
</Plugin>
LoadPlugin aggregation
<Plugin aggregation>
<Aggregation>
Plugin "cpu"
Type "cpu"
GroupBy "Host"
GroupBy "TypeInstance"
CalculateSum true
CalculateAverage true
</Aggregation>
</Plugin>
# Collect CPU statistics
LoadPlugin cpu
# Write collected statistics in CSV format
LoadPlugin csv
<Plugin csv>
DataDir "/var/lib/collectd/csv"
StoreRates false
</Plugin>
# Collect partition usage statistics
LoadPlugin df
<Plugin df>
Device "/dev/xvda1"
MountPoint "/media/atl"
IgnoreSelected false
ReportByDevice false
ReportReserved false
ReportInodes false
ValuesAbsolute true
ValuesPercentage false
</Plugin>
# Collect disk IO statistics
LoadPlugin disk
<Plugin disk>
Disk "/^[hs]d[a-f][0-9]?$/"
IgnoreSelected false
</Plugin>
# Collect network interface usage statistics
LoadPlugin interface
<Plugin interface>
Interface "eth0"
IgnoreSelected false
</Plugin>
# Collect system load statistics
LoadPlugin load
# Collect memory utilization statistics
LoadPlugin memory
<Plugin memory>
# Don't use absolute as each AWS ec2 instance is different, use percentage instead to get a uniform view
ValuesAbsolute false
ValuesPercentage true
</Plugin>
LoadPlugin swap
<Plugin "swap">
ReportByDevice false
ReportBytes true
</Plugin>
LoadPlugin java
<Plugin java>
JVMArg "-verbose:jni"
JVMArg "-Djava.class.path=/usr/share/collectd/java/jaxb-api-2.3.1.jar:/usr/share/collectd/java/applicationinsights-collectd-{{ app_insights_version }}.jar:/usr/share/collectd/java/collectd-api.jar:/usr/share/collectd/java/generic-jmx.jar"
# Enabling Application Insights plugin
LoadPlugin "com.microsoft.applicationinsights.collectd.ApplicationInsightsWriter"
# Configuring Application Insights plugin
<Plugin ApplicationInsightsWriter>
InstrumentationKey "{{ app_insights_instrumentation_key }}"
SDKLogger true
</Plugin>
LoadPlugin "org.collectd.java.GenericJMX"
<Plugin "GenericJMX">
# Confluence indexing statistics
<MBean "confluence/IndexingStatistics">
ObjectName "Confluence:name=IndexingStatistics"
InstancePrefix "confluence-IndexingStatistics"
<Value>
InstancePrefix "last_elapsed_ms"
Type "total_time_in_ms"
Table false
Attribute "LastElapsedMilliseconds"
</Value>
<Value>
InstancePrefix "task_queue_length"
Type "queue_length"
Table false
Attribute "TaskQueueLength"
</Value>
</MBean>
<MBean "confluence/MailTaskQueue">
ObjectName "Confluence:name=MailTaskQueue"
InstancePrefix "confluence-MailTaskQueue"
<Value>
Type "email_count"
InstancePrefix "retry_count"
Table false
Attribute "RetryCount"
</Value>
<Value>
Type "email_count"
InstancePrefix "tasks"
Table false
Attribute "TasksSize"
</Value>
<Value>
Type "email_count"
InstancePrefix "error_queue"
Table false
Attribute "ErrorQueueSize"
</Value>
</MBean>
<MBean "confluence/RequestMetrics">
ObjectName "Confluence:name=RequestMetrics"
InstancePrefix "confluence-RequestMetrics"
<Value>
Type "requests"
InstancePrefix "avg_exec_time_for_last_ten_requests"
Table false
Attribute "AverageExecutionTimeForLastTenRequests"
</Value>
<Value>
Type "requests"
InstancePrefix "current_num_requests_being_served"
Table false
Attribute "CurrentNumberOfRequestsBeingServed"
</Value>
<Value>
Type "requests"
InstancePrefix "error_count"
Table false
Attribute "ErrorCount"
</Value>
<Value>
Type "requests"
InstancePrefix "num_requests_in_last_ten_secs"
Table false
Attribute "NumberOfRequestsInLastTenSeconds"
</Value>
<Value>
Type "requests"
InstancePrefix "requests_began"
Table false
Attribute "RequestsBegan"
</Value>
<Value>
Type "requests"
InstancePrefix "requests_served"
Table false
Attribute "RequestsServed"
</Value>
</MBean>
<MBean "confluence/SystemInformation">
ObjectName "Confluence:name=SystemInformation"
InstancePrefix "confluence-SystemInformation"
<Value>
Type "latency"
InstancePrefix "database_example_latency"
Table false
Attribute "DatabaseExampleLatency"
</Value>
<Value>
Type "response_time"
InstancePrefix "start_time"
Table false
Attribute "StartTime"
</Value>
</MBean>
# Hazelcast statistic JMX configuration
<MBean "com.hazelcast/HazelcastInstance.OperationService.hazelcast.operationServicehazelcast">
ObjectName "com.hazelcast:instance=confluence,name=operationServiceconfluence,type=HazelcastInstance.OperationService"
InstancePrefix "confluence-Hazelcast-OperationService"
<Value>
Type "derive"
InstancePrefix "executedOperationCount"
Attribute "executedOperationCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "operationThreadCount"
Attribute "operationThreadCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "remoteOperationCount"
Attribute "remoteOperationCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "responseQueueSize"
Attribute "responseQueueSize"
</Value>
<Value>
Type "gauge"
InstancePrefix "runningOperationsCount"
Attribute "runningOperationsCount"
</Value>
</MBean>
<MBean "com.hazelcast/HazelcastInstance.EventService.hazelcast.hazelcast">
ObjectName "com.hazelcast:instance=confluence,name=confluence,type=HazelcastInstance.EventService"
InstancePrefix "confluence-Hazelcast-EventService"
<Value>
Type "gauge"
InstancePrefix "eventThreadCount"
Attribute "eventThreadCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "eventQueueCapacity"
Attribute "eventQueueCapacity"
</Value>
<Value>
Type "gauge"
InstancePrefix "eventQueueSize"
Attribute "eventQueueSize"
</Value>
</MBean>
<MBean "com.atlassian.confluence/HibernateStatistics">
ObjectName "Confluence:name=HibernateStatistics"
InstancePrefix "confluence-HibernateStatistics"
<Value>
Type "gauge"
InstancePrefix "CloseStatementCount"
Attribute "CloseStatementCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "CollectionFetchCount"
Attribute "CollectionFetchCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "CollectionLoadCount"
Attribute "CollectionLoadCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "CollectionRecreateCount"
Attribute "CollectionRecreateCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "CollectionRemoveCount"
Attribute "CollectionRemoveCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "CollectionUpdateCount"
Attribute "CollectionUpdateCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "ConnectCount"
Attribute "ConnectCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "EntityDeleteCount"
Attribute "EntityDeleteCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "EntityFetchCount"
Attribute "EntityFetchCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "EntityInsertCount"
Attribute "EntityInsertCount"
</Value>
<Value>
Type "gauge"
InstancePrefix "EntityLoadCount"
Attribute "EntityLoadCount"
</Value>
# fqname = org.hibernate.core/#EntityUpdateCount
<Value>
Type "gauge"
InstancePrefix "EntityUpdateCount"
Attribute "EntityUpdateCount"
</Value>
# fqname = org.hibernate.core/#FlushCount
<Value>
Type "gauge"
InstancePrefix "FlushCount"
Attribute "FlushCount"
</Value>
# fqname = org.hibernate.core/#NaturalIdCacheHitCount
<Value>
Type "gauge"
InstancePrefix "NaturalIdCacheHitCount"
Attribute "NaturalIdCacheHitCount"
</Value>
# fqname = org.hibernate.core/#NaturalIdCacheMissCount
<Value>
Type "gauge"
InstancePrefix "NaturalIdCacheMissCount"
Attribute "NaturalIdCacheMissCount"
</Value>
# fqname = org.hibernate.core/#NaturalIdCachePutCount
<Value>
Type "gauge"
InstancePrefix "NaturalIdCachePutCount"
Attribute "NaturalIdCachePutCount"
</Value>
# fqname = org.hibernate.core/#NaturalIdQueryExecutionCount
<Value>
Type "gauge"
InstancePrefix "NaturalIdQueryExecutionCount"
Attribute "NaturalIdQueryExecutionCount"
</Value>
# fqname = org.hibernate.core/#NaturalIdQueryExecutionMaxTime
<Value>
Type "gauge"
InstancePrefix "NaturalIdQueryExecutionMaxTime"
Attribute "NaturalIdQueryExecutionMaxTime"
</Value>
# fqname = org.hibernate.core/#OptimisticFailureCount
<Value>
Type "gauge"
InstancePrefix "OptimisticFailureCount"
Attribute "OptimisticFailureCount"
</Value>
# fqname = org.hibernate.core/#PrepareStatementCount
<Value>
Type "gauge"
InstancePrefix "PrepareStatementCount"
Attribute "PrepareStatementCount"
</Value>
# fqname = org.hibernate.core/#QueryCacheHitCount
<Value>
Type "gauge"
InstancePrefix "QueryCacheHitCount"
Attribute "QueryCacheHitCount"
</Value>
# fqname = org.hibernate.core/#QueryCacheMissCount
<Value>
Type "gauge"
InstancePrefix "QueryCacheMissCount"
Attribute "QueryCacheMissCount"
</Value>
# fqname = org.hibernate.core/#QueryCachePutCount
<Value>
Type "gauge"
InstancePrefix "QueryCachePutCount"
Attribute "QueryCachePutCount"
</Value>
# fqname = org.hibernate.core/#QueryExecutionCount
<Value>
Type "gauge"
InstancePrefix "QueryExecutionCount"
Attribute "QueryExecutionCount"
</Value>
# fqname = org.hibernate.core/#QueryExecutionMaxTime
<Value>
Type "gauge"
InstancePrefix "QueryExecutionMaxTime"
Attribute "QueryExecutionMaxTime"
</Value>
# fqname = org.hibernate.core/#SecondLevelCacheHitCount
<Value>
Type "gauge"
InstancePrefix "SecondLevelCacheHitCount"
Attribute "SecondLevelCacheHitCount"
</Value>
# fqname = org.hibernate.core/#SecondLevelCacheMissCount
<Value>
Type "gauge"
InstancePrefix "SecondLevelCacheMissCount"
Attribute "SecondLevelCacheMissCount"
</Value>
# fqname = org.hibernate.core/#SecondLevelCachePutCount
<Value>
Type "gauge"
InstancePrefix "SecondLevelCachePutCount"
Attribute "SecondLevelCachePutCount"
</Value>
# fqname = org.hibernate.core/#SessionCloseCount
<Value>
Type "gauge"
InstancePrefix "SessionCloseCount"
Attribute "SessionCloseCount"
</Value>
# fqname = org.hibernate.core/#TransactionCount
<Value>
Type "gauge"
InstancePrefix "TransactionCount"
Attribute "TransactionCount"
</Value>
# fqname = org.hibernate.core/#UpdateTimestampsCacheHitCount
<Value>
Type "gauge"
InstancePrefix "UpdateTimestampsCacheHitCount"
Attribute "UpdateTimestampsCacheHitCount"
</Value>
# fqname = org.hibernate.core/#UpdateTimestampsCacheMissCount
<Value>
Type "gauge"
InstancePrefix "UpdateTimestampsCacheMissCount"
Attribute "UpdateTimestampsCacheMissCount"
</Value>
# fqname = org.hibernate.core/#UpdateTimestampsCachePutCount
<Value>
Type "gauge"
InstancePrefix "UpdateTimestampsCachePutCount"
Attribute "UpdateTimestampsCachePutCount"
</Value>
</MBean>
# C3P0 Connection pool JMX Configuration
<MBean "com.mchange.v2.c3p0/PooledDataSource">
ObjectName "com.mchange.v2.c3p0:type=PooledDataSource,*"
InstancePrefix "confluence-c3p0-PooledDataSource"
<Value>
Type "gauge"
InstancePrefix "numBusyConnections"
Attribute "numBusyConnections"
</Value>
<Value>
Type "gauge"
InstancePrefix "numIdleConnections"
Attribute "numIdleConnections"
</Value>
<Value>
Type "gauge"
InstancePrefix "threadPoolNumIdleThreads"
Attribute "threadPoolNumIdleThreads"
</Value>
<Value>
Type "gauge"
InstancePrefix "numConnections"
Attribute "numConnections"
</Value>
</MBean>
# Apache Tomcat JMX configuration
<MBean "catalina/global_request_processor">
ObjectName "*:type=GlobalRequestProcessor,*"
InstancePrefix "catalina_request_processor-"
InstanceFrom "name"
<Value>
Type "io_octets"
InstancePrefix "global"
#InstanceFrom ""
Table false
Attribute "bytesReceived"
Attribute "bytesSent"
</Value>
<Value>
Type "total_requests"
InstancePrefix "global"
#InstanceFrom ""
Table false
Attribute "requestCount"
</Value>
<Value>
Type "total_time_in_ms"
InstancePrefix "global-processing"
#InstanceFrom ""
Table false
Attribute "processingTime"
</Value>
</MBean>
<MBean "catalina/detailed_request_processor">
ObjectName "*:type=RequestProcessor,*"
InstancePrefix "catalina_request_processor-"
InstanceFrom "worker"
<Value>
Type "io_octets"
#InstancePrefix ""
InstanceFrom "name"
Table false
Attribute "bytesReceived"
Attribute "bytesSent"
</Value>
<Value>
Type "total_requests"
#InstancePrefix ""
InstanceFrom "name"
Table false
Attribute "requestCount"
</Value>
<Value>
Type "total_time_in_ms"
InstancePrefix "processing-"
InstanceFrom "name"
Table false
Attribute "processingTime"
</Value>
</MBean>
<MBean "catalina/thread_pool">
ObjectName "*:type=ThreadPool,*"
InstancePrefix "request_processor-"
InstanceFrom "name"
<Value>
Type "threads"
InstancePrefix "total"
#InstanceFrom ""
Table false
Attribute "currentThreadCount"
</Value>
<Value>
Type "threads"
InstancePrefix "running"
#InstanceFrom ""
Table false
Attribute "currentThreadsBusy"
</Value>
</MBean>
# General JVM configuration
<MBean "memory">
ObjectName "java.lang:type=Memory,*"
InstancePrefix "java_memory"
#InstanceFrom "name"
<Value>
Type "memory"
InstancePrefix "heap-"
#InstanceFrom ""
Table true
Attribute "HeapMemoryUsage"
</Value>
<Value>
Type "memory"
InstancePrefix "nonheap-"
#InstanceFrom ""
Table true
Attribute "NonHeapMemoryUsage"
</Value>
</MBean>
<MBean "memory_pool">
ObjectName "java.lang:type=MemoryPool,*"
InstancePrefix "java_memory_pool-"
InstanceFrom "name"
<Value>
Type "memory"
#InstancePrefix ""
#InstanceFrom ""
Table true
Attribute "Usage"
</Value>
</MBean>
<MBean "classes">
ObjectName "java.lang:type=ClassLoading"
InstancePrefix "java"
#InstanceFrom ""
<Value>
Type "gauge"
InstancePrefix "loaded_classes"
#InstanceFrom ""
Table false
Attribute "LoadedClassCount"
</Value>
</MBean>
<MBean "compilation">
ObjectName "java.lang:type=Compilation"
InstancePrefix "java"
#InstanceFrom ""
<Value>
Type "total_time_in_ms"
InstancePrefix "compilation_time"
#InstanceFrom ""
Table false
Attribute "TotalCompilationTime"
</Value>
</MBean>
<MBean "garbage_collector">
ObjectName "java.lang:type=GarbageCollector,*"
InstancePrefix "java_gc-"
InstanceFrom "name"
<Value>
Type "invocations"
#InstancePrefix ""
#InstanceFrom ""
Table false
Attribute "CollectionCount"
</Value>
<Value>
Type "total_time_in_ms"
InstancePrefix "collection_time"
#InstanceFrom ""
Table false
Attribute "CollectionTime"
</Value>
</MBean>
<MBean "jvm_localhost_os">
ObjectName "java.lang:type=OperatingSystem"
# Open file descriptors
<Value>
Type "gauge"
InstancePrefix "os-open_fd_count"
Table false
Attribute "OpenFileDescriptorCount"
</Value>
# Max. allowed handles for user under which the JavaVM is running
<Value>
Type "gauge"
InstancePrefix "os-max_fd_count"
Table false
Attribute "MaxFileDescriptorCount"
</Value>
# Process time used by the JavaVM
<Value>
Type "counter"
InstancePrefix "os-process_cpu_time"
Table false
Attribute "ProcessCpuTime"
</Value>
</MBean>
<Connection>
#Host "localhost"
ServiceURL "service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi"
User "monitorRole"
# Confluence
Collect "confluence/IndexingStatistics"
Collect "confluence/MailTaskQueue"
Collect "confluence/RequestMetrics"
Collect "confluence/SystemInformation"
# Hazelcast
Collect "com.hazelcast/HazelcastInstance.OperationService.hazelcast.operationServicehazelcast"
Collect "com.hazelcast/HazelcastInstance.EventService.hazelcast.hazelcast"
# Hibernate
Collect "com.atlassian.confluence/HibernateStatistics"
# C3P0
Collect "com.mchange.v2.c3p0/PooledDataSource"
# Tomcat
Collect "catalina/global_request_processor"
Collect "catalina/detailed_request_processor"
Collect "catalina/thread_pool"
# JVM
Collect "memory"
Collect "memory_pool"
Collect "classes"
Collect "compilation"
Collect "garbage_collector"
Collect "jvm_localhost_os"
</Connection>
</Plugin>
</Plugin>
Include "/etc/collectd.d"

12
roles/az_common/.yamllint Normal file
View File

@@ -0,0 +1,12 @@
extends: default
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
line-length: disable
truthy: disable
trailing-spaces: false

View File

@@ -0,0 +1,7 @@
---
# Values taken from https://docs.microsoft.com/en-us/sql/connect/jdbc/connecting-to-an-azure-sql-database
# Windows values are milliseconds, Linux values are seconds
sysctl_config:
net.ipv4.tcp_keepalive_time: 30
net.ipv4.tcp_keepalive_intvl: 1
net.ipv4.tcp_keepalive_probes: 10

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,14 @@
# Molecule managed
{% if item.registry is defined %}
FROM {{ item.registry.url }}/{{ item.image }}
{% else %}
FROM {{ item.image }}
{% endif %}
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \
elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \
elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi

View File

@@ -0,0 +1,28 @@
---
dependency:
name: galaxy
driver:
name: docker
lint:
name: yamllint
platforms:
- name: ubuntu_lts
image: ubuntu:bionic
groups:
- az_node_local
provisioner:
name: ansible
options:
skip-tags: runtime_pkg
lint:
name: ansible-lint
options:
x: ["701"]
inventory:
links:
group_vars: ../../../../group_vars/
verifier:
name: testinfra
lint:
name: flake8
enabled: false

View File

@@ -0,0 +1,23 @@
---
- name: Converge
hosts: all
vars:
atl_product_family: "confluence"
atl_product_edition: "confluence"
atl_product_user: "confluence"
atl_product_version: "latest"
atl_db_engine: "postgres"
atl_db_host: "postgres-db.ap-southeast-2.rds.amazonaws.com"
atl_jdbc_db_name: "confluence"
atl_jdbc_user: 'confluence'
atl_jdbc_password: 'molecule_password'
atl_jvm_heap: 'PLACEHOLDER'
atl_cluster_node_id: 'FAKEID'
atl_autologin_cookie_age: "COOKIEAGE"
atl_local_ipv4: "1.1.1.1"
atl_tomcat_scheme: "http"
atl_proxy_name: "localhost"
atl_proxy_port: "80"
roles:
- role: az_common

View File

@@ -0,0 +1,16 @@
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('pkg', [
    'netcat',
    'rsync',
    'cifs-utils'
])
def test_pkg(host, pkg):
    """Assert that each required support package is installed on the host.

    host: testinfra host fixture targeting the Molecule-provisioned node.
    pkg: package name supplied by the parametrize list above — one test
         case per package installed by the az_common role.
    """
    package = host.package(pkg)
    assert package.is_installed

View File

@@ -0,0 +1,33 @@
---
- name: Install additional support packages
package:
name:
- netcat
- rsync
- cifs-utils
- name: Fetch VM ID
command: "dmidecode -s system-uuid"
register: az_vm_id
tags:
- runtime_pkg
changed_when: true
- name: Use VM ID for cluster node ID
set_fact:
atl_cluster_node_id: "{{ az_vm_id.stdout }}"
tags:
- runtime_pkg
- name: Tune TCP Keep Alive
sysctl:
name: '{{ item.key }}'
value: '{{ item.value }}'
reload: yes
ignoreerrors: yes
sysctl_file: /etc/sysctl.conf
sysctl_set: yes
with_dict: '{{ sysctl_config }}'
tags:
- runtime_pkg

View File

@@ -0,0 +1,9 @@
---
az_storage_account: "{{ lookup('env', 'AZ_STORAGE_ACCOUNT') }}"
az_storage_key: "{{ lookup('env', 'AZ_STORAGE_KEY') }}"
efs_target: "//{{ az_storage_account }}.file.core.windows.net"
efs_type: "cifs"
efs_src_dir: "{{ lookup('env', 'ATL_CROWD_SHARED_HOME_NAME') or 'crowd-home' }}"
efs_mount_options: "vers=3.0,uid=0,gid=0,dir_mode=0777,file_mode=0777,username={{ az_storage_account }},password={{ az_storage_key }}"

View File

@@ -0,0 +1,3 @@
---
dependencies:
- az_common

View File

@@ -0,0 +1,15 @@
---
- name: Create mountpoint
file:
state: directory
path: "{{ atl_shared_mountpoint }}"
mode: "0755"
- name: Enable mountpoint in fstab
mount:
path: "{{ atl_shared_mountpoint }}"
src: "{{ efs_target }}/{{ efs_src_dir }}"
fstype: "{{ efs_type }}"
opts: "{{ efs_mount_options }}"
state: mounted

View File

@@ -51,7 +51,7 @@
lineinfile: lineinfile:
path: "{{ atl_product_installation_versioned }}/apache-tomcat/bin/setenv.sh" path: "{{ atl_product_installation_versioned }}/apache-tomcat/bin/setenv.sh"
insertafter: "EOF" insertafter: "EOF"
line: 'export JAVA_HOME=/usr/lib/jvm/jre-{{ java_version }}-openjdk' line: "export JAVA_HOME={{ java_home | default('/usr/lib/jvm/jre-' ~ java_version ~ '-openjdk') }}"
- name: Create application directories - name: Create application directories
file: file:
@@ -105,7 +105,7 @@
- name: Assert baseurl to same as atl_proxy_name - name: Assert baseurl to same as atl_proxy_name
postgresql_query: postgresql_query:
login_host: "{{ atl_db_host }}" login_host: "{{ atl_db_host }}"
login_user: "{{ atl_jdbc_user }}" login_user: "{{ atl_jdbc_user_login }}"
login_password: "{{ atl_jdbc_password }}" login_password: "{{ atl_jdbc_password }}"
db: "{{ atl_jdbc_db_name }}" db: "{{ atl_jdbc_db_name }}"
query: > query: >

View File

@@ -2,4 +2,4 @@
CATALINA_OPTS="${CATALINA_OPTS} -{{ item }}" CATALINA_OPTS="${CATALINA_OPTS} -{{ item }}"
{% endfor %} {% endfor %}
CATALINA_OPTS="${CATALINA_OPTS} -Dcluster.node.name={{ ansible_ec2_instance_id }}-{{ ansible_ec2_local_ipv4 }}" CATALINA_OPTS="${CATALINA_OPTS} -Dcluster.node.name={{ atl_cluster_node_id }}"

View File

@@ -4,7 +4,10 @@
shutdown="SHUTDOWN"> shutdown="SHUTDOWN">
<Service name="Catalina"> <Service name="Catalina">
<!-- Add the SSL properties when the load balancer is configured
secure="{{ atl_tomcat_secure }}"
scheme="{{ atl_tomcat_scheme }}"
-->
<Connector acceptCount="{{ atl_tomcat_acceptcount }}" <Connector acceptCount="{{ atl_tomcat_acceptcount }}"
connectionTimeout="{{ atl_tomcat_connectiontimeout }}" connectionTimeout="{{ atl_tomcat_connectiontimeout }}"
disableUploadTimeout="true" disableUploadTimeout="true"
@@ -19,8 +22,6 @@
compression="on" compression="on"
sendReasonPhrase="true" sendReasonPhrase="true"
compressableMimeType="text/html,text/xml,application/xml,text/plain,text/css,application/json,application/javascript,application/x-javascript" compressableMimeType="text/html,text/xml,application/xml,text/plain,text/css,application/json,application/javascript,application/x-javascript"
secure="{{ atl_tomcat_secure }}"
scheme="{{ atl_tomcat_scheme }}"
proxyName="{{ atl_proxy_name }}" proxyName="{{ atl_proxy_name }}"
proxyPort="{{ atl_proxy_port }}" proxyPort="{{ atl_proxy_port }}"
protocol="{{ atl_tomcat_protocol }}"> protocol="{{ atl_tomcat_protocol }}">

View File

@@ -1,19 +1,20 @@
---
- name: Create application DB user - name: Create application DB user
postgresql_user: postgresql_user:
db: "{{ atl_db_root_db_name }}"
login_host: "{{ atl_db_host }}" login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}" login_user: "{{ atl_db_root_user_login }}"
login_password: "{{ atl_db_root_password }}" login_password: "{{ atl_db_root_password }}"
login_db: "{{ atl_db_root_db_name }}"
port: "{{ atl_db_port }}" port: "{{ atl_db_port }}"
name: "{{ atl_jdbc_user }}" name: "{{ atl_jdbc_user }}"
password: "{{ atl_jdbc_password }}" password: "{{ atl_jdbc_password }}"
expires: 'infinity' expires: 'infinity'
ssl_mode: 'require'
- name: Collect dbcluster db_names - name: Collect dbcluster db_names
postgresql_query: postgresql_query:
login_host: "{{ atl_db_host }}" login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}" login_user: "{{ atl_db_root_user_login }}"
login_password: "{{ atl_db_root_password }}" login_password: "{{ atl_db_root_password }}"
db: "{{ atl_db_root_db_name }}" db: "{{ atl_db_root_db_name }}"
query: "SELECT datname FROM pg_database;" query: "SELECT datname FROM pg_database;"
@@ -24,7 +25,7 @@
- name: Update root privs for new user - name: Update root privs for new user
postgresql_privs: postgresql_privs:
login_host: "{{ atl_db_host }}" login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}" login_user: "{{ atl_db_root_user_login }}"
login_password: "{{ atl_db_root_password }}" login_password: "{{ atl_db_root_password }}"
database: postgres database: postgres
roles: "{{ atl_db_root_user }}" roles: "{{ atl_db_root_user }}"
@@ -35,7 +36,7 @@
- name: Create new application database - name: Create new application database
postgresql_db: postgresql_db:
login_host: "{{ atl_db_host }}" login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}" login_user: "{{ atl_db_root_user_login }}"
login_password: "{{ atl_db_root_password }}" login_password: "{{ atl_db_root_password }}"
port: "{{ atl_db_port }}" port: "{{ atl_db_port }}"
name: "{{ atl_jdbc_db_name }}" name: "{{ atl_jdbc_db_name }}"
@@ -53,7 +54,7 @@
- name: Assert ownership of public schema - name: Assert ownership of public schema
postgresql_query: postgresql_query:
login_host: "{{ atl_db_host }}" login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}" login_user: "{{ atl_db_root_user_login }}"
login_password: "{{ atl_db_root_password }}" login_password: "{{ atl_db_root_password }}"
db: "{{ atl_jdbc_db_name }}" db: "{{ atl_jdbc_db_name }}"
query: "ALTER SCHEMA public OWNER to {{ atl_db_root_user }};" query: "ALTER SCHEMA public OWNER to {{ atl_db_root_user }};"
@@ -61,7 +62,7 @@
- name: Grant privs to root user on public schema - name: Grant privs to root user on public schema
postgresql_query: postgresql_query:
login_host: "{{ atl_db_host }}" login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}" login_user: "{{ atl_db_root_user_login }}"
login_password: "{{ atl_db_root_password }}" login_password: "{{ atl_db_root_password }}"
db: "{{ atl_jdbc_db_name }}" db: "{{ atl_jdbc_db_name }}"
query: "GRANT ALL ON SCHEMA public TO {{ atl_db_root_user }};" query: "GRANT ALL ON SCHEMA public TO {{ atl_db_root_user }};"
@@ -69,7 +70,7 @@
- name: Grant privs to application user on public schema - name: Grant privs to application user on public schema
postgresql_query: postgresql_query:
login_host: "{{ atl_db_host }}" login_host: "{{ atl_db_host }}"
login_user: "{{ atl_db_root_user }}" login_user: "{{ atl_db_root_user_login }}"
login_password: "{{ atl_db_root_password }}" login_password: "{{ atl_db_root_password }}"
db: "{{ atl_jdbc_db_name }}" db: "{{ atl_jdbc_db_name }}"
query: "GRANT ALL ON SCHEMA public TO {{ atl_jdbc_user }};" query: "GRANT ALL ON SCHEMA public TO {{ atl_jdbc_user }};"

View File

@@ -14,6 +14,7 @@
- unzip - unzip
- fontconfig - fontconfig
- python-psycopg2 - python-psycopg2
- python-lxml
- name: Create product group - name: Create product group
group: group:

View File

@@ -8,6 +8,11 @@
tags: tags:
- runtime_pkg - runtime_pkg
- name: Set java_home variable
set_fact:
java_home: "/usr/lib/jvm/java-{{ java_version }}-openjdk-amd64"
when: atl_use_system_jdk
- name: Install other base packages on Ubuntu - name: Install other base packages on Ubuntu
package: package:
name: name:

View File

@@ -10,10 +10,14 @@ platforms:
image: amazonlinux:2 image: amazonlinux:2
groups: groups:
- aws_node_local - aws_node_local
- name: ubuntu_lts - name: ubuntu_lts_aws
image: ubuntu:bionic image: ubuntu:bionic
groups: groups:
- aws_node_local - aws_node_local
- name: ubuntu_lts_azure
image: ubuntu:bionic
groups:
- az_node_local
provisioner: provisioner:
name: ansible name: ansible
options: options: